/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2009-2015 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/bsg-lib.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_bsg.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

struct lpfc_bsg_event {
	struct list_head node;
	struct kref kref;
	wait_queue_head_t wq;

	/* Event type and waiter identifiers */
	uint32_t type_mask;
	uint32_t req_id;
	uint32_t reg_id;

	/* next two flags are here for the auto-delete logic */
	unsigned long wait_time_stamp;
	int waiting;

	/* seen and not seen events */
	struct list_head events_to_get;
	struct list_head events_to_see;

	/* driver data associated with the job */
	void *dd_data;
};

struct lpfc_bsg_iocb {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_dmabuf *rmp;
	struct lpfc_nodelist *ndlp;
};

struct lpfc_bsg_mbox {
	LPFC_MBOXQ_t *pmboxq;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
	uint8_t *ext; /* extended mailbox data */
	uint32_t mbOffset; /* from app */
	uint32_t inExtWLen; /* from app */
	uint32_t outExtWLen; /* from app */
};

#define MENLO_DID 0x0000FC0E

struct lpfc_bsg_menlo {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_dmabuf *rmp;
};

#define TYPE_EVT	1
#define TYPE_IOCB	2
#define TYPE_MBOX	3
#define TYPE_MENLO	4
struct bsg_job_data {
	uint32_t type;
	struct bsg_job *set_job; /* job waiting for this iocb to finish */
	union {
		struct lpfc_bsg_event *evt;
		struct lpfc_bsg_iocb iocb;
		struct lpfc_bsg_mbox mbox;
		struct lpfc_bsg_menlo menlo;
	} context_un;
};

struct event_data {
	struct list_head node;
	uint32_t type;
	uint32_t immed_dat;
	void *data;
	uint32_t len;
};

#define BUF_SZ_4K 4096
#define SLI_CT_ELX_LOOPBACK 0x10

enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,
	ELX_LOOPBACK_DATA,
};
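
/*
 * ELX_LOOPBACK_HEADER_SZ is the number of bytes in a CT request that
 * precede the payload union 'un' (the classic offsetof-via-NULL-pointer
 * idiom, equivalent to offsetof(struct lpfc_sli_ct_request, un)).
 */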
#define ELX_LOOPBACK_HEADER_SZ \
	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)

struct lpfc_dmabufext {
	struct lpfc_dmabuf dma;
	uint32_t size;
	uint32_t flag;
};

static void
lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
{
	struct lpfc_dmabuf *mlast, *next_mlast;

	if (mlist) {
		list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
					 list) {
			lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
			list_del(&mlast->list);
			kfree(mlast);
		}
		lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
		kfree(mlist);
	}
	return;
}

static struct lpfc_dmabuf *
lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
		       int outbound_buffers, struct ulp_bde64 *bpl,
		       int *bpl_entries)
{
	struct lpfc_dmabuf *mlist = NULL;
	struct lpfc_dmabuf *mp;
	unsigned int bytes_left = size;

	/* Verify we can support the size specified */
	if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
		return NULL;

	/* Determine the number of dma buffers to allocate */
	*bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
			size/LPFC_BPL_SIZE);

	/* Allocate dma buffer and place in BPL passed */
	while (bytes_left) {
		/* Allocate dma buffer  */
		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!mp) {
			if (mlist)
				lpfc_free_bsg_buffers(phba, mlist);
			return NULL;
		}

		INIT_LIST_HEAD(&mp->list);
		mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));

		if (!mp->virt) {
			kfree(mp);
			if (mlist)
				lpfc_free_bsg_buffers(phba, mlist);
			return NULL;
		}

		/* Queue it to a linked list */
		if (!mlist)
			mlist = mp;
		else
			list_add_tail(&mp->list, &mlist->list);

		/* Add buffer to buffer pointer list */
		if (outbound_buffers)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
		bpl->tus.f.bdeSize = (uint16_t)
			(bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
			 bytes_left);
		bytes_left -= bpl->tus.f.bdeSize;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
	}
	return mlist;
}
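
/**
 * lpfc_bsg_copy_data - copy data between a bsg sg list and driver dma buffers
 * @dma_buffers: Pointer to the head of the driver dma buffer list.
 * @bsg_buffers: Pointer to the bsg request or reply payload buffer.
 * @bytes_to_transfer: Number of bytes to copy.
 * @to_buffers: Non-zero to copy from the sg list into the dma buffers,
 *              zero to copy from the dma buffers back to the sg list.
 *
 * Returns the number of bytes actually copied.
 **/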
static unsigned int
lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
		   struct bsg_buffer *bsg_buffers,
		   unsigned int bytes_to_transfer, int to_buffers)
{

	struct lpfc_dmabuf *mp;
	unsigned int transfer_bytes, bytes_copied = 0;
	unsigned int sg_offset, dma_offset;
	unsigned char *dma_address, *sg_address;
	LIST_HEAD(temp_list);
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;
	bool sg_valid;

	list_splice_init(&dma_buffers->list, &temp_list);
	list_add(&dma_buffers->list, &temp_list);
	sg_offset = 0;
	if (to_buffers)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;
	sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
		       sg_flags);
	local_irq_save(flags);
	sg_valid = sg_miter_next(&miter);
	list_for_each_entry(mp, &temp_list, list) {
		dma_offset = 0;
		while (bytes_to_transfer && sg_valid &&
		       (dma_offset < LPFC_BPL_SIZE)) {
			dma_address = mp->virt + dma_offset;
			if (sg_offset) {
				/* Continue previous partial transfer of sg */
				sg_address = miter.addr + sg_offset;
				transfer_bytes = miter.length - sg_offset;
			} else {
				sg_address = miter.addr;
				transfer_bytes = miter.length;
			}
			if (bytes_to_transfer < transfer_bytes)
				transfer_bytes = bytes_to_transfer;
			if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
				transfer_bytes = LPFC_BPL_SIZE - dma_offset;
			if (to_buffers)
				memcpy(dma_address, sg_address, transfer_bytes);
			else
				memcpy(sg_address, dma_address, transfer_bytes);
			dma_offset += transfer_bytes;
			sg_offset += transfer_bytes;
			bytes_to_transfer -= transfer_bytes;
			bytes_copied += transfer_bytes;
			if (sg_offset >= miter.length) {
				sg_offset = 0;
				sg_valid = sg_miter_next(&miter);
			}
		}
	}
	sg_miter_stop(&miter);
	local_irq_restore(flags);
	list_del_init(&dma_buffers->list);
	list_splice(&temp_list, &dma_buffers->list);
	return bytes_copied;
}

/**
 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_bsg_send_mgmt_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
			   struct lpfc_iocbq *cmdiocbq,
			   struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp, *rmp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_bsg_iocb *iocb;
	unsigned long flags;
	unsigned int rsp_size;
	int rc = 0;

	dd_data = cmdiocbq->context1;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	iocb = &dd_data->context_un.iocb;
	ndlp = iocb->ndlp;
	rmp = iocb->rmp;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	rsp = &rspiocbq->iocb;

	/* Copy the completed data or set the error status */

	if (job) {
		if (rsp->ulpStatus) {
			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			rsp_size = rsp->un.genreq64.bdl.bdeSize;
			bsg_reply->reply_payload_rcv_len =
				lpfc_bsg_copy_data(rmp, &job->reply_payload,
						   rsp_size, 0);
		}
	}

	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_free_bsg_buffers(phba, rmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}

/**
 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct ulp_bde64 *bpl = NULL;
	uint32_t timeout;
	struct lpfc_iocbq *cmdiocbq = NULL;
	IOCB_t *cmd;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;
	int rc = 0;
	int iocb_stat;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2733 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto no_ndlp;
	}

	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
		rc = -ENODEV;
		goto free_ndlp;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_ndlp;
	}

	cmd = &cmdiocbq->iocb;

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);

	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &request_nseg);
	if (!cmp) {
		rc = -ENOMEM;
		goto free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	bpl += request_nseg;
	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
				     bpl, &reply_nseg);
	if (!rmp) {
		rc = -ENOMEM;
		goto free_cmp;
	}

	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = ndlp->nlp_rpi;
	if (phba->sli_rev == LPFC_SLI_REV4)
		cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	cmd->ulpOwner = OWN_CHIP;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->context3 = bmp;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	timeout = phba->fc_ratov * 2;
	cmd->ulpTimeout = timeout;

	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = cmp;
	cmdiocbq->context3 = bmp;
	cmdiocbq->context_un.ndlp = ndlp;
	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = rmp;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto free_rmp;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

	if (iocb_stat == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed yet */
		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	} else if (iocb_stat == IOCB_BUSY) {
		rc = -EAGAIN;
	} else {
		rc = -EIO;
	}

	/* iocb failed so cleanup */
	job->dd_data = NULL;
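
	/* Error unwind: each label below releases one resource acquired
	 * earlier in this function, in reverse order of allocation. */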
free_rmp:
	lpfc_free_bsg_buffers(phba, rmp);
free_cmp:
	lpfc_free_bsg_buffers(phba, cmp);
free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_ndlp:
	lpfc_nlp_put(ndlp);
no_ndlp:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_bsg_rport_els function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
		       struct lpfc_iocbq *cmdiocbq,
		       struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
	struct fc_bsg_ctels_reply *els_reply;
	uint8_t *rjt_data;
	unsigned long flags;
	unsigned int rsp_size;
	int rc = 0;

	dd_data = cmdiocbq->context1;
	ndlp = dd_data->context_un.iocb.ndlp;
	cmdiocbq->context1 = ndlp;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	rsp = &rspiocbq->iocb;
	pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
	prsp = (struct lpfc_dmabuf *)pcmd->list.next;

	/* Copy the completed job data or determine the job status if job is
	 * still active
	 */

	if (job) {
		if (rsp->ulpStatus == IOSTAT_SUCCESS) {
			rsp_size = rsp->un.elsreq64.bdl.bdeSize;
			bsg_reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->reply_payload.sg_list,
						    job->reply_payload.sg_cnt,
						    prsp->virt,
						    rsp_size);
		} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
			bsg_reply->reply_payload_rcv_len =
				sizeof(struct fc_bsg_ctels_reply);
			/* LS_RJT data returned in word 4 */
			rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
			els_reply = &bsg_reply->reply_data.ctels_reply;
			els_reply->status = FC_CTELS_STATUS_REJECT;
			els_reply->rjt_data.action = rjt_data[3];
			els_reply->rjt_data.reason_code = rjt_data[2];
			els_reply->rjt_data.reason_explanation = rjt_data[1];
			els_reply->rjt_data.vendor_unique = rjt_data[0];
		} else {
			rc = -EIO;
		}
	}

	lpfc_nlp_put(ndlp);
	lpfc_els_free_iocb(phba, cmdiocbq);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}

/**
 * lpfc_bsg_rport_els - send an ELS command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_rport_els(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint32_t elscmd;
	uint32_t cmdsize;
	struct lpfc_iocbq *cmdiocbq;
	uint16_t rpi = 0;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	/* verify the els command is not greater than the
	 * maximum ELS transfer size.
	 */

	if (job->request_payload.payload_len > FCELSSIZE) {
		rc = -EINVAL;
		goto no_dd_data;
	}

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2735 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	elscmd = bsg_request->rqst_data.r_els.els_code;
	cmdsize = job->request_payload.payload_len;

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto free_dd_data;
	}

	/* We will use the DMA buffers allocated by lpfc_prep_els_iocb for the
	 * command and response so that, if the job times out and the request
	 * is freed, we do not DMA into memory that is no longer allocated to
	 * the request.
	 */

	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
				      ndlp->nlp_DID, elscmd);
	if (!cmdiocbq) {
		rc = -EIO;
		goto release_ndlp;
	}

	rpi = ndlp->nlp_rpi;

	/* Transfer the request payload to allocated command dma buffer */

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
			  cmdsize);

	if (phba->sli_rev == LPFC_SLI_REV4)
		cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
	else
		cmdiocbq->iocb.ulpContext = rpi;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context_un.ndlp = ndlp;
	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto linkdown_err;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

	if (rc == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed/released */
		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	} else if (rc == IOCB_BUSY) {
		rc = -EAGAIN;
	} else {
		rc = -EIO;
	}

	/* iocb failed so cleanup */
	job->dd_data = NULL;

linkdown_err:
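	/* lpfc_els_free_iocb() treats context1 as the ndlp pointer and drops
	 * its node reference, so restore the ndlp here (context1 was
	 * overloaded to carry dd_data while the command was outstanding).
	 */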
	cmdiocbq->context1 = ndlp;
	lpfc_els_free_iocb(phba, cmdiocbq);

release_ndlp:
	lpfc_nlp_put(ndlp);

free_dd_data:
	kfree(dd_data);

no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_event_free - frees an allocated event structure
 * @kref: Pointer to a kref.
 *
 * Called from kref_put. Back cast the kref into an event structure address.
 * Free any events to get, delete associated nodes, free any events to see,
 * free any data then free the event itself.
 **/
static void
lpfc_bsg_event_free(struct kref *kref)
{
	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
						  kref);
	struct event_data *ed;

	list_del(&evt->node);

	while (!list_empty(&evt->events_to_get)) {
		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	while (!list_empty(&evt->events_to_see)) {
		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	kfree(evt->dd_data);
	kfree(evt);
}

/**
 * lpfc_bsg_event_ref - increments the kref for an event
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
	kref_get(&evt->kref);
}

/**
 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
	kref_put(&evt->kref, lpfc_bsg_event_free);
}

/**
 * lpfc_bsg_event_new - allocate and initialize an event structure
 * @ev_mask: Mask of events.
 * @ev_reg_id: Event reg id.
 * @ev_req_id: Event request id.
 **/
static struct lpfc_bsg_event *
lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
{
	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);

	if (!evt)
		return NULL;

	INIT_LIST_HEAD(&evt->events_to_get);
	INIT_LIST_HEAD(&evt->events_to_see);
	evt->type_mask = ev_mask;
	evt->req_id = ev_req_id;
	evt->reg_id = ev_reg_id;
	evt->wait_time_stamp = jiffies;
	evt->dd_data = NULL;
	init_waitqueue_head(&evt->wq);
	kref_init(&evt->kref);
	return evt;
}

/**
 * diag_cmd_data_free - Frees an lpfc dma buffer extension
 * @phba: Pointer to HBA context object.
 * @mlist: Pointer to an lpfc dma buffer extension.
 **/
static int
diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
{
	struct lpfc_dmabufext *mlast;
	struct pci_dev *pcidev;
	struct list_head head, *curr, *next;

	if ((!mlist) || (!lpfc_is_link_up(phba) &&
			 (phba->link_flag & LS_LOOPBACK_MODE))) {
		return 0;
	}

	pcidev = phba->pcidev;
	list_add_tail(&head, &mlist->dma.list);

	list_for_each_safe(curr, next, &head) {
		mlast = list_entry(curr, struct lpfc_dmabufext, dma.list);
		if (mlast->dma.virt)
			dma_free_coherent(&pcidev->dev,
					  mlast->size,
					  mlast->dma.virt,
					  mlast->dma.phys);
		kfree(mlast);
	}
	return 0;
}

/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the SLI ring the command was received on.
 * @piocbq: Pointer to the unsolicited iocb.
 *
 * This function is called when an unsolicited CT command is received.
 It
 * forwards the event to any processes registered to receive CT events.
 **/
int
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocbq)
{
	uint32_t evt_req_id = 0;
	uint32_t cmd;
	struct lpfc_dmabuf *dmabuf = NULL;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	struct lpfc_iocbq *iocbq;
	size_t offset = 0;
	struct list_head head;
	struct ulp_bde64 *bde;
	dma_addr_t dma_addr;
	int i;
	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
	struct lpfc_hbq_entry *hbqe;
	struct lpfc_sli_ct_request *ct_req;
	struct bsg_job *job = NULL;
	struct fc_bsg_reply *bsg_reply;
	struct bsg_job_data *dd_data = NULL;
	unsigned long flags;
	int size = 0;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	if (piocbq->iocb.ulpBdeCount == 0 ||
	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
		goto error_ct_unsol_exit;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
		goto error_ct_unsol_exit;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		dmabuf = bdeBuf1;
	else {
		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
				    piocbq->iocb.un.cont64[0].addrLow);
		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
	}
	if (dmabuf == NULL)
		goto error_ct_unsol_exit;
	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
	evt_req_id = ct_req->FsType;
	cmd = ct_req->CommandResponse.bits.CmdRsp;
	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
		    evt->req_id != evt_req_id)
			continue;

		lpfc_bsg_event_ref(evt);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
		if (evt_dat == NULL) {
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2614 Memory allocation failed for "
					"CT event\n");
			break;
		}

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* take accumulated byte count from the last iocbq */
			iocbq = list_entry(head.prev, typeof(*iocbq), list);
			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
		} else {
			list_for_each_entry(iocbq, &head, list) {
				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
					evt_dat->len +=
					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
			}
		}

		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
		if (evt_dat->data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2615 Memory allocation failed for "
					"CT event data, size %d\n",
					evt_dat->len);
			kfree(evt_dat);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			goto error_ct_unsol_exit;
		}
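
		/* Walk every iocb in the accumulated sequence and copy the
		 * payload of each BDE into the event data buffer.
		 */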
		list_for_each_entry(iocbq, &head, list) {
			size = 0;
			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
				bdeBuf1 = iocbq->context2;
				bdeBuf2 = iocbq->context3;
			}
			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
				if (phba->sli3_options &
				    LPFC_SLI3_HBQ_ENABLED) {
					if (i == 0) {
						hbqe = (struct lpfc_hbq_entry *)
						  &iocbq->iocb.un.ulpWord[0];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf1;
					} else if (i == 1) {
						hbqe = (struct lpfc_hbq_entry *)
							&iocbq->iocb.unsli3.
							sli3Words[4];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf2;
					}
					if ((offset + size) > evt_dat->len)
						size = evt_dat->len - offset;
				} else {
					size = iocbq->iocb.un.cont64[i].
						tus.f.bdeSize;
					bde = &iocbq->iocb.un.cont64[i];
					dma_addr = getPaddr(bde->addrHigh,
							    bde->addrLow);
					dmabuf = lpfc_sli_ringpostbuf_get(phba,
							pring, dma_addr);
				}
				if (!dmabuf) {
					lpfc_printf_log(phba, KERN_ERR,
						LOG_LIBDFC, "2616 No dmabuf "
						"found for iocbq 0x%p\n",
						iocbq);
					kfree(evt_dat->data);
					kfree(evt_dat);
					spin_lock_irqsave(&phba->ct_ev_lock,
							  flags);
					lpfc_bsg_event_unref(evt);
					spin_unlock_irqrestore(
						&phba->ct_ev_lock, flags);
					goto error_ct_unsol_exit;
				}
				memcpy((char *)(evt_dat->data) + offset,
				       dmabuf->virt, size);
				offset += size;
				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
				    !(phba->sli3_options &
				      LPFC_SLI3_HBQ_ENABLED)) {
					lpfc_sli_ringpostbuf_put(phba, pring,
								 dmabuf);
				} else {
					switch (cmd) {
					case ELX_LOOPBACK_DATA:
						if (phba->sli_rev <
						    LPFC_SLI_REV4)
							diag_cmd_data_free(phba,
							(struct lpfc_dmabufext
							 *)dmabuf);
						break;
					case ELX_LOOPBACK_XRI_SETUP:
						if ((phba->sli_rev ==
							LPFC_SLI_REV2) ||
						    (phba->sli3_options &
						     LPFC_SLI3_HBQ_ENABLED
						     )) {
							lpfc_in_buf_free(phba,
									dmabuf);
						} else {
							lpfc_post_buffer(phba,
									 pring,
									 1);
						}
						break;
					default:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_post_buffer(phba,
									 pring,
									 1);
						break;
					}
				}
			}
		}

		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		if (phba->sli_rev == LPFC_SLI_REV4) {
			evt_dat->immed_dat = phba->ctx_idx;
			phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
			/* Provide warning for over-run of the ct_ctx array */
			if (phba->ct_ctx[evt_dat->immed_dat].valid ==
			    UNSOL_VALID)
				lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
						"2717 CT context array entry "
						"[%d] over-run: oxid:x%x, "
						"sid:x%x\n", phba->ctx_idx,
						phba->ct_ctx[
						    evt_dat->immed_dat].oxid,
						phba->ct_ctx[
						    evt_dat->immed_dat].SID);
			phba->ct_ctx[evt_dat->immed_dat].rxid =
				piocbq->iocb.ulpContext;
			phba->ct_ctx[evt_dat->immed_dat].oxid =
				piocbq->iocb.unsli3.rcvsli3.ox_id;
			phba->ct_ctx[evt_dat->immed_dat].SID =
				piocbq->iocb.un.rcvels.remoteID;
			phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
		} else
			evt_dat->immed_dat = piocbq->iocb.ulpContext;

		evt_dat->type = FC_REG_CT_EVENT;
		list_add(&evt_dat->node, &evt->events_to_see);
		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
			wake_up_interruptible(&evt->wq);
			lpfc_bsg_event_unref(evt);
			break;
		}

		list_move(evt->events_to_see.prev, &evt->events_to_get);

		dd_data = (struct bsg_job_data *)evt->dd_data;
		job = dd_data->set_job;
		dd_data->set_job = NULL;
		lpfc_bsg_event_unref(evt);
		if (job) {
			bsg_reply = job->reply;
			bsg_reply->reply_payload_rcv_len = size;
			/* make error code available to userspace */
			bsg_reply->result = 0;
			job->dd_data = NULL;
			/* complete the job back to userspace */
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			bsg_job_done(job, bsg_reply->result,
				     bsg_reply->reply_payload_rcv_len);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

error_ct_unsol_exit:
	if (!list_empty(&head))
		list_del(&head);
	if ((phba->sli_rev < LPFC_SLI_REV4) &&
	    (evt_req_id == SLI_CT_ELX_LOOPBACK))
		return 0;
	return 1;
}

/**
 * lpfc_bsg_ct_unsol_abort - handle a CT abort toward the management plane
 * @phba: Pointer to HBA context object.
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an abort of a CT command toward the management plane
 * for an SLI4 port.
 *
 * If a pending context for a CT command to the management plane is present,
 * it clears that context and returns 1 for handled; otherwise, it returns 0
 * indicating no such context exists.
 **/
int
lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header fc_hdr;
	struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
	int ctx_idx, handled = 0;
	uint16_t oxid, rxid;
	uint32_t sid;

	memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
	oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);

	for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
		if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
			continue;
		if (phba->ct_ctx[ctx_idx].rxid != rxid)
			continue;
		if (phba->ct_ctx[ctx_idx].oxid != oxid)
			continue;
		if (phba->ct_ctx[ctx_idx].SID != sid)
			continue;
		phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
		handled = 1;
	}
	return handled;
}

/**
 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
 * @job: SET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_set_event(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct set_ct_event *event_req;
	struct lpfc_bsg_event *evt;
	int rc = 0;
	struct bsg_job_data *dd_data = NULL;
	uint32_t ev_mask;
	unsigned long flags;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2612 Received SET_CT_EVENT below minimum "
				"size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct set_ct_event *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
		   FC_REG_EVENT_MASK);
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			dd_data = (struct bsg_job_data *)evt->dd_data;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (&evt->node == &phba->ct_ev_waiters) {
		/* no event waiting struct yet - first call */
		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
		if (dd_data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2734 Failed allocation of dd_data\n");
			rc = -ENOMEM;
			goto job_error;
		}
		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
					 event_req->ev_req_id);
		if (!evt) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2617 Failed allocation of event "
					"waiter\n");
			rc = -ENOMEM;
			goto job_error;
		}
		dd_data->type = TYPE_EVT;
		dd_data->set_job = NULL;
		dd_data->context_un.evt = evt;
		evt->dd_data = (void *)dd_data;
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_add(&evt->node, &phba->ct_ev_waiters);
		lpfc_bsg_event_ref(evt);
		evt->wait_time_stamp = jiffies;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	evt->waiting = 1;
	dd_data->set_job = job; /* for unsolicited command */
	job->dd_data = dd_data; /* for fc transport timeout callback */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return 0; /* call job done later */

job_error:
	if (dd_data != NULL)
		kfree(dd_data);

	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
 * @job: GET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_get_event(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct get_ct_event *event_req;
	struct get_ct_event_reply *event_reply;
	struct lpfc_bsg_event *evt, *evt_next;
	struct event_data *evt_dat = NULL;
	unsigned long flags;
	uint32_t rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2613 Received GET_CT_EVENT request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_ct_event *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_ct_event_reply *)
		bsg_reply->reply_data.vendor_reply.vendor_rsp;
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			if (list_empty(&evt->events_to_get))
				break;
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			evt_dat = list_entry(evt->events_to_get.prev,
					     struct event_data, node);
			list_del(&evt_dat->node);
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* The app may continue to ask for event data until it gets
	 * an error indicating that there isn't anymore
	 */
	if (evt_dat == NULL) {
		bsg_reply->reply_payload_rcv_len = 0;
		rc = -ENOENT;
		goto job_error;
	}

	if (evt_dat->len > job->request_payload.payload_len) {
		evt_dat->len = job->request_payload.payload_len;
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2618 Truncated event data at %d "
				"bytes\n",
				job->request_payload.payload_len);
	}

	event_reply->type = evt_dat->type;
	event_reply->immed_data = evt_dat->immed_dat;
	if (evt_dat->len > 0)
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->request_payload.sg_list,
					    job->request_payload.sg_cnt,
					    evt_dat->data, evt_dat->len);
	else
		bsg_reply->reply_payload_rcv_len = 0;

	if (evt_dat) {
		kfree(evt_dat->data);
		kfree(evt_dat);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
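	/* The final kref_put can free the event and unlink it from
	 * ct_ev_waiters, so the unref must be done under ct_ev_lock.
	 */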
	lpfc_bsg_event_unref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	job->dd_data = NULL;
	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return 0;

job_error:
	job->dd_data = NULL;
	bsg_reply->result = rc;
	return rc;
}

/**
 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_issue_ct_rsp function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
		      struct lpfc_iocbq *cmdiocbq,
		      struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc = 0;

	dd_data = cmdiocbq->context1;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	ndlp = dd_data->context_un.iocb.ndlp;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	rsp = &rspiocbq->iocb;

	/* Copy the completed job data or set the error status */

	if (job) {
		bsg_reply = job->reply;
		if (rsp->ulpStatus) {
			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			bsg_reply->reply_payload_rcv_len = 0;
		}
	}

	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}

/**
 * lpfc_issue_ct_rsp - issue a ct response
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the job object.
 * @tag: tag index value into the ports context exchange array.
 * @cmp: Pointer to a dma buffer list holding the CT response payload.
 * @bmp: Pointer to a dma buffer descriptor.
 * @num_entry: Number of entries in the bde.
 **/
static int
lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
		  struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
		  int num_entry)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *ctiocb = NULL;
	int rc = 0;
	struct lpfc_nodelist *ndlp = NULL;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2736 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	/* Allocate buffer for command iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb) {
		rc = -ENOMEM;
		goto no_ctiocb;
	}

	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
	icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	/* Fill in rest of iocb */
	icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Do not issue unsol response if oxid not marked as valid */
		if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		icmd->ulpContext = phba->ct_ctx[tag].rxid;
		icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
		ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
		if (!ndlp) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
					"2721 ndlp null for oxid %x SID %x\n",
					icmd->ulpContext,
					phba->ct_ctx[tag].SID);
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		/* Check if the ndlp is active */
		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		/* get a reference count so the ndlp doesn't go away while
		 * we respond
		 */
		if (!lpfc_nlp_get(ndlp)) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		icmd->un.ulpWord[3] =
			phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];

		/* The exchange is done, mark the entry as invalid */
		phba->ct_ctx[tag].valid = UNSOL_INVALID;
	} else
		icmd->ulpContext = (ushort) tag;

	icmd->ulpTimeout = phba->fc_ratov * 2;

	/* Xmit CT response on exchange <xid> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
			icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);

	ctiocb->iocb_cmpl = NULL;
	ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
	ctiocb->vport = phba->pport;
	ctiocb->context1 = dd_data;
	ctiocb->context2 = cmp;
	ctiocb->context3 = bmp;
	ctiocb->context_un.ndlp = ndlp;
	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;

	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = ctiocb;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto issue_ct_rsp_exit;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);

	if (rc == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed/released */
		if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	}

	/* iocb failed so cleanup */
	job->dd_data = NULL;

issue_ct_rsp_exit:
	lpfc_sli_release_iocbq(phba, ctiocb);
no_ctiocb:
	kfree(dd_data);
no_dd_data:
	return rc;
}

/**
 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
 * @job: SEND_MGMT_RESP fc_bsg_job
 **/
static int
lpfc_bsg_send_mgmt_rsp(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
	int bpl_entries;
	uint32_t tag = mgmt_resp->tag;
	unsigned long reqbfrcnt =
		(unsigned long)job->request_payload.payload_len;
	int rc = 0;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
		rc = -ERANGE;
		goto send_mgmt_rsp_exit;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_exit;
	}

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &bpl_entries);
	if (!cmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	rc = -EACCES;

	lpfc_free_bsg_buffers(phba, cmp);

send_mgmt_rsp_free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
send_mgmt_rsp_exit:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_diag_mode_enter - prepare for entering device diag loopback mode
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for preparing the driver for diag loopback
 * on the device.
 */
static int
lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i = 0;

	psli = &phba->sli;
	if (!psli)
		return -ENODEV;

	pring = &psli->ring[LPFC_FCP_RING];
	if (!pring)
		return -ENODEV;

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
		return -EACCES;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_block_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_block_requests(shost);
	}

	while (!list_empty(&pring->txcmplq)) {
		if (i++ > 500)	/* wait up to 5 seconds */
			break;
		msleep(10);
	}
	return 0;
}

/**
 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for driver exit processing of setting up
 * diag loopback mode on device.
 */
static void
lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_unblock_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_unblock_requests(shost);
	}
	return;
}

/**
 * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
 * @phba: Pointer to HBA context object.
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing an sli3 port into diagnostic
 * loopback mode in order to perform a diagnostic loopback test.
 * All new scsi requests are blocked, a small delay is used to allow the
 * scsi requests to complete then the link is brought down. If the link
 * is placed in loopback mode then scsi requests are again allowed
 * so the scsi mid-layer doesn't give up on the port.
 * All of this is done in-line.
 */
static int
lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct diag_mode_set *loopback_mode;
	uint32_t link_flags;
	uint32_t timeout;
	LPFC_MBOXQ_t *pmboxq = NULL;
	int mbxstatus = MBX_SUCCESS;
	int i = 0;
	int rc = 0;

	/* no data to return just the return code */
	bsg_reply->reply_payload_rcv_len = 0;

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2738 Received DIAG MODE request size:%d "
				"below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				      sizeof(struct diag_mode_set)));
		rc = -EINVAL;
		goto job_error;
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_error;

	/* bring the link to diagnostic mode */
	loopback_mode = (struct diag_mode_set *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	timeout = loopback_mode->timeout * 100;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto loopback_mode_exit;
	}
	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
	pmboxq->u.mb.mbxOwner = OWN_HOST;

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
		/* wait for link down before proceeding */
		i = 0;
		while (phba->link_state != LPFC_LINK_DOWN) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				goto loopback_mode_exit;
			}
			msleep(10);
		}

		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		if (link_flags == INTERNAL_LOOP_BACK)
			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
		else
			pmboxq->u.mb.un.varInitLnk.link_flags =
				FLAGS_TOPOLOGY_MODE_LOOP;

		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
		pmboxq->u.mb.mbxOwner = OWN_HOST;

		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
						     LPFC_MBOX_TMO);

		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
			rc = -ENODEV;
		else {
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_LOOPBACK_MODE;
			spin_unlock_irq(&phba->hbalock);
			/* wait for the link attention interrupt */
			msleep(100);

			i = 0;
			while (phba->link_state != LPFC_HBA_READY) {
				if (i++ > timeout) {
					rc = -ETIMEDOUT;
					break;
				}

				msleep(10);
			}
		}

	} else
		rc = -ENODEV;

loopback_mode_exit:
	lpfc_bsg_diag_mode_exit(phba);

	/*
	 * Let SLI layer release mboxq if mbox command completed after timeout.
	 */
	if (pmboxq && mbxstatus != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);

job_error:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}

/**
 * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
 * @phba: Pointer to HBA context object.
 * @diag: Flag to set the link to diag or normal operation state.
1903 * 1904 * This function is responsible for issuing a sli4 mailbox command for setting 1905 * link to either diag state or normal operation state. 1906 */ 1907 static int 1908 lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag) 1909 { 1910 LPFC_MBOXQ_t *pmboxq; 1911 struct lpfc_mbx_set_link_diag_state *link_diag_state; 1912 uint32_t req_len, alloc_len; 1913 int mbxstatus = MBX_SUCCESS, rc; 1914 1915 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1916 if (!pmboxq) 1917 return -ENOMEM; 1918 1919 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) - 1920 sizeof(struct lpfc_sli4_cfg_mhdr)); 1921 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 1922 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE, 1923 req_len, LPFC_SLI4_MBX_EMBED); 1924 if (alloc_len != req_len) { 1925 rc = -ENOMEM; 1926 goto link_diag_state_set_out; 1927 } 1928 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 1929 "3128 Set link to diagnostic state:x%x (x%x/x%x)\n", 1930 diag, phba->sli4_hba.lnk_info.lnk_tp, 1931 phba->sli4_hba.lnk_info.lnk_no); 1932 1933 link_diag_state = &pmboxq->u.mqe.un.link_diag_state; 1934 bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req, 1935 LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE); 1936 bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req, 1937 phba->sli4_hba.lnk_info.lnk_no); 1938 bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req, 1939 phba->sli4_hba.lnk_info.lnk_tp); 1940 if (diag) 1941 bf_set(lpfc_mbx_set_diag_state_diag, 1942 &link_diag_state->u.req, 1); 1943 else 1944 bf_set(lpfc_mbx_set_diag_state_diag, 1945 &link_diag_state->u.req, 0); 1946 1947 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO); 1948 1949 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) 1950 rc = 0; 1951 else 1952 rc = -ENODEV; 1953 1954 link_diag_state_set_out: 1955 if (pmboxq && (mbxstatus != MBX_TIMEOUT)) 1956 mempool_free(pmboxq, phba->mbox_mem_pool); 1957 1958 return rc; 1959 } 1960 1961 /** 1962 * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic 1963 * @phba: Pointer to HBA context object. 1964 * 1965 * This function is responsible for issuing a sli4 mailbox command for setting 1966 * up internal loopback diagnostic. 
1967 */ 1968 static int 1969 lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba) 1970 { 1971 LPFC_MBOXQ_t *pmboxq; 1972 uint32_t req_len, alloc_len; 1973 struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback; 1974 int mbxstatus = MBX_SUCCESS, rc = 0; 1975 1976 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1977 if (!pmboxq) 1978 return -ENOMEM; 1979 req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) - 1980 sizeof(struct lpfc_sli4_cfg_mhdr)); 1981 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 1982 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK, 1983 req_len, LPFC_SLI4_MBX_EMBED); 1984 if (alloc_len != req_len) { 1985 mempool_free(pmboxq, phba->mbox_mem_pool); 1986 return -ENOMEM; 1987 } 1988 link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback; 1989 bf_set(lpfc_mbx_set_diag_state_link_num, 1990 &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no); 1991 bf_set(lpfc_mbx_set_diag_state_link_type, 1992 &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp); 1993 bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req, 1994 LPFC_DIAG_LOOPBACK_TYPE_INTERNAL); 1995 1996 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO); 1997 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) { 1998 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1999 "3127 Failed setup loopback mode mailbox " 2000 "command, rc:x%x, status:x%x\n", mbxstatus, 2001 pmboxq->u.mb.mbxStatus); 2002 rc = -ENODEV; 2003 } 2004 if (pmboxq && (mbxstatus != MBX_TIMEOUT)) 2005 mempool_free(pmboxq, phba->mbox_mem_pool); 2006 return rc; 2007 } 2008 2009 /** 2010 * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic 2011 * @phba: Pointer to HBA context object. 2012 * 2013 * This function set up SLI4 FC port registrations for diagnostic run, which 2014 * includes all the rpis, vfi, and also vpi. 2015 */ 2016 static int 2017 lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba) 2018 { 2019 int rc; 2020 2021 if (phba->pport->fc_flag & FC_VFI_REGISTERED) { 2022 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2023 "3136 Port still had vfi registered: " 2024 "mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n", 2025 phba->pport->fc_myDID, phba->fcf.fcfi, 2026 phba->sli4_hba.vfi_ids[phba->pport->vfi], 2027 phba->vpi_ids[phba->pport->vpi]); 2028 return -EINVAL; 2029 } 2030 rc = lpfc_issue_reg_vfi(phba->pport); 2031 return rc; 2032 } 2033 2034 /** 2035 * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command 2036 * @phba: Pointer to HBA context object. 2037 * @job: LPFC_BSG_VENDOR_DIAG_MODE 2038 * 2039 * This function is responsible for placing an sli4 port into diagnostic 2040 * loopback mode in order to perform a diagnostic loopback test. 
2041 */ 2042 static int 2043 lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job) 2044 { 2045 struct fc_bsg_request *bsg_request = job->request; 2046 struct fc_bsg_reply *bsg_reply = job->reply; 2047 struct diag_mode_set *loopback_mode; 2048 uint32_t link_flags, timeout; 2049 int i, rc = 0; 2050 2051 /* no data to return just the return code */ 2052 bsg_reply->reply_payload_rcv_len = 0; 2053 2054 if (job->request_len < sizeof(struct fc_bsg_request) + 2055 sizeof(struct diag_mode_set)) { 2056 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2057 "3011 Received DIAG MODE request size:%d " 2058 "below the minimum size:%d\n", 2059 job->request_len, 2060 (int)(sizeof(struct fc_bsg_request) + 2061 sizeof(struct diag_mode_set))); 2062 rc = -EINVAL; 2063 goto job_error; 2064 } 2065 2066 rc = lpfc_bsg_diag_mode_enter(phba); 2067 if (rc) 2068 goto job_error; 2069 2070 /* indicate we are in loobpack diagnostic mode */ 2071 spin_lock_irq(&phba->hbalock); 2072 phba->link_flag |= LS_LOOPBACK_MODE; 2073 spin_unlock_irq(&phba->hbalock); 2074 2075 /* reset port to start frome scratch */ 2076 rc = lpfc_selective_reset(phba); 2077 if (rc) 2078 goto job_error; 2079 2080 /* bring the link to diagnostic mode */ 2081 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 2082 "3129 Bring link to diagnostic state.\n"); 2083 loopback_mode = (struct diag_mode_set *) 2084 bsg_request->rqst_data.h_vendor.vendor_cmd; 2085 link_flags = loopback_mode->type; 2086 timeout = loopback_mode->timeout * 100; 2087 2088 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1); 2089 if (rc) { 2090 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2091 "3130 Failed to bring link to diagnostic " 2092 "state, rc:x%x\n", rc); 2093 goto loopback_mode_exit; 2094 } 2095 2096 /* wait for link down before proceeding */ 2097 i = 0; 2098 while (phba->link_state != LPFC_LINK_DOWN) { 2099 if (i++ > timeout) { 2100 rc = -ETIMEDOUT; 2101 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 2102 "3131 Timeout waiting for link to " 2103 "diagnostic mode, timeout:%d ms\n", 2104 timeout * 10); 2105 goto loopback_mode_exit; 2106 } 2107 msleep(10); 2108 } 2109 2110 /* set up loopback mode */ 2111 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 2112 "3132 Set up loopback mode:x%x\n", link_flags); 2113 2114 if (link_flags == INTERNAL_LOOP_BACK) 2115 rc = lpfc_sli4_bsg_set_internal_loopback(phba); 2116 else if (link_flags == EXTERNAL_LOOP_BACK) 2117 rc = lpfc_hba_init_link_fc_topology(phba, 2118 FLAGS_TOPOLOGY_MODE_PT_PT, 2119 MBX_NOWAIT); 2120 else { 2121 rc = -EINVAL; 2122 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 2123 "3141 Loopback mode:x%x not supported\n", 2124 link_flags); 2125 goto loopback_mode_exit; 2126 } 2127 2128 if (!rc) { 2129 /* wait for the link attention interrupt */ 2130 msleep(100); 2131 i = 0; 2132 while (phba->link_state < LPFC_LINK_UP) { 2133 if (i++ > timeout) { 2134 rc = -ETIMEDOUT; 2135 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 2136 "3137 Timeout waiting for link up " 2137 "in loopback mode, timeout:%d ms\n", 2138 timeout * 10); 2139 break; 2140 } 2141 msleep(10); 2142 } 2143 } 2144 2145 /* port resource registration setup for loopback diagnostic */ 2146 if (!rc) { 2147 /* set up a none zero myDID for loopback test */ 2148 phba->pport->fc_myDID = 1; 2149 rc = lpfc_sli4_diag_fcport_reg_setup(phba); 2150 } else 2151 goto loopback_mode_exit; 2152 2153 if (!rc) { 2154 /* wait for the port ready */ 2155 msleep(100); 2156 i = 0; 2157 while (phba->link_state != LPFC_HBA_READY) { 2158 if (i++ > timeout) { 2159 rc = -ETIMEDOUT; 2160 
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 2161 "3133 Timeout waiting for port " 2162 "loopback mode ready, timeout:%d ms\n", 2163 timeout * 10); 2164 break; 2165 } 2166 msleep(10); 2167 } 2168 } 2169 2170 loopback_mode_exit: 2171 /* clear loopback diagnostic mode */ 2172 if (rc) { 2173 spin_lock_irq(&phba->hbalock); 2174 phba->link_flag &= ~LS_LOOPBACK_MODE; 2175 spin_unlock_irq(&phba->hbalock); 2176 } 2177 lpfc_bsg_diag_mode_exit(phba); 2178 2179 job_error: 2180 /* make error code available to userspace */ 2181 bsg_reply->result = rc; 2182 /* complete the job back to userspace if no error */ 2183 if (rc == 0) 2184 bsg_job_done(job, bsg_reply->result, 2185 bsg_reply->reply_payload_rcv_len); 2186 return rc; 2187 } 2188 2189 /** 2190 * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode 2191 * @job: LPFC_BSG_VENDOR_DIAG_MODE 2192 * 2193 * This function is responsible for responding to check and dispatch bsg diag 2194 * command from the user to proper driver action routines. 2195 */ 2196 static int 2197 lpfc_bsg_diag_loopback_mode(struct bsg_job *job) 2198 { 2199 struct Scsi_Host *shost; 2200 struct lpfc_vport *vport; 2201 struct lpfc_hba *phba; 2202 int rc; 2203 2204 shost = fc_bsg_to_shost(job); 2205 if (!shost) 2206 return -ENODEV; 2207 vport = shost_priv(shost); 2208 if (!vport) 2209 return -ENODEV; 2210 phba = vport->phba; 2211 if (!phba) 2212 return -ENODEV; 2213 2214 if (phba->sli_rev < LPFC_SLI_REV4) 2215 rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job); 2216 else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 2217 LPFC_SLI_INTF_IF_TYPE_2) 2218 rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job); 2219 else 2220 rc = -ENODEV; 2221 2222 return rc; 2223 } 2224 2225 /** 2226 * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode 2227 * @job: LPFC_BSG_VENDOR_DIAG_MODE_END 2228 * 2229 * This function is responsible for responding to check and dispatch bsg diag 2230 * command from the user to proper driver action routines. 
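 *
 * The shost -> vport -> phba validation chain at the top of
 * lpfc_bsg_diag_loopback_mode() above is repeated by the routine below and
 * several other vendor command handlers in this file. A hypothetical
 * consolidation (lpfc_bsg_job_to_phba is not an existing driver helper):
 */

static inline struct lpfc_hba *lpfc_bsg_job_to_phba(struct bsg_job *job)
{
	struct Scsi_Host *shost = fc_bsg_to_shost(job);
	struct lpfc_vport *vport = shost ? shost_priv(shost) : NULL;

	/* A NULL anywhere in the chain means the job cannot be serviced. */
	return vport ? vport->phba : NULL;
}

/*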
*/ 2232 static int 2233 lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job) 2234 { 2235 struct fc_bsg_request *bsg_request = job->request; 2236 struct fc_bsg_reply *bsg_reply = job->reply; 2237 struct Scsi_Host *shost; 2238 struct lpfc_vport *vport; 2239 struct lpfc_hba *phba; 2240 struct diag_mode_set *loopback_mode_end_cmd; 2241 uint32_t timeout; 2242 int rc, i; 2243 2244 shost = fc_bsg_to_shost(job); 2245 if (!shost) 2246 return -ENODEV; 2247 vport = shost_priv(shost); 2248 if (!vport) 2249 return -ENODEV; 2250 phba = vport->phba; 2251 if (!phba) 2252 return -ENODEV; 2253 2254 if (phba->sli_rev < LPFC_SLI_REV4) 2255 return -ENODEV; 2256 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 2257 LPFC_SLI_INTF_IF_TYPE_2) 2258 return -ENODEV; 2259 2260 /* clear loopback diagnostic mode */ 2261 spin_lock_irq(&phba->hbalock); 2262 phba->link_flag &= ~LS_LOOPBACK_MODE; 2263 spin_unlock_irq(&phba->hbalock); 2264 loopback_mode_end_cmd = (struct diag_mode_set *) 2265 bsg_request->rqst_data.h_vendor.vendor_cmd; 2266 timeout = loopback_mode_end_cmd->timeout * 100; 2267 2268 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0); 2269 if (rc) { 2270 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2271 "3139 Failed to bring link out of diagnostic " 2272 "state, rc:x%x\n", rc); 2273 goto loopback_mode_end_exit; 2274 } 2275 2276 /* wait for link down before proceeding */ 2277 i = 0; 2278 while (phba->link_state != LPFC_LINK_DOWN) { 2279 if (i++ > timeout) { 2280 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 2281 "3140 Timeout waiting for link to " 2282 "diagnostic mode_end, timeout:%d ms\n", 2283 timeout * 10); 2284 /* there is nothing much we can do here */ 2285 break; 2286 } 2287 msleep(10); 2288 } 2289 2290 /* reset port resource registrations */ 2291 rc = lpfc_selective_reset(phba); 2292 phba->pport->fc_myDID = 0; 2293 2294 loopback_mode_end_exit: 2295 /* make return code available to userspace */ 2296 bsg_reply->result = rc; 2297 /* complete the job back to userspace if no error */ 2298 if (rc == 0) 2299 bsg_job_done(job, bsg_reply->result, 2300 bsg_reply->reply_payload_rcv_len); 2301 return rc; 2302 } 2303 2304 /** 2305 * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test 2306 * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST 2307 * 2308 * This function performs an SLI4 diag link test request from the user 2309 * application.
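 *
 * Note on units: the diag mode routines above convert the user-supplied
 * timeout with timeout * 100 and then consume it as 10 ms poll iterations,
 * so the value from userspace is apparently in seconds. A sketch of that
 * arithmetic (hypothetical helper, shown only to document the conversion):
 */

static inline uint32_t lpfc_bsg_secs_to_polls(uint32_t timeout_sec)
{
	/* 100 iterations of msleep(10) per second of user timeout. */
	return timeout_sec * 100;
}

/*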
2310 */ 2311 static int 2312 lpfc_sli4_bsg_link_diag_test(struct bsg_job *job) 2313 { 2314 struct fc_bsg_request *bsg_request = job->request; 2315 struct fc_bsg_reply *bsg_reply = job->reply; 2316 struct Scsi_Host *shost; 2317 struct lpfc_vport *vport; 2318 struct lpfc_hba *phba; 2319 LPFC_MBOXQ_t *pmboxq; 2320 struct sli4_link_diag *link_diag_test_cmd; 2321 uint32_t req_len, alloc_len; 2322 struct lpfc_mbx_run_link_diag_test *run_link_diag_test; 2323 union lpfc_sli4_cfg_shdr *shdr; 2324 uint32_t shdr_status, shdr_add_status; 2325 struct diag_status *diag_status_reply; 2326 int mbxstatus, rc = 0; 2327 2328 shost = fc_bsg_to_shost(job); 2329 if (!shost) { 2330 rc = -ENODEV; 2331 goto job_error; 2332 } 2333 vport = shost_priv(shost); 2334 if (!vport) { 2335 rc = -ENODEV; 2336 goto job_error; 2337 } 2338 phba = vport->phba; 2339 if (!phba) { 2340 rc = -ENODEV; 2341 goto job_error; 2342 } 2343 2344 if (phba->sli_rev < LPFC_SLI_REV4) { 2345 rc = -ENODEV; 2346 goto job_error; 2347 } 2348 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 2349 LPFC_SLI_INTF_IF_TYPE_2) { 2350 rc = -ENODEV; 2351 goto job_error; 2352 } 2353 2354 if (job->request_len < sizeof(struct fc_bsg_request) + 2355 sizeof(struct sli4_link_diag)) { 2356 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2357 "3013 Received LINK DIAG TEST request " 2358 " size:%d below the minimum size:%d\n", 2359 job->request_len, 2360 (int)(sizeof(struct fc_bsg_request) + 2361 sizeof(struct sli4_link_diag))); 2362 rc = -EINVAL; 2363 goto job_error; 2364 } 2365 2366 rc = lpfc_bsg_diag_mode_enter(phba); 2367 if (rc) 2368 goto job_error; 2369 2370 link_diag_test_cmd = (struct sli4_link_diag *) 2371 bsg_request->rqst_data.h_vendor.vendor_cmd; 2372 2373 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1); 2374 2375 if (rc) 2376 goto job_error; 2377 2378 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2379 if (!pmboxq) { 2380 rc = -ENOMEM; 2381 goto link_diag_test_exit; 2382 } 2383 2384 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) - 2385 sizeof(struct lpfc_sli4_cfg_mhdr)); 2386 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 2387 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE, 2388 req_len, LPFC_SLI4_MBX_EMBED); 2389 if (alloc_len != req_len) { 2390 rc = -ENOMEM; 2391 goto link_diag_test_exit; 2392 } 2393 run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test; 2394 bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req, 2395 phba->sli4_hba.lnk_info.lnk_no); 2396 bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req, 2397 phba->sli4_hba.lnk_info.lnk_tp); 2398 bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req, 2399 link_diag_test_cmd->test_id); 2400 bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req, 2401 link_diag_test_cmd->loops); 2402 bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req, 2403 link_diag_test_cmd->test_version); 2404 bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req, 2405 link_diag_test_cmd->error_action); 2406 2407 mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 2408 2409 shdr = (union lpfc_sli4_cfg_shdr *) 2410 &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr; 2411 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 2412 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 2413 if (shdr_status || shdr_add_status || mbxstatus) { 2414 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 2415 "3010 Run link diag test mailbox failed with " 2416 "mbx_status x%x status x%x, add_status x%x\n", 2417 
mbxstatus, shdr_status, shdr_add_status); 2418 } 2419 2420 diag_status_reply = (struct diag_status *) 2421 bsg_reply->reply_data.vendor_reply.vendor_rsp; 2422 2423 if (job->reply_len < 2424 sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) { 2425 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2426 "3012 Received Run link diag test reply " 2427 "below minimum size (%d): reply_len:%d\n", 2428 (int)(sizeof(struct fc_bsg_request) + 2429 sizeof(struct diag_status)), 2430 job->reply_len); 2431 rc = -EINVAL; 2432 goto job_error; 2433 } 2434 2435 diag_status_reply->mbox_status = mbxstatus; 2436 diag_status_reply->shdr_status = shdr_status; 2437 diag_status_reply->shdr_add_status = shdr_add_status; 2438 2439 link_diag_test_exit: 2440 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0); 2441 2442 if (pmboxq) 2443 mempool_free(pmboxq, phba->mbox_mem_pool); 2444 2445 lpfc_bsg_diag_mode_exit(phba); 2446 2447 job_error: 2448 /* make error code available to userspace */ 2449 bsg_reply->result = rc; 2450 /* complete the job back to userspace if no error */ 2451 if (rc == 0) 2452 bsg_job_done(job, bsg_reply->result, 2453 bsg_reply->reply_payload_rcv_len); 2454 return rc; 2455 } 2456 2457 /** 2458 * lpfcdiag_loop_self_reg - obtains a remote port login id 2459 * @phba: Pointer to HBA context object 2460 * @rpi: Pointer to a remote port login id 2461 * 2462 * This function obtains a remote port login id so the diag loopback test 2463 * can send and receive its own unsolicited CT command. 2464 **/ 2465 static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi) 2466 { 2467 LPFC_MBOXQ_t *mbox; 2468 struct lpfc_dmabuf *dmabuff; 2469 int status; 2470 2471 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2472 if (!mbox) 2473 return -ENOMEM; 2474 2475 if (phba->sli_rev < LPFC_SLI_REV4) 2476 status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID, 2477 (uint8_t *)&phba->pport->fc_sparam, 2478 mbox, *rpi); 2479 else { 2480 *rpi = lpfc_sli4_alloc_rpi(phba); 2481 status = lpfc_reg_rpi(phba, phba->pport->vpi, 2482 phba->pport->fc_myDID, 2483 (uint8_t *)&phba->pport->fc_sparam, 2484 mbox, *rpi); 2485 } 2486 2487 if (status) { 2488 mempool_free(mbox, phba->mbox_mem_pool); 2489 if (phba->sli_rev == LPFC_SLI_REV4) 2490 lpfc_sli4_free_rpi(phba, *rpi); 2491 return -ENOMEM; 2492 } 2493 2494 dmabuff = (struct lpfc_dmabuf *) mbox->context1; 2495 mbox->context1 = NULL; 2496 mbox->context2 = NULL; 2497 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 2498 2499 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) { 2500 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys); 2501 kfree(dmabuff); 2502 if (status != MBX_TIMEOUT) 2503 mempool_free(mbox, phba->mbox_mem_pool); 2504 if (phba->sli_rev == LPFC_SLI_REV4) 2505 lpfc_sli4_free_rpi(phba, *rpi); 2506 return -ENODEV; 2507 } 2508 2509 if (phba->sli_rev < LPFC_SLI_REV4) 2510 *rpi = mbox->u.mb.un.varWords[0]; 2511 2512 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys); 2513 kfree(dmabuff); 2514 mempool_free(mbox, phba->mbox_mem_pool); 2515 return 0; 2516 } 2517 2518 /** 2519 * lpfcdiag_loop_self_unreg - unregs from the rpi 2520 * @phba: Pointer to HBA context object 2521 * @rpi: Remote port login id 2522 * 2523 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg 2524 **/ 2525 static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi) 2526 { 2527 LPFC_MBOXQ_t *mbox; 2528 int status; 2529 2530 /* Allocate mboxq structure */ 2531 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2532 if (mbox == NULL) 2533 return 
-ENOMEM; 2534 2535 if (phba->sli_rev < LPFC_SLI_REV4) 2536 lpfc_unreg_login(phba, 0, rpi, mbox); 2537 else 2538 lpfc_unreg_login(phba, phba->pport->vpi, 2539 phba->sli4_hba.rpi_ids[rpi], mbox); 2540 2541 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 2542 2543 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) { 2544 if (status != MBX_TIMEOUT) 2545 mempool_free(mbox, phba->mbox_mem_pool); 2546 return -EIO; 2547 } 2548 mempool_free(mbox, phba->mbox_mem_pool); 2549 if (phba->sli_rev == LPFC_SLI_REV4) 2550 lpfc_sli4_free_rpi(phba, rpi); 2551 return 0; 2552 } 2553 2554 /** 2555 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids 2556 * @phba: Pointer to HBA context object 2557 * @rpi: Remote port login id 2558 * @txxri: Pointer to transmit exchange id 2559 * @rxxri: Pointer to response exchange id 2560 * 2561 * This function obtains the transmit and receive ids required to send 2562 * an unsolicited ct command with a payload. Special lpfc FsType and CmdRsp 2563 * values are used so the unsolicited response handler is able to process 2564 * the ct command sent on the same port. 2565 **/ 2566 static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, 2567 uint16_t *txxri, uint16_t *rxxri) 2568 { 2569 struct lpfc_bsg_event *evt; 2570 struct lpfc_iocbq *cmdiocbq, *rspiocbq; 2571 IOCB_t *cmd, *rsp; 2572 struct lpfc_dmabuf *dmabuf; 2573 struct ulp_bde64 *bpl = NULL; 2574 struct lpfc_sli_ct_request *ctreq = NULL; 2575 int ret_val = 0; 2576 int time_left; 2577 int iocb_stat = IOCB_SUCCESS; 2578 unsigned long flags; 2579 2580 *txxri = 0; 2581 *rxxri = 0; 2582 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid, 2583 SLI_CT_ELX_LOOPBACK); 2584 if (!evt) 2585 return -ENOMEM; 2586 2587 spin_lock_irqsave(&phba->ct_ev_lock, flags); 2588 list_add(&evt->node, &phba->ct_ev_waiters); 2589 lpfc_bsg_event_ref(evt); 2590 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2591 2592 cmdiocbq = lpfc_sli_get_iocbq(phba); 2593 rspiocbq = lpfc_sli_get_iocbq(phba); 2594 2595 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2596 if (dmabuf) { 2597 dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys); 2598 if (dmabuf->virt) { 2599 INIT_LIST_HEAD(&dmabuf->list); 2600 bpl = (struct ulp_bde64 *) dmabuf->virt; 2601 memset(bpl, 0, sizeof(*bpl)); 2602 ctreq = (struct lpfc_sli_ct_request *)(bpl + 1); 2603 bpl->addrHigh = 2604 le32_to_cpu(putPaddrHigh(dmabuf->phys + 2605 sizeof(*bpl))); 2606 bpl->addrLow = 2607 le32_to_cpu(putPaddrLow(dmabuf->phys + 2608 sizeof(*bpl))); 2609 bpl->tus.f.bdeFlags = 0; 2610 bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ; 2611 bpl->tus.w = le32_to_cpu(bpl->tus.w); 2612 } 2613 } 2614 2615 if (cmdiocbq == NULL || rspiocbq == NULL || 2616 dmabuf == NULL || bpl == NULL || ctreq == NULL || 2617 dmabuf->virt == NULL) { 2618 ret_val = -ENOMEM; 2619 goto err_get_xri_exit; 2620 } 2621 2622 cmd = &cmdiocbq->iocb; 2623 rsp = &rspiocbq->iocb; 2624 2625 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ); 2626 2627 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; 2628 ctreq->RevisionId.bits.InId = 0; 2629 ctreq->FsType = SLI_CT_ELX_LOOPBACK; 2630 ctreq->FsSubType = 0; 2631 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP; 2632 ctreq->CommandResponse.bits.Size = 0; 2633 2634 2635 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys); 2636 cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys); 2637 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 2638 cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl); 2639 2640 cmd->un.xseq64.w5.hcsw.Fctl = LA; 2641
cmd->un.xseq64.w5.hcsw.Dfctl = 0; 2642 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; 2643 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT; 2644 2645 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR; 2646 cmd->ulpBdeCount = 1; 2647 cmd->ulpLe = 1; 2648 cmd->ulpClass = CLASS3; 2649 cmd->ulpContext = rpi; 2650 2651 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 2652 cmdiocbq->vport = phba->pport; 2653 cmdiocbq->iocb_cmpl = NULL; 2654 2655 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, 2656 rspiocbq, 2657 (phba->fc_ratov * 2) 2658 + LPFC_DRVR_TIMEOUT); 2659 if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) { 2660 ret_val = -EIO; 2661 goto err_get_xri_exit; 2662 } 2663 *txxri = rsp->ulpContext; 2664 2665 evt->waiting = 1; 2666 evt->wait_time_stamp = jiffies; 2667 time_left = wait_event_interruptible_timeout( 2668 evt->wq, !list_empty(&evt->events_to_see), 2669 msecs_to_jiffies(1000 * 2670 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); 2671 if (list_empty(&evt->events_to_see)) 2672 ret_val = (time_left) ? -EINTR : -ETIMEDOUT; 2673 else { 2674 spin_lock_irqsave(&phba->ct_ev_lock, flags); 2675 list_move(evt->events_to_see.prev, &evt->events_to_get); 2676 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2677 *rxxri = (list_entry(evt->events_to_get.prev, 2678 typeof(struct event_data), 2679 node))->immed_dat; 2680 } 2681 evt->waiting = 0; 2682 2683 err_get_xri_exit: 2684 spin_lock_irqsave(&phba->ct_ev_lock, flags); 2685 lpfc_bsg_event_unref(evt); /* release ref */ 2686 lpfc_bsg_event_unref(evt); /* delete */ 2687 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2688 2689 if (dmabuf) { 2690 if (dmabuf->virt) 2691 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); 2692 kfree(dmabuf); 2693 } 2694 2695 if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT)) 2696 lpfc_sli_release_iocbq(phba, cmdiocbq); 2697 if (rspiocbq) 2698 lpfc_sli_release_iocbq(phba, rspiocbq); 2699 return ret_val; 2700 } 2701 2702 /** 2703 * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffer 2704 * @phba: Pointer to HBA context object 2705 * 2706 * This function allocates a BSG_MBOX_SIZE (4KB) page sized dma buffer and 2707 * returns a pointer to the buffer. 2708 **/ 2709 static struct lpfc_dmabuf * 2710 lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba) 2711 { 2712 struct lpfc_dmabuf *dmabuf; 2713 struct pci_dev *pcidev = phba->pcidev; 2714 2715 /* allocate dma buffer struct */ 2716 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2717 if (!dmabuf) 2718 return NULL; 2719 2720 INIT_LIST_HEAD(&dmabuf->list); 2721 2722 /* now, allocate dma buffer */ 2723 dmabuf->virt = dma_zalloc_coherent(&pcidev->dev, BSG_MBOX_SIZE, 2724 &(dmabuf->phys), GFP_KERNEL); 2725 2726 if (!dmabuf->virt) { 2727 kfree(dmabuf); 2728 return NULL; 2729 } 2730 2731 return dmabuf; 2732 } 2733 2734 /** 2735 * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer 2736 * @phba: Pointer to HBA context object. 2737 * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor. 2738 * 2739 * This routine simply frees a dma buffer and its associated buffer 2740 * descriptor referred to by @dmabuf.
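 *
 * A typical pairing of the two page helpers (hypothetical usage sketch;
 * the forward declaration is only needed because the free routine is
 * defined below this point in the file):
 **/

static void lpfc_bsg_dma_page_free(struct lpfc_hba *phba,
				   struct lpfc_dmabuf *dmabuf);

static inline int lpfc_bsg_dma_page_example(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf = lpfc_bsg_dma_page_alloc(phba);

	if (!dmabuf)
		return -ENOMEM;
	/* ... stage up to BSG_MBOX_SIZE bytes via dmabuf->virt and hand
	 * dmabuf->phys to the hardware ...
	 */
	lpfc_bsg_dma_page_free(phba, dmabuf);
	return 0;
}

/*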
2741 **/ 2742 static void 2743 lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf) 2744 { 2745 struct pci_dev *pcidev = phba->pcidev; 2746 2747 if (!dmabuf) 2748 return; 2749 2750 if (dmabuf->virt) 2751 dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE, 2752 dmabuf->virt, dmabuf->phys); 2753 kfree(dmabuf); 2754 return; 2755 } 2756 2757 /** 2758 * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers 2759 * @phba: Pointer to HBA context object. 2760 * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs. 2761 * 2762 * This routine just simply frees all dma buffers and their associated buffer 2763 * descriptors referred by @dmabuf_list. 2764 **/ 2765 static void 2766 lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba, 2767 struct list_head *dmabuf_list) 2768 { 2769 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 2770 2771 if (list_empty(dmabuf_list)) 2772 return; 2773 2774 list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) { 2775 list_del_init(&dmabuf->list); 2776 lpfc_bsg_dma_page_free(phba, dmabuf); 2777 } 2778 return; 2779 } 2780 2781 /** 2782 * diag_cmd_data_alloc - fills in a bde struct with dma buffers 2783 * @phba: Pointer to HBA context object 2784 * @bpl: Pointer to 64 bit bde structure 2785 * @size: Number of bytes to process 2786 * @nocopydata: Flag to copy user data into the allocated buffer 2787 * 2788 * This function allocates page size buffers and populates an lpfc_dmabufext. 2789 * If allowed the user data pointed to with indataptr is copied into the kernel 2790 * memory. The chained list of page size buffers is returned. 2791 **/ 2792 static struct lpfc_dmabufext * 2793 diag_cmd_data_alloc(struct lpfc_hba *phba, 2794 struct ulp_bde64 *bpl, uint32_t size, 2795 int nocopydata) 2796 { 2797 struct lpfc_dmabufext *mlist = NULL; 2798 struct lpfc_dmabufext *dmp; 2799 int cnt, offset = 0, i = 0; 2800 struct pci_dev *pcidev; 2801 2802 pcidev = phba->pcidev; 2803 2804 while (size) { 2805 /* We get chunks of 4K */ 2806 if (size > BUF_SZ_4K) 2807 cnt = BUF_SZ_4K; 2808 else 2809 cnt = size; 2810 2811 /* allocate struct lpfc_dmabufext buffer header */ 2812 dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL); 2813 if (!dmp) 2814 goto out; 2815 2816 INIT_LIST_HEAD(&dmp->dma.list); 2817 2818 /* Queue it to a linked list */ 2819 if (mlist) 2820 list_add_tail(&dmp->dma.list, &mlist->dma.list); 2821 else 2822 mlist = dmp; 2823 2824 /* allocate buffer */ 2825 dmp->dma.virt = dma_alloc_coherent(&pcidev->dev, 2826 cnt, 2827 &(dmp->dma.phys), 2828 GFP_KERNEL); 2829 2830 if (!dmp->dma.virt) 2831 goto out; 2832 2833 dmp->size = cnt; 2834 2835 if (nocopydata) { 2836 bpl->tus.f.bdeFlags = 0; 2837 pci_dma_sync_single_for_device(phba->pcidev, 2838 dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE); 2839 2840 } else { 2841 memset((uint8_t *)dmp->dma.virt, 0, cnt); 2842 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 2843 } 2844 2845 /* build buffer ptr list for IOCB */ 2846 bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys)); 2847 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys)); 2848 bpl->tus.f.bdeSize = (ushort) cnt; 2849 bpl->tus.w = le32_to_cpu(bpl->tus.w); 2850 bpl++; 2851 2852 i++; 2853 offset += cnt; 2854 size -= cnt; 2855 } 2856 2857 if (mlist) { 2858 mlist->flag = i; 2859 return mlist; 2860 } 2861 out: 2862 diag_cmd_data_free(phba, mlist); 2863 return NULL; 2864 } 2865 2866 /** 2867 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd 2868 * @phba: Pointer to HBA context object 2869 * @rxxri: Receive 
exchange id 2870 * @len: Number of data bytes 2871 * 2872 * This function allocates and posts a data buffer of sufficient size to receive 2873 * an unsolicted CT command. 2874 **/ 2875 static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, 2876 size_t len) 2877 { 2878 struct lpfc_sli *psli = &phba->sli; 2879 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; 2880 struct lpfc_iocbq *cmdiocbq; 2881 IOCB_t *cmd = NULL; 2882 struct list_head head, *curr, *next; 2883 struct lpfc_dmabuf *rxbmp; 2884 struct lpfc_dmabuf *dmp; 2885 struct lpfc_dmabuf *mp[2] = {NULL, NULL}; 2886 struct ulp_bde64 *rxbpl = NULL; 2887 uint32_t num_bde; 2888 struct lpfc_dmabufext *rxbuffer = NULL; 2889 int ret_val = 0; 2890 int iocb_stat; 2891 int i = 0; 2892 2893 cmdiocbq = lpfc_sli_get_iocbq(phba); 2894 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2895 if (rxbmp != NULL) { 2896 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); 2897 if (rxbmp->virt) { 2898 INIT_LIST_HEAD(&rxbmp->list); 2899 rxbpl = (struct ulp_bde64 *) rxbmp->virt; 2900 rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0); 2901 } 2902 } 2903 2904 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) { 2905 ret_val = -ENOMEM; 2906 goto err_post_rxbufs_exit; 2907 } 2908 2909 /* Queue buffers for the receive exchange */ 2910 num_bde = (uint32_t)rxbuffer->flag; 2911 dmp = &rxbuffer->dma; 2912 2913 cmd = &cmdiocbq->iocb; 2914 i = 0; 2915 2916 INIT_LIST_HEAD(&head); 2917 list_add_tail(&head, &dmp->list); 2918 list_for_each_safe(curr, next, &head) { 2919 mp[i] = list_entry(curr, struct lpfc_dmabuf, list); 2920 list_del(curr); 2921 2922 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2923 mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba); 2924 cmd->un.quexri64cx.buff.bde.addrHigh = 2925 putPaddrHigh(mp[i]->phys); 2926 cmd->un.quexri64cx.buff.bde.addrLow = 2927 putPaddrLow(mp[i]->phys); 2928 cmd->un.quexri64cx.buff.bde.tus.f.bdeSize = 2929 ((struct lpfc_dmabufext *)mp[i])->size; 2930 cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag; 2931 cmd->ulpCommand = CMD_QUE_XRI64_CX; 2932 cmd->ulpPU = 0; 2933 cmd->ulpLe = 1; 2934 cmd->ulpBdeCount = 1; 2935 cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0; 2936 2937 } else { 2938 cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys); 2939 cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys); 2940 cmd->un.cont64[i].tus.f.bdeSize = 2941 ((struct lpfc_dmabufext *)mp[i])->size; 2942 cmd->ulpBdeCount = ++i; 2943 2944 if ((--num_bde > 0) && (i < 2)) 2945 continue; 2946 2947 cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX; 2948 cmd->ulpLe = 1; 2949 } 2950 2951 cmd->ulpClass = CLASS3; 2952 cmd->ulpContext = rxxri; 2953 2954 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 2955 0); 2956 if (iocb_stat == IOCB_ERROR) { 2957 diag_cmd_data_free(phba, 2958 (struct lpfc_dmabufext *)mp[0]); 2959 if (mp[1]) 2960 diag_cmd_data_free(phba, 2961 (struct lpfc_dmabufext *)mp[1]); 2962 dmp = list_entry(next, struct lpfc_dmabuf, list); 2963 ret_val = -EIO; 2964 goto err_post_rxbufs_exit; 2965 } 2966 2967 lpfc_sli_ringpostbuf_put(phba, pring, mp[0]); 2968 if (mp[1]) { 2969 lpfc_sli_ringpostbuf_put(phba, pring, mp[1]); 2970 mp[1] = NULL; 2971 } 2972 2973 /* The iocb was freed by lpfc_sli_issue_iocb */ 2974 cmdiocbq = lpfc_sli_get_iocbq(phba); 2975 if (!cmdiocbq) { 2976 dmp = list_entry(next, struct lpfc_dmabuf, list); 2977 ret_val = -EIO; 2978 goto err_post_rxbufs_exit; 2979 } 2980 2981 cmd = &cmdiocbq->iocb; 2982 i = 0; 2983 } 2984 list_del(&head); 2985 2986 err_post_rxbufs_exit: 2987 2988 if 
(rxbmp) { 2989 if (rxbmp->virt) 2990 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys); 2991 kfree(rxbmp); 2992 } 2993 2994 if (cmdiocbq) 2995 lpfc_sli_release_iocbq(phba, cmdiocbq); 2996 return ret_val; 2997 } 2998 2999 /** 3000 * lpfc_bsg_diag_loopback_run - run loopback on a port by issue ct cmd to itself 3001 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job 3002 * 3003 * This function receives a user data buffer to be transmitted and received on 3004 * the same port, the link must be up and in loopback mode prior 3005 * to being called. 3006 * 1. A kernel buffer is allocated to copy the user data into. 3007 * 2. The port registers with "itself". 3008 * 3. The transmit and receive exchange ids are obtained. 3009 * 4. The receive exchange id is posted. 3010 * 5. A new els loopback event is created. 3011 * 6. The command and response iocbs are allocated. 3012 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to looppback. 3013 * 3014 * This function is meant to be called n times while the port is in loopback 3015 * so it is the apps responsibility to issue a reset to take the port out 3016 * of loopback mode. 3017 **/ 3018 static int 3019 lpfc_bsg_diag_loopback_run(struct bsg_job *job) 3020 { 3021 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); 3022 struct fc_bsg_reply *bsg_reply = job->reply; 3023 struct lpfc_hba *phba = vport->phba; 3024 struct lpfc_bsg_event *evt; 3025 struct event_data *evdat; 3026 struct lpfc_sli *psli = &phba->sli; 3027 uint32_t size; 3028 uint32_t full_size; 3029 size_t segment_len = 0, segment_offset = 0, current_offset = 0; 3030 uint16_t rpi = 0; 3031 struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL; 3032 IOCB_t *cmd, *rsp = NULL; 3033 struct lpfc_sli_ct_request *ctreq; 3034 struct lpfc_dmabuf *txbmp; 3035 struct ulp_bde64 *txbpl = NULL; 3036 struct lpfc_dmabufext *txbuffer = NULL; 3037 struct list_head head; 3038 struct lpfc_dmabuf *curr; 3039 uint16_t txxri = 0, rxxri; 3040 uint32_t num_bde; 3041 uint8_t *ptr = NULL, *rx_databuf = NULL; 3042 int rc = 0; 3043 int time_left; 3044 int iocb_stat = IOCB_SUCCESS; 3045 unsigned long flags; 3046 void *dataout = NULL; 3047 uint32_t total_mem; 3048 3049 /* in case no data is returned return just the return code */ 3050 bsg_reply->reply_payload_rcv_len = 0; 3051 3052 if (job->request_len < 3053 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) { 3054 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3055 "2739 Received DIAG TEST request below minimum " 3056 "size\n"); 3057 rc = -EINVAL; 3058 goto loopback_test_exit; 3059 } 3060 3061 if (job->request_payload.payload_len != 3062 job->reply_payload.payload_len) { 3063 rc = -EINVAL; 3064 goto loopback_test_exit; 3065 } 3066 3067 if ((phba->link_state == LPFC_HBA_ERROR) || 3068 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) || 3069 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) { 3070 rc = -EACCES; 3071 goto loopback_test_exit; 3072 } 3073 3074 if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) { 3075 rc = -EACCES; 3076 goto loopback_test_exit; 3077 } 3078 3079 size = job->request_payload.payload_len; 3080 full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */ 3081 3082 if ((size == 0) || (size > 80 * BUF_SZ_4K)) { 3083 rc = -ERANGE; 3084 goto loopback_test_exit; 3085 } 3086 3087 if (full_size >= BUF_SZ_4K) { 3088 /* 3089 * Allocate memory for ioctl data. If buffer is bigger than 64k, 3090 * then we allocate 64k and re-use that buffer over and over to 3091 * xfer the whole block. 
This is because Linux kernel has a 3092 * problem allocating more than 120k of kernel space memory. Saw 3093 * problem with GET_FCPTARGETMAPPING... 3094 */ 3095 if (size <= (64 * 1024)) 3096 total_mem = full_size; 3097 else 3098 total_mem = 64 * 1024; 3099 } else 3100 /* Allocate memory for ioctl data */ 3101 total_mem = BUF_SZ_4K; 3102 3103 dataout = kmalloc(total_mem, GFP_KERNEL); 3104 if (dataout == NULL) { 3105 rc = -ENOMEM; 3106 goto loopback_test_exit; 3107 } 3108 3109 ptr = dataout; 3110 ptr += ELX_LOOPBACK_HEADER_SZ; 3111 sg_copy_to_buffer(job->request_payload.sg_list, 3112 job->request_payload.sg_cnt, 3113 ptr, size); 3114 rc = lpfcdiag_loop_self_reg(phba, &rpi); 3115 if (rc) 3116 goto loopback_test_exit; 3117 3118 if (phba->sli_rev < LPFC_SLI_REV4) { 3119 rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri); 3120 if (rc) { 3121 lpfcdiag_loop_self_unreg(phba, rpi); 3122 goto loopback_test_exit; 3123 } 3124 3125 rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size); 3126 if (rc) { 3127 lpfcdiag_loop_self_unreg(phba, rpi); 3128 goto loopback_test_exit; 3129 } 3130 } 3131 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid, 3132 SLI_CT_ELX_LOOPBACK); 3133 if (!evt) { 3134 lpfcdiag_loop_self_unreg(phba, rpi); 3135 rc = -ENOMEM; 3136 goto loopback_test_exit; 3137 } 3138 3139 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3140 list_add(&evt->node, &phba->ct_ev_waiters); 3141 lpfc_bsg_event_ref(evt); 3142 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3143 3144 cmdiocbq = lpfc_sli_get_iocbq(phba); 3145 if (phba->sli_rev < LPFC_SLI_REV4) 3146 rspiocbq = lpfc_sli_get_iocbq(phba); 3147 txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 3148 3149 if (txbmp) { 3150 txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys); 3151 if (txbmp->virt) { 3152 INIT_LIST_HEAD(&txbmp->list); 3153 txbpl = (struct ulp_bde64 *) txbmp->virt; 3154 txbuffer = diag_cmd_data_alloc(phba, 3155 txbpl, full_size, 0); 3156 } 3157 } 3158 3159 if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) { 3160 rc = -ENOMEM; 3161 goto err_loopback_test_exit; 3162 } 3163 if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) { 3164 rc = -ENOMEM; 3165 goto err_loopback_test_exit; 3166 } 3167 3168 cmd = &cmdiocbq->iocb; 3169 if (phba->sli_rev < LPFC_SLI_REV4) 3170 rsp = &rspiocbq->iocb; 3171 3172 INIT_LIST_HEAD(&head); 3173 list_add_tail(&head, &txbuffer->dma.list); 3174 list_for_each_entry(curr, &head, list) { 3175 segment_len = ((struct lpfc_dmabufext *)curr)->size; 3176 if (current_offset == 0) { 3177 ctreq = curr->virt; 3178 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ); 3179 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; 3180 ctreq->RevisionId.bits.InId = 0; 3181 ctreq->FsType = SLI_CT_ELX_LOOPBACK; 3182 ctreq->FsSubType = 0; 3183 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA; 3184 ctreq->CommandResponse.bits.Size = size; 3185 segment_offset = ELX_LOOPBACK_HEADER_SZ; 3186 } else 3187 segment_offset = 0; 3188 3189 BUG_ON(segment_offset >= segment_len); 3190 memcpy(curr->virt + segment_offset, 3191 ptr + current_offset, 3192 segment_len - segment_offset); 3193 3194 current_offset += segment_len - segment_offset; 3195 BUG_ON(current_offset > size); 3196 } 3197 list_del(&head); 3198 3199 /* Build the XMIT_SEQUENCE iocb */ 3200 num_bde = (uint32_t)txbuffer->flag; 3201 3202 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys); 3203 cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys); 3204 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 3205 cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct 
ulp_bde64)); 3206 3207 cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA); 3208 cmd->un.xseq64.w5.hcsw.Dfctl = 0; 3209 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; 3210 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT; 3211 3212 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX; 3213 cmd->ulpBdeCount = 1; 3214 cmd->ulpLe = 1; 3215 cmd->ulpClass = CLASS3; 3216 3217 if (phba->sli_rev < LPFC_SLI_REV4) { 3218 cmd->ulpContext = txxri; 3219 } else { 3220 cmd->un.xseq64.bdl.ulpIoTag32 = 0; 3221 cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi]; 3222 cmdiocbq->context3 = txbmp; 3223 cmdiocbq->sli4_xritag = NO_XRI; 3224 cmd->unsli3.rcvsli3.ox_id = 0xffff; 3225 } 3226 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 3227 cmdiocbq->iocb_flag |= LPFC_IO_LOOPBACK; 3228 cmdiocbq->vport = phba->pport; 3229 cmdiocbq->iocb_cmpl = NULL; 3230 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, 3231 rspiocbq, (phba->fc_ratov * 2) + 3232 LPFC_DRVR_TIMEOUT); 3233 3234 if ((iocb_stat != IOCB_SUCCESS) || 3235 ((phba->sli_rev < LPFC_SLI_REV4) && 3236 (rsp->ulpStatus != IOSTAT_SUCCESS))) { 3237 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3238 "3126 Failed loopback test issue iocb: " 3239 "iocb_stat:x%x\n", iocb_stat); 3240 rc = -EIO; 3241 goto err_loopback_test_exit; 3242 } 3243 3244 evt->waiting = 1; 3245 time_left = wait_event_interruptible_timeout( 3246 evt->wq, !list_empty(&evt->events_to_see), 3247 msecs_to_jiffies(1000 * 3248 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); 3249 evt->waiting = 0; 3250 if (list_empty(&evt->events_to_see)) { 3251 rc = (time_left) ? -EINTR : -ETIMEDOUT; 3252 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3253 "3125 Not receiving unsolicited event, " 3254 "rc:x%x\n", rc); 3255 } else { 3256 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3257 list_move(evt->events_to_see.prev, &evt->events_to_get); 3258 evdat = list_entry(evt->events_to_get.prev, 3259 typeof(*evdat), node); 3260 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3261 rx_databuf = evdat->data; 3262 if (evdat->len != full_size) { 3263 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3264 "1603 Loopback test did not receive expected " 3265 "data length. 
actual length 0x%x expected " 3266 "length 0x%x\n", 3267 evdat->len, full_size); 3268 rc = -EIO; 3269 } else if (rx_databuf == NULL) 3270 rc = -EIO; 3271 else { 3272 rc = IOCB_SUCCESS; 3273 /* skip over elx loopback header */ 3274 rx_databuf += ELX_LOOPBACK_HEADER_SZ; 3275 bsg_reply->reply_payload_rcv_len = 3276 sg_copy_from_buffer(job->reply_payload.sg_list, 3277 job->reply_payload.sg_cnt, 3278 rx_databuf, size); 3279 bsg_reply->reply_payload_rcv_len = size; 3280 } 3281 } 3282 3283 err_loopback_test_exit: 3284 lpfcdiag_loop_self_unreg(phba, rpi); 3285 3286 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3287 lpfc_bsg_event_unref(evt); /* release ref */ 3288 lpfc_bsg_event_unref(evt); /* delete */ 3289 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3290 3291 if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT)) 3292 lpfc_sli_release_iocbq(phba, cmdiocbq); 3293 3294 if (rspiocbq != NULL) 3295 lpfc_sli_release_iocbq(phba, rspiocbq); 3296 3297 if (txbmp != NULL) { 3298 if (txbpl != NULL) { 3299 if (txbuffer != NULL) 3300 diag_cmd_data_free(phba, txbuffer); 3301 lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys); 3302 } 3303 kfree(txbmp); 3304 } 3305 3306 loopback_test_exit: 3307 kfree(dataout); 3308 /* make error code available to userspace */ 3309 bsg_reply->result = rc; 3310 job->dd_data = NULL; 3311 /* complete the job back to userspace if no error */ 3312 if (rc == IOCB_SUCCESS) 3313 bsg_job_done(job, bsg_reply->result, 3314 bsg_reply->reply_payload_rcv_len); 3315 return rc; 3316 } 3317 3318 /** 3319 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command 3320 * @job: GET_DFC_REV fc_bsg_job 3321 **/ 3322 static int 3323 lpfc_bsg_get_dfc_rev(struct bsg_job *job) 3324 { 3325 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); 3326 struct fc_bsg_reply *bsg_reply = job->reply; 3327 struct lpfc_hba *phba = vport->phba; 3328 struct get_mgmt_rev_reply *event_reply; 3329 int rc = 0; 3330 3331 if (job->request_len < 3332 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) { 3333 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3334 "2740 Received GET_DFC_REV request below " 3335 "minimum size\n"); 3336 rc = -EINVAL; 3337 goto job_error; 3338 } 3339 3340 event_reply = (struct get_mgmt_rev_reply *) 3341 bsg_reply->reply_data.vendor_reply.vendor_rsp; 3342 3343 if (job->reply_len < 3344 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) { 3345 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3346 "2741 Received GET_DFC_REV reply below " 3347 "minimum size\n"); 3348 rc = -EINVAL; 3349 goto job_error; 3350 } 3351 3352 event_reply->info.a_Major = MANAGEMENT_MAJOR_REV; 3353 event_reply->info.a_Minor = MANAGEMENT_MINOR_REV; 3354 job_error: 3355 bsg_reply->result = rc; 3356 if (rc == 0) 3357 bsg_job_done(job, bsg_reply->result, 3358 bsg_reply->reply_payload_rcv_len); 3359 return rc; 3360 } 3361 3362 /** 3363 * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler 3364 * @phba: Pointer to HBA context object. 3365 * @pmboxq: Pointer to mailbox command. 3366 * 3367 * This is completion handler function for mailbox commands issued from 3368 * lpfc_bsg_issue_mbox function. This function is called by the 3369 * mailbox event handler function with no lock held. This function 3370 * will wake up thread waiting on the wait queue pointed by context1 3371 * of the mailbox. 
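 *
 * The completion below, like the others in this file, must race safely
 * with the BSG timeout handler. The idiom is to claim the job under
 * ct_ev_lock and detach job->dd_data so the timeout path can no longer
 * abort it; in isolation (hypothetical helper, not part of the driver):
 **/

static inline struct bsg_job *lpfc_bsg_claim_job(struct lpfc_hba *phba,
						 struct bsg_job_data *dd_data)
{
	struct bsg_job *job;
	unsigned long flags;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job)
		job->dd_data = NULL;	/* timeout handler backs off now */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return job;
}

/*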
3372 **/ 3373 static void 3374 lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3375 { 3376 struct bsg_job_data *dd_data; 3377 struct fc_bsg_reply *bsg_reply; 3378 struct bsg_job *job; 3379 uint32_t size; 3380 unsigned long flags; 3381 uint8_t *pmb, *pmb_buf; 3382 3383 dd_data = pmboxq->context1; 3384 3385 /* 3386 * The outgoing buffer is readily referred from the dma buffer, 3387 * just need to get header part from mailboxq structure. 3388 */ 3389 pmb = (uint8_t *)&pmboxq->u.mb; 3390 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb; 3391 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t)); 3392 3393 /* Determine if job has been aborted */ 3394 3395 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3396 job = dd_data->set_job; 3397 if (job) { 3398 /* Prevent timeout handling from trying to abort job */ 3399 job->dd_data = NULL; 3400 } 3401 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3402 3403 /* Copy the mailbox data to the job if it is still active */ 3404 3405 if (job) { 3406 bsg_reply = job->reply; 3407 size = job->reply_payload.payload_len; 3408 bsg_reply->reply_payload_rcv_len = 3409 sg_copy_from_buffer(job->reply_payload.sg_list, 3410 job->reply_payload.sg_cnt, 3411 pmb_buf, size); 3412 } 3413 3414 dd_data->set_job = NULL; 3415 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool); 3416 lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers); 3417 kfree(dd_data); 3418 3419 /* Complete the job if the job is still active */ 3420 3421 if (job) { 3422 bsg_reply->result = 0; 3423 bsg_job_done(job, bsg_reply->result, 3424 bsg_reply->reply_payload_rcv_len); 3425 } 3426 return; 3427 } 3428 3429 /** 3430 * lpfc_bsg_check_cmd_access - test for a supported mailbox command 3431 * @phba: Pointer to HBA context object. 3432 * @mb: Pointer to a mailbox object. 3433 * @vport: Pointer to a vport object. 3434 * 3435 * Some commands require the port to be offline, some may not be called from 3436 * the application. 
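 *
 * Callers are expected to use this as a gatekeeper before building and
 * issuing a user-supplied mailbox; a sketch of the intended call pattern
 * (hypothetical caller; the declaration is repeated only for the forward
 * reference):
 **/

static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
				     MAILBOX_t *mb, struct lpfc_vport *vport);

static inline int lpfc_bsg_mbox_gate_example(struct lpfc_hba *phba,
					     MAILBOX_t *mb,
					     struct lpfc_vport *vport)
{
	int rc = lpfc_bsg_check_cmd_access(phba, mb, vport);

	if (rc)
		return rc;	/* -EPERM: not permitted in this port state */
	/* ... safe to copy in, set up and issue the mailbox here ... */
	return 0;
}

/*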
**/ 3438 static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba, 3439 MAILBOX_t *mb, struct lpfc_vport *vport) 3440 { 3441 /* return negative error values for bsg job */ 3442 switch (mb->mbxCommand) { 3443 /* Offline only */ 3444 case MBX_INIT_LINK: 3445 case MBX_DOWN_LINK: 3446 case MBX_CONFIG_LINK: 3447 case MBX_CONFIG_RING: 3448 case MBX_RESET_RING: 3449 case MBX_UNREG_LOGIN: 3450 case MBX_CLEAR_LA: 3451 case MBX_DUMP_CONTEXT: 3452 case MBX_RUN_DIAGS: 3453 case MBX_RESTART: 3454 case MBX_SET_MASK: 3455 if (!(vport->fc_flag & FC_OFFLINE_MODE)) { 3456 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3457 "2743 Command 0x%x is illegal in on-line " 3458 "state\n", 3459 mb->mbxCommand); 3460 return -EPERM; 3461 } /* fall through - offline commands are also legal below */ 3462 case MBX_WRITE_NV: 3463 case MBX_WRITE_VPARMS: 3464 case MBX_LOAD_SM: 3465 case MBX_READ_NV: 3466 case MBX_READ_CONFIG: 3467 case MBX_READ_RCONFIG: 3468 case MBX_READ_STATUS: 3469 case MBX_READ_XRI: 3470 case MBX_READ_REV: 3471 case MBX_READ_LNK_STAT: 3472 case MBX_DUMP_MEMORY: 3473 case MBX_DOWN_LOAD: 3474 case MBX_UPDATE_CFG: 3475 case MBX_KILL_BOARD: 3476 case MBX_READ_TOPOLOGY: 3477 case MBX_LOAD_AREA: 3478 case MBX_LOAD_EXP_ROM: 3479 case MBX_BEACON: 3480 case MBX_DEL_LD_ENTRY: 3481 case MBX_SET_DEBUG: 3482 case MBX_WRITE_WWN: 3483 case MBX_SLI4_CONFIG: 3484 case MBX_READ_EVENT_LOG: 3485 case MBX_READ_EVENT_LOG_STATUS: 3486 case MBX_WRITE_EVENT_LOG: 3487 case MBX_PORT_CAPABILITIES: 3488 case MBX_PORT_IOV_CONTROL: 3489 case MBX_RUN_BIU_DIAG64: 3490 break; 3491 case MBX_SET_VARIABLE: 3492 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3493 "1226 mbox: set_variable 0x%x, 0x%x\n", 3494 mb->un.varWords[0], 3495 mb->un.varWords[1]); 3496 if ((mb->un.varWords[0] == SETVAR_MLOMNT) 3497 && (mb->un.varWords[1] == 1)) { 3498 phba->wait_4_mlo_maint_flg = 1; 3499 } else if (mb->un.varWords[0] == SETVAR_MLORST) { 3500 spin_lock_irq(&phba->hbalock); 3501 phba->link_flag &= ~LS_LOOPBACK_MODE; 3502 spin_unlock_irq(&phba->hbalock); 3503 phba->fc_topology = LPFC_TOPOLOGY_PT_PT; 3504 } 3505 break; 3506 case MBX_READ_SPARM64: 3507 case MBX_REG_LOGIN: 3508 case MBX_REG_LOGIN64: 3509 case MBX_CONFIG_PORT: 3510 case MBX_RUN_BIU_DIAG: 3511 default: 3512 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3513 "2742 Unknown Command 0x%x\n", 3514 mb->mbxCommand); 3515 return -EPERM; 3516 } 3517 3518 return 0; /* ok */ 3519 } 3520 3521 /** 3522 * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session 3523 * @phba: Pointer to HBA context object. 3524 * 3525 * This routine cleans up and resets the BSG handling of a multi-buffer mbox 3526 * command session. 3527 **/ 3528 static void 3529 lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba) 3530 { 3531 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) 3532 return; 3533 3534 /* free all memory, including dma buffers */ 3535 lpfc_bsg_dma_page_list_free(phba, 3536 &phba->mbox_ext_buf_ctx.ext_dmabuf_list); 3537 lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf); 3538 /* multi-buffer write mailbox command pass-through complete */ 3539 memset((char *)&phba->mbox_ext_buf_ctx, 0, 3540 sizeof(struct lpfc_mbox_ext_buf_ctx)); 3541 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 3542 3543 return; 3544 } 3545 3546 /** 3547 * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl 3548 * @phba: Pointer to HBA context object. 3549 * @pmboxq: Pointer to mailbox command. 3550 * 3551 * This routine handles BSG job completion for mailbox commands with 3552 * multiple external buffers.
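 *
 * The handler swaps any non-embedded extended data in place through
 * lpfc_sli_pcimem_bcopy() (lpfc_sli.c). To a first approximation that copy
 * is a per-32-bit-word endian swap; a simplified restatement (illustrative
 * only, see the real helper for the authoritative behavior):
 **/

static inline void lpfc_example_word_swap_copy(uint32_t *dest,
					       const uint32_t *src,
					       uint32_t bytes)
{
	uint32_t i;

	/* Copy word by word, swapping each from wire order to CPU order. */
	for (i = 0; i < bytes / sizeof(uint32_t); i++)
		dest[i] = le32_to_cpu((__force __le32)src[i]);
}

/*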
3553 **/ 3554 static struct bsg_job * 3555 lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3556 { 3557 struct bsg_job_data *dd_data; 3558 struct bsg_job *job; 3559 struct fc_bsg_reply *bsg_reply; 3560 uint8_t *pmb, *pmb_buf; 3561 unsigned long flags; 3562 uint32_t size; 3563 int rc = 0; 3564 struct lpfc_dmabuf *dmabuf; 3565 struct lpfc_sli_config_mbox *sli_cfg_mbx; 3566 uint8_t *pmbx; 3567 3568 dd_data = pmboxq->context1; 3569 3570 /* Determine if job has been aborted */ 3571 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3572 job = dd_data->set_job; 3573 if (job) { 3574 bsg_reply = job->reply; 3575 /* Prevent timeout handling from trying to abort job */ 3576 job->dd_data = NULL; 3577 } 3578 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3579 3580 /* 3581 * The outgoing buffer is readily referred from the dma buffer, 3582 * just need to get header part from mailboxq structure. 3583 */ 3584 3585 pmb = (uint8_t *)&pmboxq->u.mb; 3586 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb; 3587 /* Copy the byte swapped response mailbox back to the user */ 3588 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t)); 3589 /* if there is any non-embedded extended data copy that too */ 3590 dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf; 3591 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; 3592 if (!bsg_bf_get(lpfc_mbox_hdr_emb, 3593 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) { 3594 pmbx = (uint8_t *)dmabuf->virt; 3595 /* byte swap the extended data following the mailbox command */ 3596 lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)], 3597 &pmbx[sizeof(MAILBOX_t)], 3598 sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len); 3599 } 3600 3601 /* Complete the job if the job is still active */ 3602 3603 if (job) { 3604 size = job->reply_payload.payload_len; 3605 bsg_reply->reply_payload_rcv_len = 3606 sg_copy_from_buffer(job->reply_payload.sg_list, 3607 job->reply_payload.sg_cnt, 3608 pmb_buf, size); 3609 3610 /* result for successful */ 3611 bsg_reply->result = 0; 3612 3613 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3614 "2937 SLI_CONFIG ext-buffer maibox command " 3615 "(x%x/x%x) complete bsg job done, bsize:%d\n", 3616 phba->mbox_ext_buf_ctx.nembType, 3617 phba->mbox_ext_buf_ctx.mboxType, size); 3618 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, 3619 phba->mbox_ext_buf_ctx.nembType, 3620 phba->mbox_ext_buf_ctx.mboxType, 3621 dma_ebuf, sta_pos_addr, 3622 phba->mbox_ext_buf_ctx.mbx_dmabuf, 0); 3623 } else { 3624 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3625 "2938 SLI_CONFIG ext-buffer maibox " 3626 "command (x%x/x%x) failure, rc:x%x\n", 3627 phba->mbox_ext_buf_ctx.nembType, 3628 phba->mbox_ext_buf_ctx.mboxType, rc); 3629 } 3630 3631 3632 /* state change */ 3633 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE; 3634 kfree(dd_data); 3635 return job; 3636 } 3637 3638 /** 3639 * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox 3640 * @phba: Pointer to HBA context object. 3641 * @pmboxq: Pointer to mailbox command. 3642 * 3643 * This is completion handler function for mailbox read commands with multiple 3644 * external buffers. 
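 *
 * Both completion paths below hand the mailbox back to phba->mbox_mem_pool.
 * mempools keep a minimum reserve of objects so a completion can always
 * return memory even under pressure; note that mempool_alloc() does not
 * zero the object, hence the explicit memset seen at the allocation sites.
 * A minimal sketch of the allocation side (hypothetical helper):
 **/

static inline LPFC_MBOXQ_t *lpfc_example_get_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (mbox)
		memset(mbox, 0, sizeof(*mbox));	/* pool memory is not zeroed */
	return mbox;
}

/*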
3645 **/ 3646 static void 3647 lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3648 { 3649 struct bsg_job *job; 3650 struct fc_bsg_reply *bsg_reply; 3651 3652 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); 3653 3654 /* handle the BSG job with mailbox command */ 3655 if (!job) 3656 pmboxq->u.mb.mbxStatus = MBXERR_ERROR; 3657 3658 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3659 "2939 SLI_CONFIG ext-buffer rd maibox command " 3660 "complete, ctxState:x%x, mbxStatus:x%x\n", 3661 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); 3662 3663 if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1) 3664 lpfc_bsg_mbox_ext_session_reset(phba); 3665 3666 /* free base driver mailbox structure memory */ 3667 mempool_free(pmboxq, phba->mbox_mem_pool); 3668 3669 /* if the job is still active, call job done */ 3670 if (job) { 3671 bsg_reply = job->reply; 3672 bsg_job_done(job, bsg_reply->result, 3673 bsg_reply->reply_payload_rcv_len); 3674 } 3675 return; 3676 } 3677 3678 /** 3679 * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox 3680 * @phba: Pointer to HBA context object. 3681 * @pmboxq: Pointer to mailbox command. 3682 * 3683 * This is completion handler function for mailbox write commands with multiple 3684 * external buffers. 3685 **/ 3686 static void 3687 lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3688 { 3689 struct bsg_job *job; 3690 struct fc_bsg_reply *bsg_reply; 3691 3692 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); 3693 3694 /* handle the BSG job with the mailbox command */ 3695 if (!job) 3696 pmboxq->u.mb.mbxStatus = MBXERR_ERROR; 3697 3698 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3699 "2940 SLI_CONFIG ext-buffer wr maibox command " 3700 "complete, ctxState:x%x, mbxStatus:x%x\n", 3701 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); 3702 3703 /* free all memory, including dma buffers */ 3704 mempool_free(pmboxq, phba->mbox_mem_pool); 3705 lpfc_bsg_mbox_ext_session_reset(phba); 3706 3707 /* if the job is still active, call job done */ 3708 if (job) { 3709 bsg_reply = job->reply; 3710 bsg_job_done(job, bsg_reply->result, 3711 bsg_reply->reply_payload_rcv_len); 3712 } 3713 3714 return; 3715 } 3716 3717 static void 3718 lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp, 3719 uint32_t index, struct lpfc_dmabuf *mbx_dmabuf, 3720 struct lpfc_dmabuf *ext_dmabuf) 3721 { 3722 struct lpfc_sli_config_mbox *sli_cfg_mbx; 3723 3724 /* pointer to the start of mailbox command */ 3725 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt; 3726 3727 if (nemb_tp == nemb_mse) { 3728 if (index == 0) { 3729 sli_cfg_mbx->un.sli_config_emb0_subsys. 3730 mse[index].pa_hi = 3731 putPaddrHigh(mbx_dmabuf->phys + 3732 sizeof(MAILBOX_t)); 3733 sli_cfg_mbx->un.sli_config_emb0_subsys. 3734 mse[index].pa_lo = 3735 putPaddrLow(mbx_dmabuf->phys + 3736 sizeof(MAILBOX_t)); 3737 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3738 "2943 SLI_CONFIG(mse)[%d], " 3739 "bufLen:%d, addrHi:x%x, addrLo:x%x\n", 3740 index, 3741 sli_cfg_mbx->un.sli_config_emb0_subsys. 3742 mse[index].buf_len, 3743 sli_cfg_mbx->un.sli_config_emb0_subsys. 3744 mse[index].pa_hi, 3745 sli_cfg_mbx->un.sli_config_emb0_subsys. 3746 mse[index].pa_lo); 3747 } else { 3748 sli_cfg_mbx->un.sli_config_emb0_subsys. 3749 mse[index].pa_hi = 3750 putPaddrHigh(ext_dmabuf->phys); 3751 sli_cfg_mbx->un.sli_config_emb0_subsys. 
				mse[index].pa_lo =
				putPaddrLow(ext_dmabuf->phys);
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2944 SLI_CONFIG(mse)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
					index,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].buf_len,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].pa_hi,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].pa_lo);
		}
	} else {
		if (index == 0) {
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_hi =
				putPaddrHigh(mbx_dmabuf->phys +
					     sizeof(MAILBOX_t));
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_lo =
				putPaddrLow(mbx_dmabuf->phys +
					    sizeof(MAILBOX_t));
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3007 SLI_CONFIG(hbd)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
					index,
					bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
					&sli_cfg_mbx->un.
					sli_config_emb1_subsys.hbd[index]),
					sli_cfg_mbx->un.sli_config_emb1_subsys.
					hbd[index].pa_hi,
					sli_cfg_mbx->un.sli_config_emb1_subsys.
					hbd[index].pa_lo);

		} else {
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_hi =
				putPaddrHigh(ext_dmabuf->phys);
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_lo =
				putPaddrLow(ext_dmabuf->phys);
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3008 SLI_CONFIG(hbd)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
					index,
					bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
					&sli_cfg_mbx->un.
					sli_config_emb1_subsys.hbd[index]),
					sli_cfg_mbx->un.sli_config_emb1_subsys.
					hbd[index].pa_hi,
					sli_cfg_mbx->un.sli_config_emb1_subsys.
					hbd[index].pa_lo);
		}
	}
	return;
}

/**
 * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 * @nemb_tp: Enumerated non-embedded mailbox command type.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine performs a SLI_CONFIG (0x9B) read mailbox command operation
 * with non-embedded external buffers.
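 *
 * Read session flow (a summary of the code below, not a normative spec):
 * the first BSG request carries the SLI_CONFIG command itself; this routine
 * allocates DMA pages for external buffers 2..N, points the MSE/HBD
 * descriptors at them, and issues the mailbox with
 * lpfc_bsg_issue_read_mbox_ext_cmpl() as the completion handler. The
 * application then retrieves each external buffer with follow-up BSG
 * requests that are serviced by lpfc_bsg_read_ebuf_get().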
 **/
static int
lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
			      enum nemb_type nemb_tp,
			      struct lpfc_dmabuf *dmabuf)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	struct dfc_mbox_req *mbox_req;
	struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
	uint32_t ext_buf_cnt, ext_buf_index;
	struct lpfc_dmabuf *ext_dmabuf = NULL;
	struct bsg_job_data *dd_data = NULL;
	LPFC_MBOXQ_t *pmboxq = NULL;
	MAILBOX_t *pmb;
	uint8_t *pmbx;
	int rc, i;

	mbox_req =
	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;

	/* pointer to the start of mailbox command */
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;

	if (nemb_tp == nemb_mse) {
		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2945 Failed SLI_CONFIG(mse) rd, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_MSE);
			rc = -ERANGE;
			goto job_error;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2941 Handled SLI_CONFIG(mse) rd, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	} else {
		/* sanity check on interface type for support */
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
		    LPFC_SLI_INTF_IF_TYPE_2) {
			rc = -ENODEV;
			goto job_error;
		}
		/* nemb_tp == nemb_hbd */
		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2946 Failed SLI_CONFIG(hbd) rd, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_HBD);
			rc = -ERANGE;
			goto job_error;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2942 Handled SLI_CONFIG(hbd) rd, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	}

	/* before dma descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
					sta_pre_addr, dmabuf, ext_buf_cnt);

	/* reject a non-embedded mailbox command with no external buffers */
	if (ext_buf_cnt == 0) {
		rc = -EPERM;
		goto job_error;
	} else if (ext_buf_cnt > 1) {
		/* additional external read buffers */
		for (i = 1; i < ext_buf_cnt; i++) {
			ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
			if (!ext_dmabuf) {
				rc = -ENOMEM;
				goto job_error;
			}
			list_add_tail(&ext_dmabuf->list,
				      &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
		}
	}

	/* bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		rc = -ENOMEM;
		goto job_error;
	}

	/* mailbox command structure for base driver */
	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto job_error;
	}
	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));

	/* for the first external buffer */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);

	/* for the rest of external buffer descriptors if any */
	if (ext_buf_cnt > 1) {
		ext_buf_index = 1;
		list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
				&phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
			lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
						ext_buf_index, dmabuf,
						curr_dmabuf);
			ext_buf_index++;
		}
	}

	/* after dma descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
					sta_pos_addr, dmabuf, ext_buf_cnt);

	/* construct base driver mbox command */
	pmb = &pmboxq->u.mb;
	pmbx = (uint8_t *)dmabuf->virt;
	memcpy(pmb, pmbx, sizeof(*pmb));
	pmb->mbxOwner = OWN_HOST;
	pmboxq->vport = phba->pport;

	/* multi-buffer handling context */
	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
	phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;

	/* callback for multi-buffer read mailbox command */
	pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;

	/* context fields to callback function */
	pmboxq->context1 = dd_data;
	dd_data->type = TYPE_MBOX;
	dd_data->set_job = job;
	dd_data->context_un.mbox.pmboxq = pmboxq;
	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
	job->dd_data = dd_data;

	/* state change */
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;

	/*
	 * Non-embedded mailbox subcommand data gets byte swapped here because
	 * the lower level driver code only does the first 64 mailbox words.
	 */
	if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
	    (nemb_tp == nemb_mse))
		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
			&pmbx[sizeof(MAILBOX_t)],
			sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[0].buf_len);

	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2947 Issued SLI_CONFIG ext-buffer "
				"mailbox command, rc:x%x\n", rc);
		return SLI_CONFIG_HANDLED;
	}
	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
			"2948 Failed to issue SLI_CONFIG ext-buffer "
			"mailbox command, rc:x%x\n", rc);
	rc = -EPIPE;

job_error:
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	lpfc_bsg_dma_page_list_free(phba,
				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
	kfree(dd_data);
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
	return rc;
}

/**
 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 * @nemb_tp: Enumerated non-embedded mailbox command type.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine performs a SLI_CONFIG (0x9B) write mailbox command operation
 * with non-embedded external buffers.
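 *
 * Write session flow (a summary of the code below): with a single external
 * buffer the mailbox command is issued immediately. With multiple buffers,
 * only the first descriptor is set up here; the routine completes the BSG
 * job right away and waits for the application to deliver the remaining
 * buffers through lpfc_bsg_write_ebuf_set(), which issues the mailbox once
 * the last buffer has arrived.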
 **/
static int
lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
			       enum nemb_type nemb_tp,
			       struct lpfc_dmabuf *dmabuf)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct dfc_mbox_req *mbox_req;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	uint32_t ext_buf_cnt;
	struct bsg_job_data *dd_data = NULL;
	LPFC_MBOXQ_t *pmboxq = NULL;
	MAILBOX_t *pmb;
	uint8_t *mbx;
	int rc = SLI_CONFIG_NOT_HANDLED, i;

	mbox_req =
	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;

	/* pointer to the start of mailbox command */
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;

	if (nemb_tp == nemb_mse) {
		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2953 Failed SLI_CONFIG(mse) wr, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_MSE);
			return -ERANGE;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2949 Handled SLI_CONFIG(mse) wr, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	} else {
		/* sanity check on interface type for support */
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
		    LPFC_SLI_INTF_IF_TYPE_2)
			return -ENODEV;
		/* nemb_tp == nemb_hbd */
		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2954 Failed SLI_CONFIG(hbd) wr, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_HBD);
			return -ERANGE;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2950 Handled SLI_CONFIG(hbd) wr, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	}

	/* before dma buffer descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
					sta_pre_addr, dmabuf, ext_buf_cnt);

	if (ext_buf_cnt == 0)
		return -EPERM;

	/* for the first external buffer */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);

	/* after dma descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
					sta_pos_addr, dmabuf, ext_buf_cnt);

	/* log the lengths of the additional external buffers */
	for (i = 1; i < ext_buf_cnt; i++) {
		if (nemb_tp == nemb_mse)
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
				i, sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[i].buf_len);
		else
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
				i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
				&sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[i]));
	}

	/* multi-buffer handling context */
	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
	phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;

	if (ext_buf_cnt == 1) {
		/* bsg tracking structure */
		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
		if (!dd_data) {
			rc = -ENOMEM;
			goto job_error;
		}

		/* mailbox command structure for base driver */
		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!pmboxq) {
			rc = -ENOMEM;
			goto job_error;
		}
		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		pmb = &pmboxq->u.mb;
		mbx = (uint8_t *)dmabuf->virt;
		memcpy(pmb, mbx, sizeof(*pmb));
		pmb->mbxOwner = OWN_HOST;
		pmboxq->vport = phba->pport;

		/* callback for multi-buffer write mailbox command */
		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;

		/* context fields to callback function */
		pmboxq->context1 = dd_data;
		dd_data->type = TYPE_MBOX;
		dd_data->set_job = job;
		dd_data->context_un.mbox.pmboxq = pmboxq;
		dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
		job->dd_data = dd_data;

		/* state change */

		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2955 Issued SLI_CONFIG ext-buffer "
					"mailbox command, rc:x%x\n", rc);
			return SLI_CONFIG_HANDLED;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2956 Failed to issue SLI_CONFIG ext-buffer "
				"mailbox command, rc:x%x\n", rc);
		rc = -EPIPE;
		goto job_error;
	}

	/* wait for additional external buffers */

	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return SLI_CONFIG_HANDLED;

job_error:
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	kfree(dd_data);

	return rc;
}

/**
 * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine handles a SLI_CONFIG (0x9B) mailbox command with non-embedded
 * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
 * with embedded subsystem 0x1 and opcodes with external HBDs.
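 *
 * Dispatch summary (mirrors the switch below): embedded-0 (MSE) FCOE/COMN
 * opcodes are routed to the non-embedded read or write handler; embedded-1
 * (HBD) COMN READ_OBJECT/READ_OBJECT_LIST map to the read handler and
 * WRITE_OBJECT to the write handler. Anything else is either rejected
 * (-EPERM) or returned as SLI_CONFIG_NOT_HANDLED so the generic mailbox
 * path can still issue it.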
4174 **/ 4175 static int 4176 lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job, 4177 struct lpfc_dmabuf *dmabuf) 4178 { 4179 struct lpfc_sli_config_mbox *sli_cfg_mbx; 4180 uint32_t subsys; 4181 uint32_t opcode; 4182 int rc = SLI_CONFIG_NOT_HANDLED; 4183 4184 /* state change on new multi-buffer pass-through mailbox command */ 4185 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST; 4186 4187 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; 4188 4189 if (!bsg_bf_get(lpfc_mbox_hdr_emb, 4190 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) { 4191 subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys, 4192 &sli_cfg_mbx->un.sli_config_emb0_subsys); 4193 opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode, 4194 &sli_cfg_mbx->un.sli_config_emb0_subsys); 4195 if (subsys == SLI_CONFIG_SUBSYS_FCOE) { 4196 switch (opcode) { 4197 case FCOE_OPCODE_READ_FCF: 4198 case FCOE_OPCODE_GET_DPORT_RESULTS: 4199 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4200 "2957 Handled SLI_CONFIG " 4201 "subsys_fcoe, opcode:x%x\n", 4202 opcode); 4203 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job, 4204 nemb_mse, dmabuf); 4205 break; 4206 case FCOE_OPCODE_ADD_FCF: 4207 case FCOE_OPCODE_SET_DPORT_MODE: 4208 case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE: 4209 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4210 "2958 Handled SLI_CONFIG " 4211 "subsys_fcoe, opcode:x%x\n", 4212 opcode); 4213 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job, 4214 nemb_mse, dmabuf); 4215 break; 4216 default: 4217 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4218 "2959 Reject SLI_CONFIG " 4219 "subsys_fcoe, opcode:x%x\n", 4220 opcode); 4221 rc = -EPERM; 4222 break; 4223 } 4224 } else if (subsys == SLI_CONFIG_SUBSYS_COMN) { 4225 switch (opcode) { 4226 case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES: 4227 case COMN_OPCODE_GET_CNTL_ATTRIBUTES: 4228 case COMN_OPCODE_GET_PROFILE_CONFIG: 4229 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4230 "3106 Handled SLI_CONFIG " 4231 "subsys_comn, opcode:x%x\n", 4232 opcode); 4233 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job, 4234 nemb_mse, dmabuf); 4235 break; 4236 default: 4237 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4238 "3107 Reject SLI_CONFIG " 4239 "subsys_comn, opcode:x%x\n", 4240 opcode); 4241 rc = -EPERM; 4242 break; 4243 } 4244 } else { 4245 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4246 "2977 Reject SLI_CONFIG " 4247 "subsys:x%d, opcode:x%x\n", 4248 subsys, opcode); 4249 rc = -EPERM; 4250 } 4251 } else { 4252 subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys, 4253 &sli_cfg_mbx->un.sli_config_emb1_subsys); 4254 opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode, 4255 &sli_cfg_mbx->un.sli_config_emb1_subsys); 4256 if (subsys == SLI_CONFIG_SUBSYS_COMN) { 4257 switch (opcode) { 4258 case COMN_OPCODE_READ_OBJECT: 4259 case COMN_OPCODE_READ_OBJECT_LIST: 4260 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4261 "2960 Handled SLI_CONFIG " 4262 "subsys_comn, opcode:x%x\n", 4263 opcode); 4264 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job, 4265 nemb_hbd, dmabuf); 4266 break; 4267 case COMN_OPCODE_WRITE_OBJECT: 4268 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4269 "2961 Handled SLI_CONFIG " 4270 "subsys_comn, opcode:x%x\n", 4271 opcode); 4272 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job, 4273 nemb_hbd, dmabuf); 4274 break; 4275 default: 4276 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4277 "2962 Not handled SLI_CONFIG " 4278 "subsys_comn, opcode:x%x\n", 4279 opcode); 4280 rc = SLI_CONFIG_NOT_HANDLED; 4281 break; 4282 } 4283 } else { 4284 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4285 "2978 Not handled 
SLI_CONFIG "
					"subsys:x%d, opcode:x%x\n",
					subsys, opcode);
			rc = SLI_CONFIG_NOT_HANDLED;
		}
	}

	/* state reset on not handled new multi-buffer mailbox command */
	if (rc != SLI_CONFIG_HANDLED)
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;

	return rc;
}

/**
 * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
 * @phba: Pointer to HBA context object.
 *
 * This routine is for requesting to abort a pass-through mailbox command with
 * multiple external buffers due to an error condition.
 **/
static void
lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
{
	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
	else
		lpfc_bsg_mbox_ext_session_reset(phba);
	return;
}

/**
 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 *
 * This routine copies the next mailbox read external buffer back to
 * user space through BSG.
 **/
static int
lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
{
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	struct lpfc_dmabuf *dmabuf;
	uint8_t *pbuf;
	uint32_t size;
	uint32_t index;

	index = phba->mbox_ext_buf_ctx.seqNum;
	phba->mbox_ext_buf_ctx.seqNum++;

	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
		phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;

	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
		size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
			&sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2963 SLI_CONFIG (mse) ext-buffer rd get "
				"buffer[%d], size:%d\n", index, size);
	} else {
		size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
			&sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2964 SLI_CONFIG (hbd) ext-buffer rd get "
				"buffer[%d], size:%d\n", index, size);
	}
	if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
		return -EPIPE;
	dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
				  struct lpfc_dmabuf, list);
	list_del_init(&dmabuf->list);

	/* after dma buffer descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
					mbox_rd, dma_ebuf, sta_pos_addr,
					dmabuf, index);

	pbuf = (uint8_t *)dmabuf->virt;
	bsg_reply->reply_payload_rcv_len =
		sg_copy_from_buffer(job->reply_payload.sg_list,
				    job->reply_payload.sg_cnt,
				    pbuf, size);

	lpfc_bsg_dma_page_free(phba, dmabuf);

	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2965 SLI_CONFIG ext-buffer rd mbox "
				"command session done\n");
		lpfc_bsg_mbox_ext_session_reset(phba);
	}

	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);

	return SLI_CONFIG_HANDLED;
}

/**
 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine sets up the next mailbox write external buffer obtained
 * from user space through BSG.
 **/
static int
lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
			struct lpfc_dmabuf *dmabuf)
{
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct bsg_job_data *dd_data = NULL;
	LPFC_MBOXQ_t *pmboxq = NULL;
	MAILBOX_t *pmb;
	enum nemb_type nemb_tp;
	uint8_t *pbuf;
	uint32_t size;
	uint32_t index;
	int rc;

	index = phba->mbox_ext_buf_ctx.seqNum;
	phba->mbox_ext_buf_ctx.seqNum++;
	nemb_tp = phba->mbox_ext_buf_ctx.nembType;

	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		rc = -ENOMEM;
		goto job_error;
	}

	pbuf = (uint8_t *)dmabuf->virt;
	size = job->request_payload.payload_len;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  pbuf, size);

	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2966 SLI_CONFIG (mse) ext-buffer wr set "
				"buffer[%d], size:%d\n",
				phba->mbox_ext_buf_ctx.seqNum, size);

	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2967 SLI_CONFIG (hbd) ext-buffer wr set "
				"buffer[%d], size:%d\n",
				phba->mbox_ext_buf_ctx.seqNum, size);

	}

	/* set up external buffer descriptor and add to external buffer list */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
					phba->mbox_ext_buf_ctx.mbx_dmabuf,
					dmabuf);
	list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	/* after write dma buffer */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
					mbox_wr, dma_ebuf, sta_pos_addr,
					dmabuf, index);

	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2968 SLI_CONFIG ext-buffer wr all %d "
				"ebuffers received\n",
				phba->mbox_ext_buf_ctx.numBuf);
		/* mailbox command structure for base driver */
		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!pmboxq) {
			rc = -ENOMEM;
			goto job_error;
		}
		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
		pmb = &pmboxq->u.mb;
		memcpy(pmb, pbuf, sizeof(*pmb));
		pmb->mbxOwner = OWN_HOST;
		pmboxq->vport = phba->pport;

		/* callback for multi-buffer write mailbox command */
		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;

		/* context fields to callback function */
		pmboxq->context1 = dd_data;
		dd_data->type = TYPE_MBOX;
		dd_data->set_job = job;
		dd_data->context_un.mbox.pmboxq = pmboxq;
		dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
		job->dd_data = dd_data;

		/* state change */
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;

		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2969 Issued SLI_CONFIG ext-buffer "
					"mailbox command, rc:x%x\n", rc);
			return SLI_CONFIG_HANDLED;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2970 Failed to issue SLI_CONFIG ext-buffer "
				"mailbox command, rc:x%x\n", rc);
		rc = -EPIPE;
		goto job_error;
	}

	/* wait for additional external buffers */
	bsg_reply->result = 0;
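	/* Intermediate buffers complete their BSG job immediately; only the
	 * final buffer's job (set up above) stays outstanding until the write
	 * mailbox completion handler finishes it.
	 */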
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return SLI_CONFIG_HANDLED;

job_error:
	lpfc_bsg_dma_page_free(phba, dmabuf);
	kfree(dd_data);

	return rc;
}

/**
 * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine handles an external buffer for a SLI_CONFIG (0x9B) mailbox
 * command with multiple non-embedded external buffers.
 **/
static int
lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
			     struct lpfc_dmabuf *dmabuf)
{
	int rc;

	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"2971 SLI_CONFIG buffer (type:x%x)\n",
			phba->mbox_ext_buf_ctx.mboxType);

	if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2972 SLI_CONFIG rd buffer state "
					"mismatch:x%x\n",
					phba->mbox_ext_buf_ctx.state);
			lpfc_bsg_mbox_ext_abort(phba);
			return -EPIPE;
		}
		rc = lpfc_bsg_read_ebuf_get(phba, job);
		if (rc == SLI_CONFIG_HANDLED)
			lpfc_bsg_dma_page_free(phba, dmabuf);
	} else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2973 SLI_CONFIG wr buffer state "
					"mismatch:x%x\n",
					phba->mbox_ext_buf_ctx.state);
			lpfc_bsg_mbox_ext_abort(phba);
			return -EPIPE;
		}
		rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
	}
	return rc;
}

/**
 * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
 * (0x9B) mailbox commands and external buffers.
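 *
 * Session protocol (as enforced below): a new session may only start from
 * the IDLE state with extSeqNum 1; each follow-up buffer must carry the
 * session's extMboxTag and the next expected extSeqNum, and must not exceed
 * the advertised buffer count. Any violation is treated as a broken pipe
 * and resets the session.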
 **/
static int
lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job,
			    struct lpfc_dmabuf *dmabuf)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct dfc_mbox_req *mbox_req;
	int rc = SLI_CONFIG_NOT_HANDLED;

	mbox_req =
	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;

	/* mbox command with/without single external buffer */
	if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
		return rc;

	/* mbox command and first external buffer */
	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
		if (mbox_req->extSeqNum == 1) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2974 SLI_CONFIG mailbox: tag:%d, "
					"seq:%d\n", mbox_req->extMboxTag,
					mbox_req->extSeqNum);
			rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
			return rc;
		} else
			goto sli_cfg_ext_error;
	}

	/*
	 * handle additional external buffers
	 */

	/* check broken pipe conditions */
	if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
		goto sli_cfg_ext_error;
	if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
		goto sli_cfg_ext_error;
	if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
		goto sli_cfg_ext_error;

	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"2975 SLI_CONFIG mailbox external buffer: "
			"extSta:x%x, tag:%d, seq:%d\n",
			phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
			mbox_req->extSeqNum);
	rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
	return rc;

sli_cfg_ext_error:
	/* all other cases, broken pipe */
	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
			"2976 SLI_CONFIG mailbox broken pipe: "
			"ctxSta:x%x, ctxNumBuf:%d "
			"ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
			phba->mbox_ext_buf_ctx.state,
			phba->mbox_ext_buf_ctx.numBuf,
			phba->mbox_ext_buf_ctx.mbxTag,
			phba->mbox_ext_buf_ctx.seqNum,
			mbox_req->extMboxTag, mbox_req->extSeqNum);

	lpfc_bsg_mbox_ext_session_reset(phba);

	return -EPIPE;
}

/**
 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 * @vport: Pointer to a vport object.
 *
 * Allocate a tracking object and mailbox command memory, get a mailbox
 * from the mailbox pool, and copy in the caller's mailbox command.
 *
 * If the port is offline or the SLI layer is not active, poll for the
 * command's completion (the port may be being reset) and complete the job;
 * otherwise issue the mailbox command and let our completion handler
 * finish the command.
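 *
 * Return semantics (per the code below): 1 when the mailbox was issued
 * asynchronously and the job will be completed by the completion handler;
 * 0 when the command completed inline and the job is already done; a
 * negative errno on failure. SLI_CONFIG multi-buffer passes are handled
 * separately via lpfc_bsg_handle_sli_cfg_ext().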
4639 **/ 4640 static int 4641 lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job, 4642 struct lpfc_vport *vport) 4643 { 4644 struct fc_bsg_request *bsg_request = job->request; 4645 struct fc_bsg_reply *bsg_reply = job->reply; 4646 LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */ 4647 MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */ 4648 /* a 4k buffer to hold the mb and extended data from/to the bsg */ 4649 uint8_t *pmbx = NULL; 4650 struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */ 4651 struct lpfc_dmabuf *dmabuf = NULL; 4652 struct dfc_mbox_req *mbox_req; 4653 struct READ_EVENT_LOG_VAR *rdEventLog; 4654 uint32_t transmit_length, receive_length, mode; 4655 struct lpfc_mbx_sli4_config *sli4_config; 4656 struct lpfc_mbx_nembed_cmd *nembed_sge; 4657 struct ulp_bde64 *bde; 4658 uint8_t *ext = NULL; 4659 int rc = 0; 4660 uint8_t *from; 4661 uint32_t size; 4662 4663 /* in case no data is transferred */ 4664 bsg_reply->reply_payload_rcv_len = 0; 4665 4666 /* sanity check to protect driver */ 4667 if (job->reply_payload.payload_len > BSG_MBOX_SIZE || 4668 job->request_payload.payload_len > BSG_MBOX_SIZE) { 4669 rc = -ERANGE; 4670 goto job_done; 4671 } 4672 4673 /* 4674 * Don't allow mailbox commands to be sent when blocked or when in 4675 * the middle of discovery 4676 */ 4677 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { 4678 rc = -EAGAIN; 4679 goto job_done; 4680 } 4681 4682 mbox_req = 4683 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd; 4684 4685 /* check if requested extended data lengths are valid */ 4686 if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) || 4687 (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) { 4688 rc = -ERANGE; 4689 goto job_done; 4690 } 4691 4692 dmabuf = lpfc_bsg_dma_page_alloc(phba); 4693 if (!dmabuf || !dmabuf->virt) { 4694 rc = -ENOMEM; 4695 goto job_done; 4696 } 4697 4698 /* Get the mailbox command or external buffer from BSG */ 4699 pmbx = (uint8_t *)dmabuf->virt; 4700 size = job->request_payload.payload_len; 4701 sg_copy_to_buffer(job->request_payload.sg_list, 4702 job->request_payload.sg_cnt, pmbx, size); 4703 4704 /* Handle possible SLI_CONFIG with non-embedded payloads */ 4705 if (phba->sli_rev == LPFC_SLI_REV4) { 4706 rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf); 4707 if (rc == SLI_CONFIG_HANDLED) 4708 goto job_cont; 4709 if (rc) 4710 goto job_done; 4711 /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */ 4712 } 4713 4714 rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport); 4715 if (rc != 0) 4716 goto job_done; /* must be negative */ 4717 4718 /* allocate our bsg tracking structure */ 4719 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 4720 if (!dd_data) { 4721 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 4722 "2727 Failed allocation of dd_data\n"); 4723 rc = -ENOMEM; 4724 goto job_done; 4725 } 4726 4727 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4728 if (!pmboxq) { 4729 rc = -ENOMEM; 4730 goto job_done; 4731 } 4732 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 4733 4734 pmb = &pmboxq->u.mb; 4735 memcpy(pmb, pmbx, sizeof(*pmb)); 4736 pmb->mbxOwner = OWN_HOST; 4737 pmboxq->vport = vport; 4738 4739 /* If HBA encountered an error attention, allow only DUMP 4740 * or RESTART mailbox commands until the HBA is restarted. 
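	 * (MBX_WRITE_VPARMS and MBX_WRITE_WWN are also permitted by the test
	 * below; note that the check only logs a warning and does not block
	 * the command.)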
	 */
	if (phba->pport->stopped &&
	    pmb->mbxCommand != MBX_DUMP_MEMORY &&
	    pmb->mbxCommand != MBX_RESTART &&
	    pmb->mbxCommand != MBX_WRITE_VPARMS &&
	    pmb->mbxCommand != MBX_WRITE_WWN)
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"2797 mbox: Issued mailbox cmd "
				"0x%x while in stopped state.\n",
				pmb->mbxCommand);

	/* extended mailbox commands will need an extended buffer */
	if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
		from = pmbx;
		ext = from + sizeof(MAILBOX_t);
		pmboxq->context2 = ext;
		pmboxq->in_ext_byte_len =
			mbox_req->inExtWLen * sizeof(uint32_t);
		pmboxq->out_ext_byte_len =
			mbox_req->outExtWLen * sizeof(uint32_t);
		pmboxq->mbox_offset_word = mbox_req->mbOffset;
	}

	/* biu diag will need a kernel buffer to transfer the data;
	 * allocate our own buffer and set up the mailbox command to
	 * use ours
	 */
	if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
		transmit_length = pmb->un.varWords[1];
		receive_length = pmb->un.varWords[4];
		/* transmit length cannot be greater than receive length or
		 * mailbox extension size
		 */
		if ((transmit_length > receive_length) ||
		    (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
			rc = -ERANGE;
			goto job_done;
		}
		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));

		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
	} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
		rdEventLog = &pmb->un.varRdEventLog;
		receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
		mode = bf_get(lpfc_event_log, rdEventLog);

		/* receive length cannot be greater than mailbox
		 * extension size
		 */
		if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
			rc = -ERANGE;
			goto job_done;
		}

		/* mode zero uses a bde like biu diags command */
		if (mode == 0) {
			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
							+ sizeof(MAILBOX_t));
			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
							+ sizeof(MAILBOX_t));
		}
	} else if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Let type 4 (well known data) through because the data is
		 * returned in varWords[4-8];
		 * otherwise check the receive length and fetch the buffer addr
		 */
		if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
		    (pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
			/* rebuild the command for sli4 using our own buffers
			 * like we do for biu diags
			 */
			receive_length = pmb->un.varWords[2];
			/* a zero receive length is invalid */
			if (receive_length == 0) {
				rc = -ERANGE;
				goto job_done;
			}
			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
						+ sizeof(MAILBOX_t));
			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
						+ sizeof(MAILBOX_t));
		} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
			pmb->un.varUpdateCfg.co) {
			bde = (struct ulp_bde64 *)&pmb->un.varWords[4];

			/* bde size cannot be greater than mailbox ext size */
			if (bde->tus.f.bdeSize >
BSG_MBOX_SIZE - sizeof(MAILBOX_t)) { 4839 rc = -ERANGE; 4840 goto job_done; 4841 } 4842 bde->addrHigh = putPaddrHigh(dmabuf->phys 4843 + sizeof(MAILBOX_t)); 4844 bde->addrLow = putPaddrLow(dmabuf->phys 4845 + sizeof(MAILBOX_t)); 4846 } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) { 4847 /* Handling non-embedded SLI_CONFIG mailbox command */ 4848 sli4_config = &pmboxq->u.mqe.un.sli4_config; 4849 if (!bf_get(lpfc_mbox_hdr_emb, 4850 &sli4_config->header.cfg_mhdr)) { 4851 /* rebuild the command for sli4 using our 4852 * own buffers like we do for biu diags 4853 */ 4854 nembed_sge = (struct lpfc_mbx_nembed_cmd *) 4855 &pmb->un.varWords[0]; 4856 receive_length = nembed_sge->sge[0].length; 4857 4858 /* receive length cannot be greater than 4859 * mailbox extension size 4860 */ 4861 if ((receive_length == 0) || 4862 (receive_length > 4863 BSG_MBOX_SIZE - sizeof(MAILBOX_t))) { 4864 rc = -ERANGE; 4865 goto job_done; 4866 } 4867 4868 nembed_sge->sge[0].pa_hi = 4869 putPaddrHigh(dmabuf->phys 4870 + sizeof(MAILBOX_t)); 4871 nembed_sge->sge[0].pa_lo = 4872 putPaddrLow(dmabuf->phys 4873 + sizeof(MAILBOX_t)); 4874 } 4875 } 4876 } 4877 4878 dd_data->context_un.mbox.dmabuffers = dmabuf; 4879 4880 /* setup wake call as IOCB callback */ 4881 pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl; 4882 4883 /* setup context field to pass wait_queue pointer to wake function */ 4884 pmboxq->context1 = dd_data; 4885 dd_data->type = TYPE_MBOX; 4886 dd_data->set_job = job; 4887 dd_data->context_un.mbox.pmboxq = pmboxq; 4888 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx; 4889 dd_data->context_un.mbox.ext = ext; 4890 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset; 4891 dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen; 4892 dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen; 4893 job->dd_data = dd_data; 4894 4895 if ((vport->fc_flag & FC_OFFLINE_MODE) || 4896 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) { 4897 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 4898 if (rc != MBX_SUCCESS) { 4899 rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV; 4900 goto job_done; 4901 } 4902 4903 /* job finished, copy the data */ 4904 memcpy(pmbx, pmb, sizeof(*pmb)); 4905 bsg_reply->reply_payload_rcv_len = 4906 sg_copy_from_buffer(job->reply_payload.sg_list, 4907 job->reply_payload.sg_cnt, 4908 pmbx, size); 4909 /* not waiting mbox already done */ 4910 rc = 0; 4911 goto job_done; 4912 } 4913 4914 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 4915 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) 4916 return 1; /* job started */ 4917 4918 job_done: 4919 /* common exit for error or job completed inline */ 4920 if (pmboxq) 4921 mempool_free(pmboxq, phba->mbox_mem_pool); 4922 lpfc_bsg_dma_page_free(phba, dmabuf); 4923 kfree(dd_data); 4924 4925 job_cont: 4926 return rc; 4927 } 4928 4929 /** 4930 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command 4931 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX. 
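 *
 * A sketch of the dispatch below: lpfc_bsg_issue_mbox() returning 0 means
 * the command completed inline and the job is finished here; 1 means the
 * command was issued and the completion handler will call bsg_job_done();
 * any other value is reported back to user space as the job result.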
4932 **/ 4933 static int 4934 lpfc_bsg_mbox_cmd(struct bsg_job *job) 4935 { 4936 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); 4937 struct fc_bsg_request *bsg_request = job->request; 4938 struct fc_bsg_reply *bsg_reply = job->reply; 4939 struct lpfc_hba *phba = vport->phba; 4940 struct dfc_mbox_req *mbox_req; 4941 int rc = 0; 4942 4943 /* mix-and-match backward compatibility */ 4944 bsg_reply->reply_payload_rcv_len = 0; 4945 if (job->request_len < 4946 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) { 4947 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4948 "2737 Mix-and-match backward compatibility " 4949 "between MBOX_REQ old size:%d and " 4950 "new request size:%d\n", 4951 (int)(job->request_len - 4952 sizeof(struct fc_bsg_request)), 4953 (int)sizeof(struct dfc_mbox_req)); 4954 mbox_req = (struct dfc_mbox_req *) 4955 bsg_request->rqst_data.h_vendor.vendor_cmd; 4956 mbox_req->extMboxTag = 0; 4957 mbox_req->extSeqNum = 0; 4958 } 4959 4960 rc = lpfc_bsg_issue_mbox(phba, job, vport); 4961 4962 if (rc == 0) { 4963 /* job done */ 4964 bsg_reply->result = 0; 4965 job->dd_data = NULL; 4966 bsg_job_done(job, bsg_reply->result, 4967 bsg_reply->reply_payload_rcv_len); 4968 } else if (rc == 1) 4969 /* job submitted, will complete later*/ 4970 rc = 0; /* return zero, no error */ 4971 else { 4972 /* some error occurred */ 4973 bsg_reply->result = rc; 4974 job->dd_data = NULL; 4975 } 4976 4977 return rc; 4978 } 4979 4980 /** 4981 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler 4982 * @phba: Pointer to HBA context object. 4983 * @cmdiocbq: Pointer to command iocb. 4984 * @rspiocbq: Pointer to response iocb. 4985 * 4986 * This function is the completion handler for iocbs issued using 4987 * lpfc_menlo_cmd function. This function is called by the 4988 * ring event handler function without any lock held. This function 4989 * can be called from both worker thread context and interrupt 4990 * context. This function also can be called from another thread which 4991 * cleans up the SLI layer objects. 4992 * This function copies the contents of the response iocb to the 4993 * response iocb memory object provided by the caller of 4994 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 4995 * sleeps for the iocb completion. 4996 **/ 4997 static void 4998 lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba, 4999 struct lpfc_iocbq *cmdiocbq, 5000 struct lpfc_iocbq *rspiocbq) 5001 { 5002 struct bsg_job_data *dd_data; 5003 struct bsg_job *job; 5004 struct fc_bsg_reply *bsg_reply; 5005 IOCB_t *rsp; 5006 struct lpfc_dmabuf *bmp, *cmp, *rmp; 5007 struct lpfc_bsg_menlo *menlo; 5008 unsigned long flags; 5009 struct menlo_response *menlo_resp; 5010 unsigned int rsp_size; 5011 int rc = 0; 5012 5013 dd_data = cmdiocbq->context1; 5014 cmp = cmdiocbq->context2; 5015 bmp = cmdiocbq->context3; 5016 menlo = &dd_data->context_un.menlo; 5017 rmp = menlo->rmp; 5018 rsp = &rspiocbq->iocb; 5019 5020 /* Determine if job has been aborted */ 5021 spin_lock_irqsave(&phba->ct_ev_lock, flags); 5022 job = dd_data->set_job; 5023 if (job) { 5024 bsg_reply = job->reply; 5025 /* Prevent timeout handling from trying to abort job */ 5026 job->dd_data = NULL; 5027 } 5028 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5029 5030 /* Copy the job data or set the failing status for the job */ 5031 5032 if (job) { 5033 /* always return the xri, this would be used in the case 5034 * of a menlo download to allow the data to be sent as a 5035 * continuation of the exchange. 
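		 * (A follow-up LPFC_BSG_VENDOR_MENLO_DATA request passes this
		 * xri back in its menlo_command header so lpfc_menlo_cmd()
		 * can issue a CMD_GEN_REQUEST64_CX on the same exchange.)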
5036 */ 5037 5038 menlo_resp = (struct menlo_response *) 5039 bsg_reply->reply_data.vendor_reply.vendor_rsp; 5040 menlo_resp->xri = rsp->ulpContext; 5041 if (rsp->ulpStatus) { 5042 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 5043 switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) { 5044 case IOERR_SEQUENCE_TIMEOUT: 5045 rc = -ETIMEDOUT; 5046 break; 5047 case IOERR_INVALID_RPI: 5048 rc = -EFAULT; 5049 break; 5050 default: 5051 rc = -EACCES; 5052 break; 5053 } 5054 } else { 5055 rc = -EACCES; 5056 } 5057 } else { 5058 rsp_size = rsp->un.genreq64.bdl.bdeSize; 5059 bsg_reply->reply_payload_rcv_len = 5060 lpfc_bsg_copy_data(rmp, &job->reply_payload, 5061 rsp_size, 0); 5062 } 5063 5064 } 5065 5066 lpfc_sli_release_iocbq(phba, cmdiocbq); 5067 lpfc_free_bsg_buffers(phba, cmp); 5068 lpfc_free_bsg_buffers(phba, rmp); 5069 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 5070 kfree(bmp); 5071 kfree(dd_data); 5072 5073 /* Complete the job if active */ 5074 5075 if (job) { 5076 bsg_reply->result = rc; 5077 bsg_job_done(job, bsg_reply->result, 5078 bsg_reply->reply_payload_rcv_len); 5079 } 5080 5081 return; 5082 } 5083 5084 /** 5085 * lpfc_menlo_cmd - send an ioctl for menlo hardware 5086 * @job: fc_bsg_job to handle 5087 * 5088 * This function issues a gen request 64 CR ioctl for all menlo cmd requests, 5089 * all the command completions will return the xri for the command. 5090 * For menlo data requests a gen request 64 CX is used to continue the exchange 5091 * supplied in the menlo request header xri field. 5092 **/ 5093 static int 5094 lpfc_menlo_cmd(struct bsg_job *job) 5095 { 5096 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); 5097 struct fc_bsg_request *bsg_request = job->request; 5098 struct fc_bsg_reply *bsg_reply = job->reply; 5099 struct lpfc_hba *phba = vport->phba; 5100 struct lpfc_iocbq *cmdiocbq; 5101 IOCB_t *cmd; 5102 int rc = 0; 5103 struct menlo_command *menlo_cmd; 5104 struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL; 5105 int request_nseg; 5106 int reply_nseg; 5107 struct bsg_job_data *dd_data; 5108 struct ulp_bde64 *bpl = NULL; 5109 5110 /* in case no data is returned return just the return code */ 5111 bsg_reply->reply_payload_rcv_len = 0; 5112 5113 if (job->request_len < 5114 sizeof(struct fc_bsg_request) + 5115 sizeof(struct menlo_command)) { 5116 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 5117 "2784 Received MENLO_CMD request below " 5118 "minimum size\n"); 5119 rc = -ERANGE; 5120 goto no_dd_data; 5121 } 5122 5123 if (job->reply_len < 5124 sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) { 5125 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 5126 "2785 Received MENLO_CMD reply below " 5127 "minimum size\n"); 5128 rc = -ERANGE; 5129 goto no_dd_data; 5130 } 5131 5132 if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) { 5133 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 5134 "2786 Adapter does not support menlo " 5135 "commands\n"); 5136 rc = -EPERM; 5137 goto no_dd_data; 5138 } 5139 5140 menlo_cmd = (struct menlo_command *) 5141 bsg_request->rqst_data.h_vendor.vendor_cmd; 5142 5143 /* allocate our bsg tracking structure */ 5144 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 5145 if (!dd_data) { 5146 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 5147 "2787 Failed allocation of dd_data\n"); 5148 rc = -ENOMEM; 5149 goto no_dd_data; 5150 } 5151 5152 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5153 if (!bmp) { 5154 rc = -ENOMEM; 5155 goto free_dd; 5156 } 5157 5158 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); 5159 if 
(!bmp->virt) { 5160 rc = -ENOMEM; 5161 goto free_bmp; 5162 } 5163 5164 INIT_LIST_HEAD(&bmp->list); 5165 5166 bpl = (struct ulp_bde64 *)bmp->virt; 5167 request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64); 5168 cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len, 5169 1, bpl, &request_nseg); 5170 if (!cmp) { 5171 rc = -ENOMEM; 5172 goto free_bmp; 5173 } 5174 lpfc_bsg_copy_data(cmp, &job->request_payload, 5175 job->request_payload.payload_len, 1); 5176 5177 bpl += request_nseg; 5178 reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg; 5179 rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0, 5180 bpl, &reply_nseg); 5181 if (!rmp) { 5182 rc = -ENOMEM; 5183 goto free_cmp; 5184 } 5185 5186 cmdiocbq = lpfc_sli_get_iocbq(phba); 5187 if (!cmdiocbq) { 5188 rc = -ENOMEM; 5189 goto free_rmp; 5190 } 5191 5192 cmd = &cmdiocbq->iocb; 5193 cmd->un.genreq64.bdl.ulpIoTag32 = 0; 5194 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); 5195 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys); 5196 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 5197 cmd->un.genreq64.bdl.bdeSize = 5198 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64); 5199 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); 5200 cmd->un.genreq64.w5.hcsw.Dfctl = 0; 5201 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD; 5202 cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */ 5203 cmd->ulpBdeCount = 1; 5204 cmd->ulpClass = CLASS3; 5205 cmd->ulpOwner = OWN_CHIP; 5206 cmd->ulpLe = 1; /* Limited Edition */ 5207 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 5208 cmdiocbq->vport = phba->pport; 5209 /* We want the firmware to timeout before we do */ 5210 cmd->ulpTimeout = MENLO_TIMEOUT - 5; 5211 cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp; 5212 cmdiocbq->context1 = dd_data; 5213 cmdiocbq->context2 = cmp; 5214 cmdiocbq->context3 = bmp; 5215 if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) { 5216 cmd->ulpCommand = CMD_GEN_REQUEST64_CR; 5217 cmd->ulpPU = MENLO_PU; /* 3 */ 5218 cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */ 5219 cmd->ulpContext = MENLO_CONTEXT; /* 0 */ 5220 } else { 5221 cmd->ulpCommand = CMD_GEN_REQUEST64_CX; 5222 cmd->ulpPU = 1; 5223 cmd->un.ulpWord[4] = 0; 5224 cmd->ulpContext = menlo_cmd->xri; 5225 } 5226 5227 dd_data->type = TYPE_MENLO; 5228 dd_data->set_job = job; 5229 dd_data->context_un.menlo.cmdiocbq = cmdiocbq; 5230 dd_data->context_un.menlo.rmp = rmp; 5231 job->dd_data = dd_data; 5232 5233 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 5234 MENLO_TIMEOUT - 5); 5235 if (rc == IOCB_SUCCESS) 5236 return 0; /* done for now */ 5237 5238 lpfc_sli_release_iocbq(phba, cmdiocbq); 5239 5240 free_rmp: 5241 lpfc_free_bsg_buffers(phba, rmp); 5242 free_cmp: 5243 lpfc_free_bsg_buffers(phba, cmp); 5244 free_bmp: 5245 if (bmp->virt) 5246 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 5247 kfree(bmp); 5248 free_dd: 5249 kfree(dd_data); 5250 no_dd_data: 5251 /* make error code available to userspace */ 5252 bsg_reply->result = rc; 5253 job->dd_data = NULL; 5254 return rc; 5255 } 5256 5257 static int 5258 lpfc_forced_link_speed(struct bsg_job *job) 5259 { 5260 struct Scsi_Host *shost = fc_bsg_to_shost(job); 5261 struct lpfc_vport *vport = shost_priv(shost); 5262 struct lpfc_hba *phba = vport->phba; 5263 struct fc_bsg_reply *bsg_reply = job->reply; 5264 struct forced_link_speed_support_reply *forced_reply; 5265 int rc = 0; 5266 5267 if (job->request_len < 5268 sizeof(struct fc_bsg_request) + 5269 sizeof(struct get_forced_link_speed_support)) { 5270 lpfc_printf_log(phba, 
KERN_WARNING, LOG_LIBDFC, 5271 "0048 Received FORCED_LINK_SPEED request " 5272 "below minimum size\n"); 5273 rc = -EINVAL; 5274 goto job_error; 5275 } 5276 5277 forced_reply = (struct forced_link_speed_support_reply *) 5278 bsg_reply->reply_data.vendor_reply.vendor_rsp; 5279 5280 if (job->reply_len < 5281 sizeof(struct fc_bsg_request) + 5282 sizeof(struct forced_link_speed_support_reply)) { 5283 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 5284 "0049 Received FORCED_LINK_SPEED reply below " 5285 "minimum size\n"); 5286 rc = -EINVAL; 5287 goto job_error; 5288 } 5289 5290 forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED) 5291 ? LPFC_FORCED_LINK_SPEED_SUPPORTED 5292 : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED; 5293 job_error: 5294 bsg_reply->result = rc; 5295 if (rc == 0) 5296 bsg_job_done(job, bsg_reply->result, 5297 bsg_reply->reply_payload_rcv_len); 5298 return rc; 5299 } 5300 5301 /** 5302 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job 5303 * @job: fc_bsg_job to handle 5304 **/ 5305 static int 5306 lpfc_bsg_hst_vendor(struct bsg_job *job) 5307 { 5308 struct fc_bsg_request *bsg_request = job->request; 5309 struct fc_bsg_reply *bsg_reply = job->reply; 5310 int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0]; 5311 int rc; 5312 5313 switch (command) { 5314 case LPFC_BSG_VENDOR_SET_CT_EVENT: 5315 rc = lpfc_bsg_hba_set_event(job); 5316 break; 5317 case LPFC_BSG_VENDOR_GET_CT_EVENT: 5318 rc = lpfc_bsg_hba_get_event(job); 5319 break; 5320 case LPFC_BSG_VENDOR_SEND_MGMT_RESP: 5321 rc = lpfc_bsg_send_mgmt_rsp(job); 5322 break; 5323 case LPFC_BSG_VENDOR_DIAG_MODE: 5324 rc = lpfc_bsg_diag_loopback_mode(job); 5325 break; 5326 case LPFC_BSG_VENDOR_DIAG_MODE_END: 5327 rc = lpfc_sli4_bsg_diag_mode_end(job); 5328 break; 5329 case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK: 5330 rc = lpfc_bsg_diag_loopback_run(job); 5331 break; 5332 case LPFC_BSG_VENDOR_LINK_DIAG_TEST: 5333 rc = lpfc_sli4_bsg_link_diag_test(job); 5334 break; 5335 case LPFC_BSG_VENDOR_GET_MGMT_REV: 5336 rc = lpfc_bsg_get_dfc_rev(job); 5337 break; 5338 case LPFC_BSG_VENDOR_MBOX: 5339 rc = lpfc_bsg_mbox_cmd(job); 5340 break; 5341 case LPFC_BSG_VENDOR_MENLO_CMD: 5342 case LPFC_BSG_VENDOR_MENLO_DATA: 5343 rc = lpfc_menlo_cmd(job); 5344 break; 5345 case LPFC_BSG_VENDOR_FORCED_LINK_SPEED: 5346 rc = lpfc_forced_link_speed(job); 5347 break; 5348 default: 5349 rc = -EINVAL; 5350 bsg_reply->reply_payload_rcv_len = 0; 5351 /* make error code available to userspace */ 5352 bsg_reply->result = rc; 5353 break; 5354 } 5355 5356 return rc; 5357 } 5358 5359 /** 5360 * lpfc_bsg_request - handle a bsg request from the FC transport 5361 * @job: fc_bsg_job to handle 5362 **/ 5363 int 5364 lpfc_bsg_request(struct bsg_job *job) 5365 { 5366 struct fc_bsg_request *bsg_request = job->request; 5367 struct fc_bsg_reply *bsg_reply = job->reply; 5368 uint32_t msgcode; 5369 int rc; 5370 5371 msgcode = bsg_request->msgcode; 5372 switch (msgcode) { 5373 case FC_BSG_HST_VENDOR: 5374 rc = lpfc_bsg_hst_vendor(job); 5375 break; 5376 case FC_BSG_RPT_ELS: 5377 rc = lpfc_bsg_rport_els(job); 5378 break; 5379 case FC_BSG_RPT_CT: 5380 rc = lpfc_bsg_send_mgmt_cmd(job); 5381 break; 5382 default: 5383 rc = -EINVAL; 5384 bsg_reply->reply_payload_rcv_len = 0; 5385 /* make error code available to userspace */ 5386 bsg_reply->result = rc; 5387 break; 5388 } 5389 5390 return rc; 5391 } 5392 5393 /** 5394 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport 5395 * @job: fc_bsg_job that has timed out 5396 * 5397 * This function just aborts the 
job's IOCB. The aborted IOCB will return to
 * the waiting function which will handle passing the error back to userspace
 **/
int
lpfc_bsg_timeout(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct bsg_job_data *dd_data;
	unsigned long flags;
	int rc = 0;
	LIST_HEAD(completions);
	struct lpfc_iocbq *check_iocb, *next_iocb;

	/* if the job's driver data is NULL, the command completed or is in
	 * the process of completing. In this case, return a status to the
	 * request so the timeout is retried. This avoids double completion
	 * issues and the request will be pulled off the timer queue when the
	 * command's completion handler executes. Otherwise, prevent the
	 * command's completion handler from executing the job done callback
	 * and continue processing to abort the outstanding command.
	 */

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = (struct bsg_job_data *)job->dd_data;
	if (dd_data) {
		dd_data->set_job = NULL;
		job->dd_data = NULL;
	} else {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return -EAGAIN;
	}

	switch (dd_data->type) {
	case TYPE_IOCB:
		/* Check to see if the IOCB was issued to the port or not. If
		 * not, remove it from the txq and call cancel iocbs.
		 * Otherwise, call abort iotag
		 */
		cmdiocb = dd_data->context_un.iocb.cmdiocbq;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O abort window is still open */
		if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return -EAGAIN;
		}
		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
					 list) {
			if (check_iocb == cmdiocb) {
				list_move_tail(&check_iocb->list, &completions);
				break;
			}
		}
		if (list_empty(&completions))
			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		if (!list_empty(&completions)) {
			lpfc_sli_cancel_iocbs(phba, &completions,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_ABORTED);
		}
		break;

	case TYPE_EVT:
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;

	case TYPE_MBOX:
		/* Update the ext buf ctx state if needed */

		if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
			phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	case TYPE_MENLO:
		/* Check to see if the IOCB was issued to the port or not. If
		 * not, remove it from the txq and call cancel iocbs.
		 * Otherwise, call abort iotag.
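		 * (Same scheme as TYPE_IOCB above: an iocb still on the txq is
		 * cancelled locally with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED,
		 * while one already issued to the port is aborted by iotag.)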
5479 */ 5480 cmdiocb = dd_data->context_un.menlo.cmdiocbq; 5481 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5482 5483 spin_lock_irqsave(&phba->hbalock, flags); 5484 list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, 5485 list) { 5486 if (check_iocb == cmdiocb) { 5487 list_move_tail(&check_iocb->list, &completions); 5488 break; 5489 } 5490 } 5491 if (list_empty(&completions)) 5492 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); 5493 spin_unlock_irqrestore(&phba->hbalock, flags); 5494 if (!list_empty(&completions)) { 5495 lpfc_sli_cancel_iocbs(phba, &completions, 5496 IOSTAT_LOCAL_REJECT, 5497 IOERR_SLI_ABORTED); 5498 } 5499 break; 5500 default: 5501 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5502 break; 5503 } 5504 5505 /* scsi transport fc fc_bsg_job_timeout expects a zero return code, 5506 * otherwise an error message will be displayed on the console 5507 * so always return success (zero) 5508 */ 5509 return rc; 5510 } 5511
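
/*
 * Usage sketch (illustrative only, not part of the driver): a user-space
 * management application reaches lpfc_bsg_request() through the FC
 * transport's bsg node, typically /dev/bsg/fc_host<N>. The fragment below
 * shows one plausible way to issue the LPFC_BSG_VENDOR_GET_MGMT_REV vendor
 * command with the sg v4 interface; error handling is omitted, the device
 * path is an example, and the exact reply layout is defined by lpfc_bsg.h.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>		// struct sg_io_v4, SG_IO
 *	#include <scsi/scsi_bsg_fc.h>	// struct fc_bsg_request
 *
 *	// room for the request header plus one vendor command word
 *	unsigned char rqst[sizeof(struct fc_bsg_request) + sizeof(uint32_t)];
 *	unsigned char rply[256];	// receives struct fc_bsg_reply + data
 *	struct fc_bsg_request *req = (struct fc_bsg_request *)rqst;
 *	struct sg_io_v4 sgio;
 *	int fd;
 *
 *	memset(rqst, 0, sizeof(rqst));
 *	memset(&sgio, 0, sizeof(sgio));
 *	req->msgcode = FC_BSG_HST_VENDOR;
 *	req->rqst_data.h_vendor.vendor_cmd[0] = LPFC_BSG_VENDOR_GET_MGMT_REV;
 *
 *	sgio.guard = 'Q';
 *	sgio.protocol = BSG_PROTOCOL_SCSI;
 *	sgio.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
 *	sgio.request = (uintptr_t)rqst;
 *	sgio.request_len = sizeof(rqst);
 *	sgio.response = (uintptr_t)rply;
 *	sgio.max_response_len = sizeof(rply);
 *
 *	fd = open("/dev/bsg/fc_host0", O_RDWR);	// example node
 *	ioctl(fd, SG_IO, &sgio);
 */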