/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2009-2015 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/bsg-lib.h>
#include <linux/vmalloc.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_bsg.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

struct lpfc_bsg_event {
	struct list_head node;
	struct kref kref;
	wait_queue_head_t wq;

	/* Event type and waiter identifiers */
	uint32_t type_mask;
	uint32_t req_id;
	uint32_t reg_id;

	/* next two flags are here for the auto-delete logic */
	unsigned long wait_time_stamp;
	int waiting;

	/* seen and not seen events */
	struct list_head events_to_get;
	struct list_head events_to_see;

	/* driver data associated with the job */
	void *dd_data;
};

struct lpfc_bsg_iocb {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_dmabuf *rmp;
	struct lpfc_nodelist *ndlp;
};

struct lpfc_bsg_mbox {
	LPFC_MBOXQ_t *pmboxq;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
	uint8_t *ext; /* extended mailbox data */
	uint32_t mbOffset; /* from app */
	uint32_t inExtWLen; /* from app */
	uint32_t outExtWLen; /* from app */
};

#define TYPE_EVT	1
#define TYPE_IOCB	2
#define TYPE_MBOX	3
struct bsg_job_data {
	uint32_t type;
	struct bsg_job *set_job; /* job waiting for this iocb to finish */
	union {
		struct lpfc_bsg_event *evt;
		struct lpfc_bsg_iocb iocb;
		struct lpfc_bsg_mbox mbox;
	} context_un;
};

struct event_data {
	struct list_head node;
	uint32_t type;
	uint32_t immed_dat;
	void *data;
	uint32_t len;
};

#define BUF_SZ_4K 4096
#define SLI_CT_ELX_LOOPBACK 0x10

enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,
	ELX_LOOPBACK_DATA,
};

#define ELX_LOOPBACK_HEADER_SZ \
	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
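
/*
 * The NULL-pointer cast in ELX_LOOPBACK_HEADER_SZ is the classic
 * offsetof() idiom: it evaluates to the byte offset of the 'un' payload
 * union inside struct lpfc_sli_ct_request, i.e. the size of the CT
 * preamble that precedes the command-specific data. It is equivalent to
 * offsetof(struct lpfc_sli_ct_request, un).
 */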

struct lpfc_dmabufext {
	struct lpfc_dmabuf dma;
	uint32_t size;
	uint32_t flag;
};

static void
lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
{
	struct lpfc_dmabuf *mlast, *next_mlast;

	if (mlist) {
		list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
					 list) {
			list_del(&mlast->list);
			lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
			kfree(mlast);
		}
		lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
		kfree(mlist);
	}
	return;
}

static struct lpfc_dmabuf *
lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
		       int outbound_buffers, struct ulp_bde64 *bpl,
		       int *bpl_entries)
{
	struct lpfc_dmabuf *mlist = NULL;
	struct lpfc_dmabuf *mp;
	unsigned int bytes_left = size;

	/* Verify we can support the size specified */
	if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
		return NULL;

	/* Determine the number of dma buffers to allocate */
	*bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
			size/LPFC_BPL_SIZE);

	/* Allocate dma buffer and place in BPL passed */
	while (bytes_left) {
		/* Allocate dma buffer  */
		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!mp) {
			if (mlist)
				lpfc_free_bsg_buffers(phba, mlist);
			return NULL;
		}

		INIT_LIST_HEAD(&mp->list);
		mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));

		if (!mp->virt) {
			kfree(mp);
			if (mlist)
				lpfc_free_bsg_buffers(phba, mlist);
			return NULL;
		}

		/* Queue it to a linked list */
		if (!mlist)
			mlist = mp;
		else
			list_add_tail(&mp->list, &mlist->list);

		/* Add buffer to buffer pointer list */
		if (outbound_buffers)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
		bpl->tus.f.bdeSize = (uint16_t)
			(bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
			 bytes_left);
		bytes_left -= bpl->tus.f.bdeSize;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
	}
	return mlist;
}
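
/*
 * A worked example of the sizing above: assuming LPFC_BPL_SIZE is 1024,
 * a request of size = 2500 bytes yields *bpl_entries = 3, and the loop
 * allocates three mbufs whose BDEs are sized 1024, 1024 and 452 bytes.
 */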

static unsigned int
lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
		   struct bsg_buffer *bsg_buffers,
		   unsigned int bytes_to_transfer, int to_buffers)
{

	struct lpfc_dmabuf *mp;
	unsigned int transfer_bytes, bytes_copied = 0;
	unsigned int sg_offset, dma_offset;
	unsigned char *dma_address, *sg_address;
	LIST_HEAD(temp_list);
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;
	bool sg_valid;

	list_splice_init(&dma_buffers->list, &temp_list);
	list_add(&dma_buffers->list, &temp_list);
	sg_offset = 0;
	if (to_buffers)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;
	sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
		       sg_flags);
	local_irq_save(flags);
	sg_valid = sg_miter_next(&miter);
	list_for_each_entry(mp, &temp_list, list) {
		dma_offset = 0;
		while (bytes_to_transfer && sg_valid &&
		       (dma_offset < LPFC_BPL_SIZE)) {
			dma_address = mp->virt + dma_offset;
			if (sg_offset) {
				/* Continue previous partial transfer of sg */
				sg_address = miter.addr + sg_offset;
				transfer_bytes = miter.length - sg_offset;
			} else {
				sg_address = miter.addr;
				transfer_bytes = miter.length;
			}
			if (bytes_to_transfer < transfer_bytes)
				transfer_bytes = bytes_to_transfer;
			if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
				transfer_bytes = LPFC_BPL_SIZE - dma_offset;
			if (to_buffers)
				memcpy(dma_address, sg_address, transfer_bytes);
			else
				memcpy(sg_address, dma_address, transfer_bytes);
			dma_offset += transfer_bytes;
			sg_offset += transfer_bytes;
			bytes_to_transfer -= transfer_bytes;
			bytes_copied += transfer_bytes;
			if (sg_offset >= miter.length) {
				sg_offset = 0;
				sg_valid = sg_miter_next(&miter);
			}
		}
	}
	sg_miter_stop(&miter);
	local_irq_restore(flags);
	list_del_init(&dma_buffers->list);
	list_splice(&temp_list, &dma_buffers->list);
	return bytes_copied;
}
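
/*
 * Direction convention for lpfc_bsg_copy_data(): a nonzero to_buffers
 * copies from the bsg scatter/gather list into the DMA buffer chain
 * (outbound request payload), while to_buffers == 0 copies from the DMA
 * buffers back into the scatter/gather list (inbound reply payload). The
 * mapping iterator runs with SG_MITER_ATOMIC and local interrupts
 * disabled, which should keep this routine safe to call from the iocb
 * completion path as well as from process context.
 */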

/**
 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_bsg_send_mgmt_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
			   struct lpfc_iocbq *cmdiocbq,
			   struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	struct lpfc_dmabuf *bmp, *cmp, *rmp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_bsg_iocb *iocb;
	unsigned long flags;
	int rc = 0;
	u32 ulp_status, ulp_word4, total_data_placed;

	dd_data = cmdiocbq->context_un.dd_data;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	iocb = &dd_data->context_un.iocb;
	ndlp = iocb->cmdiocbq->ndlp;
	rmp = iocb->rmp;
	cmp = cmdiocbq->cmd_dmabuf;
	bmp = cmdiocbq->bpl_dmabuf;
	ulp_status = get_job_ulpstatus(phba, rspiocbq);
	ulp_word4 = get_job_word4(phba, rspiocbq);
	total_data_placed = get_job_data_placed(phba, rspiocbq);

	/* Copy the completed data or set the error status */

	if (job) {
		if (ulp_status) {
			if (ulp_status == IOSTAT_LOCAL_REJECT) {
				switch (ulp_word4 & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			bsg_reply->reply_payload_rcv_len =
				lpfc_bsg_copy_data(rmp, &job->reply_payload,
						   total_data_placed, 0);
		}
	}

	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_free_bsg_buffers(phba, rmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_nlp_put(ndlp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}
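
/*
 * Note on the abort handshake used by the send/response paths in this
 * file: dd_data->set_job (guarded by phba->ct_ev_lock) ties an
 * outstanding iocb to its bsg job, and LPFC_IO_CMD_OUTSTANDING (guarded
 * by phba->hbalock) marks the window in which the bsg timeout handler is
 * allowed to abort the iocb. Completion handlers clear both before
 * releasing any resources, so a timeout racing with a completion should
 * not be able to abort an iocb that has already finished.
 */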

/**
 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct ulp_bde64 *bpl = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
	int request_nseg, reply_nseg;
	u32 num_entry;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;
	int rc = 0;
	int iocb_stat;
	u16 ulp_context;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	if (test_bit(NLP_PLOGI_SND, &ndlp->nlp_flag) ||
	    test_bit(NLP_PRLI_SND, &ndlp->nlp_flag) ||
	    test_bit(NLP_ADISC_SND, &ndlp->nlp_flag) ||
	    test_bit(NLP_LOGO_SND, &ndlp->nlp_flag) ||
	    test_bit(NLP_RNID_SND, &ndlp->nlp_flag))
		return -ENODEV;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2733 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_dd;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);

	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &request_nseg);
	if (!cmp) {
		rc = -ENOMEM;
		goto free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	bpl += request_nseg;
	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
				     bpl, &reply_nseg);
	if (!rmp) {
		rc = -ENOMEM;
		goto free_cmp;
	}

	num_entry = request_nseg + reply_nseg;

	if (phba->sli_rev == LPFC_SLI_REV4)
		ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	else
		ulp_context = ndlp->nlp_rpi;

	lpfc_sli_prep_gen_req(phba, cmdiocbq, bmp, ulp_context, num_entry,
			      phba->fc_ratov * 2);

	cmdiocbq->num_bdes = num_entry;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->cmd_dmabuf = cmp;
	cmdiocbq->bpl_dmabuf = bmp;
	cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;

	cmdiocbq->cmd_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
	cmdiocbq->context_un.dd_data = dd_data;

	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.rmp = rmp;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto free_rmp;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	cmdiocbq->ndlp = lpfc_nlp_get(ndlp);
	if (!cmdiocbq->ndlp) {
		rc = -ENODEV;
		goto free_rmp;
	}

	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
	if (iocb_stat == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed yet */
		if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	} else if (iocb_stat == IOCB_BUSY) {
		rc = -EAGAIN;
	} else {
		rc = -EIO;
	}

	/* iocb failed so cleanup */
	lpfc_nlp_put(ndlp);

free_rmp:
	lpfc_free_bsg_buffers(phba, rmp);
free_cmp:
	lpfc_free_bsg_buffers(phba, cmp);
free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_dd:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using the
 * lpfc_bsg_rport_els function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
		       struct lpfc_iocbq *cmdiocbq,
		       struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
	struct fc_bsg_ctels_reply *els_reply;
	uint8_t *rjt_data;
	unsigned long flags;
	unsigned int rsp_size;
	int rc = 0;
	u32 ulp_status, ulp_word4, total_data_placed;

	dd_data = cmdiocbq->context_un.dd_data;
	ndlp = dd_data->context_un.iocb.ndlp;
	cmdiocbq->ndlp = ndlp;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	ulp_status = get_job_ulpstatus(phba, rspiocbq);
	ulp_word4 = get_job_word4(phba, rspiocbq);
	total_data_placed = get_job_data_placed(phba, rspiocbq);
	pcmd = cmdiocbq->cmd_dmabuf;
	prsp = (struct lpfc_dmabuf *)pcmd->list.next;

	/* Copy the completed job data or determine the job status if job is
	 * still active
	 */

	if (job) {
		if (ulp_status == IOSTAT_SUCCESS) {
			rsp_size = total_data_placed;
			bsg_reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->reply_payload.sg_list,
						    job->reply_payload.sg_cnt,
						    prsp->virt,
						    rsp_size);
		} else if (ulp_status == IOSTAT_LS_RJT) {
			bsg_reply->reply_payload_rcv_len =
				sizeof(struct fc_bsg_ctels_reply);
			/* LS_RJT data returned in word 4 */
			rjt_data = (uint8_t *)&ulp_word4;
			els_reply = &bsg_reply->reply_data.ctels_reply;
			els_reply->status = FC_CTELS_STATUS_REJECT;
			els_reply->rjt_data.action = rjt_data[3];
			els_reply->rjt_data.reason_code = rjt_data[2];
			els_reply->rjt_data.reason_explanation = rjt_data[1];
			els_reply->rjt_data.vendor_unique = rjt_data[0];
		} else if (ulp_status == IOSTAT_LOCAL_REJECT &&
			   (ulp_word4 & IOERR_PARAM_MASK) ==
			   IOERR_SEQUENCE_TIMEOUT) {
			rc = -ETIMEDOUT;
		} else {
			rc = -EIO;
		}
	}

	lpfc_els_free_iocb(phba, cmdiocbq);

	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}

/**
 * lpfc_bsg_rport_els - send an ELS command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_rport_els(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint32_t elscmd;
	uint32_t cmdsize;
	struct lpfc_iocbq *cmdiocbq;
	uint16_t rpi = 0;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	/* verify the els command is not greater than the
	 * maximum ELS transfer size.
	 */

	if (job->request_payload.payload_len > FCELSSIZE) {
		rc = -EINVAL;
		goto no_dd_data;
	}

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2735 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	elscmd = bsg_request->rqst_data.r_els.els_code;
	cmdsize = job->request_payload.payload_len;

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto free_dd_data;
	}

	/* Use the DMA buffers allocated by lpfc_prep_els_iocb for the command
	 * and response so that, if the job times out and the request is
	 * freed, we do not DMA into memory that is no longer allocated to
	 * the request.
	 */
	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
				      ndlp->nlp_DID, elscmd);
	if (!cmdiocbq) {
		rc = -EIO;
		goto release_ndlp;
	}

	/* Transfer the request payload to allocated command dma buffer */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  cmdiocbq->cmd_dmabuf->virt,
			  cmdsize);

	rpi = ndlp->nlp_rpi;

	if (phba->sli_rev == LPFC_SLI_REV4)
		bf_set(wqe_ctxt_tag, &cmdiocbq->wqe.generic.wqe_com,
		       phba->sli4_hba.rpi_ids[rpi]);
	else
		cmdiocbq->iocb.ulpContext = rpi;
	cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->context_un.dd_data = dd_data;
	cmdiocbq->ndlp = ndlp;
	cmdiocbq->cmd_cmpl = lpfc_bsg_rport_els_cmp;
	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto linkdown_err;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
	if (rc == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed/released */
		if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	} else if (rc == IOCB_BUSY) {
		rc = -EAGAIN;
	} else {
		rc = -EIO;
	}

	/* I/O issue failed. Cleanup resources. */

linkdown_err:
	lpfc_els_free_iocb(phba, cmdiocbq);

release_ndlp:
	lpfc_nlp_put(ndlp);

free_dd_data:
	kfree(dd_data);

no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_event_free - frees an allocated event structure
 * @kref: Pointer to a kref.
 *
 * Called from kref_put. Back cast the kref into an event structure address.
 * Free any events to get, delete associated nodes, free any events to see,
 * free any data then free the event itself.
 **/
static void
lpfc_bsg_event_free(struct kref *kref)
{
	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
						  kref);
	struct event_data *ed;

	list_del(&evt->node);

	while (!list_empty(&evt->events_to_get)) {
		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	while (!list_empty(&evt->events_to_see)) {
		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	kfree(evt->dd_data);
	kfree(evt);
}

/**
 * lpfc_bsg_event_ref - increments the kref for an event
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
	kref_get(&evt->kref);
}

/**
 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
	kref_put(&evt->kref, lpfc_bsg_event_free);
}

/**
 * lpfc_bsg_event_new - allocate and initialize an event structure
 * @ev_mask: Mask of events.
 * @ev_reg_id: Event reg id.
 * @ev_req_id: Event request id.
 **/
static struct lpfc_bsg_event *
lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
{
	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);

	if (!evt)
		return NULL;

	INIT_LIST_HEAD(&evt->events_to_get);
	INIT_LIST_HEAD(&evt->events_to_see);
	evt->type_mask = ev_mask;
	evt->req_id = ev_req_id;
	evt->reg_id = ev_reg_id;
	evt->wait_time_stamp = jiffies;
	evt->dd_data = NULL;
	init_waitqueue_head(&evt->wq);
	kref_init(&evt->kref);
	return evt;
}

/**
 * diag_cmd_data_free - Frees an lpfc dma buffer extension
 * @phba: Pointer to HBA context object.
 * @mlist: Pointer to an lpfc dma buffer extension.
 **/
static int
diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
{
	struct lpfc_dmabufext *mlast;
	struct pci_dev *pcidev;
	struct list_head head, *curr, *next;

	if ((!mlist) || (!lpfc_is_link_up(phba) &&
			 (phba->link_flag & LS_LOOPBACK_MODE))) {
		return 0;
	}

	pcidev = phba->pcidev;
	list_add_tail(&head, &mlist->dma.list);

	list_for_each_safe(curr, next, &head) {
		mlast = list_entry(curr, struct lpfc_dmabufext, dma.list);
		if (mlast->dma.virt)
			dma_free_coherent(&pcidev->dev,
					  mlast->size,
					  mlast->dma.virt,
					  mlast->dma.phys);
		kfree(mlast);
	}
	return 0;
}

/*
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 *
 * This function is called when an unsolicited CT command is received. It
 * forwards the event to any processes registered to receive CT events.
 **/
int
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocbq)
{
	uint32_t evt_req_id = 0;
	u16 cmd;
	struct lpfc_dmabuf *dmabuf = NULL;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	struct lpfc_iocbq *iocbq;
	IOCB_t *iocb = NULL;
	size_t offset = 0;
	struct list_head head;
	struct ulp_bde64 *bde;
	dma_addr_t dma_addr;
	int i;
	struct lpfc_dmabuf *bdeBuf1 = piocbq->cmd_dmabuf;
	struct lpfc_dmabuf *bdeBuf2 = piocbq->bpl_dmabuf;
	struct lpfc_sli_ct_request *ct_req;
	struct bsg_job *job = NULL;
	struct fc_bsg_reply *bsg_reply;
	struct bsg_job_data *dd_data = NULL;
	unsigned long flags;
	int size = 0;
	u32 bde_count = 0;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	ct_req = (struct lpfc_sli_ct_request *)bdeBuf1->virt;
	evt_req_id = ct_req->FsType;
	cmd = be16_to_cpu(ct_req->CommandResponse.bits.CmdRsp);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
		    evt->req_id != evt_req_id)
			continue;

		lpfc_bsg_event_ref(evt);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
		if (evt_dat == NULL) {
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2614 Memory allocation failed for "
					"CT event\n");
			break;
		}

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* take accumulated byte count from the last iocbq */
			iocbq = list_entry(head.prev, typeof(*iocbq), list);
			if (phba->sli_rev == LPFC_SLI_REV4)
				evt_dat->len =
					iocbq->wcqe_cmpl.total_data_placed;
			else
				evt_dat->len =
					iocbq->iocb.unsli3.rcvsli3.acc_len;
		} else {
			list_for_each_entry(iocbq, &head, list) {
				iocb = &iocbq->iocb;
				for (i = 0; i < iocb->ulpBdeCount; i++)
					evt_dat->len +=
					iocb->un.cont64[i].tus.f.bdeSize;
			}
		}

		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
		if (evt_dat->data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2615 Memory allocation failed for "
					"CT event data, size %d\n",
					evt_dat->len);
			kfree(evt_dat);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			goto error_ct_unsol_exit;
		}

		list_for_each_entry(iocbq, &head, list) {
			size = 0;
			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
				bdeBuf1 = iocbq->cmd_dmabuf;
				bdeBuf2 = iocbq->bpl_dmabuf;
			}
			if (phba->sli_rev == LPFC_SLI_REV4)
				bde_count = iocbq->wcqe_cmpl.word3;
			else
				bde_count = iocbq->iocb.ulpBdeCount;
			for (i = 0; i < bde_count; i++) {
				if (phba->sli3_options &
				    LPFC_SLI3_HBQ_ENABLED) {
					if (i == 0) {
						size = iocbq->wqe.gen_req.bde.tus.f.bdeSize;
						dmabuf = bdeBuf1;
					} else if (i == 1) {
						size = iocbq->unsol_rcv_len;
						dmabuf = bdeBuf2;
					}
					if ((offset + size) > evt_dat->len)
						size = evt_dat->len - offset;
				} else {
					size = iocbq->iocb.un.cont64[i].tus.f.bdeSize;
					bde = &iocbq->iocb.un.cont64[i];
					dma_addr = getPaddr(bde->addrHigh,
							    bde->addrLow);
					dmabuf = lpfc_sli_ringpostbuf_get(phba,
							pring, dma_addr);
				}
				if (!dmabuf) {
					lpfc_printf_log(phba, KERN_ERR,
							LOG_LIBDFC, "2616 No dmabuf "
							"found for iocbq x%px\n",
							iocbq);
					kfree(evt_dat->data);
					kfree(evt_dat);
					spin_lock_irqsave(&phba->ct_ev_lock,
							  flags);
					lpfc_bsg_event_unref(evt);
					spin_unlock_irqrestore(
						&phba->ct_ev_lock, flags);
					goto error_ct_unsol_exit;
				}
				memcpy((char *)(evt_dat->data) + offset,
				       dmabuf->virt, size);
				offset += size;
				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
				    !(phba->sli3_options &
				      LPFC_SLI3_HBQ_ENABLED)) {
					lpfc_sli_ringpostbuf_put(phba, pring,
								 dmabuf);
				} else {
					switch (cmd) {
					case ELX_LOOPBACK_DATA:
						if (phba->sli_rev <
						    LPFC_SLI_REV4)
							diag_cmd_data_free(phba,
							(struct lpfc_dmabufext
							 *)dmabuf);
						break;
					case ELX_LOOPBACK_XRI_SETUP:
						if ((phba->sli_rev ==
						     LPFC_SLI_REV2) ||
						    (phba->sli3_options &
						     LPFC_SLI3_HBQ_ENABLED)) {
							lpfc_in_buf_free(phba,
									 dmabuf);
						} else {
							lpfc_sli3_post_buffer(phba,
									      pring,
									      1);
						}
						break;
					default:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_sli3_post_buffer(phba,
									      pring,
									      1);
						break;
					}
				}
			}
		}

		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		if (phba->sli_rev == LPFC_SLI_REV4) {
			evt_dat->immed_dat = phba->ctx_idx;
			phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
			/* Provide warning for over-run of the ct_ctx array */
			if (phba->ct_ctx[evt_dat->immed_dat].valid ==
			    UNSOL_VALID)
				lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
						"2717 CT context array entry "
						"[%d] over-run: oxid:x%x, "
						"sid:x%x\n", phba->ctx_idx,
						phba->ct_ctx[
						    evt_dat->immed_dat].oxid,
						phba->ct_ctx[
						    evt_dat->immed_dat].SID);
			phba->ct_ctx[evt_dat->immed_dat].rxid =
				get_job_ulpcontext(phba, piocbq);
			phba->ct_ctx[evt_dat->immed_dat].oxid =
				get_job_rcvoxid(phba, piocbq);
			phba->ct_ctx[evt_dat->immed_dat].SID =
				bf_get(wqe_els_did,
				       &piocbq->wqe.xmit_els_rsp.wqe_dest);
			phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
		} else
			evt_dat->immed_dat = get_job_ulpcontext(phba, piocbq);

		evt_dat->type = FC_REG_CT_EVENT;
		list_add(&evt_dat->node, &evt->events_to_see);
		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
			wake_up_interruptible(&evt->wq);
			lpfc_bsg_event_unref(evt);
			break;
		}

		list_move(evt->events_to_see.prev, &evt->events_to_get);

		dd_data = (struct bsg_job_data *)evt->dd_data;
		job = dd_data->set_job;
		dd_data->set_job = NULL;
		lpfc_bsg_event_unref(evt);
		if (job) {
			bsg_reply = job->reply;
			bsg_reply->reply_payload_rcv_len = size;
			/* make error code available to userspace */
			bsg_reply->result = 0;
			job->dd_data = NULL;
			/* complete the job back to userspace */
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			bsg_job_done(job, bsg_reply->result,
				     bsg_reply->reply_payload_rcv_len);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

error_ct_unsol_exit:
	if (!list_empty(&head))
		list_del(&head);
	if ((phba->sli_rev < LPFC_SLI_REV4) &&
	    (evt_req_id == SLI_CT_ELX_LOOPBACK))
		return 0;
	return 1;
}

/**
 * lpfc_bsg_ct_unsol_abort - handle a CT abort directed at the management plane
 * @phba: Pointer to HBA context object.
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles the abort of a CT command directed toward the
 * management plane for an SLI4 port.
 *
 * If a pending context for a CT command to the management plane is present,
 * this function clears that context and returns 1 (handled); otherwise it
 * returns 0, indicating that no such context exists.
 **/
int
lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header fc_hdr;
	struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
	int ctx_idx, handled = 0;
	uint16_t oxid, rxid;
	uint32_t sid;

	memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
	oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);

	for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
		if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
			continue;
		if (phba->ct_ctx[ctx_idx].rxid != rxid)
			continue;
		if (phba->ct_ctx[ctx_idx].oxid != oxid)
			continue;
		if (phba->ct_ctx[ctx_idx].SID != sid)
			continue;
		phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
		handled = 1;
	}
	return handled;
}

/**
 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
 * @job: SET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_set_event(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct set_ct_event *event_req;
	struct lpfc_bsg_event *evt;
	int rc = 0;
	struct bsg_job_data *dd_data = NULL;
	uint32_t ev_mask;
	unsigned long flags;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2612 Received SET_CT_EVENT below minimum "
				"size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct set_ct_event *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
		   FC_REG_EVENT_MASK);
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			dd_data = (struct bsg_job_data *)evt->dd_data;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (&evt->node == &phba->ct_ev_waiters) {
		/* no event waiting struct yet - first call */
		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
		if (dd_data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2734 Failed allocation of dd_data\n");
			rc = -ENOMEM;
			goto job_error;
		}
		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
					 event_req->ev_req_id);
		if (!evt) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2617 Failed allocation of event "
					"waiter\n");
			rc = -ENOMEM;
			goto job_error;
		}
		dd_data->type = TYPE_EVT;
		dd_data->set_job = NULL;
		dd_data->context_un.evt = evt;
		evt->dd_data = (void *)dd_data;
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_add(&evt->node, &phba->ct_ev_waiters);
		lpfc_bsg_event_ref(evt);
		evt->wait_time_stamp = jiffies;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	evt->waiting = 1;
	dd_data->set_job = job; /* for unsolicited command */
	job->dd_data = dd_data; /* for fc transport timeout callback */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return 0; /* call job done later */

job_error:
	kfree(dd_data);
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
 * @job: GET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_get_event(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct get_ct_event *event_req;
	struct get_ct_event_reply *event_reply;
	struct lpfc_bsg_event *evt, *evt_next;
	struct event_data *evt_dat = NULL;
	unsigned long flags;
	uint32_t rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2613 Received GET_CT_EVENT request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_ct_event *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_ct_event_reply *)
		bsg_reply->reply_data.vendor_reply.vendor_rsp;
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			if (list_empty(&evt->events_to_get))
				break;
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			evt_dat = list_entry(evt->events_to_get.prev,
					     struct event_data, node);
			list_del(&evt_dat->node);
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* The app may continue to ask for event data until it gets
	 * an error indicating that there isn't any more
	 */
	if (evt_dat == NULL) {
		bsg_reply->reply_payload_rcv_len = 0;
		rc = -ENOENT;
		goto job_error;
	}

	if (evt_dat->len > job->request_payload.payload_len) {
		evt_dat->len = job->request_payload.payload_len;
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2618 Truncated event data at %d "
				"bytes\n",
				job->request_payload.payload_len);
	}

	event_reply->type = evt_dat->type;
	event_reply->immed_data = evt_dat->immed_dat;
	if (evt_dat->len > 0)
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->request_payload.sg_list,
					    job->request_payload.sg_cnt,
					    evt_dat->data, evt_dat->len);
	else
		bsg_reply->reply_payload_rcv_len = 0;

	if (evt_dat) {
		kfree(evt_dat->data);
		kfree(evt_dat);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	job->dd_data = NULL;
	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return 0;

job_error:
	job->dd_data = NULL;
	bsg_reply->result = rc;
	return rc;
}
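
/*
 * Taken together, the two handlers above implement the vendor event
 * interface: an application first registers interest with SET_EVENT,
 * which parks a waiter on phba->ct_ev_waiters and completes only when an
 * event arrives, and then drains queued events with repeated GET_EVENT
 * calls until -ENOENT indicates the queue is empty.
 */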

/**
 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using the
 * lpfc_issue_ct_rsp function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
		      struct lpfc_iocbq *cmdiocbq,
		      struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	struct lpfc_dmabuf *bmp, *cmp;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc = 0;
	u32 ulp_status, ulp_word4;

	dd_data = cmdiocbq->context_un.dd_data;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	ndlp = dd_data->context_un.iocb.ndlp;
	cmp = cmdiocbq->cmd_dmabuf;
	bmp = cmdiocbq->bpl_dmabuf;

	ulp_status = get_job_ulpstatus(phba, rspiocbq);
	ulp_word4 = get_job_word4(phba, rspiocbq);

	/* Copy the completed job data or set the error status */

	if (job) {
		bsg_reply = job->reply;
		if (ulp_status) {
			if (ulp_status == IOSTAT_LOCAL_REJECT) {
				switch (ulp_word4 & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			bsg_reply->reply_payload_rcv_len = 0;
		}
	}

	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}
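
/*
 * The response path below transmits on the exchange captured earlier by
 * lpfc_bsg_ct_unsol_event(): the tag indexes phba->ct_ctx[], whose saved
 * oxid/SID identify the unsolicited CT exchange, and on SLI4 the entry
 * is marked UNSOL_INVALID once the XMIT_SEQUENCE is built so the
 * exchange cannot be reused.
 */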

/**
 * lpfc_issue_ct_rsp - issue a ct response
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the job object.
 * @tag: tag index value into the port's context exchange array.
 * @cmp: Pointer to a cmp dma buffer descriptor.
 * @bmp: Pointer to a bmp dma buffer descriptor.
 * @num_entry: Number of entries in the bde.
 **/
static int
lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
		  struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
		  int num_entry)
{
	struct lpfc_iocbq *ctiocb = NULL;
	int rc = 0;
	struct lpfc_nodelist *ndlp = NULL;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;
	u16 ulp_context, iotag;

	ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
	if (!ndlp) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
				"2721 ndlp null for oxid %x SID %x\n",
				phba->ct_ctx[tag].rxid,
				phba->ct_ctx[tag].SID);
		return IOCB_ERROR;
	}

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2736 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	/* Allocate buffer for command iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb) {
		rc = -ENOMEM;
		goto no_ctiocb;
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Do not issue unsol response if oxid not marked as valid */
		if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp,
					 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
					 phba->ct_ctx[tag].oxid, num_entry,
					 FC_RCTL_DD_SOL_CTL, 1,
					 CMD_XMIT_SEQUENCE64_WQE);

		/* The exchange is done, mark the entry as invalid */
		phba->ct_ctx[tag].valid = UNSOL_INVALID;
		iotag = get_wqe_reqtag(ctiocb);
	} else {
		lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp, 0, tag, num_entry,
					 FC_RCTL_DD_SOL_CTL, 1,
					 CMD_XMIT_SEQUENCE64_CX);
		ctiocb->num_bdes = num_entry;
		iotag = ctiocb->iocb.ulpIoTag;
	}

	ulp_context = get_job_ulpcontext(phba, ctiocb);

	/* Xmit CT response on exchange <xid> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
			ulp_context, iotag, tag, phba->link_state);

	ctiocb->cmd_flag |= LPFC_IO_LIBDFC;
	ctiocb->vport = phba->pport;
	ctiocb->context_un.dd_data = dd_data;
	ctiocb->cmd_dmabuf = cmp;
	ctiocb->bpl_dmabuf = bmp;
	ctiocb->ndlp = ndlp;
	ctiocb->cmd_cmpl = lpfc_issue_ct_rsp_cmp;

	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = ctiocb;
	dd_data->context_un.iocb.ndlp = lpfc_nlp_get(ndlp);
	if (!dd_data->context_un.iocb.ndlp) {
		rc = -IOCB_ERROR;
		goto issue_ct_rsp_exit;
	}
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
	if (rc == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed/released */
		if (ctiocb->cmd_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			ctiocb->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	}

	/* iocb failed so cleanup */
	job->dd_data = NULL;
	lpfc_nlp_put(ndlp);

issue_ct_rsp_exit:
	lpfc_sli_release_iocbq(phba, ctiocb);
no_ctiocb:
	kfree(dd_data);
no_dd_data:
	return rc;
}

/**
 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
 * @job: SEND_MGMT_RESP fc_bsg_job
 **/
static int
lpfc_bsg_send_mgmt_rsp(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
	int bpl_entries;
	uint32_t tag = mgmt_resp->tag;
	unsigned long reqbfrcnt =
		(unsigned long)job->request_payload.payload_len;
	int rc = 0;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
		rc = -ERANGE;
		goto send_mgmt_rsp_exit;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_exit;
	}

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &bpl_entries);
	if (!cmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	rc = -EACCES;

	lpfc_free_bsg_buffers(phba, cmp);

send_mgmt_rsp_free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
send_mgmt_rsp_exit:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_diag_mode_enter - prepare driver to enter device diag loopback mode
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for preparing the driver for diag loopback
 * on the device.
 */
static int
lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct lpfc_sli *psli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	int i = 0;

	psli = &phba->sli;
	if (!psli)
		return -ENODEV;

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
		return -EACCES;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_block_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_block_requests(shost);
	}

	if (phba->sli_rev != LPFC_SLI_REV4) {
		pring = &psli->sli3_ring[LPFC_FCP_RING];
		lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock);
		return 0;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring || (pring->ringno != LPFC_FCP_RING))
			continue;
		if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
				      &pring->ring_lock))
			break;
	}
	return 0;
}

/**
 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for the driver's exit processing after
 * setting up diag loopback mode on the device.
 */
static void
lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_unblock_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_unblock_requests(shost);
	}
	return;
}
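
/*
 * lpfc_bsg_diag_mode_enter()/lpfc_bsg_diag_mode_exit() bracket the
 * loopback setup paths below: enter blocks new scsi requests on every
 * vport and waits for the FCP txcmplq to drain, and exit unblocks the
 * hosts again once the link has been reconfigured.
 */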

/**
 * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
 * @phba: Pointer to HBA context object.
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing an sli3 port into diagnostic
 * loopback mode in order to perform a diagnostic loopback test.
 * All new scsi requests are blocked, a small delay is used to allow the
 * scsi requests to complete, then the link is brought down. If the link
 * is placed in loopback mode then scsi requests are again allowed
 * so the scsi mid-layer doesn't give up on the port.
 * All of this is done in-line.
 */
static int
lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct diag_mode_set *loopback_mode;
	uint32_t link_flags;
	uint32_t timeout;
	LPFC_MBOXQ_t *pmboxq = NULL;
	int mbxstatus = MBX_SUCCESS;
	int i = 0;
	int rc = 0;

	/* no data to return just the return code */
	bsg_reply->reply_payload_rcv_len = 0;

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2738 Received DIAG MODE request size:%d "
				"below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				      sizeof(struct diag_mode_set)));
		rc = -EINVAL;
		goto job_error;
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_error;

	/* bring the link to diagnostic mode */
	loopback_mode = (struct diag_mode_set *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	timeout = loopback_mode->timeout * 100;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto loopback_mode_exit;
	}
	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
	pmboxq->u.mb.mbxOwner = OWN_HOST;

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
		/* wait for link down before proceeding */
		i = 0;
		while (phba->link_state != LPFC_LINK_DOWN) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				goto loopback_mode_exit;
			}
			msleep(10);
		}

		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		if (link_flags == INTERNAL_LOOP_BACK)
			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
		else
			pmboxq->u.mb.un.varInitLnk.link_flags =
				FLAGS_TOPOLOGY_MODE_LOOP;

		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
		pmboxq->u.mb.mbxOwner = OWN_HOST;

		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
						     LPFC_MBOX_TMO);

		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
			rc = -ENODEV;
		else {
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_LOOPBACK_MODE;
			spin_unlock_irq(&phba->hbalock);
			/* wait for the link attention interrupt */
			msleep(100);

			i = 0;
			while (phba->link_state != LPFC_HBA_READY) {
				if (i++ > timeout) {
					rc = -ETIMEDOUT;
					break;
				}

				msleep(10);
			}
		}

	} else
		rc = -ENODEV;

loopback_mode_exit:
	lpfc_bsg_diag_mode_exit(phba);

	/*
	 * Let SLI layer release mboxq if mbox command completed after timeout.
	 */
	if (pmboxq && mbxstatus != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);

job_error:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}
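
/*
 * Timeout bookkeeping in the loopback paths: the user-supplied
 * loopback_mode->timeout is apparently expressed in seconds and scaled
 * by 100, and each wait loop sleeps 10 ms per iteration, so the scaled
 * count of msleep(10) iterations amounts to roughly the requested
 * number of seconds.
 */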

/**
 * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
 * @phba: Pointer to HBA context object.
 * @diag: Flag to set the link to diag or normal operation state.
 *
 * This function is responsible for issuing a sli4 mailbox command for setting
 * link to either diag state or normal operation state.
 */
static int
lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
{
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_mbx_set_link_diag_state *link_diag_state;
	uint32_t req_len, alloc_len;
	int mbxstatus = MBX_SUCCESS, rc;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
		   sizeof(struct lpfc_sli4_cfg_mhdr));
	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
				     req_len, LPFC_SLI4_MBX_EMBED);
	if (alloc_len != req_len) {
		rc = -ENOMEM;
		goto link_diag_state_set_out;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
			diag, phba->sli4_hba.lnk_info.lnk_tp,
			phba->sli4_hba.lnk_info.lnk_no);

	link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
	bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
	       LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
	bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
	       phba->sli4_hba.lnk_info.lnk_no);
	bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
	       phba->sli4_hba.lnk_info.lnk_tp);
	if (diag)
		bf_set(lpfc_mbx_set_diag_state_diag,
		       &link_diag_state->u.req, 1);
	else
		bf_set(lpfc_mbx_set_diag_state_diag,
		       &link_diag_state->u.req, 0);

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
		rc = 0;
	else
		rc = -ENODEV;

link_diag_state_set_out:
	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
		mempool_free(pmboxq, phba->mbox_mem_pool);

	return rc;
}
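
/*
 * As in the mailbox paths above, the mailbox object is returned to the
 * mempool only when the wait did not time out (mbxstatus != MBX_TIMEOUT);
 * after a timeout the SLI layer still owns the mailbox and is expected
 * to release it when the late completion arrives.
 */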
 */
static int
lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba *phba, int mode,
				uint32_t link_no)
{
	LPFC_MBOXQ_t *pmboxq;
	uint32_t req_len, alloc_len;
	struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
	int mbxstatus = MBX_SUCCESS, rc = 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;
	req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
		   sizeof(struct lpfc_sli4_cfg_mhdr));
	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
				req_len, LPFC_SLI4_MBX_EMBED);
	if (alloc_len != req_len) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENOMEM;
	}
	link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
	bf_set(lpfc_mbx_set_diag_state_link_num,
	       &link_diag_loopback->u.req, link_no);

	if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
		bf_set(lpfc_mbx_set_diag_state_link_type,
		       &link_diag_loopback->u.req, LPFC_LNK_FC_TRUNKED);
	} else {
		bf_set(lpfc_mbx_set_diag_state_link_type,
		       &link_diag_loopback->u.req,
		       phba->sli4_hba.lnk_info.lnk_tp);
	}

	bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
	       mode);

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3127 Failed setup loopback mode mailbox "
				"command, rc:x%x, status:x%x\n", mbxstatus,
				pmboxq->u.mb.mbxStatus);
		rc = -ENODEV;
	}
	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
		mempool_free(pmboxq, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
 * @phba: Pointer to HBA context object.
 *
 * This function sets up SLI4 FC port registrations for a diagnostic run,
 * which includes all the rpis, the vfi, and also the vpi.
 */
static int
lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
{
	if (test_bit(FC_VFI_REGISTERED, &phba->pport->fc_flag)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3136 Port still had vfi registered: "
				"mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
				phba->pport->fc_myDID, phba->fcf.fcfi,
				phba->sli4_hba.vfi_ids[phba->pport->vfi],
				phba->vpi_ids[phba->pport->vpi]);
		return -EINVAL;
	}
	return lpfc_issue_reg_vfi(phba->pport);
}

/**
 * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
 * @phba: Pointer to HBA context object.
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing an sli4 port into diagnostic
 * loopback mode in order to perform a diagnostic loopback test.
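 *
 * For reference, a hedged sketch of the vendor command payload this handler
 * consumes (only the fields read below are shown; values are examples):
 *
 *	struct diag_mode_set dms = {
 *		.type          = INTERNAL_LOOP_BACK,
 *		.timeout       = 60,	/* seconds; the waits below poll in
 *					 * 10 ms steps, timeout * 100 times */
 *		.physical_link = -1,	/* -1 selects the default link */
 *	};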
 */
static int
lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct diag_mode_set *loopback_mode;
	uint32_t link_flags, timeout, link_no;
	int i, rc = 0;

	/* no data to return just the return code */
	bsg_reply->reply_payload_rcv_len = 0;

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3011 Received DIAG MODE request size:%d "
				"below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				      sizeof(struct diag_mode_set)));
		rc = -EINVAL;
		goto job_done;
	}

	loopback_mode = (struct diag_mode_set *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	timeout = loopback_mode->timeout * 100;

	if (loopback_mode->physical_link == -1)
		link_no = phba->sli4_hba.lnk_info.lnk_no;
	else
		link_no = loopback_mode->physical_link;

	if (link_flags == DISABLE_LOOP_BACK) {
		rc = lpfc_sli4_bsg_set_loopback_mode(phba,
					LPFC_DIAG_LOOPBACK_TYPE_DISABLE,
					link_no);
		if (!rc) {
			/* Unset the need disable bit */
			phba->sli4_hba.conf_trunk &= ~((1 << link_no) << 4);
		}
		goto job_done;
	} else {
		/* Check if we need to disable the loopback state */
		if (phba->sli4_hba.conf_trunk & ((1 << link_no) << 4)) {
			rc = -EPERM;
			goto job_done;
		}
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_done;

	/* indicate we are in loopback diagnostic mode */
	spin_lock_irq(&phba->hbalock);
	phba->link_flag |= LS_LOOPBACK_MODE;
	spin_unlock_irq(&phba->hbalock);

	/* reset port to start from scratch */
	rc = lpfc_selective_reset(phba);
	if (rc)
		goto job_done;

	/* bring the link to diagnostic mode */
	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"3129 Bring link to diagnostic state.\n");

	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3130 Failed to bring link to diagnostic "
				"state, rc:x%x\n", rc);
		goto loopback_mode_exit;
	}

	/* wait for link down before proceeding */
	i = 0;
	while (phba->link_state != LPFC_LINK_DOWN) {
		if (i++ > timeout) {
			rc = -ETIMEDOUT;
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3131 Timeout waiting for link to "
					"diagnostic mode, timeout:%d ms\n",
					timeout * 10);
			goto loopback_mode_exit;
		}
		msleep(10);
	}

	/* set up loopback mode */
	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"3132 Set up loopback mode:x%x\n", link_flags);

	switch (link_flags) {
	case INTERNAL_LOOP_BACK:
		if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
					LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
					link_no);
		} else {
			/* Trunk is configured, but link is not in this trunk */
			if (phba->sli4_hba.conf_trunk) {
				rc = -ELNRNG;
				goto loopback_mode_exit;
			}

			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
					LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
					link_no);
		}

		if (!rc) {
			/* Set the need disable bit */
			phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
		}

		break;
	case EXTERNAL_LOOP_BACK:
		if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
				LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL_TRUNKED,
				link_no);
		} else {
			/* Trunk is configured, but link is not in this trunk */
			if (phba->sli4_hba.conf_trunk) {
				rc = -ELNRNG;
				goto loopback_mode_exit;
			}

			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
					LPFC_DIAG_LOOPBACK_TYPE_SERDES,
					link_no);
		}

		if (!rc) {
			/* Set the need disable bit */
			phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
		}

		break;
	default:
		rc = -EINVAL;
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"3141 Loopback mode:x%x not supported\n",
				link_flags);
		goto loopback_mode_exit;
	}

	if (!rc) {
		/* wait for the link attention interrupt */
		msleep(100);
		i = 0;
		while (phba->link_state < LPFC_LINK_UP) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3137 Timeout waiting for link up "
					"in loopback mode, timeout:%d ms\n",
					timeout * 10);
				break;
			}
			msleep(10);
		}
	}

	/* port resource registration setup for loopback diagnostic */
	if (!rc) {
		/* set up a non-zero myDID for loopback test */
		phba->pport->fc_myDID = 1;
		rc = lpfc_sli4_diag_fcport_reg_setup(phba);
	} else
		goto loopback_mode_exit;

	if (!rc) {
		/* wait for the port ready */
		msleep(100);
		i = 0;
		while (phba->link_state != LPFC_HBA_READY) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3133 Timeout waiting for port "
					"loopback mode ready, timeout:%d ms\n",
					timeout * 10);
				break;
			}
			msleep(10);
		}
	}

loopback_mode_exit:
	/* clear loopback diagnostic mode */
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		phba->link_flag &= ~LS_LOOPBACK_MODE;
		spin_unlock_irq(&phba->hbalock);
	}
	lpfc_bsg_diag_mode_exit(phba);

job_done:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}

/**
 * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function checks and dispatches the bsg diag loopback command from the
 * user to the proper driver action routine.
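 *
 * Userspace reaches this entry point through the FC bsg node; a heavily
 * abridged, hypothetical sketch (SG_IO plumbing and vendor id checks left
 * out):
 *
 *	struct fc_bsg_request req = { .msgcode = FC_BSG_HST_VENDOR };
 *	req.rqst_data.h_vendor.vendor_cmd[0] = LPFC_BSG_VENDOR_DIAG_MODE;
 *	(submit via a struct sg_io_v4 SG_IO ioctl on the fc_host bsg node)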
 */
static int
lpfc_bsg_diag_loopback_mode(struct bsg_job *job)
{
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	int rc;

	shost = fc_bsg_to_shost(job);
	if (!shost)
		return -ENODEV;
	vport = shost_priv(shost);
	if (!vport)
		return -ENODEV;
	phba = vport->phba;
	if (!phba)
		return -ENODEV;

	if (phba->sli_rev < LPFC_SLI_REV4)
		rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
	else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
		 LPFC_SLI_INTF_IF_TYPE_2)
		rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
	else
		rc = -ENODEV;

	return rc;
}

/**
 * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
 * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
 *
 * This function handles the bsg diag mode-end command from the user and
 * restores the link to normal operation.
 */
static int
lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct diag_mode_set *loopback_mode_end_cmd;
	uint32_t timeout;
	int rc, i;

	shost = fc_bsg_to_shost(job);
	if (!shost)
		return -ENODEV;
	vport = shost_priv(shost);
	if (!vport)
		return -ENODEV;
	phba = vport->phba;
	if (!phba)
		return -ENODEV;

	if (phba->sli_rev < LPFC_SLI_REV4)
		return -ENODEV;
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		return -ENODEV;

	/* clear loopback diagnostic mode */
	spin_lock_irq(&phba->hbalock);
	phba->link_flag &= ~LS_LOOPBACK_MODE;
	spin_unlock_irq(&phba->hbalock);
	loopback_mode_end_cmd = (struct diag_mode_set *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	timeout = loopback_mode_end_cmd->timeout * 100;

	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3139 Failed to bring link to diagnostic "
				"state, rc:x%x\n", rc);
		goto loopback_mode_end_exit;
	}

	/* wait for link down before proceeding */
	i = 0;
	while (phba->link_state != LPFC_LINK_DOWN) {
		if (i++ > timeout) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3140 Timeout waiting for link to "
					"diagnostic mode_end, timeout:%d ms\n",
					timeout * 10);
			/* there is nothing much we can do here */
			break;
		}
		msleep(10);
	}

	/* reset port resource registrations */
	rc = lpfc_selective_reset(phba);
	phba->pport->fc_myDID = 0;

loopback_mode_end_exit:
	/* make return code available to userspace */
	bsg_reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}

/**
 * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
 * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
 *
 * This function performs an SLI4 diag link test request from the user
 * application.
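 *
 * The request carries a struct sli4_link_diag; an illustrative view of the
 * fields consumed below (values are hypothetical):
 *
 *	struct sli4_link_diag cmd = {
 *		.test_id      = 0,	/* which diagnostic test to run */
 *		.loops        = 1,	/* iteration count */
 *		.test_version = 0,
 *		.error_action = 0,	/* behavior on error */
 *	};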
2336 */ 2337 static int 2338 lpfc_sli4_bsg_link_diag_test(struct bsg_job *job) 2339 { 2340 struct fc_bsg_request *bsg_request = job->request; 2341 struct fc_bsg_reply *bsg_reply = job->reply; 2342 struct Scsi_Host *shost; 2343 struct lpfc_vport *vport; 2344 struct lpfc_hba *phba; 2345 LPFC_MBOXQ_t *pmboxq; 2346 struct sli4_link_diag *link_diag_test_cmd; 2347 uint32_t req_len, alloc_len; 2348 struct lpfc_mbx_run_link_diag_test *run_link_diag_test; 2349 union lpfc_sli4_cfg_shdr *shdr; 2350 uint32_t shdr_status, shdr_add_status; 2351 struct diag_status *diag_status_reply; 2352 int mbxstatus, rc = -ENODEV, rc1 = 0; 2353 2354 shost = fc_bsg_to_shost(job); 2355 if (!shost) 2356 goto job_error; 2357 2358 vport = shost_priv(shost); 2359 if (!vport) 2360 goto job_error; 2361 2362 phba = vport->phba; 2363 if (!phba) 2364 goto job_error; 2365 2366 2367 if (phba->sli_rev < LPFC_SLI_REV4) 2368 goto job_error; 2369 2370 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 2371 LPFC_SLI_INTF_IF_TYPE_2) 2372 goto job_error; 2373 2374 if (job->request_len < sizeof(struct fc_bsg_request) + 2375 sizeof(struct sli4_link_diag)) { 2376 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2377 "3013 Received LINK DIAG TEST request " 2378 " size:%d below the minimum size:%d\n", 2379 job->request_len, 2380 (int)(sizeof(struct fc_bsg_request) + 2381 sizeof(struct sli4_link_diag))); 2382 rc = -EINVAL; 2383 goto job_error; 2384 } 2385 2386 rc = lpfc_bsg_diag_mode_enter(phba); 2387 if (rc) 2388 goto job_error; 2389 2390 link_diag_test_cmd = (struct sli4_link_diag *) 2391 bsg_request->rqst_data.h_vendor.vendor_cmd; 2392 2393 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1); 2394 2395 if (rc) 2396 goto job_error; 2397 2398 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2399 if (!pmboxq) { 2400 rc = -ENOMEM; 2401 goto link_diag_test_exit; 2402 } 2403 2404 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) - 2405 sizeof(struct lpfc_sli4_cfg_mhdr)); 2406 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 2407 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE, 2408 req_len, LPFC_SLI4_MBX_EMBED); 2409 if (alloc_len != req_len) { 2410 rc = -ENOMEM; 2411 goto link_diag_test_exit; 2412 } 2413 2414 run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test; 2415 bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req, 2416 phba->sli4_hba.lnk_info.lnk_no); 2417 bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req, 2418 phba->sli4_hba.lnk_info.lnk_tp); 2419 bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req, 2420 link_diag_test_cmd->test_id); 2421 bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req, 2422 link_diag_test_cmd->loops); 2423 bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req, 2424 link_diag_test_cmd->test_version); 2425 bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req, 2426 link_diag_test_cmd->error_action); 2427 2428 mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 2429 2430 shdr = (union lpfc_sli4_cfg_shdr *) 2431 &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr; 2432 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 2433 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 2434 if (shdr_status || shdr_add_status || mbxstatus) { 2435 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 2436 "3010 Run link diag test mailbox failed with " 2437 "mbx_status x%x status x%x, add_status x%x\n", 2438 mbxstatus, shdr_status, shdr_add_status); 2439 } 2440 2441 diag_status_reply = (struct diag_status *) 
2442 bsg_reply->reply_data.vendor_reply.vendor_rsp; 2443 2444 if (job->reply_len < sizeof(*bsg_reply) + sizeof(*diag_status_reply)) { 2445 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2446 "3012 Received Run link diag test reply " 2447 "below minimum size (%d): reply_len:%d\n", 2448 (int)(sizeof(*bsg_reply) + 2449 sizeof(*diag_status_reply)), 2450 job->reply_len); 2451 rc = -EINVAL; 2452 goto job_error; 2453 } 2454 2455 diag_status_reply->mbox_status = mbxstatus; 2456 diag_status_reply->shdr_status = shdr_status; 2457 diag_status_reply->shdr_add_status = shdr_add_status; 2458 2459 link_diag_test_exit: 2460 rc1 = lpfc_sli4_bsg_set_link_diag_state(phba, 0); 2461 2462 if (pmboxq) 2463 mempool_free(pmboxq, phba->mbox_mem_pool); 2464 2465 lpfc_bsg_diag_mode_exit(phba); 2466 2467 job_error: 2468 /* make error code available to userspace */ 2469 if (rc1 && !rc) 2470 rc = rc1; 2471 bsg_reply->result = rc; 2472 /* complete the job back to userspace if no error */ 2473 if (rc == 0) 2474 bsg_job_done(job, bsg_reply->result, 2475 bsg_reply->reply_payload_rcv_len); 2476 return rc; 2477 } 2478 2479 /** 2480 * lpfcdiag_loop_self_reg - obtains a remote port login id 2481 * @phba: Pointer to HBA context object 2482 * @rpi: Pointer to a remote port login id 2483 * 2484 * This function obtains a remote port login id so the diag loopback test 2485 * can send and receive its own unsolicited CT command. 2486 **/ 2487 static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi) 2488 { 2489 LPFC_MBOXQ_t *mbox; 2490 struct lpfc_dmabuf *dmabuff; 2491 int status; 2492 2493 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2494 if (!mbox) 2495 return -ENOMEM; 2496 2497 if (phba->sli_rev < LPFC_SLI_REV4) 2498 status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID, 2499 (uint8_t *)&phba->pport->fc_sparam, 2500 mbox, *rpi); 2501 else { 2502 *rpi = lpfc_sli4_alloc_rpi(phba); 2503 if (*rpi == LPFC_RPI_ALLOC_ERROR) { 2504 mempool_free(mbox, phba->mbox_mem_pool); 2505 return -EBUSY; 2506 } 2507 status = lpfc_reg_rpi(phba, phba->pport->vpi, 2508 phba->pport->fc_myDID, 2509 (uint8_t *)&phba->pport->fc_sparam, 2510 mbox, *rpi); 2511 } 2512 2513 if (status) { 2514 mempool_free(mbox, phba->mbox_mem_pool); 2515 if (phba->sli_rev == LPFC_SLI_REV4) 2516 lpfc_sli4_free_rpi(phba, *rpi); 2517 return -ENOMEM; 2518 } 2519 2520 dmabuff = mbox->ctx_buf; 2521 mbox->ctx_buf = NULL; 2522 mbox->ctx_ndlp = NULL; 2523 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 2524 2525 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) { 2526 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys); 2527 kfree(dmabuff); 2528 if (status != MBX_TIMEOUT) 2529 mempool_free(mbox, phba->mbox_mem_pool); 2530 if (phba->sli_rev == LPFC_SLI_REV4) 2531 lpfc_sli4_free_rpi(phba, *rpi); 2532 return -ENODEV; 2533 } 2534 2535 if (phba->sli_rev < LPFC_SLI_REV4) 2536 *rpi = mbox->u.mb.un.varWords[0]; 2537 2538 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys); 2539 kfree(dmabuff); 2540 mempool_free(mbox, phba->mbox_mem_pool); 2541 return 0; 2542 } 2543 2544 /** 2545 * lpfcdiag_loop_self_unreg - unregs from the rpi 2546 * @phba: Pointer to HBA context object 2547 * @rpi: Remote port login id 2548 * 2549 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg 2550 **/ 2551 static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi) 2552 { 2553 LPFC_MBOXQ_t *mbox; 2554 int status; 2555 2556 /* Allocate mboxq structure */ 2557 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2558 if (mbox == NULL) 2559 return 
-ENOMEM;

	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_unreg_login(phba, 0, rpi, mbox);
	else
		lpfc_unreg_login(phba, phba->pport->vpi,
				 phba->sli4_hba.rpi_ids[rpi], mbox);

	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);

	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
		if (status != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	mempool_free(mbox, phba->mbox_mem_pool);
	if (phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_free_rpi(phba, rpi);
	return 0;
}

/**
 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
 * @phba: Pointer to HBA context object
 * @rpi: Remote port login id
 * @txxri: Pointer to transmit exchange id
 * @rxxri: Pointer to response exchange id
 *
 * This function obtains the transmit and receive ids required to send
 * an unsolicited ct command with a payload. Special lpfc FsType and CmdRsp
 * flags are used so the unsolicited response handler is able to process
 * the ct command sent on the same port.
 **/
static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
				 uint16_t *txxri, uint16_t *rxxri)
{
	struct lpfc_bsg_event *evt;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
	struct lpfc_dmabuf *dmabuf;
	struct ulp_bde64 *bpl = NULL;
	struct lpfc_sli_ct_request *ctreq = NULL;
	int ret_val = 0;
	int time_left;
	int iocb_stat = IOCB_SUCCESS;
	unsigned long flags;
	u32 status;

	*txxri = 0;
	*rxxri = 0;
	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
				 SLI_CT_ELX_LOOPBACK);
	if (!evt)
		return -ENOMEM;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_add(&evt->node, &phba->ct_ev_waiters);
	lpfc_bsg_event_ref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rspiocbq = lpfc_sli_get_iocbq(phba);

	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (dmabuf) {
		dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
		if (dmabuf->virt) {
			INIT_LIST_HEAD(&dmabuf->list);
			bpl = (struct ulp_bde64 *) dmabuf->virt;
			memset(bpl, 0, sizeof(*bpl));
			ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
			bpl->addrHigh =
				le32_to_cpu(putPaddrHigh(dmabuf->phys +
							 sizeof(*bpl)));
			bpl->addrLow =
				le32_to_cpu(putPaddrLow(dmabuf->phys +
							sizeof(*bpl)));
			bpl->tus.f.bdeFlags = 0;
			bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
		}
	}

	if (cmdiocbq == NULL || rspiocbq == NULL ||
	    dmabuf == NULL || bpl == NULL || ctreq == NULL ||
	    dmabuf->virt == NULL) {
		ret_val = -ENOMEM;
		goto err_get_xri_exit;
	}

	memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);

	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
	ctreq->RevisionId.bits.InId = 0;
	ctreq->FsType = SLI_CT_ELX_LOOPBACK;
	ctreq->FsSubType = 0;
	ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
	ctreq->CommandResponse.bits.Size = 0;

	cmdiocbq->bpl_dmabuf = dmabuf;
	cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->cmd_cmpl = NULL;

	lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, dmabuf, rpi, 0, 1,
				 FC_RCTL_DD_SOL_CTL, 0, CMD_XMIT_SEQUENCE64_CR);

	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
					     rspiocbq,
					     (phba->fc_ratov * 2)
					     + LPFC_DRVR_TIMEOUT);

	status = get_job_ulpstatus(phba, rspiocbq);
	if (iocb_stat != IOCB_SUCCESS || status != IOCB_SUCCESS) {
		ret_val = -EIO;
		goto err_get_xri_exit;
	}
	*txxri = get_job_ulpcontext(phba, rspiocbq);

	evt->waiting = 1;
	evt->wait_time_stamp = jiffies;
	time_left = wait_event_interruptible_timeout(
		evt->wq, !list_empty(&evt->events_to_see),
		msecs_to_jiffies(1000 *
			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
	if (list_empty(&evt->events_to_see))
		ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
	else {
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_move(evt->events_to_see.prev, &evt->events_to_get);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		*rxxri = (list_entry(evt->events_to_get.prev,
				     typeof(struct event_data),
				     node))->immed_dat;
	}
	evt->waiting = 0;

err_get_xri_exit:
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt); /* release ref */
	lpfc_bsg_event_unref(evt); /* delete */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (dmabuf) {
		if (dmabuf->virt)
			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}

	if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
		lpfc_sli_release_iocbq(phba, cmdiocbq);
	if (rspiocbq)
		lpfc_sli_release_iocbq(phba, rspiocbq);
	return ret_val;
}

/**
 * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffer
 * @phba: Pointer to HBA context object
 *
 * This function allocates a BSG_MBOX_SIZE (4KB) page-sized dma buffer and
 * returns the pointer to the buffer.
 **/
static struct lpfc_dmabuf *
lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf;
	struct pci_dev *pcidev = phba->pcidev;

	/* allocate dma buffer struct */
	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	INIT_LIST_HEAD(&dmabuf->list);

	/* now, allocate dma buffer */
	dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
					  &(dmabuf->phys), GFP_KERNEL);

	if (!dmabuf->virt) {
		kfree(dmabuf);
		return NULL;
	}

	return dmabuf;
}

/**
 * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
 * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
 *
 * This routine simply frees a dma buffer and its associated buffer
 * descriptor referred by @dmabuf.
 **/
static void
lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
{
	struct pci_dev *pcidev = phba->pcidev;

	if (!dmabuf)
		return;

	if (dmabuf->virt)
		dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
				  dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return;
}

/**
 * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
 * @phba: Pointer to HBA context object.
 * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
 *
 * This routine simply frees all dma buffers and their associated buffer
 * descriptors referred by @dmabuf_list.
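 *
 * A short pairing sketch with lpfc_bsg_dma_page_alloc() (illustrative;
 * error handling trimmed):
 *
 *	LIST_HEAD(page_list);
 *	struct lpfc_dmabuf *dmabuf = lpfc_bsg_dma_page_alloc(phba);
 *	if (dmabuf)
 *		list_add_tail(&dmabuf->list, &page_list);
 *	...
 *	lpfc_bsg_dma_page_list_free(phba, &page_list);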
 **/
static void
lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
			    struct list_head *dmabuf_list)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;

	if (list_empty(dmabuf_list))
		return;

	list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
		list_del_init(&dmabuf->list);
		lpfc_bsg_dma_page_free(phba, dmabuf);
	}
	return;
}

/**
 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
 * @phba: Pointer to HBA context object
 * @bpl: Pointer to 64 bit bde structure
 * @size: Number of bytes to process
 * @nocopydata: Flag indicating that data will not be copied into the buffers;
 *              when clear, the buffers are zeroed and marked as input BDEs
 *
 * This function allocates page size buffers chained into an lpfc_dmabufext
 * list and fills in the given BPL entries to point at them. The chained list
 * of page size buffers is returned.
 **/
static struct lpfc_dmabufext *
diag_cmd_data_alloc(struct lpfc_hba *phba,
		    struct ulp_bde64 *bpl, uint32_t size,
		    int nocopydata)
{
	struct lpfc_dmabufext *mlist = NULL;
	struct lpfc_dmabufext *dmp;
	int cnt, offset = 0, i = 0;
	struct pci_dev *pcidev;

	pcidev = phba->pcidev;

	while (size) {
		/* We get chunks of 4K */
		if (size > BUF_SZ_4K)
			cnt = BUF_SZ_4K;
		else
			cnt = size;

		/* allocate struct lpfc_dmabufext buffer header */
		dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
		if (!dmp)
			goto out;

		INIT_LIST_HEAD(&dmp->dma.list);

		/* Queue it to a linked list */
		if (mlist)
			list_add_tail(&dmp->dma.list, &mlist->dma.list);
		else
			mlist = dmp;

		/* allocate buffer */
		dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
						   cnt,
						   &(dmp->dma.phys),
						   GFP_KERNEL);

		if (!dmp->dma.virt)
			goto out;

		dmp->size = cnt;

		if (nocopydata) {
			bpl->tus.f.bdeFlags = 0;
		} else {
			memset((uint8_t *)dmp->dma.virt, 0, cnt);
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		}

		/* build buffer ptr list for IOCB */
		bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
		bpl->tus.f.bdeSize = (ushort) cnt;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;

		i++;
		offset += cnt;
		size -= cnt;
	}

	if (mlist) {
		mlist->flag = i;
		return mlist;
	}
out:
	diag_cmd_data_free(phba, mlist);
	return NULL;
}

/**
 * lpfcdiag_sli3_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
 * @phba: Pointer to HBA context object
 * @rxxri: Receive exchange id
 * @len: Number of data bytes
 *
 * This function allocates and posts a data buffer of sufficient size to receive
 * an unsolicited CT command.
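 *
 * Expected SLI-3 call sequence, mirroring the use in
 * lpfc_bsg_diag_loopback_run() (sketch only):
 *
 *	rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
 *	if (!rc)
 *		rc = lpfcdiag_sli3_loop_post_rxbufs(phba, rxxri, full_size);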
2882 **/ 2883 static int lpfcdiag_sli3_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, 2884 size_t len) 2885 { 2886 struct lpfc_sli_ring *pring; 2887 struct lpfc_iocbq *cmdiocbq; 2888 IOCB_t *cmd = NULL; 2889 struct list_head head, *curr, *next; 2890 struct lpfc_dmabuf *rxbmp; 2891 struct lpfc_dmabuf *dmp; 2892 struct lpfc_dmabuf *mp[2] = {NULL, NULL}; 2893 struct ulp_bde64 *rxbpl = NULL; 2894 uint32_t num_bde; 2895 struct lpfc_dmabufext *rxbuffer = NULL; 2896 int ret_val = 0; 2897 int iocb_stat; 2898 int i = 0; 2899 2900 pring = lpfc_phba_elsring(phba); 2901 2902 cmdiocbq = lpfc_sli_get_iocbq(phba); 2903 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2904 if (rxbmp != NULL) { 2905 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); 2906 if (rxbmp->virt) { 2907 INIT_LIST_HEAD(&rxbmp->list); 2908 rxbpl = (struct ulp_bde64 *) rxbmp->virt; 2909 rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0); 2910 } 2911 } 2912 2913 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer || !pring) { 2914 ret_val = -ENOMEM; 2915 goto err_post_rxbufs_exit; 2916 } 2917 2918 /* Queue buffers for the receive exchange */ 2919 num_bde = (uint32_t)rxbuffer->flag; 2920 dmp = &rxbuffer->dma; 2921 cmd = &cmdiocbq->iocb; 2922 i = 0; 2923 2924 INIT_LIST_HEAD(&head); 2925 list_add_tail(&head, &dmp->list); 2926 list_for_each_safe(curr, next, &head) { 2927 mp[i] = list_entry(curr, struct lpfc_dmabuf, list); 2928 list_del(curr); 2929 2930 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2931 mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba); 2932 cmd->un.quexri64cx.buff.bde.addrHigh = 2933 putPaddrHigh(mp[i]->phys); 2934 cmd->un.quexri64cx.buff.bde.addrLow = 2935 putPaddrLow(mp[i]->phys); 2936 cmd->un.quexri64cx.buff.bde.tus.f.bdeSize = 2937 ((struct lpfc_dmabufext *)mp[i])->size; 2938 cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag; 2939 cmd->ulpCommand = CMD_QUE_XRI64_CX; 2940 cmd->ulpPU = 0; 2941 cmd->ulpLe = 1; 2942 cmd->ulpBdeCount = 1; 2943 cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0; 2944 2945 } else { 2946 cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys); 2947 cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys); 2948 cmd->un.cont64[i].tus.f.bdeSize = 2949 ((struct lpfc_dmabufext *)mp[i])->size; 2950 cmd->ulpBdeCount = ++i; 2951 2952 if ((--num_bde > 0) && (i < 2)) 2953 continue; 2954 2955 cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX; 2956 cmd->ulpLe = 1; 2957 } 2958 2959 cmd->ulpClass = CLASS3; 2960 cmd->ulpContext = rxxri; 2961 2962 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 2963 0); 2964 if (iocb_stat == IOCB_ERROR) { 2965 diag_cmd_data_free(phba, 2966 (struct lpfc_dmabufext *)mp[0]); 2967 if (mp[1]) 2968 diag_cmd_data_free(phba, 2969 (struct lpfc_dmabufext *)mp[1]); 2970 dmp = list_entry(next, struct lpfc_dmabuf, list); 2971 ret_val = -EIO; 2972 goto err_post_rxbufs_exit; 2973 } 2974 2975 lpfc_sli_ringpostbuf_put(phba, pring, mp[0]); 2976 if (mp[1]) { 2977 lpfc_sli_ringpostbuf_put(phba, pring, mp[1]); 2978 mp[1] = NULL; 2979 } 2980 2981 /* The iocb was freed by lpfc_sli_issue_iocb */ 2982 cmdiocbq = lpfc_sli_get_iocbq(phba); 2983 if (!cmdiocbq) { 2984 dmp = list_entry(next, struct lpfc_dmabuf, list); 2985 ret_val = -EIO; 2986 goto err_post_rxbufs_exit; 2987 } 2988 cmd = &cmdiocbq->iocb; 2989 i = 0; 2990 } 2991 list_del(&head); 2992 2993 err_post_rxbufs_exit: 2994 2995 if (rxbmp) { 2996 if (rxbmp->virt) 2997 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys); 2998 kfree(rxbmp); 2999 } 3000 3001 if (cmdiocbq) 3002 lpfc_sli_release_iocbq(phba, cmdiocbq); 3003 return 
ret_val;
}

/**
 * lpfc_bsg_diag_loopback_run - run loopback on a port by issuing ct cmd to itself
 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
 *
 * This function receives a user data buffer to be transmitted and received on
 * the same port; the link must be up and in loopback mode prior to this call.
 * 1. A kernel buffer is allocated to copy the user data into.
 * 2. The port registers with "itself".
 * 3. The transmit and receive exchange ids are obtained.
 * 4. The receive exchange id is posted.
 * 5. A new els loopback event is created.
 * 6. The command and response iocbs are allocated.
 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
 *
 * This function is meant to be called n times while the port is in loopback
 * so it is the app's responsibility to issue a reset to take the port out
 * of loopback mode.
 **/
static int
lpfc_bsg_diag_loopback_run(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_bsg_event *evt;
	struct event_data *evdat;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t size;
	uint32_t full_size;
	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
	uint16_t rpi = 0;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
	union lpfc_wqe128 *cmdwqe, *rspwqe;
	struct lpfc_sli_ct_request *ctreq;
	struct lpfc_dmabuf *txbmp;
	struct ulp_bde64 *txbpl = NULL;
	struct lpfc_dmabufext *txbuffer = NULL;
	struct list_head head;
	struct lpfc_dmabuf *curr;
	uint16_t txxri = 0, rxxri;
	uint32_t num_bde;
	uint8_t *ptr = NULL, *rx_databuf = NULL;
	int rc = 0;
	int time_left;
	int iocb_stat = IOCB_SUCCESS;
	unsigned long flags;
	void *dataout = NULL;
	uint32_t total_mem;

	/* in case no data is returned return just the return code */
	bsg_reply->reply_payload_rcv_len = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2739 Received DIAG TEST request below minimum "
				"size\n");
		rc = -EINVAL;
		goto loopback_test_exit;
	}

	if (job->request_payload.payload_len !=
	    job->reply_payload.payload_len) {
		rc = -EINVAL;
		goto loopback_test_exit;
	}

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
		rc = -EACCES;
		goto loopback_test_exit;
	}

	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
		rc = -EACCES;
		goto loopback_test_exit;
	}

	size = job->request_payload.payload_len;
	full_size = size + ELX_LOOPBACK_HEADER_SZ;	/* plus the header */

	if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
		rc = -ERANGE;
		goto loopback_test_exit;
	}

	if (full_size >= BUF_SZ_4K) {
		/*
		 * Allocate memory for ioctl data. If buffer is bigger than 64k,
		 * then we allocate 64k and re-use that buffer over and over to
		 * xfer the whole block. This is because Linux kernel has a
		 * problem allocating more than 120k of kernel space memory. Saw
		 * problem with GET_FCPTARGETMAPPING...
3101 */ 3102 if (size <= (64 * 1024)) 3103 total_mem = full_size; 3104 else 3105 total_mem = 64 * 1024; 3106 } else 3107 /* Allocate memory for ioctl data */ 3108 total_mem = BUF_SZ_4K; 3109 3110 dataout = kmalloc(total_mem, GFP_KERNEL); 3111 if (dataout == NULL) { 3112 rc = -ENOMEM; 3113 goto loopback_test_exit; 3114 } 3115 3116 ptr = dataout; 3117 ptr += ELX_LOOPBACK_HEADER_SZ; 3118 sg_copy_to_buffer(job->request_payload.sg_list, 3119 job->request_payload.sg_cnt, 3120 ptr, size); 3121 rc = lpfcdiag_loop_self_reg(phba, &rpi); 3122 if (rc) 3123 goto loopback_test_exit; 3124 3125 if (phba->sli_rev < LPFC_SLI_REV4) { 3126 rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri); 3127 if (rc) { 3128 lpfcdiag_loop_self_unreg(phba, rpi); 3129 goto loopback_test_exit; 3130 } 3131 3132 rc = lpfcdiag_sli3_loop_post_rxbufs(phba, rxxri, full_size); 3133 if (rc) { 3134 lpfcdiag_loop_self_unreg(phba, rpi); 3135 goto loopback_test_exit; 3136 } 3137 } 3138 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid, 3139 SLI_CT_ELX_LOOPBACK); 3140 if (!evt) { 3141 lpfcdiag_loop_self_unreg(phba, rpi); 3142 rc = -ENOMEM; 3143 goto loopback_test_exit; 3144 } 3145 3146 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3147 list_add(&evt->node, &phba->ct_ev_waiters); 3148 lpfc_bsg_event_ref(evt); 3149 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3150 3151 cmdiocbq = lpfc_sli_get_iocbq(phba); 3152 if (phba->sli_rev < LPFC_SLI_REV4) 3153 rspiocbq = lpfc_sli_get_iocbq(phba); 3154 txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 3155 3156 if (txbmp) { 3157 txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys); 3158 if (txbmp->virt) { 3159 INIT_LIST_HEAD(&txbmp->list); 3160 txbpl = (struct ulp_bde64 *) txbmp->virt; 3161 txbuffer = diag_cmd_data_alloc(phba, 3162 txbpl, full_size, 0); 3163 } 3164 } 3165 3166 if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) { 3167 rc = -ENOMEM; 3168 goto err_loopback_test_exit; 3169 } 3170 if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) { 3171 rc = -ENOMEM; 3172 goto err_loopback_test_exit; 3173 } 3174 3175 cmdwqe = &cmdiocbq->wqe; 3176 memset(cmdwqe, 0, sizeof(*cmdwqe)); 3177 if (phba->sli_rev < LPFC_SLI_REV4) { 3178 rspwqe = &rspiocbq->wqe; 3179 memset(rspwqe, 0, sizeof(*rspwqe)); 3180 } 3181 3182 INIT_LIST_HEAD(&head); 3183 list_add_tail(&head, &txbuffer->dma.list); 3184 list_for_each_entry(curr, &head, list) { 3185 segment_len = ((struct lpfc_dmabufext *)curr)->size; 3186 if (current_offset == 0) { 3187 ctreq = curr->virt; 3188 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ); 3189 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; 3190 ctreq->RevisionId.bits.InId = 0; 3191 ctreq->FsType = SLI_CT_ELX_LOOPBACK; 3192 ctreq->FsSubType = 0; 3193 ctreq->CommandResponse.bits.CmdRsp = cpu_to_be16(ELX_LOOPBACK_DATA); 3194 ctreq->CommandResponse.bits.Size = cpu_to_be16(size); 3195 segment_offset = ELX_LOOPBACK_HEADER_SZ; 3196 } else 3197 segment_offset = 0; 3198 3199 BUG_ON(segment_offset >= segment_len); 3200 memcpy(curr->virt + segment_offset, 3201 ptr + current_offset, 3202 segment_len - segment_offset); 3203 3204 current_offset += segment_len - segment_offset; 3205 BUG_ON(current_offset > size); 3206 } 3207 list_del(&head); 3208 3209 /* Build the XMIT_SEQUENCE iocb */ 3210 num_bde = (uint32_t)txbuffer->flag; 3211 3212 cmdiocbq->num_bdes = num_bde; 3213 cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC; 3214 cmdiocbq->cmd_flag |= LPFC_IO_LOOPBACK; 3215 if (phba->cfg_vmid_app_header) 3216 cmdiocbq->cmd_flag |= LPFC_IO_VMID; 3217 3218 cmdiocbq->vport = phba->pport; 3219 cmdiocbq->cmd_cmpl = 
NULL; 3220 cmdiocbq->bpl_dmabuf = txbmp; 3221 3222 if (phba->sli_rev < LPFC_SLI_REV4) { 3223 lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp, 0, txxri, 3224 num_bde, FC_RCTL_DD_UNSOL_CTL, 1, 3225 CMD_XMIT_SEQUENCE64_CX); 3226 3227 } else { 3228 lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp, 3229 phba->sli4_hba.rpi_ids[rpi], 0xffff, 3230 full_size, FC_RCTL_DD_UNSOL_CTL, 1, 3231 CMD_XMIT_SEQUENCE64_WQE); 3232 cmdiocbq->sli4_xritag = NO_XRI; 3233 } 3234 3235 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, 3236 rspiocbq, (phba->fc_ratov * 2) + 3237 LPFC_DRVR_TIMEOUT); 3238 if (iocb_stat != IOCB_SUCCESS || 3239 (phba->sli_rev < LPFC_SLI_REV4 && 3240 (get_job_ulpstatus(phba, rspiocbq) != IOSTAT_SUCCESS))) { 3241 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3242 "3126 Failed loopback test issue iocb: " 3243 "iocb_stat:x%x\n", iocb_stat); 3244 rc = -EIO; 3245 goto err_loopback_test_exit; 3246 } 3247 3248 evt->waiting = 1; 3249 time_left = wait_event_interruptible_timeout( 3250 evt->wq, !list_empty(&evt->events_to_see), 3251 msecs_to_jiffies(1000 * 3252 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); 3253 evt->waiting = 0; 3254 if (list_empty(&evt->events_to_see)) { 3255 rc = (time_left) ? -EINTR : -ETIMEDOUT; 3256 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3257 "3125 Not receiving unsolicited event, " 3258 "rc:x%x\n", rc); 3259 } else { 3260 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3261 list_move(evt->events_to_see.prev, &evt->events_to_get); 3262 evdat = list_entry(evt->events_to_get.prev, 3263 typeof(*evdat), node); 3264 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3265 rx_databuf = evdat->data; 3266 if (evdat->len != full_size) { 3267 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3268 "1603 Loopback test did not receive expected " 3269 "data length. 
actual length 0x%x expected " 3270 "length 0x%x\n", 3271 evdat->len, full_size); 3272 rc = -EIO; 3273 } else if (rx_databuf == NULL) 3274 rc = -EIO; 3275 else { 3276 rc = IOCB_SUCCESS; 3277 /* skip over elx loopback header */ 3278 rx_databuf += ELX_LOOPBACK_HEADER_SZ; 3279 bsg_reply->reply_payload_rcv_len = 3280 sg_copy_from_buffer(job->reply_payload.sg_list, 3281 job->reply_payload.sg_cnt, 3282 rx_databuf, size); 3283 bsg_reply->reply_payload_rcv_len = size; 3284 } 3285 } 3286 3287 err_loopback_test_exit: 3288 lpfcdiag_loop_self_unreg(phba, rpi); 3289 3290 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3291 lpfc_bsg_event_unref(evt); /* release ref */ 3292 lpfc_bsg_event_unref(evt); /* delete */ 3293 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3294 3295 if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT)) 3296 lpfc_sli_release_iocbq(phba, cmdiocbq); 3297 3298 if (rspiocbq != NULL) 3299 lpfc_sli_release_iocbq(phba, rspiocbq); 3300 3301 if (txbmp != NULL) { 3302 if (txbpl != NULL) { 3303 if (txbuffer != NULL) 3304 diag_cmd_data_free(phba, txbuffer); 3305 lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys); 3306 } 3307 kfree(txbmp); 3308 } 3309 3310 loopback_test_exit: 3311 kfree(dataout); 3312 /* make error code available to userspace */ 3313 bsg_reply->result = rc; 3314 job->dd_data = NULL; 3315 /* complete the job back to userspace if no error */ 3316 if (rc == IOCB_SUCCESS) 3317 bsg_job_done(job, bsg_reply->result, 3318 bsg_reply->reply_payload_rcv_len); 3319 return rc; 3320 } 3321 3322 /** 3323 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command 3324 * @job: GET_DFC_REV fc_bsg_job 3325 **/ 3326 static int 3327 lpfc_bsg_get_dfc_rev(struct bsg_job *job) 3328 { 3329 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); 3330 struct fc_bsg_reply *bsg_reply = job->reply; 3331 struct lpfc_hba *phba = vport->phba; 3332 struct get_mgmt_rev_reply *event_reply; 3333 int rc = 0; 3334 3335 if (job->request_len < 3336 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) { 3337 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3338 "2740 Received GET_DFC_REV request below " 3339 "minimum size\n"); 3340 rc = -EINVAL; 3341 goto job_error; 3342 } 3343 3344 event_reply = (struct get_mgmt_rev_reply *) 3345 bsg_reply->reply_data.vendor_reply.vendor_rsp; 3346 3347 if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) { 3348 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3349 "2741 Received GET_DFC_REV reply below " 3350 "minimum size\n"); 3351 rc = -EINVAL; 3352 goto job_error; 3353 } 3354 3355 event_reply->info.a_Major = MANAGEMENT_MAJOR_REV; 3356 event_reply->info.a_Minor = MANAGEMENT_MINOR_REV; 3357 job_error: 3358 bsg_reply->result = rc; 3359 if (rc == 0) 3360 bsg_job_done(job, bsg_reply->result, 3361 bsg_reply->reply_payload_rcv_len); 3362 return rc; 3363 } 3364 3365 /** 3366 * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler 3367 * @phba: Pointer to HBA context object. 3368 * @pmboxq: Pointer to mailbox command. 3369 * 3370 * This is completion handler function for mailbox commands issued from 3371 * lpfc_bsg_issue_mbox function. This function is called by the 3372 * mailbox event handler function with no lock held. This function 3373 * will wake up thread waiting on the wait queue pointed by dd_data 3374 * of the mailbox. 
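 *
 * Hedged wiring sketch, consistent with how this handler retrieves its
 * context below (the issuing path is assumed to set up):
 *
 *	pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
 *	pmboxq->ctx_u.dd_data = dd_data;
 *	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);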
3375 **/ 3376 static void 3377 lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3378 { 3379 struct bsg_job_data *dd_data; 3380 struct fc_bsg_reply *bsg_reply; 3381 struct bsg_job *job; 3382 uint32_t size; 3383 unsigned long flags; 3384 uint8_t *pmb, *pmb_buf; 3385 3386 dd_data = pmboxq->ctx_u.dd_data; 3387 3388 /* 3389 * The outgoing buffer is readily referred from the dma buffer, 3390 * just need to get header part from mailboxq structure. 3391 */ 3392 pmb = (uint8_t *)&pmboxq->u.mb; 3393 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb; 3394 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t)); 3395 3396 /* Determine if job has been aborted */ 3397 3398 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3399 job = dd_data->set_job; 3400 if (job) { 3401 /* Prevent timeout handling from trying to abort job */ 3402 job->dd_data = NULL; 3403 } 3404 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3405 3406 /* Copy the mailbox data to the job if it is still active */ 3407 3408 if (job) { 3409 bsg_reply = job->reply; 3410 size = job->reply_payload.payload_len; 3411 bsg_reply->reply_payload_rcv_len = 3412 sg_copy_from_buffer(job->reply_payload.sg_list, 3413 job->reply_payload.sg_cnt, 3414 pmb_buf, size); 3415 } 3416 3417 dd_data->set_job = NULL; 3418 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool); 3419 lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers); 3420 kfree(dd_data); 3421 3422 /* Complete the job if the job is still active */ 3423 3424 if (job) { 3425 bsg_reply->result = 0; 3426 bsg_job_done(job, bsg_reply->result, 3427 bsg_reply->reply_payload_rcv_len); 3428 } 3429 return; 3430 } 3431 3432 /** 3433 * lpfc_bsg_check_cmd_access - test for a supported mailbox command 3434 * @phba: Pointer to HBA context object. 3435 * @mb: Pointer to a mailbox object. 3436 * @vport: Pointer to a vport object. 3437 * 3438 * Some commands require the port to be offline, some may not be called from 3439 * the application. 
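 *
 * Typical gate before issuing a user-supplied mailbox command (sketch):
 *
 *	rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
 *	if (rc)
 *		return rc;	/* command rejected with -EPERM */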
 **/
static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
				     MAILBOX_t *mb, struct lpfc_vport *vport)
{
	/* return negative error values for bsg job */
	switch (mb->mbxCommand) {
	/* Offline only */
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_SET_MASK:
		if (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2743 Command 0x%x is illegal in on-line "
					"state\n",
					mb->mbxCommand);
			return -EPERM;
		}
		break;
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_STATUS:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_DUMP_MEMORY:
	case MBX_DOWN_LOAD:
	case MBX_UPDATE_CFG:
	case MBX_KILL_BOARD:
	case MBX_READ_TOPOLOGY:
	case MBX_LOAD_AREA:
	case MBX_LOAD_EXP_ROM:
	case MBX_BEACON:
	case MBX_DEL_LD_ENTRY:
	case MBX_SET_DEBUG:
	case MBX_WRITE_WWN:
	case MBX_SLI4_CONFIG:
	case MBX_READ_EVENT_LOG:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_WRITE_EVENT_LOG:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_RUN_BIU_DIAG64:
		break;
	case MBX_SET_VARIABLE:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1226 mbox: set_variable 0x%x, 0x%x\n",
				mb->un.varWords[0],
				mb->un.varWords[1]);
		break;
	case MBX_READ_SPARM64:
	case MBX_REG_LOGIN:
	case MBX_REG_LOGIN64:
	case MBX_CONFIG_PORT:
	case MBX_RUN_BIU_DIAG:
	default:
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2742 Unknown Command 0x%x\n",
				mb->mbxCommand);
		return -EPERM;
	}

	return 0; /* ok */
}

/**
 * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
 * @phba: Pointer to HBA context object.
 *
 * This routine cleans up and resets BSG handling of a multi-buffer mbox
 * command session.
 **/
static void
lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
{
	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
		return;

	/* free all memory, including dma buffers */
	lpfc_bsg_dma_page_list_free(phba,
				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
	lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
	/* multi-buffer write mailbox command pass-through complete */
	memset((char *)&phba->mbox_ext_buf_ctx, 0,
	       sizeof(struct lpfc_mbox_ext_buf_ctx));
	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	return;
}

/**
 * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This routine handles the BSG job for mailbox command completions with
 * multiple external buffers.
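 *
 * Caller pattern, as used by the read/write completion handlers that follow
 * (sketch):
 *
 *	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
 *	if (job) {
 *		bsg_reply = job->reply;
 *		bsg_job_done(job, bsg_reply->result,
 *			     bsg_reply->reply_payload_rcv_len);
 *	}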
3548 **/ 3549 static struct bsg_job * 3550 lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3551 { 3552 struct bsg_job_data *dd_data; 3553 struct bsg_job *job; 3554 struct fc_bsg_reply *bsg_reply; 3555 uint8_t *pmb, *pmb_buf; 3556 unsigned long flags; 3557 uint32_t size; 3558 int rc = 0; 3559 struct lpfc_dmabuf *dmabuf; 3560 struct lpfc_sli_config_mbox *sli_cfg_mbx; 3561 uint8_t *pmbx; 3562 3563 dd_data = pmboxq->ctx_u.dd_data; 3564 3565 /* Determine if job has been aborted */ 3566 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3567 job = dd_data->set_job; 3568 if (job) { 3569 bsg_reply = job->reply; 3570 /* Prevent timeout handling from trying to abort job */ 3571 job->dd_data = NULL; 3572 } 3573 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3574 3575 /* 3576 * The outgoing buffer is readily referred from the dma buffer, 3577 * just need to get header part from mailboxq structure. 3578 */ 3579 3580 pmb = (uint8_t *)&pmboxq->u.mb; 3581 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb; 3582 /* Copy the byte swapped response mailbox back to the user */ 3583 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t)); 3584 /* if there is any non-embedded extended data copy that too */ 3585 dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf; 3586 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; 3587 if (!bsg_bf_get(lpfc_mbox_hdr_emb, 3588 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) { 3589 pmbx = (uint8_t *)dmabuf->virt; 3590 /* byte swap the extended data following the mailbox command */ 3591 lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)], 3592 &pmbx[sizeof(MAILBOX_t)], 3593 sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len); 3594 } 3595 3596 /* Complete the job if the job is still active */ 3597 3598 if (job) { 3599 size = job->reply_payload.payload_len; 3600 bsg_reply->reply_payload_rcv_len = 3601 sg_copy_from_buffer(job->reply_payload.sg_list, 3602 job->reply_payload.sg_cnt, 3603 pmb_buf, size); 3604 3605 /* result for successful */ 3606 bsg_reply->result = 0; 3607 3608 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3609 "2937 SLI_CONFIG ext-buffer mailbox command " 3610 "(x%x/x%x) complete bsg job done, bsize:%d\n", 3611 phba->mbox_ext_buf_ctx.nembType, 3612 phba->mbox_ext_buf_ctx.mboxType, size); 3613 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, 3614 phba->mbox_ext_buf_ctx.nembType, 3615 phba->mbox_ext_buf_ctx.mboxType, 3616 dma_ebuf, sta_pos_addr, 3617 phba->mbox_ext_buf_ctx.mbx_dmabuf, 0); 3618 } else { 3619 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3620 "2938 SLI_CONFIG ext-buffer mailbox " 3621 "command (x%x/x%x) failure, rc:x%x\n", 3622 phba->mbox_ext_buf_ctx.nembType, 3623 phba->mbox_ext_buf_ctx.mboxType, rc); 3624 } 3625 3626 3627 /* state change */ 3628 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE; 3629 kfree(dd_data); 3630 return job; 3631 } 3632 3633 /** 3634 * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox 3635 * @phba: Pointer to HBA context object. 3636 * @pmboxq: Pointer to mailbox command. 3637 * 3638 * This is completion handler function for mailbox read commands with multiple 3639 * external buffers. 
3640 **/ 3641 static void 3642 lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3643 { 3644 struct bsg_job *job; 3645 struct fc_bsg_reply *bsg_reply; 3646 3647 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); 3648 3649 /* handle the BSG job with mailbox command */ 3650 if (!job) 3651 pmboxq->u.mb.mbxStatus = MBXERR_ERROR; 3652 3653 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3654 "2939 SLI_CONFIG ext-buffer rd mailbox command " 3655 "complete, ctxState:x%x, mbxStatus:x%x\n", 3656 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); 3657 3658 if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1) 3659 lpfc_bsg_mbox_ext_session_reset(phba); 3660 3661 /* free base driver mailbox structure memory */ 3662 mempool_free(pmboxq, phba->mbox_mem_pool); 3663 3664 /* if the job is still active, call job done */ 3665 if (job) { 3666 bsg_reply = job->reply; 3667 bsg_job_done(job, bsg_reply->result, 3668 bsg_reply->reply_payload_rcv_len); 3669 } 3670 return; 3671 } 3672 3673 /** 3674 * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox 3675 * @phba: Pointer to HBA context object. 3676 * @pmboxq: Pointer to mailbox command. 3677 * 3678 * This is completion handler function for mailbox write commands with multiple 3679 * external buffers. 3680 **/ 3681 static void 3682 lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3683 { 3684 struct bsg_job *job; 3685 struct fc_bsg_reply *bsg_reply; 3686 3687 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); 3688 3689 /* handle the BSG job with the mailbox command */ 3690 if (!job) 3691 pmboxq->u.mb.mbxStatus = MBXERR_ERROR; 3692 3693 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3694 "2940 SLI_CONFIG ext-buffer wr mailbox command " 3695 "complete, ctxState:x%x, mbxStatus:x%x\n", 3696 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); 3697 3698 /* free all memory, including dma buffers */ 3699 mempool_free(pmboxq, phba->mbox_mem_pool); 3700 lpfc_bsg_mbox_ext_session_reset(phba); 3701 3702 /* if the job is still active, call job done */ 3703 if (job) { 3704 bsg_reply = job->reply; 3705 bsg_job_done(job, bsg_reply->result, 3706 bsg_reply->reply_payload_rcv_len); 3707 } 3708 3709 return; 3710 } 3711 3712 static void 3713 lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp, 3714 uint32_t index, struct lpfc_dmabuf *mbx_dmabuf, 3715 struct lpfc_dmabuf *ext_dmabuf) 3716 { 3717 struct lpfc_sli_config_mbox *sli_cfg_mbx; 3718 3719 /* pointer to the start of mailbox command */ 3720 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt; 3721 3722 if (nemb_tp == nemb_mse) { 3723 if (index == 0) { 3724 sli_cfg_mbx->un.sli_config_emb0_subsys. 3725 mse[index].pa_hi = 3726 putPaddrHigh(mbx_dmabuf->phys + 3727 sizeof(MAILBOX_t)); 3728 sli_cfg_mbx->un.sli_config_emb0_subsys. 3729 mse[index].pa_lo = 3730 putPaddrLow(mbx_dmabuf->phys + 3731 sizeof(MAILBOX_t)); 3732 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3733 "2943 SLI_CONFIG(mse)[%d], " 3734 "bufLen:%d, addrHi:x%x, addrLo:x%x\n", 3735 index, 3736 sli_cfg_mbx->un.sli_config_emb0_subsys. 3737 mse[index].buf_len, 3738 sli_cfg_mbx->un.sli_config_emb0_subsys. 3739 mse[index].pa_hi, 3740 sli_cfg_mbx->un.sli_config_emb0_subsys. 3741 mse[index].pa_lo); 3742 } else { 3743 sli_cfg_mbx->un.sli_config_emb0_subsys. 3744 mse[index].pa_hi = 3745 putPaddrHigh(ext_dmabuf->phys); 3746 sli_cfg_mbx->un.sli_config_emb0_subsys. 
3747 mse[index].pa_lo = 3748 putPaddrLow(ext_dmabuf->phys); 3749 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3750 "2944 SLI_CONFIG(mse)[%d], " 3751 "bufLen:%d, addrHi:x%x, addrLo:x%x\n", 3752 index, 3753 sli_cfg_mbx->un.sli_config_emb0_subsys. 3754 mse[index].buf_len, 3755 sli_cfg_mbx->un.sli_config_emb0_subsys. 3756 mse[index].pa_hi, 3757 sli_cfg_mbx->un.sli_config_emb0_subsys. 3758 mse[index].pa_lo); 3759 } 3760 } else { 3761 if (index == 0) { 3762 sli_cfg_mbx->un.sli_config_emb1_subsys. 3763 hbd[index].pa_hi = 3764 putPaddrHigh(mbx_dmabuf->phys + 3765 sizeof(MAILBOX_t)); 3766 sli_cfg_mbx->un.sli_config_emb1_subsys. 3767 hbd[index].pa_lo = 3768 putPaddrLow(mbx_dmabuf->phys + 3769 sizeof(MAILBOX_t)); 3770 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3771 "3007 SLI_CONFIG(hbd)[%d], " 3772 "bufLen:%d, addrHi:x%x, addrLo:x%x\n", 3773 index, 3774 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len, 3775 &sli_cfg_mbx->un. 3776 sli_config_emb1_subsys.hbd[index]), 3777 sli_cfg_mbx->un.sli_config_emb1_subsys. 3778 hbd[index].pa_hi, 3779 sli_cfg_mbx->un.sli_config_emb1_subsys. 3780 hbd[index].pa_lo); 3781 3782 } else { 3783 sli_cfg_mbx->un.sli_config_emb1_subsys. 3784 hbd[index].pa_hi = 3785 putPaddrHigh(ext_dmabuf->phys); 3786 sli_cfg_mbx->un.sli_config_emb1_subsys. 3787 hbd[index].pa_lo = 3788 putPaddrLow(ext_dmabuf->phys); 3789 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3790 "3008 SLI_CONFIG(hbd)[%d], " 3791 "bufLen:%d, addrHi:x%x, addrLo:x%x\n", 3792 index, 3793 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len, 3794 &sli_cfg_mbx->un. 3795 sli_config_emb1_subsys.hbd[index]), 3796 sli_cfg_mbx->un.sli_config_emb1_subsys. 3797 hbd[index].pa_hi, 3798 sli_cfg_mbx->un.sli_config_emb1_subsys. 3799 hbd[index].pa_lo); 3800 } 3801 } 3802 return; 3803 } 3804 3805 /** 3806 * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read 3807 * @phba: Pointer to HBA context object. 3808 * @job: Pointer to the job object. 3809 * @nemb_tp: Enumerate of non-embedded mailbox command type. 3810 * @dmabuf: Pointer to a DMA buffer descriptor. 3811 * 3812 * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with 3813 * non-embedded external buffers. 
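 *
 * Returns SLI_CONFIG_HANDLED when the mailbox command has been issued to
 * the port; otherwise a negative error code is returned and the session
 * state is reset to LPFC_BSG_MBOX_IDLE.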
3814  **/
3815 static int
3816 lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
3817 			      enum nemb_type nemb_tp,
3818 			      struct lpfc_dmabuf *dmabuf)
3819 {
3820 	struct fc_bsg_request *bsg_request = job->request;
3821 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3822 	struct dfc_mbox_req *mbox_req;
3823 	struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
3824 	uint32_t ext_buf_cnt, ext_buf_index;
3825 	struct lpfc_dmabuf *ext_dmabuf = NULL;
3826 	struct bsg_job_data *dd_data = NULL;
3827 	LPFC_MBOXQ_t *pmboxq = NULL;
3828 	MAILBOX_t *pmb;
3829 	uint8_t *pmbx;
3830 	int rc, i;
3831 
3832 	mbox_req =
3833 	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
3834 
3835 	/* pointer to the start of mailbox command */
3836 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3837 
3838 	if (nemb_tp == nemb_mse) {
3839 		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3840 			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3841 		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3842 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3843 					"2945 Handled SLI_CONFIG(mse) rd, "
3844 					"ext_buf_cnt(%d) out of range(%d)\n",
3845 					ext_buf_cnt,
3846 					LPFC_MBX_SLI_CONFIG_MAX_MSE);
3847 			rc = -ERANGE;
3848 			goto job_error;
3849 		}
3850 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3851 				"2941 Handled SLI_CONFIG(mse) rd, "
3852 				"ext_buf_cnt:%d\n", ext_buf_cnt);
3853 	} else {
3854 		/* sanity check on interface type for support */
3855 		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
3856 		    LPFC_SLI_INTF_IF_TYPE_2) {
3857 			rc = -ENODEV;
3858 			goto job_error;
3859 		}
3860 		/* nemb_tp == nemb_hbd */
3861 		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3862 		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3863 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3864 					"2946 Handled SLI_CONFIG(hbd) rd, "
3865 					"ext_buf_cnt(%d) out of range(%d)\n",
3866 					ext_buf_cnt,
3867 					LPFC_MBX_SLI_CONFIG_MAX_HBD);
3868 			rc = -ERANGE;
3869 			goto job_error;
3870 		}
3871 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3872 				"2942 Handled SLI_CONFIG(hbd) rd, "
3873 				"ext_buf_cnt:%d\n", ext_buf_cnt);
3874 	}
3875 
3876 	/* before dma descriptor setup */
3877 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3878 					sta_pre_addr, dmabuf, ext_buf_cnt);
3879 
3880 	/* reject a non-embedded mailbox command with no external buffer */
3881 	if (ext_buf_cnt == 0) {
3882 		rc = -EPERM;
3883 		goto job_error;
3884 	} else if (ext_buf_cnt > 1) {
3885 		/* additional external read buffers */
3886 		for (i = 1; i < ext_buf_cnt; i++) {
3887 			ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
3888 			if (!ext_dmabuf) {
3889 				rc = -ENOMEM;
3890 				goto job_error;
3891 			}
3892 			list_add_tail(&ext_dmabuf->list,
3893 				      &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3894 		}
3895 	}
3896 
3897 	/* bsg tracking structure */
3898 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3899 	if (!dd_data) {
3900 		rc = -ENOMEM;
3901 		goto job_error;
3902 	}
3903 
3904 	/* mailbox command structure for base driver */
3905 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3906 	if (!pmboxq) {
3907 		rc = -ENOMEM;
3908 		goto job_error;
3909 	}
3910 	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3911 
3912 	/* for the first external buffer */
3913 	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3914 
3915 	/* for the rest of external buffer descriptors if any */
3916 	if (ext_buf_cnt > 1) {
3917 		ext_buf_index = 1;
3918 		list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
3919 				&phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
3920 			lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
3921
ext_buf_index, dmabuf, 3922 curr_dmabuf); 3923 ext_buf_index++; 3924 } 3925 } 3926 3927 /* after dma descriptor setup */ 3928 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox, 3929 sta_pos_addr, dmabuf, ext_buf_cnt); 3930 3931 /* construct base driver mbox command */ 3932 pmb = &pmboxq->u.mb; 3933 pmbx = (uint8_t *)dmabuf->virt; 3934 memcpy(pmb, pmbx, sizeof(*pmb)); 3935 pmb->mbxOwner = OWN_HOST; 3936 pmboxq->vport = phba->pport; 3937 3938 /* multi-buffer handling context */ 3939 phba->mbox_ext_buf_ctx.nembType = nemb_tp; 3940 phba->mbox_ext_buf_ctx.mboxType = mbox_rd; 3941 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt; 3942 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag; 3943 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum; 3944 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf; 3945 3946 /* callback for multi-buffer read mailbox command */ 3947 pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl; 3948 3949 /* context fields to callback function */ 3950 pmboxq->ctx_u.dd_data = dd_data; 3951 dd_data->type = TYPE_MBOX; 3952 dd_data->set_job = job; 3953 dd_data->context_un.mbox.pmboxq = pmboxq; 3954 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx; 3955 job->dd_data = dd_data; 3956 3957 /* state change */ 3958 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT; 3959 3960 /* 3961 * Non-embedded mailbox subcommand data gets byte swapped here because 3962 * the lower level driver code only does the first 64 mailbox words. 3963 */ 3964 if ((!bsg_bf_get(lpfc_mbox_hdr_emb, 3965 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) && 3966 (nemb_tp == nemb_mse)) 3967 lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)], 3968 &pmbx[sizeof(MAILBOX_t)], 3969 sli_cfg_mbx->un.sli_config_emb0_subsys. 3970 mse[0].buf_len); 3971 3972 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 3973 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) { 3974 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3975 "2947 Issued SLI_CONFIG ext-buffer " 3976 "mailbox command, rc:x%x\n", rc); 3977 return SLI_CONFIG_HANDLED; 3978 } 3979 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3980 "2948 Failed to issue SLI_CONFIG ext-buffer " 3981 "mailbox command, rc:x%x\n", rc); 3982 rc = -EPIPE; 3983 3984 job_error: 3985 if (pmboxq) 3986 mempool_free(pmboxq, phba->mbox_mem_pool); 3987 lpfc_bsg_dma_page_list_free(phba, 3988 &phba->mbox_ext_buf_ctx.ext_dmabuf_list); 3989 kfree(dd_data); 3990 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE; 3991 return rc; 3992 } 3993 3994 /** 3995 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write 3996 * @phba: Pointer to HBA context object. 3997 * @job: Pointer to the job object. 3998 * @nemb_tp: Enumerate of non-embedded mailbox command type. 3999 * @dmabuf: Pointer to a DMA buffer descriptor. 4000 * 4001 * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with 4002 * non-embedded external buffers. 
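 *
 * Returns SLI_CONFIG_HANDLED when the command has been issued to the port,
 * or when the driver must first collect further external buffers from user
 * space; otherwise a negative error code.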
4003  **/
4004 static int
4005 lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
4006 			       enum nemb_type nemb_tp,
4007 			       struct lpfc_dmabuf *dmabuf)
4008 {
4009 	struct fc_bsg_request *bsg_request = job->request;
4010 	struct fc_bsg_reply *bsg_reply = job->reply;
4011 	struct dfc_mbox_req *mbox_req;
4012 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
4013 	uint32_t ext_buf_cnt;
4014 	struct bsg_job_data *dd_data = NULL;
4015 	LPFC_MBOXQ_t *pmboxq = NULL;
4016 	MAILBOX_t *pmb;
4017 	uint8_t *mbx;
4018 	int rc = SLI_CONFIG_NOT_HANDLED, i;
4019 
4020 	mbox_req =
4021 	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4022 
4023 	/* pointer to the start of mailbox command */
4024 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
4025 
4026 	if (nemb_tp == nemb_mse) {
4027 		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
4028 			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
4029 		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
4030 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4031 					"2953 Failed SLI_CONFIG(mse) wr, "
4032 					"ext_buf_cnt(%d) out of range(%d)\n",
4033 					ext_buf_cnt,
4034 					LPFC_MBX_SLI_CONFIG_MAX_MSE);
4035 			return -ERANGE;
4036 		}
4037 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4038 				"2949 Handled SLI_CONFIG(mse) wr, "
4039 				"ext_buf_cnt:%d\n", ext_buf_cnt);
4040 	} else {
4041 		/* sanity check on interface type for support */
4042 		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
4043 		    LPFC_SLI_INTF_IF_TYPE_2)
4044 			return -ENODEV;
4045 		/* nemb_tp == nemb_hbd */
4046 		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
4047 		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
4048 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4049 					"2954 Failed SLI_CONFIG(hbd) wr, "
4050 					"ext_buf_cnt(%d) out of range(%d)\n",
4051 					ext_buf_cnt,
4052 					LPFC_MBX_SLI_CONFIG_MAX_HBD);
4053 			return -ERANGE;
4054 		}
4055 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4056 				"2950 Handled SLI_CONFIG(hbd) wr, "
4057 				"ext_buf_cnt:%d\n", ext_buf_cnt);
4058 	}
4059 
4060 	/* before dma buffer descriptor setup */
4061 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
4062 					sta_pre_addr, dmabuf, ext_buf_cnt);
4063 
4064 	if (ext_buf_cnt == 0)
4065 		return -EPERM;
4066 
4067 	/* for the first external buffer */
4068 	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
4069 
4070 	/* after dma descriptor setup */
4071 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
4072 					sta_pos_addr, dmabuf, ext_buf_cnt);
4073 
4074 	/* log the lengths of the additional external buffers to come */
4075 	for (i = 1; i < ext_buf_cnt; i++) {
4076 		if (nemb_tp == nemb_mse)
4077 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4078 				"2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
4079 				i, sli_cfg_mbx->un.sli_config_emb0_subsys.
4080 				mse[i].buf_len);
4081 		else
4082 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4083 				"2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
4084 				i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
4085 				&sli_cfg_mbx->un.sli_config_emb1_subsys.
4086 				hbd[i]));
4087 	}
4088 
4089 	/* multi-buffer handling context */
4090 	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
4091 	phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
4092 	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
4093 	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
4094 	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
4095 	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
4096 
4097 	if (ext_buf_cnt == 1) {
4098 		/* bsg tracking structure */
4099 		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4100 		if (!dd_data) {
4101 			rc = -ENOMEM;
4102 			goto job_error;
4103 		}
4104 
4105 		/* mailbox command structure for base driver */
4106 		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4107 		if (!pmboxq) {
4108 			rc = -ENOMEM;
4109 			goto job_error;
4110 		}
4111 		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4112 		pmb = &pmboxq->u.mb;
4113 		mbx = (uint8_t *)dmabuf->virt;
4114 		memcpy(pmb, mbx, sizeof(*pmb));
4115 		pmb->mbxOwner = OWN_HOST;
4116 		pmboxq->vport = phba->pport;
4117 
4118 		/* callback for multi-buffer write mailbox command */
4119 		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4120 
4121 		/* context fields to callback function */
4122 		pmboxq->ctx_u.dd_data = dd_data;
4123 		dd_data->type = TYPE_MBOX;
4124 		dd_data->set_job = job;
4125 		dd_data->context_un.mbox.pmboxq = pmboxq;
4126 		dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
4127 		job->dd_data = dd_data;
4128 
4129 		/* state change */
4130 		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4131 
4132 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4133 		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4134 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4135 					"2955 Issued SLI_CONFIG ext-buffer "
4136 					"mailbox command, rc:x%x\n", rc);
4137 			return SLI_CONFIG_HANDLED;
4138 		}
4139 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4140 				"2956 Failed to issue SLI_CONFIG ext-buffer "
4141 				"mailbox command, rc:x%x\n", rc);
4142 		rc = -EPIPE;
4143 		goto job_error;
4144 	}
4145 
4146 	/* wait for additional external buffers */
4147 
4148 	bsg_reply->result = 0;
4149 	bsg_job_done(job, bsg_reply->result,
4150 		     bsg_reply->reply_payload_rcv_len);
4151 	return SLI_CONFIG_HANDLED;
4152 
4153 job_error:
4154 	if (pmboxq)
4155 		mempool_free(pmboxq, phba->mbox_mem_pool);
4156 	kfree(dd_data);
4157 
4158 	return rc;
4159 }
4160 
4161 /**
4162  * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
4163  * @phba: Pointer to HBA context object.
4164  * @job: Pointer to the job object.
4165  * @dmabuf: Pointer to a DMA buffer descriptor.
4166  *
4167  * This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
4168  * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
4169  * with embedded subsystem 0x1 and opcodes with external HBDs.
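 *
 * Returns SLI_CONFIG_HANDLED if the command was taken over by this routine,
 * SLI_CONFIG_NOT_HANDLED to let the regular mailbox path process it, or a
 * negative error code when the command is rejected.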
4170 **/ 4171 static int 4172 lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job, 4173 struct lpfc_dmabuf *dmabuf) 4174 { 4175 struct lpfc_sli_config_mbox *sli_cfg_mbx; 4176 uint32_t subsys; 4177 uint32_t opcode; 4178 int rc = SLI_CONFIG_NOT_HANDLED; 4179 4180 /* state change on new multi-buffer pass-through mailbox command */ 4181 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST; 4182 4183 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; 4184 4185 if (!bsg_bf_get(lpfc_mbox_hdr_emb, 4186 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) { 4187 subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys, 4188 &sli_cfg_mbx->un.sli_config_emb0_subsys); 4189 opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode, 4190 &sli_cfg_mbx->un.sli_config_emb0_subsys); 4191 if (subsys == SLI_CONFIG_SUBSYS_FCOE) { 4192 switch (opcode) { 4193 case FCOE_OPCODE_READ_FCF: 4194 case FCOE_OPCODE_GET_DPORT_RESULTS: 4195 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4196 "2957 Handled SLI_CONFIG " 4197 "subsys_fcoe, opcode:x%x\n", 4198 opcode); 4199 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job, 4200 nemb_mse, dmabuf); 4201 break; 4202 case FCOE_OPCODE_ADD_FCF: 4203 case FCOE_OPCODE_SET_DPORT_MODE: 4204 case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE: 4205 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4206 "2958 Handled SLI_CONFIG " 4207 "subsys_fcoe, opcode:x%x\n", 4208 opcode); 4209 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job, 4210 nemb_mse, dmabuf); 4211 break; 4212 default: 4213 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4214 "2959 Reject SLI_CONFIG " 4215 "subsys_fcoe, opcode:x%x\n", 4216 opcode); 4217 rc = -EPERM; 4218 break; 4219 } 4220 } else if (subsys == SLI_CONFIG_SUBSYS_COMN) { 4221 switch (opcode) { 4222 case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES: 4223 case COMN_OPCODE_GET_CNTL_ATTRIBUTES: 4224 case COMN_OPCODE_GET_PROFILE_CONFIG: 4225 case COMN_OPCODE_SET_FEATURES: 4226 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4227 "3106 Handled SLI_CONFIG " 4228 "subsys_comn, opcode:x%x\n", 4229 opcode); 4230 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job, 4231 nemb_mse, dmabuf); 4232 break; 4233 default: 4234 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4235 "3107 Reject SLI_CONFIG " 4236 "subsys_comn, opcode:x%x\n", 4237 opcode); 4238 rc = -EPERM; 4239 break; 4240 } 4241 } else { 4242 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4243 "2977 Reject SLI_CONFIG " 4244 "subsys:x%d, opcode:x%x\n", 4245 subsys, opcode); 4246 rc = -EPERM; 4247 } 4248 } else { 4249 subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys, 4250 &sli_cfg_mbx->un.sli_config_emb1_subsys); 4251 opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode, 4252 &sli_cfg_mbx->un.sli_config_emb1_subsys); 4253 if (subsys == SLI_CONFIG_SUBSYS_COMN) { 4254 switch (opcode) { 4255 case COMN_OPCODE_READ_OBJECT: 4256 case COMN_OPCODE_READ_OBJECT_LIST: 4257 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4258 "2960 Handled SLI_CONFIG " 4259 "subsys_comn, opcode:x%x\n", 4260 opcode); 4261 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job, 4262 nemb_hbd, dmabuf); 4263 break; 4264 case COMN_OPCODE_WRITE_OBJECT: 4265 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4266 "2961 Handled SLI_CONFIG " 4267 "subsys_comn, opcode:x%x\n", 4268 opcode); 4269 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job, 4270 nemb_hbd, dmabuf); 4271 break; 4272 default: 4273 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4274 "2962 Not handled SLI_CONFIG " 4275 "subsys_comn, opcode:x%x\n", 4276 opcode); 4277 rc = SLI_CONFIG_NOT_HANDLED; 4278 break; 4279 } 4280 } else { 4281 lpfc_printf_log(phba, KERN_INFO, 
LOG_LIBDFC, 4282 "2978 Not handled SLI_CONFIG " 4283 "subsys:x%d, opcode:x%x\n", 4284 subsys, opcode); 4285 rc = SLI_CONFIG_NOT_HANDLED; 4286 } 4287 } 4288 4289 /* state reset on not handled new multi-buffer mailbox command */ 4290 if (rc != SLI_CONFIG_HANDLED) 4291 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE; 4292 4293 return rc; 4294 } 4295 4296 /** 4297 * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers 4298 * @phba: Pointer to HBA context object. 4299 * 4300 * This routine is for requesting to abort a pass-through mailbox command with 4301 * multiple external buffers due to error condition. 4302 **/ 4303 static void 4304 lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba) 4305 { 4306 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT) 4307 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS; 4308 else 4309 lpfc_bsg_mbox_ext_session_reset(phba); 4310 return; 4311 } 4312 4313 /** 4314 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer 4315 * @phba: Pointer to HBA context object. 4316 * @job: Pointer to the job object. 4317 * 4318 * This routine extracts the next mailbox read external buffer back to 4319 * user space through BSG. 4320 **/ 4321 static int 4322 lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job) 4323 { 4324 struct fc_bsg_reply *bsg_reply = job->reply; 4325 struct lpfc_sli_config_mbox *sli_cfg_mbx; 4326 struct lpfc_dmabuf *dmabuf; 4327 uint8_t *pbuf; 4328 uint32_t size; 4329 uint32_t index; 4330 4331 index = phba->mbox_ext_buf_ctx.seqNum; 4332 phba->mbox_ext_buf_ctx.seqNum++; 4333 4334 sli_cfg_mbx = (struct lpfc_sli_config_mbox *) 4335 phba->mbox_ext_buf_ctx.mbx_dmabuf->virt; 4336 4337 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) { 4338 size = bsg_bf_get(lpfc_mbox_sli_config_mse_len, 4339 &sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]); 4340 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4341 "2963 SLI_CONFIG (mse) ext-buffer rd get " 4342 "buffer[%d], size:%d\n", index, size); 4343 } else { 4344 size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len, 4345 &sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]); 4346 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4347 "2964 SLI_CONFIG (hbd) ext-buffer rd get " 4348 "buffer[%d], size:%d\n", index, size); 4349 } 4350 if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list)) 4351 return -EPIPE; 4352 dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list, 4353 struct lpfc_dmabuf, list); 4354 list_del_init(&dmabuf->list); 4355 4356 /* after dma buffer descriptor setup */ 4357 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType, 4358 mbox_rd, dma_ebuf, sta_pos_addr, 4359 dmabuf, index); 4360 4361 pbuf = (uint8_t *)dmabuf->virt; 4362 bsg_reply->reply_payload_rcv_len = 4363 sg_copy_from_buffer(job->reply_payload.sg_list, 4364 job->reply_payload.sg_cnt, 4365 pbuf, size); 4366 4367 lpfc_bsg_dma_page_free(phba, dmabuf); 4368 4369 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) { 4370 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4371 "2965 SLI_CONFIG (hbd) ext-buffer rd mbox " 4372 "command session done\n"); 4373 lpfc_bsg_mbox_ext_session_reset(phba); 4374 } 4375 4376 bsg_reply->result = 0; 4377 bsg_job_done(job, bsg_reply->result, 4378 bsg_reply->reply_payload_rcv_len); 4379 4380 return SLI_CONFIG_HANDLED; 4381 } 4382 4383 /** 4384 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer 4385 * @phba: Pointer to HBA context object. 4386 * @job: Pointer to the job object. 
4387  * @dmabuf: Pointer to a DMA buffer descriptor.
4388  *
4389  * This routine sets up the next mailbox write external buffer obtained
4390  * from user space through BSG.
4391  **/
4392 static int
4393 lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
4394 			struct lpfc_dmabuf *dmabuf)
4395 {
4396 	struct fc_bsg_reply *bsg_reply = job->reply;
4397 	struct bsg_job_data *dd_data = NULL;
4398 	LPFC_MBOXQ_t *pmboxq = NULL;
4399 	MAILBOX_t *pmb;
4400 	enum nemb_type nemb_tp;
4401 	uint8_t *pbuf;
4402 	uint32_t size;
4403 	uint32_t index;
4404 	int rc;
4405 
4406 	index = phba->mbox_ext_buf_ctx.seqNum;
4407 	phba->mbox_ext_buf_ctx.seqNum++;
4408 	nemb_tp = phba->mbox_ext_buf_ctx.nembType;
4409 
4410 	pbuf = (uint8_t *)dmabuf->virt;
4411 	size = job->request_payload.payload_len;
4412 	sg_copy_to_buffer(job->request_payload.sg_list,
4413 			  job->request_payload.sg_cnt,
4414 			  pbuf, size);
4415 
4416 	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4417 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4418 				"2966 SLI_CONFIG (mse) ext-buffer wr set "
4419 				"buffer[%d], size:%d\n",
4420 				phba->mbox_ext_buf_ctx.seqNum, size);
4421 
4422 	} else {
4423 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4424 				"2967 SLI_CONFIG (hbd) ext-buffer wr set "
4425 				"buffer[%d], size:%d\n",
4426 				phba->mbox_ext_buf_ctx.seqNum, size);
4427 
4428 	}
4429 
4430 	/* set up external buffer descriptor and add to external buffer list */
4431 	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
4432 					phba->mbox_ext_buf_ctx.mbx_dmabuf,
4433 					dmabuf);
4434 	list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4435 
4436 	/* after write dma buffer */
4437 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4438 					mbox_wr, dma_ebuf, sta_pos_addr,
4439 					dmabuf, index);
4440 
4441 	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4442 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4443 				"2968 SLI_CONFIG ext-buffer wr all %d "
4444 				"ebuffers received\n",
4445 				phba->mbox_ext_buf_ctx.numBuf);
4446 
4447 		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4448 		if (!dd_data) {
4449 			rc = -ENOMEM;
4450 			goto job_error;
4451 		}
4452 
4453 		/* mailbox command structure for base driver */
4454 		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4455 		if (!pmboxq) {
4456 			rc = -ENOMEM;
4457 			goto job_error;
4458 		}
4459 		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4460 		pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4461 		pmb = &pmboxq->u.mb;
4462 		memcpy(pmb, pbuf, sizeof(*pmb));
4463 		pmb->mbxOwner = OWN_HOST;
4464 		pmboxq->vport = phba->pport;
4465 
4466 		/* callback for multi-buffer write mailbox command */
4467 		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4468 
4469 		/* context fields to callback function */
4470 		pmboxq->ctx_u.dd_data = dd_data;
4471 		dd_data->type = TYPE_MBOX;
4472 		dd_data->set_job = job;
4473 		dd_data->context_un.mbox.pmboxq = pmboxq;
4474 		dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
4475 		job->dd_data = dd_data;
4476 
4477 		/* state change */
4478 		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4479 
4480 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4481 		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4482 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4483 					"2969 Issued SLI_CONFIG ext-buffer "
4484 					"mailbox command, rc:x%x\n", rc);
4485 			return SLI_CONFIG_HANDLED;
4486 		}
4487 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4488 				"2970 Failed to issue SLI_CONFIG ext-buffer "
4489 				"mailbox command, rc:x%x\n", rc);
4490 		rc = -EPIPE;
4491 		goto job_error;
4492 	}
4493 
4494 	/* wait for
additional external buffers */ 4495 bsg_reply->result = 0; 4496 bsg_job_done(job, bsg_reply->result, 4497 bsg_reply->reply_payload_rcv_len); 4498 return SLI_CONFIG_HANDLED; 4499 4500 job_error: 4501 if (pmboxq) 4502 mempool_free(pmboxq, phba->mbox_mem_pool); 4503 lpfc_bsg_dma_page_free(phba, dmabuf); 4504 kfree(dd_data); 4505 4506 return rc; 4507 } 4508 4509 /** 4510 * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd 4511 * @phba: Pointer to HBA context object. 4512 * @job: Pointer to the job object. 4513 * @dmabuf: Pointer to a DMA buffer descriptor. 4514 * 4515 * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox 4516 * command with multiple non-embedded external buffers. 4517 **/ 4518 static int 4519 lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job, 4520 struct lpfc_dmabuf *dmabuf) 4521 { 4522 int rc; 4523 4524 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4525 "2971 SLI_CONFIG buffer (type:x%x)\n", 4526 phba->mbox_ext_buf_ctx.mboxType); 4527 4528 if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) { 4529 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) { 4530 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 4531 "2972 SLI_CONFIG rd buffer state " 4532 "mismatch:x%x\n", 4533 phba->mbox_ext_buf_ctx.state); 4534 lpfc_bsg_mbox_ext_abort(phba); 4535 return -EPIPE; 4536 } 4537 rc = lpfc_bsg_read_ebuf_get(phba, job); 4538 if (rc == SLI_CONFIG_HANDLED) 4539 lpfc_bsg_dma_page_free(phba, dmabuf); 4540 } else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */ 4541 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) { 4542 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 4543 "2973 SLI_CONFIG wr buffer state " 4544 "mismatch:x%x\n", 4545 phba->mbox_ext_buf_ctx.state); 4546 lpfc_bsg_mbox_ext_abort(phba); 4547 return -EPIPE; 4548 } 4549 rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf); 4550 } 4551 return rc; 4552 } 4553 4554 /** 4555 * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer 4556 * @phba: Pointer to HBA context object. 4557 * @job: Pointer to the job object. 4558 * @dmabuf: Pointer to a DMA buffer descriptor. 4559 * 4560 * This routine checks and handles non-embedded multi-buffer SLI_CONFIG 4561 * (0x9B) mailbox commands and external buffers. 
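 *
 * A multi-buffer session is driven by a sequence of BSG requests from user
 * space, for example (illustrative):
 *
 *   request with extSeqNum == 1:  SLI_CONFIG mailbox plus first buffer,
 *                                 routed to lpfc_bsg_handle_sli_cfg_mbox()
 *   requests 2..numBuf:           additional external buffers, routed to
 *                                 lpfc_bsg_handle_sli_cfg_ebuf()
 *
 * Any tag or sequence-number mismatch is treated as a broken pipe: the
 * session is reset and -EPIPE is returned.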
4562 **/ 4563 static int 4564 lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job, 4565 struct lpfc_dmabuf *dmabuf) 4566 { 4567 struct fc_bsg_request *bsg_request = job->request; 4568 struct dfc_mbox_req *mbox_req; 4569 int rc = SLI_CONFIG_NOT_HANDLED; 4570 4571 mbox_req = 4572 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd; 4573 4574 /* mbox command with/without single external buffer */ 4575 if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0) 4576 return rc; 4577 4578 /* mbox command and first external buffer */ 4579 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) { 4580 if (mbox_req->extSeqNum == 1) { 4581 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4582 "2974 SLI_CONFIG mailbox: tag:%d, " 4583 "seq:%d\n", mbox_req->extMboxTag, 4584 mbox_req->extSeqNum); 4585 rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf); 4586 return rc; 4587 } else 4588 goto sli_cfg_ext_error; 4589 } 4590 4591 /* 4592 * handle additional external buffers 4593 */ 4594 4595 /* check broken pipe conditions */ 4596 if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag) 4597 goto sli_cfg_ext_error; 4598 if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf) 4599 goto sli_cfg_ext_error; 4600 if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1) 4601 goto sli_cfg_ext_error; 4602 4603 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4604 "2975 SLI_CONFIG mailbox external buffer: " 4605 "extSta:x%x, tag:%d, seq:%d\n", 4606 phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag, 4607 mbox_req->extSeqNum); 4608 rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf); 4609 return rc; 4610 4611 sli_cfg_ext_error: 4612 /* all other cases, broken pipe */ 4613 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 4614 "2976 SLI_CONFIG mailbox broken pipe: " 4615 "ctxSta:x%x, ctxNumBuf:%d " 4616 "ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n", 4617 phba->mbox_ext_buf_ctx.state, 4618 phba->mbox_ext_buf_ctx.numBuf, 4619 phba->mbox_ext_buf_ctx.mbxTag, 4620 phba->mbox_ext_buf_ctx.seqNum, 4621 mbox_req->extMboxTag, mbox_req->extSeqNum); 4622 4623 lpfc_bsg_mbox_ext_session_reset(phba); 4624 4625 return -EPIPE; 4626 } 4627 4628 /** 4629 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app 4630 * @phba: Pointer to HBA context object. 4631 * @job: Pointer to the job object. 4632 * @vport: Pointer to a vport object. 4633 * 4634 * Allocate a tracking object, mailbox command memory, get a mailbox 4635 * from the mailbox pool, copy the caller mailbox command. 4636 * 4637 * If offline and the sli is active we need to poll for the command (port is 4638 * being reset) and complete the job, otherwise issue the mailbox command and 4639 * let our completion handler finish the command. 
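 *
 * Returns 1 when the command was issued to the port (or taken over by the
 * SLI_CONFIG multi-buffer pass-through) and will be completed later, 0 when
 * the command completed inline, or a negative error code on failure.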
4640 **/ 4641 static int 4642 lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job, 4643 struct lpfc_vport *vport) 4644 { 4645 struct fc_bsg_request *bsg_request = job->request; 4646 struct fc_bsg_reply *bsg_reply = job->reply; 4647 LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */ 4648 MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */ 4649 /* a 4k buffer to hold the mb and extended data from/to the bsg */ 4650 uint8_t *pmbx = NULL; 4651 struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */ 4652 struct lpfc_dmabuf *dmabuf = NULL; 4653 struct dfc_mbox_req *mbox_req; 4654 struct READ_EVENT_LOG_VAR *rdEventLog; 4655 uint32_t transmit_length, receive_length, mode; 4656 struct lpfc_mbx_sli4_config *sli4_config; 4657 struct lpfc_mbx_nembed_cmd *nembed_sge; 4658 struct ulp_bde64 *bde; 4659 uint8_t *ext = NULL; 4660 int rc = 0; 4661 uint8_t *from; 4662 uint32_t size; 4663 4664 /* in case no data is transferred */ 4665 bsg_reply->reply_payload_rcv_len = 0; 4666 4667 /* sanity check to protect driver */ 4668 if (job->reply_payload.payload_len > BSG_MBOX_SIZE || 4669 job->request_payload.payload_len > BSG_MBOX_SIZE) { 4670 rc = -ERANGE; 4671 goto job_done; 4672 } 4673 4674 /* 4675 * Don't allow mailbox commands to be sent when blocked or when in 4676 * the middle of discovery 4677 */ 4678 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { 4679 rc = -EAGAIN; 4680 goto job_done; 4681 } 4682 4683 mbox_req = 4684 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd; 4685 4686 /* check if requested extended data lengths are valid */ 4687 if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) || 4688 (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) { 4689 rc = -ERANGE; 4690 goto job_done; 4691 } 4692 4693 dmabuf = lpfc_bsg_dma_page_alloc(phba); 4694 if (!dmabuf || !dmabuf->virt) { 4695 rc = -ENOMEM; 4696 goto job_done; 4697 } 4698 4699 /* Get the mailbox command or external buffer from BSG */ 4700 pmbx = (uint8_t *)dmabuf->virt; 4701 size = job->request_payload.payload_len; 4702 sg_copy_to_buffer(job->request_payload.sg_list, 4703 job->request_payload.sg_cnt, pmbx, size); 4704 4705 /* Handle possible SLI_CONFIG with non-embedded payloads */ 4706 if (phba->sli_rev == LPFC_SLI_REV4) { 4707 rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf); 4708 if (rc == SLI_CONFIG_HANDLED) 4709 goto job_cont; 4710 if (rc) 4711 goto job_done; 4712 /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */ 4713 } 4714 4715 rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport); 4716 if (rc != 0) 4717 goto job_done; /* must be negative */ 4718 4719 /* allocate our bsg tracking structure */ 4720 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 4721 if (!dd_data) { 4722 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 4723 "2727 Failed allocation of dd_data\n"); 4724 rc = -ENOMEM; 4725 goto job_done; 4726 } 4727 4728 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4729 if (!pmboxq) { 4730 rc = -ENOMEM; 4731 goto job_done; 4732 } 4733 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 4734 4735 pmb = &pmboxq->u.mb; 4736 memcpy(pmb, pmbx, sizeof(*pmb)); 4737 pmb->mbxOwner = OWN_HOST; 4738 pmboxq->vport = vport; 4739 4740 /* If HBA encountered an error attention, allow only DUMP 4741 * or RESTART mailbox commands until the HBA is restarted. 
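	 * Note that the command is still issued in that case; the driver
	 * only logs a warning.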
4742 	 */
4743 	if (phba->pport->stopped &&
4744 	    pmb->mbxCommand != MBX_DUMP_MEMORY &&
4745 	    pmb->mbxCommand != MBX_RESTART &&
4746 	    pmb->mbxCommand != MBX_WRITE_VPARMS &&
4747 	    pmb->mbxCommand != MBX_WRITE_WWN)
4748 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
4749 				"2797 mbox: Issued mailbox cmd "
4750 				"0x%x while in stopped state.\n",
4751 				pmb->mbxCommand);
4752 
4753 	/* extended mailbox commands will need an extended buffer */
4754 	if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
4755 		from = pmbx;
4756 		ext = from + sizeof(MAILBOX_t);
4757 		pmboxq->ext_buf = ext;
4758 		pmboxq->in_ext_byte_len =
4759 			mbox_req->inExtWLen * sizeof(uint32_t);
4760 		pmboxq->out_ext_byte_len =
4761 			mbox_req->outExtWLen * sizeof(uint32_t);
4762 		pmboxq->mbox_offset_word = mbox_req->mbOffset;
4763 	}
4764 
4765 	/* biu diag will need a kernel buffer to transfer the data;
4766 	 * allocate our own buffer and set up the mailbox command to
4767 	 * use ours
4768 	 */
4769 	if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
4770 		transmit_length = pmb->un.varWords[1];
4771 		receive_length = pmb->un.varWords[4];
4772 		/* transmit length cannot be greater than receive length or
4773 		 * mailbox extension size
4774 		 */
4775 		if ((transmit_length > receive_length) ||
4776 		    (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4777 			rc = -ERANGE;
4778 			goto job_done;
4779 		}
4780 		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
4781 			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
4782 		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
4783 			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
4784 
4785 		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
4786 			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
4787 			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4788 		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
4789 			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
4790 			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4791 	} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
4792 		rdEventLog = &pmb->un.varRdEventLog;
4793 		receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
4794 		mode = bf_get(lpfc_event_log, rdEventLog);
4795 
4796 		/* receive length cannot be greater than mailbox
4797 		 * extension size
4798 		 */
4799 		if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4800 			rc = -ERANGE;
4801 			goto job_done;
4802 		}
4803 
4804 		/* mode zero uses a bde like biu diags command */
4805 		if (mode == 0) {
4806 			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4807 							+ sizeof(MAILBOX_t));
4808 			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4809 							+ sizeof(MAILBOX_t));
4810 		}
4811 	} else if (phba->sli_rev == LPFC_SLI_REV4) {
4812 		/* Let type 4 (well known data) through because the data is
4813 		 * returned in varWords[4-8];
4814 		 * otherwise check the receive length and fetch the buffer addr
4815 		 */
4816 		if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
4817 		    (pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
4818 			/* rebuild the command for sli4 using our own buffers
4819 			 * like we do for biu diags
4820 			 */
4821 			receive_length = pmb->un.varWords[2];
4822 			/* receive length cannot be greater than mailbox
4823 			 * extension size
4824 			 */
4825 			if (receive_length == 0) {
4826 				rc = -ERANGE;
4827 				goto job_done;
4828 			}
4829 			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4830 						+ sizeof(MAILBOX_t));
4831 			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4832 						+ sizeof(MAILBOX_t));
4833 		} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
4834 			   pmb->un.varUpdateCfg.co) {
4835 			bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
4836 
4837 			/* bde size cannot be greater than mailbox ext size */
4838 			if (bde->tus.f.bdeSize >
4839 			    BSG_MBOX_SIZE
- sizeof(MAILBOX_t)) { 4840 rc = -ERANGE; 4841 goto job_done; 4842 } 4843 bde->addrHigh = putPaddrHigh(dmabuf->phys 4844 + sizeof(MAILBOX_t)); 4845 bde->addrLow = putPaddrLow(dmabuf->phys 4846 + sizeof(MAILBOX_t)); 4847 } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) { 4848 /* Handling non-embedded SLI_CONFIG mailbox command */ 4849 sli4_config = &pmboxq->u.mqe.un.sli4_config; 4850 if (!bf_get(lpfc_mbox_hdr_emb, 4851 &sli4_config->header.cfg_mhdr)) { 4852 /* rebuild the command for sli4 using our 4853 * own buffers like we do for biu diags 4854 */ 4855 nembed_sge = (struct lpfc_mbx_nembed_cmd *) 4856 &pmb->un.varWords[0]; 4857 receive_length = nembed_sge->sge[0].length; 4858 4859 /* receive length cannot be greater than 4860 * mailbox extension size 4861 */ 4862 if ((receive_length == 0) || 4863 (receive_length > 4864 BSG_MBOX_SIZE - sizeof(MAILBOX_t))) { 4865 rc = -ERANGE; 4866 goto job_done; 4867 } 4868 4869 nembed_sge->sge[0].pa_hi = 4870 putPaddrHigh(dmabuf->phys 4871 + sizeof(MAILBOX_t)); 4872 nembed_sge->sge[0].pa_lo = 4873 putPaddrLow(dmabuf->phys 4874 + sizeof(MAILBOX_t)); 4875 } 4876 } 4877 } 4878 4879 dd_data->context_un.mbox.dmabuffers = dmabuf; 4880 4881 /* setup wake call as IOCB callback */ 4882 pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl; 4883 4884 /* setup context field to pass wait_queue pointer to wake function */ 4885 pmboxq->ctx_u.dd_data = dd_data; 4886 dd_data->type = TYPE_MBOX; 4887 dd_data->set_job = job; 4888 dd_data->context_un.mbox.pmboxq = pmboxq; 4889 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx; 4890 dd_data->context_un.mbox.ext = ext; 4891 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset; 4892 dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen; 4893 dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen; 4894 job->dd_data = dd_data; 4895 4896 if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag) || 4897 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) { 4898 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 4899 if (rc != MBX_SUCCESS) { 4900 rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV; 4901 goto job_done; 4902 } 4903 4904 /* job finished, copy the data */ 4905 memcpy(pmbx, pmb, sizeof(*pmb)); 4906 bsg_reply->reply_payload_rcv_len = 4907 sg_copy_from_buffer(job->reply_payload.sg_list, 4908 job->reply_payload.sg_cnt, 4909 pmbx, size); 4910 /* not waiting mbox already done */ 4911 rc = 0; 4912 goto job_done; 4913 } 4914 4915 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 4916 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) 4917 return 1; /* job started */ 4918 4919 job_done: 4920 /* common exit for error or job completed inline */ 4921 if (pmboxq) 4922 mempool_free(pmboxq, phba->mbox_mem_pool); 4923 lpfc_bsg_dma_page_free(phba, dmabuf); 4924 kfree(dd_data); 4925 4926 job_cont: 4927 return rc; 4928 } 4929 4930 /** 4931 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command 4932 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX. 
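 *
 * Returns 0 when the job completed or was successfully submitted; on error
 * the negative error code is also made available to user space in
 * bsg_reply->result.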
4933  **/
4934 static int
4935 lpfc_bsg_mbox_cmd(struct bsg_job *job)
4936 {
4937 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
4938 	struct fc_bsg_request *bsg_request = job->request;
4939 	struct fc_bsg_reply *bsg_reply = job->reply;
4940 	struct lpfc_hba *phba = vport->phba;
4941 	struct dfc_mbox_req *mbox_req;
4942 	int rc = 0;
4943 
4944 	/* mix-and-match backward compatibility */
4945 	bsg_reply->reply_payload_rcv_len = 0;
4946 	if (job->request_len <
4947 	    sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
4948 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4949 				"2737 Mix-and-match backward compatibility "
4950 				"between MBOX_REQ old size:%d and "
4951 				"new request size:%d\n",
4952 				(int)(job->request_len -
4953 				      sizeof(struct fc_bsg_request)),
4954 				(int)sizeof(struct dfc_mbox_req));
4955 		mbox_req = (struct dfc_mbox_req *)
4956 				bsg_request->rqst_data.h_vendor.vendor_cmd;
4957 		mbox_req->extMboxTag = 0;
4958 		mbox_req->extSeqNum = 0;
4959 	}
4960 
4961 	rc = lpfc_bsg_issue_mbox(phba, job, vport);
4962 
4963 	if (rc == 0) {
4964 		/* job done */
4965 		bsg_reply->result = 0;
4966 		job->dd_data = NULL;
4967 		bsg_job_done(job, bsg_reply->result,
4968 			     bsg_reply->reply_payload_rcv_len);
4969 	} else if (rc == 1)
4970 		/* job submitted, will complete later */
4971 		rc = 0; /* return zero, no error */
4972 	else {
4973 		/* some error occurred */
4974 		bsg_reply->result = rc;
4975 		job->dd_data = NULL;
4976 	}
4977 
4978 	return rc;
4979 }
4980 
4981 static int
4982 lpfc_forced_link_speed(struct bsg_job *job)
4983 {
4984 	struct Scsi_Host *shost = fc_bsg_to_shost(job);
4985 	struct lpfc_vport *vport = shost_priv(shost);
4986 	struct lpfc_hba *phba = vport->phba;
4987 	struct fc_bsg_reply *bsg_reply = job->reply;
4988 	struct forced_link_speed_support_reply *forced_reply;
4989 	int rc = 0;
4990 
4991 	if (job->request_len <
4992 	    sizeof(struct fc_bsg_request) +
4993 	    sizeof(struct get_forced_link_speed_support)) {
4994 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4995 				"0048 Received FORCED_LINK_SPEED request "
4996 				"below minimum size\n");
4997 		rc = -EINVAL;
4998 		goto job_error;
4999 	}
5000 
5001 	forced_reply = (struct forced_link_speed_support_reply *)
5002 		bsg_reply->reply_data.vendor_reply.vendor_rsp;
5003 
5004 	if (job->reply_len < sizeof(*bsg_reply) + sizeof(*forced_reply)) {
5005 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5006 				"0049 Received FORCED_LINK_SPEED reply below "
5007 				"minimum size\n");
5008 		rc = -EINVAL;
5009 		goto job_error;
5010 	}
5011 
5012 	forced_reply->supported = test_bit(HBA_FORCED_LINK_SPEED,
5013 					   &phba->hba_flag)
5014 				   ? LPFC_FORCED_LINK_SPEED_SUPPORTED
5015 				   : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED;
5016 job_error:
5017 	bsg_reply->result = rc;
5018 	if (rc == 0)
5019 		bsg_job_done(job, bsg_reply->result,
5020 			     bsg_reply->reply_payload_rcv_len);
5021 	return rc;
5022 }
5023 
5024 /**
5025  * lpfc_check_fwlog_support: Check FW log support on the adapter
5026  * @phba: Pointer to HBA context object.
5027  *
5028  * Check whether FW logging is supported by the adapter.
5029  **/
5030 int
5031 lpfc_check_fwlog_support(struct lpfc_hba *phba)
5032 {
5033 	struct lpfc_ras_fwlog *ras_fwlog = NULL;
5034 
5035 	ras_fwlog = &phba->ras_fwlog;
5036 
5037 	if (!ras_fwlog->ras_hwsupport)
5038 		return -EACCES;
5039 	else if (!ras_fwlog->ras_enabled)
5040 		return -EPERM;
5041 	else
5042 		return 0;
5043 }
5044 
5045 /**
5046  * lpfc_bsg_get_ras_config: Get RAS configuration settings
5047  * @job: fc_bsg_job to handle
5048  *
5049  * Get the RAS configuration values currently set.
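 *
 * Reports the current logging state (running or stopped), the configured
 * FW log level, and the log buffer size.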
5050 **/ 5051 static int 5052 lpfc_bsg_get_ras_config(struct bsg_job *job) 5053 { 5054 struct Scsi_Host *shost = fc_bsg_to_shost(job); 5055 struct lpfc_vport *vport = shost_priv(shost); 5056 struct fc_bsg_reply *bsg_reply = job->reply; 5057 struct lpfc_hba *phba = vport->phba; 5058 struct lpfc_bsg_get_ras_config_reply *ras_reply; 5059 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 5060 int rc = 0; 5061 5062 if (job->request_len < 5063 sizeof(struct fc_bsg_request) + 5064 sizeof(struct lpfc_bsg_ras_req)) { 5065 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 5066 "6192 FW_LOG request received " 5067 "below minimum size\n"); 5068 rc = -EINVAL; 5069 goto ras_job_error; 5070 } 5071 5072 /* Check FW log status */ 5073 rc = lpfc_check_fwlog_support(phba); 5074 if (rc) 5075 goto ras_job_error; 5076 5077 ras_reply = (struct lpfc_bsg_get_ras_config_reply *) 5078 bsg_reply->reply_data.vendor_reply.vendor_rsp; 5079 5080 /* Current logging state */ 5081 spin_lock_irq(&phba->ras_fwlog_lock); 5082 if (ras_fwlog->state == ACTIVE) 5083 ras_reply->state = LPFC_RASLOG_STATE_RUNNING; 5084 else 5085 ras_reply->state = LPFC_RASLOG_STATE_STOPPED; 5086 spin_unlock_irq(&phba->ras_fwlog_lock); 5087 5088 ras_reply->log_level = phba->ras_fwlog.fw_loglevel; 5089 ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize; 5090 5091 ras_job_error: 5092 /* make error code available to userspace */ 5093 bsg_reply->result = rc; 5094 5095 /* complete the job back to userspace */ 5096 if (!rc) 5097 bsg_job_done(job, bsg_reply->result, 5098 bsg_reply->reply_payload_rcv_len); 5099 return rc; 5100 } 5101 5102 /** 5103 * lpfc_bsg_set_ras_config: Set FW logging parameters 5104 * @job: fc_bsg_job to handle 5105 * 5106 * Set log-level parameters for FW-logging in host memory 5107 **/ 5108 static int 5109 lpfc_bsg_set_ras_config(struct bsg_job *job) 5110 { 5111 struct Scsi_Host *shost = fc_bsg_to_shost(job); 5112 struct lpfc_vport *vport = shost_priv(shost); 5113 struct lpfc_hba *phba = vport->phba; 5114 struct lpfc_bsg_set_ras_config_req *ras_req; 5115 struct fc_bsg_request *bsg_request = job->request; 5116 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 5117 struct fc_bsg_reply *bsg_reply = job->reply; 5118 uint8_t action = 0, log_level = 0; 5119 int rc = 0, action_status = 0; 5120 5121 if (job->request_len < 5122 sizeof(struct fc_bsg_request) + 5123 sizeof(struct lpfc_bsg_set_ras_config_req)) { 5124 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 5125 "6182 Received RAS_LOG request " 5126 "below minimum size\n"); 5127 rc = -EINVAL; 5128 goto ras_job_error; 5129 } 5130 5131 /* Check FW log status */ 5132 rc = lpfc_check_fwlog_support(phba); 5133 if (rc) 5134 goto ras_job_error; 5135 5136 ras_req = (struct lpfc_bsg_set_ras_config_req *) 5137 bsg_request->rqst_data.h_vendor.vendor_cmd; 5138 action = ras_req->action; 5139 log_level = ras_req->log_level; 5140 5141 if (action == LPFC_RASACTION_STOP_LOGGING) { 5142 /* Check if already disabled */ 5143 spin_lock_irq(&phba->ras_fwlog_lock); 5144 if (ras_fwlog->state != ACTIVE) { 5145 spin_unlock_irq(&phba->ras_fwlog_lock); 5146 rc = -ESRCH; 5147 goto ras_job_error; 5148 } 5149 spin_unlock_irq(&phba->ras_fwlog_lock); 5150 5151 /* Disable logging */ 5152 lpfc_ras_stop_fwlog(phba); 5153 } else { 5154 /*action = LPFC_RASACTION_START_LOGGING*/ 5155 5156 /* Even though FW-logging is active re-initialize 5157 * FW-logging with new log-level. Return status 5158 * "Logging already Running" to caller. 
5159 **/ 5160 spin_lock_irq(&phba->ras_fwlog_lock); 5161 if (ras_fwlog->state != INACTIVE) 5162 action_status = -EINPROGRESS; 5163 spin_unlock_irq(&phba->ras_fwlog_lock); 5164 5165 /* Enable logging */ 5166 rc = lpfc_sli4_ras_fwlog_init(phba, log_level, 5167 LPFC_RAS_ENABLE_LOGGING); 5168 if (rc) { 5169 rc = -EINVAL; 5170 goto ras_job_error; 5171 } 5172 5173 /* Check if FW-logging is re-initialized */ 5174 if (action_status == -EINPROGRESS) 5175 rc = action_status; 5176 } 5177 ras_job_error: 5178 /* make error code available to userspace */ 5179 bsg_reply->result = rc; 5180 5181 /* complete the job back to userspace */ 5182 if (!rc) 5183 bsg_job_done(job, bsg_reply->result, 5184 bsg_reply->reply_payload_rcv_len); 5185 5186 return rc; 5187 } 5188 5189 /** 5190 * lpfc_bsg_get_ras_lwpd: Get log write position data 5191 * @job: fc_bsg_job to handle 5192 * 5193 * Get Offset/Wrap count of the log message written 5194 * in host memory 5195 **/ 5196 static int 5197 lpfc_bsg_get_ras_lwpd(struct bsg_job *job) 5198 { 5199 struct Scsi_Host *shost = fc_bsg_to_shost(job); 5200 struct lpfc_vport *vport = shost_priv(shost); 5201 struct lpfc_bsg_get_ras_lwpd *ras_reply; 5202 struct lpfc_hba *phba = vport->phba; 5203 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 5204 struct fc_bsg_reply *bsg_reply = job->reply; 5205 u32 *lwpd_ptr = NULL; 5206 int rc = 0; 5207 5208 rc = lpfc_check_fwlog_support(phba); 5209 if (rc) 5210 goto ras_job_error; 5211 5212 if (job->request_len < 5213 sizeof(struct fc_bsg_request) + 5214 sizeof(struct lpfc_bsg_ras_req)) { 5215 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 5216 "6183 Received RAS_LOG request " 5217 "below minimum size\n"); 5218 rc = -EINVAL; 5219 goto ras_job_error; 5220 } 5221 5222 ras_reply = (struct lpfc_bsg_get_ras_lwpd *) 5223 bsg_reply->reply_data.vendor_reply.vendor_rsp; 5224 5225 if (!ras_fwlog->lwpd.virt) { 5226 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 5227 "6193 Restart FW Logging\n"); 5228 rc = -EINVAL; 5229 goto ras_job_error; 5230 } 5231 5232 /* Get lwpd offset */ 5233 lwpd_ptr = (uint32_t *)(ras_fwlog->lwpd.virt); 5234 ras_reply->offset = be32_to_cpu(*lwpd_ptr & 0xffffffff); 5235 5236 /* Get wrap count */ 5237 ras_reply->wrap_count = be32_to_cpu(*(++lwpd_ptr) & 0xffffffff); 5238 5239 ras_job_error: 5240 /* make error code available to userspace */ 5241 bsg_reply->result = rc; 5242 5243 /* complete the job back to userspace */ 5244 if (!rc) 5245 bsg_job_done(job, bsg_reply->result, 5246 bsg_reply->reply_payload_rcv_len); 5247 5248 return rc; 5249 } 5250 5251 /** 5252 * lpfc_bsg_get_ras_fwlog: Read FW log 5253 * @job: fc_bsg_job to handle 5254 * 5255 * Copy the FW log into the passed buffer. 
5256  **/
5257 static int
5258 lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
5259 {
5260 	struct Scsi_Host *shost = fc_bsg_to_shost(job);
5261 	struct lpfc_vport *vport = shost_priv(shost);
5262 	struct lpfc_hba *phba = vport->phba;
5263 	struct fc_bsg_request *bsg_request = job->request;
5264 	struct fc_bsg_reply *bsg_reply = job->reply;
5265 	struct lpfc_bsg_get_fwlog_req *ras_req;
5266 	u32 rd_offset, rd_index, offset;
5267 	void *src, *fwlog_buff;
5268 	struct lpfc_ras_fwlog *ras_fwlog = NULL;
5269 	struct lpfc_dmabuf *dmabuf, *next;
5270 	int rc = 0;
5271 
5272 	ras_fwlog = &phba->ras_fwlog;
5273 
5274 	rc = lpfc_check_fwlog_support(phba);
5275 	if (rc)
5276 		goto ras_job_error;
5277 
5278 	/* Logging must be stopped before reading */
5279 	spin_lock_irq(&phba->ras_fwlog_lock);
5280 	if (ras_fwlog->state == ACTIVE) {
5281 		spin_unlock_irq(&phba->ras_fwlog_lock);
5282 		rc = -EINPROGRESS;
5283 		goto ras_job_error;
5284 	}
5285 	spin_unlock_irq(&phba->ras_fwlog_lock);
5286 
5287 	if (job->request_len <
5288 	    sizeof(struct fc_bsg_request) +
5289 	    sizeof(struct lpfc_bsg_get_fwlog_req)) {
5290 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5291 				"6184 Received RAS_LOG request "
5292 				"below minimum size\n");
5293 		rc = -EINVAL;
5294 		goto ras_job_error;
5295 	}
5296 
5297 	ras_req = (struct lpfc_bsg_get_fwlog_req *)
5298 		bsg_request->rqst_data.h_vendor.vendor_cmd;
5299 	rd_offset = ras_req->read_offset;
5300 
5301 	/* Allocate memory to read fw log */
5302 	fwlog_buff = vmalloc(ras_req->read_size);
5303 	if (!fwlog_buff) {
5304 		rc = -ENOMEM;
5305 		goto ras_job_error;
5306 	}
5307 
5308 	rd_index = (rd_offset / LPFC_RAS_MAX_ENTRY_SIZE);
5309 	offset = (rd_offset % LPFC_RAS_MAX_ENTRY_SIZE);
5310 
5311 	list_for_each_entry_safe(dmabuf, next,
5312 				 &ras_fwlog->fwlog_buff_list, list) {
5313 
5314 		if (dmabuf->buffer_tag < rd_index)
5315 			continue;
5316 
5317 		src = dmabuf->virt + offset;
5318 		memcpy(fwlog_buff, src, ras_req->read_size);
5319 		break;
5320 	}
5321 
5322 	bsg_reply->reply_payload_rcv_len =
5323 		sg_copy_from_buffer(job->reply_payload.sg_list,
5324 				    job->reply_payload.sg_cnt,
5325 				    fwlog_buff, ras_req->read_size);
5326 
5327 	vfree(fwlog_buff);
5328 
5329 ras_job_error:
5330 	bsg_reply->result = rc;
5331 	if (!rc)
5332 		bsg_job_done(job, bsg_reply->result,
5333 			     bsg_reply->reply_payload_rcv_len);
5334 
5335 	return rc;
5336 }
5337 
5338 static int
5339 lpfc_get_trunk_info(struct bsg_job *job)
5340 {
5341 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5342 	struct lpfc_hba *phba = vport->phba;
5343 	struct fc_bsg_reply *bsg_reply = job->reply;
5344 	struct lpfc_trunk_info *event_reply;
5345 	int rc = 0;
5346 
5347 	if (job->request_len <
5348 	    sizeof(struct fc_bsg_request) + sizeof(struct get_trunk_info_req)) {
5349 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5350 				"2744 Received GET TRUNK_INFO request below "
5351 				"minimum size\n");
5352 		rc = -EINVAL;
5353 		goto job_error;
5354 	}
5355 
5356 	event_reply = (struct lpfc_trunk_info *)
5357 		bsg_reply->reply_data.vendor_reply.vendor_rsp;
5358 
5359 	if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
5360 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5361 				"2728 Received GET TRUNK_INFO reply below "
5362 				"minimum size\n");
5363 		rc = -EINVAL;
5364 		goto job_error;
5365 	}
5366 	if (event_reply == NULL) {
5367 		rc = -EINVAL;
5368 		goto job_error;
5369 	}
5370 
5371 	bsg_bf_set(lpfc_trunk_info_link_status, event_reply,
5372 		   (phba->link_state >= LPFC_LINK_UP) ? 1 : 0);
5373 
5374 	bsg_bf_set(lpfc_trunk_info_trunk_active0, event_reply,
5375 		   (phba->trunk_link.link0.state == LPFC_LINK_UP) ?
1 : 0); 5376 5377 bsg_bf_set(lpfc_trunk_info_trunk_active1, event_reply, 5378 (phba->trunk_link.link1.state == LPFC_LINK_UP) ? 1 : 0); 5379 5380 bsg_bf_set(lpfc_trunk_info_trunk_active2, event_reply, 5381 (phba->trunk_link.link2.state == LPFC_LINK_UP) ? 1 : 0); 5382 5383 bsg_bf_set(lpfc_trunk_info_trunk_active3, event_reply, 5384 (phba->trunk_link.link3.state == LPFC_LINK_UP) ? 1 : 0); 5385 5386 bsg_bf_set(lpfc_trunk_info_trunk_config0, event_reply, 5387 bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba)); 5388 5389 bsg_bf_set(lpfc_trunk_info_trunk_config1, event_reply, 5390 bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba)); 5391 5392 bsg_bf_set(lpfc_trunk_info_trunk_config2, event_reply, 5393 bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba)); 5394 5395 bsg_bf_set(lpfc_trunk_info_trunk_config3, event_reply, 5396 bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba)); 5397 5398 event_reply->port_speed = phba->sli4_hba.link_state.speed / 1000; 5399 event_reply->logical_speed = 5400 phba->sli4_hba.link_state.logical_speed / 1000; 5401 job_error: 5402 bsg_reply->result = rc; 5403 if (!rc) 5404 bsg_job_done(job, bsg_reply->result, 5405 bsg_reply->reply_payload_rcv_len); 5406 return rc; 5407 5408 } 5409 5410 static int 5411 lpfc_get_cgnbuf_info(struct bsg_job *job) 5412 { 5413 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); 5414 struct lpfc_hba *phba = vport->phba; 5415 struct fc_bsg_request *bsg_request = job->request; 5416 struct fc_bsg_reply *bsg_reply = job->reply; 5417 struct get_cgnbuf_info_req *cgnbuf_req; 5418 struct lpfc_cgn_info *cp; 5419 uint8_t *cgn_buff; 5420 size_t size, cinfosz; 5421 int rc = 0; 5422 5423 if (job->request_len < sizeof(struct fc_bsg_request) + 5424 sizeof(struct get_cgnbuf_info_req)) { 5425 rc = -ENOMEM; 5426 goto job_exit; 5427 } 5428 5429 if (!phba->sli4_hba.pc_sli4_params.cmf) { 5430 rc = -ENOENT; 5431 goto job_exit; 5432 } 5433 5434 if (!phba->cgn_i || !phba->cgn_i->virt) { 5435 rc = -ENOENT; 5436 goto job_exit; 5437 } 5438 5439 cp = phba->cgn_i->virt; 5440 if (cp->cgn_info_version < LPFC_CGN_INFO_V3) { 5441 rc = -EPERM; 5442 goto job_exit; 5443 } 5444 5445 cgnbuf_req = (struct get_cgnbuf_info_req *) 5446 bsg_request->rqst_data.h_vendor.vendor_cmd; 5447 5448 /* For reset or size == 0 */ 5449 bsg_reply->reply_payload_rcv_len = 0; 5450 5451 if (cgnbuf_req->reset == LPFC_BSG_CGN_RESET_STAT) { 5452 lpfc_init_congestion_stat(phba); 5453 goto job_exit; 5454 } 5455 5456 /* We don't want to include the CRC at the end */ 5457 cinfosz = sizeof(struct lpfc_cgn_info) - sizeof(uint32_t); 5458 5459 size = cgnbuf_req->read_size; 5460 if (!size) 5461 goto job_exit; 5462 5463 if (size < cinfosz) { 5464 /* Just copy back what we can */ 5465 cinfosz = size; 5466 rc = -E2BIG; 5467 } 5468 5469 /* Allocate memory to read congestion info */ 5470 cgn_buff = vmalloc(cinfosz); 5471 if (!cgn_buff) { 5472 rc = -ENOMEM; 5473 goto job_exit; 5474 } 5475 5476 memcpy(cgn_buff, cp, cinfosz); 5477 5478 bsg_reply->reply_payload_rcv_len = 5479 sg_copy_from_buffer(job->reply_payload.sg_list, 5480 job->reply_payload.sg_cnt, 5481 cgn_buff, cinfosz); 5482 5483 vfree(cgn_buff); 5484 5485 job_exit: 5486 bsg_reply->result = rc; 5487 if (!rc) 5488 bsg_job_done(job, bsg_reply->result, 5489 bsg_reply->reply_payload_rcv_len); 5490 else 5491 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 5492 "2724 GET CGNBUF error: %d\n", rc); 5493 return rc; 5494 } 5495 5496 /** 5497 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job 5498 * @job: fc_bsg_job to handle 5499 **/ 5500 static int 5501 
lpfc_bsg_hst_vendor(struct bsg_job *job) 5502 { 5503 struct fc_bsg_request *bsg_request = job->request; 5504 struct fc_bsg_reply *bsg_reply = job->reply; 5505 int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0]; 5506 int rc; 5507 5508 switch (command) { 5509 case LPFC_BSG_VENDOR_SET_CT_EVENT: 5510 rc = lpfc_bsg_hba_set_event(job); 5511 break; 5512 case LPFC_BSG_VENDOR_GET_CT_EVENT: 5513 rc = lpfc_bsg_hba_get_event(job); 5514 break; 5515 case LPFC_BSG_VENDOR_SEND_MGMT_RESP: 5516 rc = lpfc_bsg_send_mgmt_rsp(job); 5517 break; 5518 case LPFC_BSG_VENDOR_DIAG_MODE: 5519 rc = lpfc_bsg_diag_loopback_mode(job); 5520 break; 5521 case LPFC_BSG_VENDOR_DIAG_MODE_END: 5522 rc = lpfc_sli4_bsg_diag_mode_end(job); 5523 break; 5524 case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK: 5525 rc = lpfc_bsg_diag_loopback_run(job); 5526 break; 5527 case LPFC_BSG_VENDOR_LINK_DIAG_TEST: 5528 rc = lpfc_sli4_bsg_link_diag_test(job); 5529 break; 5530 case LPFC_BSG_VENDOR_GET_MGMT_REV: 5531 rc = lpfc_bsg_get_dfc_rev(job); 5532 break; 5533 case LPFC_BSG_VENDOR_MBOX: 5534 rc = lpfc_bsg_mbox_cmd(job); 5535 break; 5536 case LPFC_BSG_VENDOR_FORCED_LINK_SPEED: 5537 rc = lpfc_forced_link_speed(job); 5538 break; 5539 case LPFC_BSG_VENDOR_RAS_GET_LWPD: 5540 rc = lpfc_bsg_get_ras_lwpd(job); 5541 break; 5542 case LPFC_BSG_VENDOR_RAS_GET_FWLOG: 5543 rc = lpfc_bsg_get_ras_fwlog(job); 5544 break; 5545 case LPFC_BSG_VENDOR_RAS_GET_CONFIG: 5546 rc = lpfc_bsg_get_ras_config(job); 5547 break; 5548 case LPFC_BSG_VENDOR_RAS_SET_CONFIG: 5549 rc = lpfc_bsg_set_ras_config(job); 5550 break; 5551 case LPFC_BSG_VENDOR_GET_TRUNK_INFO: 5552 rc = lpfc_get_trunk_info(job); 5553 break; 5554 case LPFC_BSG_VENDOR_GET_CGNBUF_INFO: 5555 rc = lpfc_get_cgnbuf_info(job); 5556 break; 5557 default: 5558 rc = -EINVAL; 5559 bsg_reply->reply_payload_rcv_len = 0; 5560 /* make error code available to userspace */ 5561 bsg_reply->result = rc; 5562 break; 5563 } 5564 5565 return rc; 5566 } 5567 5568 /** 5569 * lpfc_bsg_request - handle a bsg request from the FC transport 5570 * @job: bsg_job to handle 5571 **/ 5572 int 5573 lpfc_bsg_request(struct bsg_job *job) 5574 { 5575 struct fc_bsg_request *bsg_request = job->request; 5576 struct fc_bsg_reply *bsg_reply = job->reply; 5577 uint32_t msgcode; 5578 int rc; 5579 5580 msgcode = bsg_request->msgcode; 5581 switch (msgcode) { 5582 case FC_BSG_HST_VENDOR: 5583 rc = lpfc_bsg_hst_vendor(job); 5584 break; 5585 case FC_BSG_RPT_ELS: 5586 rc = lpfc_bsg_rport_els(job); 5587 break; 5588 case FC_BSG_RPT_CT: 5589 rc = lpfc_bsg_send_mgmt_cmd(job); 5590 break; 5591 default: 5592 rc = -EINVAL; 5593 bsg_reply->reply_payload_rcv_len = 0; 5594 /* make error code available to userspace */ 5595 bsg_reply->result = rc; 5596 break; 5597 } 5598 5599 return rc; 5600 } 5601 5602 /** 5603 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport 5604 * @job: bsg_job that has timed out 5605 * 5606 * This function just aborts the job's IOCB. 
The aborted IOCB will return to
5607  * the waiting function, which will handle passing the error back to userspace.
5608  **/
5609 int
5610 lpfc_bsg_timeout(struct bsg_job *job)
5611 {
5612 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5613 	struct lpfc_hba *phba = vport->phba;
5614 	struct lpfc_iocbq *cmdiocb;
5615 	struct lpfc_sli_ring *pring;
5616 	struct bsg_job_data *dd_data;
5617 	unsigned long flags;
5618 	int rc = 0;
5619 	LIST_HEAD(completions);
5620 	struct lpfc_iocbq *check_iocb, *next_iocb;
5621 
5622 	pring = lpfc_phba_elsring(phba);
5623 	if (unlikely(!pring))
5624 		return -EIO;
5625 
5626 	/* if the job's driver data is NULL, the command completed or is in
5627 	 * the process of completing. In this case, return a status to the
5628 	 * request so the timeout is retried. This avoids double completion
5629 	 * issues and the request will be pulled off the timer queue when
5630 	 * the command's completion handler executes. Otherwise, prevent the
5631 	 * command's completion handler from executing the job done callback
5632 	 * and continue processing to abort the outstanding command.
5633 	 */
5634 
5635 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
5636 	dd_data = (struct bsg_job_data *)job->dd_data;
5637 	if (dd_data) {
5638 		dd_data->set_job = NULL;
5639 		job->dd_data = NULL;
5640 	} else {
5641 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5642 		return -EAGAIN;
5643 	}
5644 
5645 	switch (dd_data->type) {
5646 	case TYPE_IOCB:
5647 		/* Check to see if IOCB was issued to the port or not. If not,
5648 		 * remove it from the txq queue and call cancel iocbs.
5649 		 * Otherwise, call abort iotag.
5650 		 */
5651 		cmdiocb = dd_data->context_un.iocb.cmdiocbq;
5652 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5653 
5654 		spin_lock_irqsave(&phba->hbalock, flags);
5655 		/* make sure the I/O abort window is still open */
5656 		if (!(cmdiocb->cmd_flag & LPFC_IO_CMD_OUTSTANDING)) {
5657 			spin_unlock_irqrestore(&phba->hbalock, flags);
5658 			return -EAGAIN;
5659 		}
5660 		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
5661 					 list) {
5662 			if (check_iocb == cmdiocb) {
5663 				list_move_tail(&check_iocb->list, &completions);
5664 				break;
5665 			}
5666 		}
5667 		if (list_empty(&completions))
5668 			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);
5669 		spin_unlock_irqrestore(&phba->hbalock, flags);
5670 		if (!list_empty(&completions)) {
5671 			lpfc_sli_cancel_iocbs(phba, &completions,
5672 					      IOSTAT_LOCAL_REJECT,
5673 					      IOERR_SLI_ABORTED);
5674 		}
5675 		break;
5676 
5677 	case TYPE_EVT:
5678 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5679 		break;
5680 
5681 	case TYPE_MBOX:
5682 		/* Update the ext buf ctx state if needed */
5683 
5684 		if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
5685 			phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
5686 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5687 		break;
5688 	default:
5689 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5690 		break;
5691 	}
5692 
5693 	/* scsi transport fc fc_bsg_job_timeout expects a zero return code,
5694 	 * otherwise an error message will be displayed on the console,
5695 	 * so always return success (zero)
5696 	 */
5697 	return rc;
5698 }
5699