1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2009-2015 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * * 10 * This program is free software; you can redistribute it and/or * 11 * modify it under the terms of version 2 of the GNU General * 12 * Public License as published by the Free Software Foundation. * 13 * This program is distributed in the hope that it will be useful. * 14 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 15 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 16 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 17 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 18 * TO BE LEGALLY INVALID. See the GNU General Public License for * 19 * more details, a copy of which can be found in the file COPYING * 20 * included with this package. * 21 *******************************************************************/ 22 23 #include <linux/interrupt.h> 24 #include <linux/mempool.h> 25 #include <linux/pci.h> 26 #include <linux/slab.h> 27 #include <linux/delay.h> 28 #include <linux/list.h> 29 #include <linux/bsg-lib.h> 30 #include <linux/vmalloc.h> 31 32 #include <scsi/scsi.h> 33 #include <scsi/scsi_host.h> 34 #include <scsi/scsi_transport_fc.h> 35 #include <scsi/scsi_bsg_fc.h> 36 #include <scsi/fc/fc_fs.h> 37 38 #include "lpfc_hw4.h" 39 #include "lpfc_hw.h" 40 #include "lpfc_sli.h" 41 #include "lpfc_sli4.h" 42 #include "lpfc_nl.h" 43 #include "lpfc_bsg.h" 44 #include "lpfc_disc.h" 45 #include "lpfc_scsi.h" 46 #include "lpfc.h" 47 #include "lpfc_logmsg.h" 48 #include "lpfc_crtn.h" 49 #include "lpfc_debugfs.h" 50 #include "lpfc_vport.h" 51 #include "lpfc_version.h" 52 53 struct lpfc_bsg_event { 54 struct list_head node; 55 struct kref kref; 56 wait_queue_head_t wq; 57 58 /* Event type and waiter identifiers */ 59 uint32_t type_mask; 60 uint32_t req_id; 61 uint32_t reg_id; 62 63 /* next two flags are here for the auto-delete logic */ 64 unsigned long wait_time_stamp; 65 int waiting; 66 67 /* seen and not seen events */ 68 struct list_head events_to_get; 69 struct list_head events_to_see; 70 71 /* driver data associated with the job */ 72 void *dd_data; 73 }; 74 75 struct lpfc_bsg_iocb { 76 struct lpfc_iocbq *cmdiocbq; 77 struct lpfc_dmabuf *rmp; 78 struct lpfc_nodelist *ndlp; 79 }; 80 81 struct lpfc_bsg_mbox { 82 LPFC_MBOXQ_t *pmboxq; 83 MAILBOX_t *mb; 84 struct lpfc_dmabuf *dmabuffers; /* for BIU diags */ 85 uint8_t *ext; /* extended mailbox data */ 86 uint32_t mbOffset; /* from app */ 87 uint32_t inExtWLen; /* from app */ 88 uint32_t outExtWLen; /* from app */ 89 }; 90 91 #define TYPE_EVT 1 92 #define TYPE_IOCB 2 93 #define TYPE_MBOX 3 94 struct bsg_job_data { 95 uint32_t type; 96 struct bsg_job *set_job; /* job waiting for this iocb to finish */ 97 union { 98 struct lpfc_bsg_event *evt; 99 struct lpfc_bsg_iocb iocb; 100 struct lpfc_bsg_mbox mbox; 101 } context_un; 102 }; 103 104 struct event_data { 105 struct list_head node; 106 uint32_t type; 107 uint32_t immed_dat; 108 void *data; 109 uint32_t len; 110 }; 111 112 #define BUF_SZ_4K 4096 113 #define SLI_CT_ELX_LOOPBACK 0x10 114 115 enum ELX_LOOPBACK_CMD { 116 ELX_LOOPBACK_XRI_SETUP, 117 ELX_LOOPBACK_DATA, 118 }; 119 120 #define ELX_LOOPBACK_HEADER_SZ \ 121 (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un) 122 123 struct lpfc_dmabufext { 124 struct lpfc_dmabuf dma; 125 uint32_t size; 126 uint32_t flag; 127 }; 128 129 static void 130 lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist) 131 { 132 struct lpfc_dmabuf *mlast, *next_mlast; 133 134 if (mlist) { 135 list_for_each_entry_safe(mlast, next_mlast, &mlist->list, 136 list) { 137 list_del(&mlast->list); 138 lpfc_mbuf_free(phba, mlast->virt, mlast->phys); 139 kfree(mlast); 140 } 141 lpfc_mbuf_free(phba, mlist->virt, mlist->phys); 142 kfree(mlist); 143 } 144 return; 145 } 146 147 static struct lpfc_dmabuf * 148 lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size, 149 int outbound_buffers, struct ulp_bde64 *bpl, 150 int *bpl_entries) 151 { 152 struct lpfc_dmabuf *mlist = NULL; 153 struct lpfc_dmabuf *mp; 154 unsigned int bytes_left = size; 155 156 /* Verify we can support the size specified */ 157 if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE))) 158 return NULL; 159 160 /* Determine the number of dma buffers to allocate */ 161 *bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 : 162 size/LPFC_BPL_SIZE); 163 164 /* Allocate dma buffer and place in BPL passed */ 165 while (bytes_left) { 166 /* Allocate dma buffer */ 167 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 168 if (!mp) { 169 if (mlist) 170 lpfc_free_bsg_buffers(phba, mlist); 171 return NULL; 172 } 173 174 INIT_LIST_HEAD(&mp->list); 175 mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys)); 176 177 if (!mp->virt) { 178 kfree(mp); 179 if (mlist) 180 lpfc_free_bsg_buffers(phba, mlist); 181 return NULL; 182 } 183 184 /* Queue it to a linked list */ 185 if (!mlist) 186 mlist = mp; 187 else 188 list_add_tail(&mp->list, &mlist->list); 189 190 /* Add buffer to buffer pointer list */ 191 if (outbound_buffers) 192 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 193 else 194 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 195 bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys)); 196 bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys)); 197 bpl->tus.f.bdeSize = (uint16_t) 198 (bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE : 199 bytes_left); 200 bytes_left -= bpl->tus.f.bdeSize; 201 bpl->tus.w = le32_to_cpu(bpl->tus.w); 202 bpl++; 203 } 204 return mlist; 205 } 206 207 static unsigned int 208 lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers, 209 struct bsg_buffer *bsg_buffers, 210 unsigned int bytes_to_transfer, int to_buffers) 211 { 212 213 struct lpfc_dmabuf *mp; 214 unsigned int transfer_bytes, bytes_copied = 0; 215 unsigned int sg_offset, dma_offset; 216 unsigned char *dma_address, *sg_address; 217 LIST_HEAD(temp_list); 218 struct sg_mapping_iter miter; 219 unsigned long flags; 220 unsigned int sg_flags = SG_MITER_ATOMIC; 221 bool sg_valid; 222 223 list_splice_init(&dma_buffers->list, &temp_list); 224 list_add(&dma_buffers->list, &temp_list); 225 sg_offset = 0; 226 if (to_buffers) 227 sg_flags |= SG_MITER_FROM_SG; 228 else 229 sg_flags |= SG_MITER_TO_SG; 230 sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt, 231 sg_flags); 232 local_irq_save(flags); 233 sg_valid = sg_miter_next(&miter); 234 list_for_each_entry(mp, &temp_list, list) { 235 dma_offset = 0; 236 while (bytes_to_transfer && sg_valid && 237 (dma_offset < LPFC_BPL_SIZE)) { 238 dma_address = mp->virt + dma_offset; 239 if (sg_offset) { 240 /* Continue previous partial transfer of sg */ 241 sg_address = miter.addr + sg_offset; 242 transfer_bytes = miter.length - sg_offset; 243 } else { 244 sg_address = miter.addr; 245 transfer_bytes = miter.length; 246 } 247 if (bytes_to_transfer < transfer_bytes) 248 transfer_bytes = bytes_to_transfer; 249 if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset)) 250 transfer_bytes = LPFC_BPL_SIZE - dma_offset; 251 if (to_buffers) 252 memcpy(dma_address, sg_address, transfer_bytes); 253 else 254 memcpy(sg_address, dma_address, transfer_bytes); 255 dma_offset += transfer_bytes; 256 sg_offset += transfer_bytes; 257 bytes_to_transfer -= transfer_bytes; 258 bytes_copied += transfer_bytes; 259 if (sg_offset >= miter.length) { 260 sg_offset = 0; 261 sg_valid = sg_miter_next(&miter); 262 } 263 } 264 } 265 sg_miter_stop(&miter); 266 local_irq_restore(flags); 267 list_del_init(&dma_buffers->list); 268 list_splice(&temp_list, &dma_buffers->list); 269 return bytes_copied; 270 } 271 272 /** 273 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler 274 * @phba: Pointer to HBA context object. 275 * @cmdiocbq: Pointer to command iocb. 276 * @rspiocbq: Pointer to response iocb. 277 * 278 * This function is the completion handler for iocbs issued using 279 * lpfc_bsg_send_mgmt_cmd function. This function is called by the 280 * ring event handler function without any lock held. This function 281 * can be called from both worker thread context and interrupt 282 * context. This function also can be called from another thread which 283 * cleans up the SLI layer objects. 284 * This function copies the contents of the response iocb to the 285 * response iocb memory object provided by the caller of 286 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 287 * sleeps for the iocb completion. 288 **/ 289 static void 290 lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba, 291 struct lpfc_iocbq *cmdiocbq, 292 struct lpfc_iocbq *rspiocbq) 293 { 294 struct bsg_job_data *dd_data; 295 struct bsg_job *job; 296 struct fc_bsg_reply *bsg_reply; 297 struct lpfc_dmabuf *bmp, *cmp, *rmp; 298 struct lpfc_nodelist *ndlp; 299 struct lpfc_bsg_iocb *iocb; 300 unsigned long flags; 301 int rc = 0; 302 u32 ulp_status, ulp_word4, total_data_placed; 303 304 dd_data = cmdiocbq->context_un.dd_data; 305 306 /* Determine if job has been aborted */ 307 spin_lock_irqsave(&phba->ct_ev_lock, flags); 308 job = dd_data->set_job; 309 if (job) { 310 bsg_reply = job->reply; 311 /* Prevent timeout handling from trying to abort job */ 312 job->dd_data = NULL; 313 } 314 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 315 316 /* Close the timeout handler abort window */ 317 spin_lock_irqsave(&phba->hbalock, flags); 318 cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING; 319 spin_unlock_irqrestore(&phba->hbalock, flags); 320 321 iocb = &dd_data->context_un.iocb; 322 ndlp = iocb->cmdiocbq->ndlp; 323 rmp = iocb->rmp; 324 cmp = cmdiocbq->cmd_dmabuf; 325 bmp = cmdiocbq->bpl_dmabuf; 326 ulp_status = get_job_ulpstatus(phba, rspiocbq); 327 ulp_word4 = get_job_word4(phba, rspiocbq); 328 total_data_placed = get_job_data_placed(phba, rspiocbq); 329 330 /* Copy the completed data or set the error status */ 331 332 if (job) { 333 if (ulp_status) { 334 if (ulp_status == IOSTAT_LOCAL_REJECT) { 335 switch (ulp_word4 & IOERR_PARAM_MASK) { 336 case IOERR_SEQUENCE_TIMEOUT: 337 rc = -ETIMEDOUT; 338 break; 339 case IOERR_INVALID_RPI: 340 rc = -EFAULT; 341 break; 342 default: 343 rc = -EACCES; 344 break; 345 } 346 } else { 347 rc = -EACCES; 348 } 349 } else { 350 bsg_reply->reply_payload_rcv_len = 351 lpfc_bsg_copy_data(rmp, &job->reply_payload, 352 total_data_placed, 0); 353 } 354 } 355 356 lpfc_free_bsg_buffers(phba, cmp); 357 lpfc_free_bsg_buffers(phba, rmp); 358 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 359 kfree(bmp); 360 lpfc_nlp_put(ndlp); 361 lpfc_sli_release_iocbq(phba, cmdiocbq); 362 kfree(dd_data); 363 364 /* Complete the job if the job is still active */ 365 366 if (job) { 367 bsg_reply->result = rc; 368 bsg_job_done(job, bsg_reply->result, 369 bsg_reply->reply_payload_rcv_len); 370 } 371 return; 372 } 373 374 /** 375 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request 376 * @job: fc_bsg_job to handle 377 **/ 378 static int 379 lpfc_bsg_send_mgmt_cmd(struct bsg_job *job) 380 { 381 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); 382 struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data; 383 struct lpfc_hba *phba = vport->phba; 384 struct lpfc_nodelist *ndlp = rdata->pnode; 385 struct fc_bsg_reply *bsg_reply = job->reply; 386 struct ulp_bde64 *bpl = NULL; 387 struct lpfc_iocbq *cmdiocbq = NULL; 388 struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL; 389 int request_nseg, reply_nseg; 390 u32 num_entry; 391 struct bsg_job_data *dd_data; 392 unsigned long flags; 393 uint32_t creg_val; 394 int rc = 0; 395 int iocb_stat; 396 u16 ulp_context; 397 398 /* in case no data is transferred */ 399 bsg_reply->reply_payload_rcv_len = 0; 400 401 if (ndlp->nlp_flag & NLP_ELS_SND_MASK) 402 return -ENODEV; 403 404 /* allocate our bsg tracking structure */ 405 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 406 if (!dd_data) { 407 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 408 "2733 Failed allocation of dd_data\n"); 409 rc = -ENOMEM; 410 goto no_dd_data; 411 } 412 413 cmdiocbq = lpfc_sli_get_iocbq(phba); 414 if (!cmdiocbq) { 415 rc = -ENOMEM; 416 goto free_dd; 417 } 418 419 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 420 if (!bmp) { 421 rc = -ENOMEM; 422 goto free_cmdiocbq; 423 } 424 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); 425 if (!bmp->virt) { 426 rc = -ENOMEM; 427 goto free_bmp; 428 } 429 430 INIT_LIST_HEAD(&bmp->list); 431 432 bpl = (struct ulp_bde64 *) bmp->virt; 433 request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64); 434 cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len, 435 1, bpl, &request_nseg); 436 if (!cmp) { 437 rc = -ENOMEM; 438 goto free_bmp; 439 } 440 lpfc_bsg_copy_data(cmp, &job->request_payload, 441 job->request_payload.payload_len, 1); 442 443 bpl += request_nseg; 444 reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg; 445 rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0, 446 bpl, &reply_nseg); 447 if (!rmp) { 448 rc = -ENOMEM; 449 goto free_cmp; 450 } 451 452 num_entry = request_nseg + reply_nseg; 453 454 if (phba->sli_rev == LPFC_SLI_REV4) 455 ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 456 else 457 ulp_context = ndlp->nlp_rpi; 458 459 lpfc_sli_prep_gen_req(phba, cmdiocbq, bmp, ulp_context, num_entry, 460 phba->fc_ratov * 2); 461 462 cmdiocbq->num_bdes = num_entry; 463 cmdiocbq->vport = phba->pport; 464 cmdiocbq->cmd_dmabuf = cmp; 465 cmdiocbq->bpl_dmabuf = bmp; 466 cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC; 467 468 cmdiocbq->cmd_cmpl = lpfc_bsg_send_mgmt_cmd_cmp; 469 cmdiocbq->context_un.dd_data = dd_data; 470 471 dd_data->type = TYPE_IOCB; 472 dd_data->set_job = job; 473 dd_data->context_un.iocb.cmdiocbq = cmdiocbq; 474 dd_data->context_un.iocb.rmp = rmp; 475 job->dd_data = dd_data; 476 477 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 478 if (lpfc_readl(phba->HCregaddr, &creg_val)) { 479 rc = -EIO ; 480 goto free_rmp; 481 } 482 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 483 writel(creg_val, phba->HCregaddr); 484 readl(phba->HCregaddr); /* flush */ 485 } 486 487 cmdiocbq->ndlp = lpfc_nlp_get(ndlp); 488 if (!cmdiocbq->ndlp) { 489 rc = -ENODEV; 490 goto free_rmp; 491 } 492 493 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); 494 if (iocb_stat == IOCB_SUCCESS) { 495 spin_lock_irqsave(&phba->hbalock, flags); 496 /* make sure the I/O had not been completed yet */ 497 if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) { 498 /* open up abort window to timeout handler */ 499 cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING; 500 } 501 spin_unlock_irqrestore(&phba->hbalock, flags); 502 return 0; /* done for now */ 503 } else if (iocb_stat == IOCB_BUSY) { 504 rc = -EAGAIN; 505 } else { 506 rc = -EIO; 507 } 508 509 /* iocb failed so cleanup */ 510 lpfc_nlp_put(ndlp); 511 512 free_rmp: 513 lpfc_free_bsg_buffers(phba, rmp); 514 free_cmp: 515 lpfc_free_bsg_buffers(phba, cmp); 516 free_bmp: 517 if (bmp->virt) 518 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 519 kfree(bmp); 520 free_cmdiocbq: 521 lpfc_sli_release_iocbq(phba, cmdiocbq); 522 free_dd: 523 kfree(dd_data); 524 no_dd_data: 525 /* make error code available to userspace */ 526 bsg_reply->result = rc; 527 job->dd_data = NULL; 528 return rc; 529 } 530 531 /** 532 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler 533 * @phba: Pointer to HBA context object. 534 * @cmdiocbq: Pointer to command iocb. 535 * @rspiocbq: Pointer to response iocb. 536 * 537 * This function is the completion handler for iocbs issued using 538 * lpfc_bsg_rport_els_cmp function. This function is called by the 539 * ring event handler function without any lock held. This function 540 * can be called from both worker thread context and interrupt 541 * context. This function also can be called from other thread which 542 * cleans up the SLI layer objects. 543 * This function copies the contents of the response iocb to the 544 * response iocb memory object provided by the caller of 545 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 546 * sleeps for the iocb completion. 547 **/ 548 static void 549 lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba, 550 struct lpfc_iocbq *cmdiocbq, 551 struct lpfc_iocbq *rspiocbq) 552 { 553 struct bsg_job_data *dd_data; 554 struct bsg_job *job; 555 struct fc_bsg_reply *bsg_reply; 556 struct lpfc_nodelist *ndlp; 557 struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL; 558 struct fc_bsg_ctels_reply *els_reply; 559 uint8_t *rjt_data; 560 unsigned long flags; 561 unsigned int rsp_size; 562 int rc = 0; 563 u32 ulp_status, ulp_word4, total_data_placed; 564 565 dd_data = cmdiocbq->context_un.dd_data; 566 ndlp = dd_data->context_un.iocb.ndlp; 567 cmdiocbq->ndlp = ndlp; 568 569 /* Determine if job has been aborted */ 570 spin_lock_irqsave(&phba->ct_ev_lock, flags); 571 job = dd_data->set_job; 572 if (job) { 573 bsg_reply = job->reply; 574 /* Prevent timeout handling from trying to abort job */ 575 job->dd_data = NULL; 576 } 577 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 578 579 /* Close the timeout handler abort window */ 580 spin_lock_irqsave(&phba->hbalock, flags); 581 cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING; 582 spin_unlock_irqrestore(&phba->hbalock, flags); 583 584 ulp_status = get_job_ulpstatus(phba, rspiocbq); 585 ulp_word4 = get_job_word4(phba, rspiocbq); 586 total_data_placed = get_job_data_placed(phba, rspiocbq); 587 pcmd = cmdiocbq->cmd_dmabuf; 588 prsp = (struct lpfc_dmabuf *)pcmd->list.next; 589 590 /* Copy the completed job data or determine the job status if job is 591 * still active 592 */ 593 594 if (job) { 595 if (ulp_status == IOSTAT_SUCCESS) { 596 rsp_size = total_data_placed; 597 bsg_reply->reply_payload_rcv_len = 598 sg_copy_from_buffer(job->reply_payload.sg_list, 599 job->reply_payload.sg_cnt, 600 prsp->virt, 601 rsp_size); 602 } else if (ulp_status == IOSTAT_LS_RJT) { 603 bsg_reply->reply_payload_rcv_len = 604 sizeof(struct fc_bsg_ctels_reply); 605 /* LS_RJT data returned in word 4 */ 606 rjt_data = (uint8_t *)&ulp_word4; 607 els_reply = &bsg_reply->reply_data.ctels_reply; 608 els_reply->status = FC_CTELS_STATUS_REJECT; 609 els_reply->rjt_data.action = rjt_data[3]; 610 els_reply->rjt_data.reason_code = rjt_data[2]; 611 els_reply->rjt_data.reason_explanation = rjt_data[1]; 612 els_reply->rjt_data.vendor_unique = rjt_data[0]; 613 } else if (ulp_status == IOSTAT_LOCAL_REJECT && 614 (ulp_word4 & IOERR_PARAM_MASK) == 615 IOERR_SEQUENCE_TIMEOUT) { 616 rc = -ETIMEDOUT; 617 } else { 618 rc = -EIO; 619 } 620 } 621 622 lpfc_els_free_iocb(phba, cmdiocbq); 623 624 lpfc_nlp_put(ndlp); 625 kfree(dd_data); 626 627 /* Complete the job if the job is still active */ 628 629 if (job) { 630 bsg_reply->result = rc; 631 bsg_job_done(job, bsg_reply->result, 632 bsg_reply->reply_payload_rcv_len); 633 } 634 return; 635 } 636 637 /** 638 * lpfc_bsg_rport_els - send an ELS command from a bsg request 639 * @job: fc_bsg_job to handle 640 **/ 641 static int 642 lpfc_bsg_rport_els(struct bsg_job *job) 643 { 644 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); 645 struct lpfc_hba *phba = vport->phba; 646 struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data; 647 struct lpfc_nodelist *ndlp = rdata->pnode; 648 struct fc_bsg_request *bsg_request = job->request; 649 struct fc_bsg_reply *bsg_reply = job->reply; 650 uint32_t elscmd; 651 uint32_t cmdsize; 652 struct lpfc_iocbq *cmdiocbq; 653 uint16_t rpi = 0; 654 struct bsg_job_data *dd_data; 655 unsigned long flags; 656 uint32_t creg_val; 657 int rc = 0; 658 659 /* in case no data is transferred */ 660 bsg_reply->reply_payload_rcv_len = 0; 661 662 /* verify the els command is not greater than the 663 * maximum ELS transfer size. 664 */ 665 666 if (job->request_payload.payload_len > FCELSSIZE) { 667 rc = -EINVAL; 668 goto no_dd_data; 669 } 670 671 /* allocate our bsg tracking structure */ 672 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 673 if (!dd_data) { 674 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 675 "2735 Failed allocation of dd_data\n"); 676 rc = -ENOMEM; 677 goto no_dd_data; 678 } 679 680 elscmd = bsg_request->rqst_data.r_els.els_code; 681 cmdsize = job->request_payload.payload_len; 682 683 if (!lpfc_nlp_get(ndlp)) { 684 rc = -ENODEV; 685 goto free_dd_data; 686 } 687 688 /* We will use the allocated dma buffers by prep els iocb for command 689 * and response to ensure if the job times out and the request is freed, 690 * we won't be dma into memory that is no longer allocated to for the 691 * request. 692 */ 693 cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, 694 ndlp->nlp_DID, elscmd); 695 if (!cmdiocbq) { 696 rc = -EIO; 697 goto release_ndlp; 698 } 699 700 /* Transfer the request payload to allocated command dma buffer */ 701 sg_copy_to_buffer(job->request_payload.sg_list, 702 job->request_payload.sg_cnt, 703 cmdiocbq->cmd_dmabuf->virt, 704 cmdsize); 705 706 rpi = ndlp->nlp_rpi; 707 708 if (phba->sli_rev == LPFC_SLI_REV4) 709 bf_set(wqe_ctxt_tag, &cmdiocbq->wqe.generic.wqe_com, 710 phba->sli4_hba.rpi_ids[rpi]); 711 else 712 cmdiocbq->iocb.ulpContext = rpi; 713 cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC; 714 cmdiocbq->context_un.dd_data = dd_data; 715 cmdiocbq->ndlp = ndlp; 716 cmdiocbq->cmd_cmpl = lpfc_bsg_rport_els_cmp; 717 dd_data->type = TYPE_IOCB; 718 dd_data->set_job = job; 719 dd_data->context_un.iocb.cmdiocbq = cmdiocbq; 720 dd_data->context_un.iocb.ndlp = ndlp; 721 dd_data->context_un.iocb.rmp = NULL; 722 job->dd_data = dd_data; 723 724 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 725 if (lpfc_readl(phba->HCregaddr, &creg_val)) { 726 rc = -EIO; 727 goto linkdown_err; 728 } 729 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 730 writel(creg_val, phba->HCregaddr); 731 readl(phba->HCregaddr); /* flush */ 732 } 733 734 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); 735 if (rc == IOCB_SUCCESS) { 736 spin_lock_irqsave(&phba->hbalock, flags); 737 /* make sure the I/O had not been completed/released */ 738 if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) { 739 /* open up abort window to timeout handler */ 740 cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING; 741 } 742 spin_unlock_irqrestore(&phba->hbalock, flags); 743 return 0; /* done for now */ 744 } else if (rc == IOCB_BUSY) { 745 rc = -EAGAIN; 746 } else { 747 rc = -EIO; 748 } 749 750 /* I/O issue failed. Cleanup resources. */ 751 752 linkdown_err: 753 lpfc_els_free_iocb(phba, cmdiocbq); 754 755 release_ndlp: 756 lpfc_nlp_put(ndlp); 757 758 free_dd_data: 759 kfree(dd_data); 760 761 no_dd_data: 762 /* make error code available to userspace */ 763 bsg_reply->result = rc; 764 job->dd_data = NULL; 765 return rc; 766 } 767 768 /** 769 * lpfc_bsg_event_free - frees an allocated event structure 770 * @kref: Pointer to a kref. 771 * 772 * Called from kref_put. Back cast the kref into an event structure address. 773 * Free any events to get, delete associated nodes, free any events to see, 774 * free any data then free the event itself. 775 **/ 776 static void 777 lpfc_bsg_event_free(struct kref *kref) 778 { 779 struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event, 780 kref); 781 struct event_data *ed; 782 783 list_del(&evt->node); 784 785 while (!list_empty(&evt->events_to_get)) { 786 ed = list_entry(evt->events_to_get.next, typeof(*ed), node); 787 list_del(&ed->node); 788 kfree(ed->data); 789 kfree(ed); 790 } 791 792 while (!list_empty(&evt->events_to_see)) { 793 ed = list_entry(evt->events_to_see.next, typeof(*ed), node); 794 list_del(&ed->node); 795 kfree(ed->data); 796 kfree(ed); 797 } 798 799 kfree(evt->dd_data); 800 kfree(evt); 801 } 802 803 /** 804 * lpfc_bsg_event_ref - increments the kref for an event 805 * @evt: Pointer to an event structure. 806 **/ 807 static inline void 808 lpfc_bsg_event_ref(struct lpfc_bsg_event *evt) 809 { 810 kref_get(&evt->kref); 811 } 812 813 /** 814 * lpfc_bsg_event_unref - Uses kref_put to free an event structure 815 * @evt: Pointer to an event structure. 816 **/ 817 static inline void 818 lpfc_bsg_event_unref(struct lpfc_bsg_event *evt) 819 { 820 kref_put(&evt->kref, lpfc_bsg_event_free); 821 } 822 823 /** 824 * lpfc_bsg_event_new - allocate and initialize a event structure 825 * @ev_mask: Mask of events. 826 * @ev_reg_id: Event reg id. 827 * @ev_req_id: Event request id. 828 **/ 829 static struct lpfc_bsg_event * 830 lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id) 831 { 832 struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL); 833 834 if (!evt) 835 return NULL; 836 837 INIT_LIST_HEAD(&evt->events_to_get); 838 INIT_LIST_HEAD(&evt->events_to_see); 839 evt->type_mask = ev_mask; 840 evt->req_id = ev_req_id; 841 evt->reg_id = ev_reg_id; 842 evt->wait_time_stamp = jiffies; 843 evt->dd_data = NULL; 844 init_waitqueue_head(&evt->wq); 845 kref_init(&evt->kref); 846 return evt; 847 } 848 849 /** 850 * diag_cmd_data_free - Frees an lpfc dma buffer extension 851 * @phba: Pointer to HBA context object. 852 * @mlist: Pointer to an lpfc dma buffer extension. 853 **/ 854 static int 855 diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist) 856 { 857 struct lpfc_dmabufext *mlast; 858 struct pci_dev *pcidev; 859 struct list_head head, *curr, *next; 860 861 if ((!mlist) || (!lpfc_is_link_up(phba) && 862 (phba->link_flag & LS_LOOPBACK_MODE))) { 863 return 0; 864 } 865 866 pcidev = phba->pcidev; 867 list_add_tail(&head, &mlist->dma.list); 868 869 list_for_each_safe(curr, next, &head) { 870 mlast = list_entry(curr, struct lpfc_dmabufext , dma.list); 871 if (mlast->dma.virt) 872 dma_free_coherent(&pcidev->dev, 873 mlast->size, 874 mlast->dma.virt, 875 mlast->dma.phys); 876 kfree(mlast); 877 } 878 return 0; 879 } 880 881 /* 882 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command 883 * 884 * This function is called when an unsolicited CT command is received. It 885 * forwards the event to any processes registered to receive CT events. 886 **/ 887 int 888 lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 889 struct lpfc_iocbq *piocbq) 890 { 891 uint32_t evt_req_id = 0; 892 u16 cmd; 893 struct lpfc_dmabuf *dmabuf = NULL; 894 struct lpfc_bsg_event *evt; 895 struct event_data *evt_dat = NULL; 896 struct lpfc_iocbq *iocbq; 897 IOCB_t *iocb = NULL; 898 size_t offset = 0; 899 struct list_head head; 900 struct ulp_bde64 *bde; 901 dma_addr_t dma_addr; 902 int i; 903 struct lpfc_dmabuf *bdeBuf1 = piocbq->cmd_dmabuf; 904 struct lpfc_dmabuf *bdeBuf2 = piocbq->bpl_dmabuf; 905 struct lpfc_sli_ct_request *ct_req; 906 struct bsg_job *job = NULL; 907 struct fc_bsg_reply *bsg_reply; 908 struct bsg_job_data *dd_data = NULL; 909 unsigned long flags; 910 int size = 0; 911 u32 bde_count = 0; 912 913 INIT_LIST_HEAD(&head); 914 list_add_tail(&head, &piocbq->list); 915 916 ct_req = (struct lpfc_sli_ct_request *)bdeBuf1->virt; 917 evt_req_id = ct_req->FsType; 918 cmd = be16_to_cpu(ct_req->CommandResponse.bits.CmdRsp); 919 920 spin_lock_irqsave(&phba->ct_ev_lock, flags); 921 list_for_each_entry(evt, &phba->ct_ev_waiters, node) { 922 if (!(evt->type_mask & FC_REG_CT_EVENT) || 923 evt->req_id != evt_req_id) 924 continue; 925 926 lpfc_bsg_event_ref(evt); 927 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 928 evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL); 929 if (evt_dat == NULL) { 930 spin_lock_irqsave(&phba->ct_ev_lock, flags); 931 lpfc_bsg_event_unref(evt); 932 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 933 "2614 Memory allocation failed for " 934 "CT event\n"); 935 break; 936 } 937 938 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 939 /* take accumulated byte count from the last iocbq */ 940 iocbq = list_entry(head.prev, typeof(*iocbq), list); 941 if (phba->sli_rev == LPFC_SLI_REV4) 942 evt_dat->len = iocbq->wcqe_cmpl.total_data_placed; 943 else 944 evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len; 945 } else { 946 list_for_each_entry(iocbq, &head, list) { 947 iocb = &iocbq->iocb; 948 for (i = 0; i < iocb->ulpBdeCount; 949 i++) 950 evt_dat->len += 951 iocb->un.cont64[i].tus.f.bdeSize; 952 } 953 } 954 955 evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL); 956 if (evt_dat->data == NULL) { 957 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 958 "2615 Memory allocation failed for " 959 "CT event data, size %d\n", 960 evt_dat->len); 961 kfree(evt_dat); 962 spin_lock_irqsave(&phba->ct_ev_lock, flags); 963 lpfc_bsg_event_unref(evt); 964 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 965 goto error_ct_unsol_exit; 966 } 967 968 list_for_each_entry(iocbq, &head, list) { 969 size = 0; 970 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 971 bdeBuf1 = iocbq->cmd_dmabuf; 972 bdeBuf2 = iocbq->bpl_dmabuf; 973 } 974 if (phba->sli_rev == LPFC_SLI_REV4) 975 bde_count = iocbq->wcqe_cmpl.word3; 976 else 977 bde_count = iocbq->iocb.ulpBdeCount; 978 for (i = 0; i < bde_count; i++) { 979 if (phba->sli3_options & 980 LPFC_SLI3_HBQ_ENABLED) { 981 if (i == 0) { 982 size = iocbq->wqe.gen_req.bde.tus.f.bdeSize; 983 dmabuf = bdeBuf1; 984 } else if (i == 1) { 985 size = iocbq->unsol_rcv_len; 986 dmabuf = bdeBuf2; 987 } 988 if ((offset + size) > evt_dat->len) 989 size = evt_dat->len - offset; 990 } else { 991 size = iocbq->iocb.un.cont64[i]. 992 tus.f.bdeSize; 993 bde = &iocbq->iocb.un.cont64[i]; 994 dma_addr = getPaddr(bde->addrHigh, 995 bde->addrLow); 996 dmabuf = lpfc_sli_ringpostbuf_get(phba, 997 pring, dma_addr); 998 } 999 if (!dmabuf) { 1000 lpfc_printf_log(phba, KERN_ERR, 1001 LOG_LIBDFC, "2616 No dmabuf " 1002 "found for iocbq x%px\n", 1003 iocbq); 1004 kfree(evt_dat->data); 1005 kfree(evt_dat); 1006 spin_lock_irqsave(&phba->ct_ev_lock, 1007 flags); 1008 lpfc_bsg_event_unref(evt); 1009 spin_unlock_irqrestore( 1010 &phba->ct_ev_lock, flags); 1011 goto error_ct_unsol_exit; 1012 } 1013 memcpy((char *)(evt_dat->data) + offset, 1014 dmabuf->virt, size); 1015 offset += size; 1016 if (evt_req_id != SLI_CT_ELX_LOOPBACK && 1017 !(phba->sli3_options & 1018 LPFC_SLI3_HBQ_ENABLED)) { 1019 lpfc_sli_ringpostbuf_put(phba, pring, 1020 dmabuf); 1021 } else { 1022 switch (cmd) { 1023 case ELX_LOOPBACK_DATA: 1024 if (phba->sli_rev < 1025 LPFC_SLI_REV4) 1026 diag_cmd_data_free(phba, 1027 (struct lpfc_dmabufext 1028 *)dmabuf); 1029 break; 1030 case ELX_LOOPBACK_XRI_SETUP: 1031 if ((phba->sli_rev == 1032 LPFC_SLI_REV2) || 1033 (phba->sli3_options & 1034 LPFC_SLI3_HBQ_ENABLED 1035 )) { 1036 lpfc_in_buf_free(phba, 1037 dmabuf); 1038 } else { 1039 lpfc_sli3_post_buffer(phba, 1040 pring, 1041 1); 1042 } 1043 break; 1044 default: 1045 if (!(phba->sli3_options & 1046 LPFC_SLI3_HBQ_ENABLED)) 1047 lpfc_sli3_post_buffer(phba, 1048 pring, 1049 1); 1050 break; 1051 } 1052 } 1053 } 1054 } 1055 1056 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1057 if (phba->sli_rev == LPFC_SLI_REV4) { 1058 evt_dat->immed_dat = phba->ctx_idx; 1059 phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX; 1060 /* Provide warning for over-run of the ct_ctx array */ 1061 if (phba->ct_ctx[evt_dat->immed_dat].valid == 1062 UNSOL_VALID) 1063 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 1064 "2717 CT context array entry " 1065 "[%d] over-run: oxid:x%x, " 1066 "sid:x%x\n", phba->ctx_idx, 1067 phba->ct_ctx[ 1068 evt_dat->immed_dat].oxid, 1069 phba->ct_ctx[ 1070 evt_dat->immed_dat].SID); 1071 phba->ct_ctx[evt_dat->immed_dat].rxid = 1072 get_job_ulpcontext(phba, piocbq); 1073 phba->ct_ctx[evt_dat->immed_dat].oxid = 1074 get_job_rcvoxid(phba, piocbq); 1075 phba->ct_ctx[evt_dat->immed_dat].SID = 1076 bf_get(wqe_els_did, 1077 &piocbq->wqe.xmit_els_rsp.wqe_dest); 1078 phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID; 1079 } else 1080 evt_dat->immed_dat = get_job_ulpcontext(phba, piocbq); 1081 1082 evt_dat->type = FC_REG_CT_EVENT; 1083 list_add(&evt_dat->node, &evt->events_to_see); 1084 if (evt_req_id == SLI_CT_ELX_LOOPBACK) { 1085 wake_up_interruptible(&evt->wq); 1086 lpfc_bsg_event_unref(evt); 1087 break; 1088 } 1089 1090 list_move(evt->events_to_see.prev, &evt->events_to_get); 1091 1092 dd_data = (struct bsg_job_data *)evt->dd_data; 1093 job = dd_data->set_job; 1094 dd_data->set_job = NULL; 1095 lpfc_bsg_event_unref(evt); 1096 if (job) { 1097 bsg_reply = job->reply; 1098 bsg_reply->reply_payload_rcv_len = size; 1099 /* make error code available to userspace */ 1100 bsg_reply->result = 0; 1101 job->dd_data = NULL; 1102 /* complete the job back to userspace */ 1103 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1104 bsg_job_done(job, bsg_reply->result, 1105 bsg_reply->reply_payload_rcv_len); 1106 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1107 } 1108 } 1109 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1110 1111 error_ct_unsol_exit: 1112 if (!list_empty(&head)) 1113 list_del(&head); 1114 if ((phba->sli_rev < LPFC_SLI_REV4) && 1115 (evt_req_id == SLI_CT_ELX_LOOPBACK)) 1116 return 0; 1117 return 1; 1118 } 1119 1120 /** 1121 * lpfc_bsg_ct_unsol_abort - handler ct abort to management plane 1122 * @phba: Pointer to HBA context object. 1123 * @dmabuf: pointer to a dmabuf that describes the FC sequence 1124 * 1125 * This function handles abort to the CT command toward management plane 1126 * for SLI4 port. 1127 * 1128 * If the pending context of a CT command to management plane present, clears 1129 * such context and returns 1 for handled; otherwise, it returns 0 indicating 1130 * no context exists. 1131 **/ 1132 int 1133 lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf) 1134 { 1135 struct fc_frame_header fc_hdr; 1136 struct fc_frame_header *fc_hdr_ptr = &fc_hdr; 1137 int ctx_idx, handled = 0; 1138 uint16_t oxid, rxid; 1139 uint32_t sid; 1140 1141 memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); 1142 sid = sli4_sid_from_fc_hdr(fc_hdr_ptr); 1143 oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id); 1144 rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id); 1145 1146 for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) { 1147 if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID) 1148 continue; 1149 if (phba->ct_ctx[ctx_idx].rxid != rxid) 1150 continue; 1151 if (phba->ct_ctx[ctx_idx].oxid != oxid) 1152 continue; 1153 if (phba->ct_ctx[ctx_idx].SID != sid) 1154 continue; 1155 phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID; 1156 handled = 1; 1157 } 1158 return handled; 1159 } 1160 1161 /** 1162 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command 1163 * @job: SET_EVENT fc_bsg_job 1164 **/ 1165 static int 1166 lpfc_bsg_hba_set_event(struct bsg_job *job) 1167 { 1168 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); 1169 struct lpfc_hba *phba = vport->phba; 1170 struct fc_bsg_request *bsg_request = job->request; 1171 struct set_ct_event *event_req; 1172 struct lpfc_bsg_event *evt; 1173 int rc = 0; 1174 struct bsg_job_data *dd_data = NULL; 1175 uint32_t ev_mask; 1176 unsigned long flags; 1177 1178 if (job->request_len < 1179 sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) { 1180 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1181 "2612 Received SET_CT_EVENT below minimum " 1182 "size\n"); 1183 rc = -EINVAL; 1184 goto job_error; 1185 } 1186 1187 event_req = (struct set_ct_event *) 1188 bsg_request->rqst_data.h_vendor.vendor_cmd; 1189 ev_mask = ((uint32_t)(unsigned long)event_req->type_mask & 1190 FC_REG_EVENT_MASK); 1191 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1192 list_for_each_entry(evt, &phba->ct_ev_waiters, node) { 1193 if (evt->reg_id == event_req->ev_reg_id) { 1194 lpfc_bsg_event_ref(evt); 1195 evt->wait_time_stamp = jiffies; 1196 dd_data = (struct bsg_job_data *)evt->dd_data; 1197 break; 1198 } 1199 } 1200 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1201 1202 if (&evt->node == &phba->ct_ev_waiters) { 1203 /* no event waiting struct yet - first call */ 1204 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 1205 if (dd_data == NULL) { 1206 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1207 "2734 Failed allocation of dd_data\n"); 1208 rc = -ENOMEM; 1209 goto job_error; 1210 } 1211 evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id, 1212 event_req->ev_req_id); 1213 if (!evt) { 1214 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1215 "2617 Failed allocation of event " 1216 "waiter\n"); 1217 rc = -ENOMEM; 1218 goto job_error; 1219 } 1220 dd_data->type = TYPE_EVT; 1221 dd_data->set_job = NULL; 1222 dd_data->context_un.evt = evt; 1223 evt->dd_data = (void *)dd_data; 1224 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1225 list_add(&evt->node, &phba->ct_ev_waiters); 1226 lpfc_bsg_event_ref(evt); 1227 evt->wait_time_stamp = jiffies; 1228 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1229 } 1230 1231 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1232 evt->waiting = 1; 1233 dd_data->set_job = job; /* for unsolicited command */ 1234 job->dd_data = dd_data; /* for fc transport timeout callback*/ 1235 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1236 return 0; /* call job done later */ 1237 1238 job_error: 1239 kfree(dd_data); 1240 job->dd_data = NULL; 1241 return rc; 1242 } 1243 1244 /** 1245 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command 1246 * @job: GET_EVENT fc_bsg_job 1247 **/ 1248 static int 1249 lpfc_bsg_hba_get_event(struct bsg_job *job) 1250 { 1251 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); 1252 struct lpfc_hba *phba = vport->phba; 1253 struct fc_bsg_request *bsg_request = job->request; 1254 struct fc_bsg_reply *bsg_reply = job->reply; 1255 struct get_ct_event *event_req; 1256 struct get_ct_event_reply *event_reply; 1257 struct lpfc_bsg_event *evt, *evt_next; 1258 struct event_data *evt_dat = NULL; 1259 unsigned long flags; 1260 uint32_t rc = 0; 1261 1262 if (job->request_len < 1263 sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) { 1264 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1265 "2613 Received GET_CT_EVENT request below " 1266 "minimum size\n"); 1267 rc = -EINVAL; 1268 goto job_error; 1269 } 1270 1271 event_req = (struct get_ct_event *) 1272 bsg_request->rqst_data.h_vendor.vendor_cmd; 1273 1274 event_reply = (struct get_ct_event_reply *) 1275 bsg_reply->reply_data.vendor_reply.vendor_rsp; 1276 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1277 list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) { 1278 if (evt->reg_id == event_req->ev_reg_id) { 1279 if (list_empty(&evt->events_to_get)) 1280 break; 1281 lpfc_bsg_event_ref(evt); 1282 evt->wait_time_stamp = jiffies; 1283 evt_dat = list_entry(evt->events_to_get.prev, 1284 struct event_data, node); 1285 list_del(&evt_dat->node); 1286 break; 1287 } 1288 } 1289 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1290 1291 /* The app may continue to ask for event data until it gets 1292 * an error indicating that there isn't anymore 1293 */ 1294 if (evt_dat == NULL) { 1295 bsg_reply->reply_payload_rcv_len = 0; 1296 rc = -ENOENT; 1297 goto job_error; 1298 } 1299 1300 if (evt_dat->len > job->request_payload.payload_len) { 1301 evt_dat->len = job->request_payload.payload_len; 1302 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1303 "2618 Truncated event data at %d " 1304 "bytes\n", 1305 job->request_payload.payload_len); 1306 } 1307 1308 event_reply->type = evt_dat->type; 1309 event_reply->immed_data = evt_dat->immed_dat; 1310 if (evt_dat->len > 0) 1311 bsg_reply->reply_payload_rcv_len = 1312 sg_copy_from_buffer(job->request_payload.sg_list, 1313 job->request_payload.sg_cnt, 1314 evt_dat->data, evt_dat->len); 1315 else 1316 bsg_reply->reply_payload_rcv_len = 0; 1317 1318 if (evt_dat) { 1319 kfree(evt_dat->data); 1320 kfree(evt_dat); 1321 } 1322 1323 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1324 lpfc_bsg_event_unref(evt); 1325 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1326 job->dd_data = NULL; 1327 bsg_reply->result = 0; 1328 bsg_job_done(job, bsg_reply->result, 1329 bsg_reply->reply_payload_rcv_len); 1330 return 0; 1331 1332 job_error: 1333 job->dd_data = NULL; 1334 bsg_reply->result = rc; 1335 return rc; 1336 } 1337 1338 /** 1339 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler 1340 * @phba: Pointer to HBA context object. 1341 * @cmdiocbq: Pointer to command iocb. 1342 * @rspiocbq: Pointer to response iocb. 1343 * 1344 * This function is the completion handler for iocbs issued using 1345 * lpfc_issue_ct_rsp_cmp function. This function is called by the 1346 * ring event handler function without any lock held. This function 1347 * can be called from both worker thread context and interrupt 1348 * context. This function also can be called from other thread which 1349 * cleans up the SLI layer objects. 1350 * This function copy the contents of the response iocb to the 1351 * response iocb memory object provided by the caller of 1352 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 1353 * sleeps for the iocb completion. 1354 **/ 1355 static void 1356 lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba, 1357 struct lpfc_iocbq *cmdiocbq, 1358 struct lpfc_iocbq *rspiocbq) 1359 { 1360 struct bsg_job_data *dd_data; 1361 struct bsg_job *job; 1362 struct fc_bsg_reply *bsg_reply; 1363 struct lpfc_dmabuf *bmp, *cmp; 1364 struct lpfc_nodelist *ndlp; 1365 unsigned long flags; 1366 int rc = 0; 1367 u32 ulp_status, ulp_word4; 1368 1369 dd_data = cmdiocbq->context_un.dd_data; 1370 1371 /* Determine if job has been aborted */ 1372 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1373 job = dd_data->set_job; 1374 if (job) { 1375 /* Prevent timeout handling from trying to abort job */ 1376 job->dd_data = NULL; 1377 } 1378 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1379 1380 /* Close the timeout handler abort window */ 1381 spin_lock_irqsave(&phba->hbalock, flags); 1382 cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING; 1383 spin_unlock_irqrestore(&phba->hbalock, flags); 1384 1385 ndlp = dd_data->context_un.iocb.ndlp; 1386 cmp = cmdiocbq->cmd_dmabuf; 1387 bmp = cmdiocbq->bpl_dmabuf; 1388 1389 ulp_status = get_job_ulpstatus(phba, rspiocbq); 1390 ulp_word4 = get_job_word4(phba, rspiocbq); 1391 1392 /* Copy the completed job data or set the error status */ 1393 1394 if (job) { 1395 bsg_reply = job->reply; 1396 if (ulp_status) { 1397 if (ulp_status == IOSTAT_LOCAL_REJECT) { 1398 switch (ulp_word4 & IOERR_PARAM_MASK) { 1399 case IOERR_SEQUENCE_TIMEOUT: 1400 rc = -ETIMEDOUT; 1401 break; 1402 case IOERR_INVALID_RPI: 1403 rc = -EFAULT; 1404 break; 1405 default: 1406 rc = -EACCES; 1407 break; 1408 } 1409 } else { 1410 rc = -EACCES; 1411 } 1412 } else { 1413 bsg_reply->reply_payload_rcv_len = 0; 1414 } 1415 } 1416 1417 lpfc_free_bsg_buffers(phba, cmp); 1418 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 1419 kfree(bmp); 1420 lpfc_sli_release_iocbq(phba, cmdiocbq); 1421 lpfc_nlp_put(ndlp); 1422 kfree(dd_data); 1423 1424 /* Complete the job if the job is still active */ 1425 1426 if (job) { 1427 bsg_reply->result = rc; 1428 bsg_job_done(job, bsg_reply->result, 1429 bsg_reply->reply_payload_rcv_len); 1430 } 1431 return; 1432 } 1433 1434 /** 1435 * lpfc_issue_ct_rsp - issue a ct response 1436 * @phba: Pointer to HBA context object. 1437 * @job: Pointer to the job object. 1438 * @tag: tag index value into the ports context exchange array. 1439 * @cmp: Pointer to a cmp dma buffer descriptor. 1440 * @bmp: Pointer to a bmp dma buffer descriptor. 1441 * @num_entry: Number of enties in the bde. 1442 **/ 1443 static int 1444 lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag, 1445 struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp, 1446 int num_entry) 1447 { 1448 struct lpfc_iocbq *ctiocb = NULL; 1449 int rc = 0; 1450 struct lpfc_nodelist *ndlp = NULL; 1451 struct bsg_job_data *dd_data; 1452 unsigned long flags; 1453 uint32_t creg_val; 1454 u16 ulp_context, iotag; 1455 1456 ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID); 1457 if (!ndlp) { 1458 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 1459 "2721 ndlp null for oxid %x SID %x\n", 1460 phba->ct_ctx[tag].rxid, 1461 phba->ct_ctx[tag].SID); 1462 return IOCB_ERROR; 1463 } 1464 1465 /* allocate our bsg tracking structure */ 1466 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 1467 if (!dd_data) { 1468 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1469 "2736 Failed allocation of dd_data\n"); 1470 rc = -ENOMEM; 1471 goto no_dd_data; 1472 } 1473 1474 /* Allocate buffer for command iocb */ 1475 ctiocb = lpfc_sli_get_iocbq(phba); 1476 if (!ctiocb) { 1477 rc = -ENOMEM; 1478 goto no_ctiocb; 1479 } 1480 1481 if (phba->sli_rev == LPFC_SLI_REV4) { 1482 /* Do not issue unsol response if oxid not marked as valid */ 1483 if (phba->ct_ctx[tag].valid != UNSOL_VALID) { 1484 rc = IOCB_ERROR; 1485 goto issue_ct_rsp_exit; 1486 } 1487 1488 lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp, 1489 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], 1490 phba->ct_ctx[tag].oxid, num_entry, 1491 FC_RCTL_DD_SOL_CTL, 1, 1492 CMD_XMIT_SEQUENCE64_WQE); 1493 1494 /* The exchange is done, mark the entry as invalid */ 1495 phba->ct_ctx[tag].valid = UNSOL_INVALID; 1496 iotag = get_wqe_reqtag(ctiocb); 1497 } else { 1498 lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp, 0, tag, num_entry, 1499 FC_RCTL_DD_SOL_CTL, 1, 1500 CMD_XMIT_SEQUENCE64_CX); 1501 ctiocb->num_bdes = num_entry; 1502 iotag = ctiocb->iocb.ulpIoTag; 1503 } 1504 1505 ulp_context = get_job_ulpcontext(phba, ctiocb); 1506 1507 /* Xmit CT response on exchange <xid> */ 1508 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 1509 "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n", 1510 ulp_context, iotag, tag, phba->link_state); 1511 1512 ctiocb->cmd_flag |= LPFC_IO_LIBDFC; 1513 ctiocb->vport = phba->pport; 1514 ctiocb->context_un.dd_data = dd_data; 1515 ctiocb->cmd_dmabuf = cmp; 1516 ctiocb->bpl_dmabuf = bmp; 1517 ctiocb->ndlp = ndlp; 1518 ctiocb->cmd_cmpl = lpfc_issue_ct_rsp_cmp; 1519 1520 dd_data->type = TYPE_IOCB; 1521 dd_data->set_job = job; 1522 dd_data->context_un.iocb.cmdiocbq = ctiocb; 1523 dd_data->context_un.iocb.ndlp = lpfc_nlp_get(ndlp); 1524 if (!dd_data->context_un.iocb.ndlp) { 1525 rc = -IOCB_ERROR; 1526 goto issue_ct_rsp_exit; 1527 } 1528 dd_data->context_un.iocb.rmp = NULL; 1529 job->dd_data = dd_data; 1530 1531 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 1532 if (lpfc_readl(phba->HCregaddr, &creg_val)) { 1533 rc = -IOCB_ERROR; 1534 goto issue_ct_rsp_exit; 1535 } 1536 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 1537 writel(creg_val, phba->HCregaddr); 1538 readl(phba->HCregaddr); /* flush */ 1539 } 1540 1541 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 1542 if (rc == IOCB_SUCCESS) { 1543 spin_lock_irqsave(&phba->hbalock, flags); 1544 /* make sure the I/O had not been completed/released */ 1545 if (ctiocb->cmd_flag & LPFC_IO_LIBDFC) { 1546 /* open up abort window to timeout handler */ 1547 ctiocb->cmd_flag |= LPFC_IO_CMD_OUTSTANDING; 1548 } 1549 spin_unlock_irqrestore(&phba->hbalock, flags); 1550 return 0; /* done for now */ 1551 } 1552 1553 /* iocb failed so cleanup */ 1554 job->dd_data = NULL; 1555 lpfc_nlp_put(ndlp); 1556 1557 issue_ct_rsp_exit: 1558 lpfc_sli_release_iocbq(phba, ctiocb); 1559 no_ctiocb: 1560 kfree(dd_data); 1561 no_dd_data: 1562 return rc; 1563 } 1564 1565 /** 1566 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command 1567 * @job: SEND_MGMT_RESP fc_bsg_job 1568 **/ 1569 static int 1570 lpfc_bsg_send_mgmt_rsp(struct bsg_job *job) 1571 { 1572 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); 1573 struct lpfc_hba *phba = vport->phba; 1574 struct fc_bsg_request *bsg_request = job->request; 1575 struct fc_bsg_reply *bsg_reply = job->reply; 1576 struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *) 1577 bsg_request->rqst_data.h_vendor.vendor_cmd; 1578 struct ulp_bde64 *bpl; 1579 struct lpfc_dmabuf *bmp = NULL, *cmp = NULL; 1580 int bpl_entries; 1581 uint32_t tag = mgmt_resp->tag; 1582 unsigned long reqbfrcnt = 1583 (unsigned long)job->request_payload.payload_len; 1584 int rc = 0; 1585 1586 /* in case no data is transferred */ 1587 bsg_reply->reply_payload_rcv_len = 0; 1588 1589 if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) { 1590 rc = -ERANGE; 1591 goto send_mgmt_rsp_exit; 1592 } 1593 1594 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 1595 if (!bmp) { 1596 rc = -ENOMEM; 1597 goto send_mgmt_rsp_exit; 1598 } 1599 1600 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); 1601 if (!bmp->virt) { 1602 rc = -ENOMEM; 1603 goto send_mgmt_rsp_free_bmp; 1604 } 1605 1606 INIT_LIST_HEAD(&bmp->list); 1607 bpl = (struct ulp_bde64 *) bmp->virt; 1608 bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64)); 1609 cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len, 1610 1, bpl, &bpl_entries); 1611 if (!cmp) { 1612 rc = -ENOMEM; 1613 goto send_mgmt_rsp_free_bmp; 1614 } 1615 lpfc_bsg_copy_data(cmp, &job->request_payload, 1616 job->request_payload.payload_len, 1); 1617 1618 rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries); 1619 1620 if (rc == IOCB_SUCCESS) 1621 return 0; /* done for now */ 1622 1623 rc = -EACCES; 1624 1625 lpfc_free_bsg_buffers(phba, cmp); 1626 1627 send_mgmt_rsp_free_bmp: 1628 if (bmp->virt) 1629 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 1630 kfree(bmp); 1631 send_mgmt_rsp_exit: 1632 /* make error code available to userspace */ 1633 bsg_reply->result = rc; 1634 job->dd_data = NULL; 1635 return rc; 1636 } 1637 1638 /** 1639 * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode 1640 * @phba: Pointer to HBA context object. 1641 * 1642 * This function is responsible for preparing driver for diag loopback 1643 * on device. 1644 */ 1645 static int 1646 lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba) 1647 { 1648 struct lpfc_vport **vports; 1649 struct Scsi_Host *shost; 1650 struct lpfc_sli *psli; 1651 struct lpfc_queue *qp = NULL; 1652 struct lpfc_sli_ring *pring; 1653 int i = 0; 1654 1655 psli = &phba->sli; 1656 if (!psli) 1657 return -ENODEV; 1658 1659 1660 if ((phba->link_state == LPFC_HBA_ERROR) || 1661 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) || 1662 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) 1663 return -EACCES; 1664 1665 vports = lpfc_create_vport_work_array(phba); 1666 if (vports) { 1667 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 1668 shost = lpfc_shost_from_vport(vports[i]); 1669 scsi_block_requests(shost); 1670 } 1671 lpfc_destroy_vport_work_array(phba, vports); 1672 } else { 1673 shost = lpfc_shost_from_vport(phba->pport); 1674 scsi_block_requests(shost); 1675 } 1676 1677 if (phba->sli_rev != LPFC_SLI_REV4) { 1678 pring = &psli->sli3_ring[LPFC_FCP_RING]; 1679 lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock); 1680 return 0; 1681 } 1682 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 1683 pring = qp->pring; 1684 if (!pring || (pring->ringno != LPFC_FCP_RING)) 1685 continue; 1686 if (!lpfc_emptyq_wait(phba, &pring->txcmplq, 1687 &pring->ring_lock)) 1688 break; 1689 } 1690 return 0; 1691 } 1692 1693 /** 1694 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode 1695 * @phba: Pointer to HBA context object. 1696 * 1697 * This function is responsible for driver exit processing of setting up 1698 * diag loopback mode on device. 1699 */ 1700 static void 1701 lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba) 1702 { 1703 struct Scsi_Host *shost; 1704 struct lpfc_vport **vports; 1705 int i; 1706 1707 vports = lpfc_create_vport_work_array(phba); 1708 if (vports) { 1709 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 1710 shost = lpfc_shost_from_vport(vports[i]); 1711 scsi_unblock_requests(shost); 1712 } 1713 lpfc_destroy_vport_work_array(phba, vports); 1714 } else { 1715 shost = lpfc_shost_from_vport(phba->pport); 1716 scsi_unblock_requests(shost); 1717 } 1718 return; 1719 } 1720 1721 /** 1722 * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command 1723 * @phba: Pointer to HBA context object. 1724 * @job: LPFC_BSG_VENDOR_DIAG_MODE 1725 * 1726 * This function is responsible for placing an sli3 port into diagnostic 1727 * loopback mode in order to perform a diagnostic loopback test. 1728 * All new scsi requests are blocked, a small delay is used to allow the 1729 * scsi requests to complete then the link is brought down. If the link is 1730 * is placed in loopback mode then scsi requests are again allowed 1731 * so the scsi mid-layer doesn't give up on the port. 1732 * All of this is done in-line. 1733 */ 1734 static int 1735 lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job) 1736 { 1737 struct fc_bsg_request *bsg_request = job->request; 1738 struct fc_bsg_reply *bsg_reply = job->reply; 1739 struct diag_mode_set *loopback_mode; 1740 uint32_t link_flags; 1741 uint32_t timeout; 1742 LPFC_MBOXQ_t *pmboxq = NULL; 1743 int mbxstatus = MBX_SUCCESS; 1744 int i = 0; 1745 int rc = 0; 1746 1747 /* no data to return just the return code */ 1748 bsg_reply->reply_payload_rcv_len = 0; 1749 1750 if (job->request_len < sizeof(struct fc_bsg_request) + 1751 sizeof(struct diag_mode_set)) { 1752 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1753 "2738 Received DIAG MODE request size:%d " 1754 "below the minimum size:%d\n", 1755 job->request_len, 1756 (int)(sizeof(struct fc_bsg_request) + 1757 sizeof(struct diag_mode_set))); 1758 rc = -EINVAL; 1759 goto job_error; 1760 } 1761 1762 rc = lpfc_bsg_diag_mode_enter(phba); 1763 if (rc) 1764 goto job_error; 1765 1766 /* bring the link to diagnostic mode */ 1767 loopback_mode = (struct diag_mode_set *) 1768 bsg_request->rqst_data.h_vendor.vendor_cmd; 1769 link_flags = loopback_mode->type; 1770 timeout = loopback_mode->timeout * 100; 1771 1772 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1773 if (!pmboxq) { 1774 rc = -ENOMEM; 1775 goto loopback_mode_exit; 1776 } 1777 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 1778 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK; 1779 pmboxq->u.mb.mbxOwner = OWN_HOST; 1780 1781 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO); 1782 1783 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) { 1784 /* wait for link down before proceeding */ 1785 i = 0; 1786 while (phba->link_state != LPFC_LINK_DOWN) { 1787 if (i++ > timeout) { 1788 rc = -ETIMEDOUT; 1789 goto loopback_mode_exit; 1790 } 1791 msleep(10); 1792 } 1793 1794 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 1795 if (link_flags == INTERNAL_LOOP_BACK) 1796 pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB; 1797 else 1798 pmboxq->u.mb.un.varInitLnk.link_flags = 1799 FLAGS_TOPOLOGY_MODE_LOOP; 1800 1801 pmboxq->u.mb.mbxCommand = MBX_INIT_LINK; 1802 pmboxq->u.mb.mbxOwner = OWN_HOST; 1803 1804 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, 1805 LPFC_MBOX_TMO); 1806 1807 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) 1808 rc = -ENODEV; 1809 else { 1810 spin_lock_irq(&phba->hbalock); 1811 phba->link_flag |= LS_LOOPBACK_MODE; 1812 spin_unlock_irq(&phba->hbalock); 1813 /* wait for the link attention interrupt */ 1814 msleep(100); 1815 1816 i = 0; 1817 while (phba->link_state != LPFC_HBA_READY) { 1818 if (i++ > timeout) { 1819 rc = -ETIMEDOUT; 1820 break; 1821 } 1822 1823 msleep(10); 1824 } 1825 } 1826 1827 } else 1828 rc = -ENODEV; 1829 1830 loopback_mode_exit: 1831 lpfc_bsg_diag_mode_exit(phba); 1832 1833 /* 1834 * Let SLI layer release mboxq if mbox command completed after timeout. 1835 */ 1836 if (pmboxq && mbxstatus != MBX_TIMEOUT) 1837 mempool_free(pmboxq, phba->mbox_mem_pool); 1838 1839 job_error: 1840 /* make error code available to userspace */ 1841 bsg_reply->result = rc; 1842 /* complete the job back to userspace if no error */ 1843 if (rc == 0) 1844 bsg_job_done(job, bsg_reply->result, 1845 bsg_reply->reply_payload_rcv_len); 1846 return rc; 1847 } 1848 1849 /** 1850 * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state 1851 * @phba: Pointer to HBA context object. 1852 * @diag: Flag for set link to diag or nomral operation state. 1853 * 1854 * This function is responsible for issuing a sli4 mailbox command for setting 1855 * link to either diag state or normal operation state. 1856 */ 1857 static int 1858 lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag) 1859 { 1860 LPFC_MBOXQ_t *pmboxq; 1861 struct lpfc_mbx_set_link_diag_state *link_diag_state; 1862 uint32_t req_len, alloc_len; 1863 int mbxstatus = MBX_SUCCESS, rc; 1864 1865 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1866 if (!pmboxq) 1867 return -ENOMEM; 1868 1869 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) - 1870 sizeof(struct lpfc_sli4_cfg_mhdr)); 1871 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 1872 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE, 1873 req_len, LPFC_SLI4_MBX_EMBED); 1874 if (alloc_len != req_len) { 1875 rc = -ENOMEM; 1876 goto link_diag_state_set_out; 1877 } 1878 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 1879 "3128 Set link to diagnostic state:x%x (x%x/x%x)\n", 1880 diag, phba->sli4_hba.lnk_info.lnk_tp, 1881 phba->sli4_hba.lnk_info.lnk_no); 1882 1883 link_diag_state = &pmboxq->u.mqe.un.link_diag_state; 1884 bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req, 1885 LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE); 1886 bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req, 1887 phba->sli4_hba.lnk_info.lnk_no); 1888 bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req, 1889 phba->sli4_hba.lnk_info.lnk_tp); 1890 if (diag) 1891 bf_set(lpfc_mbx_set_diag_state_diag, 1892 &link_diag_state->u.req, 1); 1893 else 1894 bf_set(lpfc_mbx_set_diag_state_diag, 1895 &link_diag_state->u.req, 0); 1896 1897 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO); 1898 1899 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) 1900 rc = 0; 1901 else 1902 rc = -ENODEV; 1903 1904 link_diag_state_set_out: 1905 if (pmboxq && (mbxstatus != MBX_TIMEOUT)) 1906 mempool_free(pmboxq, phba->mbox_mem_pool); 1907 1908 return rc; 1909 } 1910 1911 /** 1912 * lpfc_sli4_bsg_set_loopback_mode - set sli4 internal loopback diagnostic 1913 * @phba: Pointer to HBA context object. 1914 * @mode: loopback mode to set 1915 * @link_no: link number for loopback mode to set 1916 * 1917 * This function is responsible for issuing a sli4 mailbox command for setting 1918 * up loopback diagnostic for a link. 1919 */ 1920 static int 1921 lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba *phba, int mode, 1922 uint32_t link_no) 1923 { 1924 LPFC_MBOXQ_t *pmboxq; 1925 uint32_t req_len, alloc_len; 1926 struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback; 1927 int mbxstatus = MBX_SUCCESS, rc = 0; 1928 1929 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1930 if (!pmboxq) 1931 return -ENOMEM; 1932 req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) - 1933 sizeof(struct lpfc_sli4_cfg_mhdr)); 1934 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 1935 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK, 1936 req_len, LPFC_SLI4_MBX_EMBED); 1937 if (alloc_len != req_len) { 1938 mempool_free(pmboxq, phba->mbox_mem_pool); 1939 return -ENOMEM; 1940 } 1941 link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback; 1942 bf_set(lpfc_mbx_set_diag_state_link_num, 1943 &link_diag_loopback->u.req, link_no); 1944 1945 if (phba->sli4_hba.conf_trunk & (1 << link_no)) { 1946 bf_set(lpfc_mbx_set_diag_state_link_type, 1947 &link_diag_loopback->u.req, LPFC_LNK_FC_TRUNKED); 1948 } else { 1949 bf_set(lpfc_mbx_set_diag_state_link_type, 1950 &link_diag_loopback->u.req, 1951 phba->sli4_hba.lnk_info.lnk_tp); 1952 } 1953 1954 bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req, 1955 mode); 1956 1957 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO); 1958 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) { 1959 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1960 "3127 Failed setup loopback mode mailbox " 1961 "command, rc:x%x, status:x%x\n", mbxstatus, 1962 pmboxq->u.mb.mbxStatus); 1963 rc = -ENODEV; 1964 } 1965 if (pmboxq && (mbxstatus != MBX_TIMEOUT)) 1966 mempool_free(pmboxq, phba->mbox_mem_pool); 1967 return rc; 1968 } 1969 1970 /** 1971 * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic 1972 * @phba: Pointer to HBA context object. 1973 * 1974 * This function set up SLI4 FC port registrations for diagnostic run, which 1975 * includes all the rpis, vfi, and also vpi. 1976 */ 1977 static int 1978 lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba) 1979 { 1980 if (test_bit(FC_VFI_REGISTERED, &phba->pport->fc_flag)) { 1981 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1982 "3136 Port still had vfi registered: " 1983 "mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n", 1984 phba->pport->fc_myDID, phba->fcf.fcfi, 1985 phba->sli4_hba.vfi_ids[phba->pport->vfi], 1986 phba->vpi_ids[phba->pport->vpi]); 1987 return -EINVAL; 1988 } 1989 return lpfc_issue_reg_vfi(phba->pport); 1990 } 1991 1992 /** 1993 * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command 1994 * @phba: Pointer to HBA context object. 1995 * @job: LPFC_BSG_VENDOR_DIAG_MODE 1996 * 1997 * This function is responsible for placing an sli4 port into diagnostic 1998 * loopback mode in order to perform a diagnostic loopback test. 1999 */ 2000 static int 2001 lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job) 2002 { 2003 struct fc_bsg_request *bsg_request = job->request; 2004 struct fc_bsg_reply *bsg_reply = job->reply; 2005 struct diag_mode_set *loopback_mode; 2006 uint32_t link_flags, timeout, link_no; 2007 int i, rc = 0; 2008 2009 /* no data to return just the return code */ 2010 bsg_reply->reply_payload_rcv_len = 0; 2011 2012 if (job->request_len < sizeof(struct fc_bsg_request) + 2013 sizeof(struct diag_mode_set)) { 2014 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2015 "3011 Received DIAG MODE request size:%d " 2016 "below the minimum size:%d\n", 2017 job->request_len, 2018 (int)(sizeof(struct fc_bsg_request) + 2019 sizeof(struct diag_mode_set))); 2020 rc = -EINVAL; 2021 goto job_done; 2022 } 2023 2024 loopback_mode = (struct diag_mode_set *) 2025 bsg_request->rqst_data.h_vendor.vendor_cmd; 2026 link_flags = loopback_mode->type; 2027 timeout = loopback_mode->timeout * 100; 2028 2029 if (loopback_mode->physical_link == -1) 2030 link_no = phba->sli4_hba.lnk_info.lnk_no; 2031 else 2032 link_no = loopback_mode->physical_link; 2033 2034 if (link_flags == DISABLE_LOOP_BACK) { 2035 rc = lpfc_sli4_bsg_set_loopback_mode(phba, 2036 LPFC_DIAG_LOOPBACK_TYPE_DISABLE, 2037 link_no); 2038 if (!rc) { 2039 /* Unset the need disable bit */ 2040 phba->sli4_hba.conf_trunk &= ~((1 << link_no) << 4); 2041 } 2042 goto job_done; 2043 } else { 2044 /* Check if we need to disable the loopback state */ 2045 if (phba->sli4_hba.conf_trunk & ((1 << link_no) << 4)) { 2046 rc = -EPERM; 2047 goto job_done; 2048 } 2049 } 2050 2051 rc = lpfc_bsg_diag_mode_enter(phba); 2052 if (rc) 2053 goto job_done; 2054 2055 /* indicate we are in loobpack diagnostic mode */ 2056 spin_lock_irq(&phba->hbalock); 2057 phba->link_flag |= LS_LOOPBACK_MODE; 2058 spin_unlock_irq(&phba->hbalock); 2059 2060 /* reset port to start frome scratch */ 2061 rc = lpfc_selective_reset(phba); 2062 if (rc) 2063 goto job_done; 2064 2065 /* bring the link to diagnostic mode */ 2066 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 2067 "3129 Bring link to diagnostic state.\n"); 2068 2069 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1); 2070 if (rc) { 2071 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2072 "3130 Failed to bring link to diagnostic " 2073 "state, rc:x%x\n", rc); 2074 goto loopback_mode_exit; 2075 } 2076 2077 /* wait for link down before proceeding */ 2078 i = 0; 2079 while (phba->link_state != LPFC_LINK_DOWN) { 2080 if (i++ > timeout) { 2081 rc = -ETIMEDOUT; 2082 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 2083 "3131 Timeout waiting for link to " 2084 "diagnostic mode, timeout:%d ms\n", 2085 timeout * 10); 2086 goto loopback_mode_exit; 2087 } 2088 msleep(10); 2089 } 2090 2091 /* set up loopback mode */ 2092 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 2093 "3132 Set up loopback mode:x%x\n", link_flags); 2094 2095 switch (link_flags) { 2096 case INTERNAL_LOOP_BACK: 2097 if (phba->sli4_hba.conf_trunk & (1 << link_no)) { 2098 rc = lpfc_sli4_bsg_set_loopback_mode(phba, 2099 LPFC_DIAG_LOOPBACK_TYPE_INTERNAL, 2100 link_no); 2101 } else { 2102 /* Trunk is configured, but link is not in this trunk */ 2103 if (phba->sli4_hba.conf_trunk) { 2104 rc = -ELNRNG; 2105 goto loopback_mode_exit; 2106 } 2107 2108 rc = lpfc_sli4_bsg_set_loopback_mode(phba, 2109 LPFC_DIAG_LOOPBACK_TYPE_INTERNAL, 2110 link_no); 2111 } 2112 2113 if (!rc) { 2114 /* Set the need disable bit */ 2115 phba->sli4_hba.conf_trunk |= (1 << link_no) << 4; 2116 } 2117 2118 break; 2119 case EXTERNAL_LOOP_BACK: 2120 if (phba->sli4_hba.conf_trunk & (1 << link_no)) { 2121 rc = lpfc_sli4_bsg_set_loopback_mode(phba, 2122 LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL_TRUNKED, 2123 link_no); 2124 } else { 2125 /* Trunk is configured, but link is not in this trunk */ 2126 if (phba->sli4_hba.conf_trunk) { 2127 rc = -ELNRNG; 2128 goto loopback_mode_exit; 2129 } 2130 2131 rc = lpfc_sli4_bsg_set_loopback_mode(phba, 2132 LPFC_DIAG_LOOPBACK_TYPE_SERDES, 2133 link_no); 2134 } 2135 2136 if (!rc) { 2137 /* Set the need disable bit */ 2138 phba->sli4_hba.conf_trunk |= (1 << link_no) << 4; 2139 } 2140 2141 break; 2142 default: 2143 rc = -EINVAL; 2144 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 2145 "3141 Loopback mode:x%x not supported\n", 2146 link_flags); 2147 goto loopback_mode_exit; 2148 } 2149 2150 if (!rc) { 2151 /* wait for the link attention interrupt */ 2152 msleep(100); 2153 i = 0; 2154 while (phba->link_state < LPFC_LINK_UP) { 2155 if (i++ > timeout) { 2156 rc = -ETIMEDOUT; 2157 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 2158 "3137 Timeout waiting for link up " 2159 "in loopback mode, timeout:%d ms\n", 2160 timeout * 10); 2161 break; 2162 } 2163 msleep(10); 2164 } 2165 } 2166 2167 /* port resource registration setup for loopback diagnostic */ 2168 if (!rc) { 2169 /* set up a none zero myDID for loopback test */ 2170 phba->pport->fc_myDID = 1; 2171 rc = lpfc_sli4_diag_fcport_reg_setup(phba); 2172 } else 2173 goto loopback_mode_exit; 2174 2175 if (!rc) { 2176 /* wait for the port ready */ 2177 msleep(100); 2178 i = 0; 2179 while (phba->link_state != LPFC_HBA_READY) { 2180 if (i++ > timeout) { 2181 rc = -ETIMEDOUT; 2182 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 2183 "3133 Timeout waiting for port " 2184 "loopback mode ready, timeout:%d ms\n", 2185 timeout * 10); 2186 break; 2187 } 2188 msleep(10); 2189 } 2190 } 2191 2192 loopback_mode_exit: 2193 /* clear loopback diagnostic mode */ 2194 if (rc) { 2195 spin_lock_irq(&phba->hbalock); 2196 phba->link_flag &= ~LS_LOOPBACK_MODE; 2197 spin_unlock_irq(&phba->hbalock); 2198 } 2199 lpfc_bsg_diag_mode_exit(phba); 2200 2201 job_done: 2202 /* make error code available to userspace */ 2203 bsg_reply->result = rc; 2204 /* complete the job back to userspace if no error */ 2205 if (rc == 0) 2206 bsg_job_done(job, bsg_reply->result, 2207 bsg_reply->reply_payload_rcv_len); 2208 return rc; 2209 } 2210 2211 /** 2212 * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode 2213 * @job: LPFC_BSG_VENDOR_DIAG_MODE 2214 * 2215 * This function is responsible for responding to check and dispatch bsg diag 2216 * command from the user to proper driver action routines. 2217 */ 2218 static int 2219 lpfc_bsg_diag_loopback_mode(struct bsg_job *job) 2220 { 2221 struct Scsi_Host *shost; 2222 struct lpfc_vport *vport; 2223 struct lpfc_hba *phba; 2224 int rc; 2225 2226 shost = fc_bsg_to_shost(job); 2227 if (!shost) 2228 return -ENODEV; 2229 vport = shost_priv(shost); 2230 if (!vport) 2231 return -ENODEV; 2232 phba = vport->phba; 2233 if (!phba) 2234 return -ENODEV; 2235 2236 if (phba->sli_rev < LPFC_SLI_REV4) 2237 rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job); 2238 else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 2239 LPFC_SLI_INTF_IF_TYPE_2) 2240 rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job); 2241 else 2242 rc = -ENODEV; 2243 2244 return rc; 2245 } 2246 2247 /** 2248 * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode 2249 * @job: LPFC_BSG_VENDOR_DIAG_MODE_END 2250 * 2251 * This function is responsible for responding to check and dispatch bsg diag 2252 * command from the user to proper driver action routines. 2253 */ 2254 static int 2255 lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job) 2256 { 2257 struct fc_bsg_request *bsg_request = job->request; 2258 struct fc_bsg_reply *bsg_reply = job->reply; 2259 struct Scsi_Host *shost; 2260 struct lpfc_vport *vport; 2261 struct lpfc_hba *phba; 2262 struct diag_mode_set *loopback_mode_end_cmd; 2263 uint32_t timeout; 2264 int rc, i; 2265 2266 shost = fc_bsg_to_shost(job); 2267 if (!shost) 2268 return -ENODEV; 2269 vport = shost_priv(shost); 2270 if (!vport) 2271 return -ENODEV; 2272 phba = vport->phba; 2273 if (!phba) 2274 return -ENODEV; 2275 2276 if (phba->sli_rev < LPFC_SLI_REV4) 2277 return -ENODEV; 2278 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 2279 LPFC_SLI_INTF_IF_TYPE_2) 2280 return -ENODEV; 2281 2282 /* clear loopback diagnostic mode */ 2283 spin_lock_irq(&phba->hbalock); 2284 phba->link_flag &= ~LS_LOOPBACK_MODE; 2285 spin_unlock_irq(&phba->hbalock); 2286 loopback_mode_end_cmd = (struct diag_mode_set *) 2287 bsg_request->rqst_data.h_vendor.vendor_cmd; 2288 timeout = loopback_mode_end_cmd->timeout * 100; 2289 2290 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0); 2291 if (rc) { 2292 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2293 "3139 Failed to bring link to diagnostic " 2294 "state, rc:x%x\n", rc); 2295 goto loopback_mode_end_exit; 2296 } 2297 2298 /* wait for link down before proceeding */ 2299 i = 0; 2300 while (phba->link_state != LPFC_LINK_DOWN) { 2301 if (i++ > timeout) { 2302 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 2303 "3140 Timeout waiting for link to " 2304 "diagnostic mode_end, timeout:%d ms\n", 2305 timeout * 10); 2306 /* there is nothing much we can do here */ 2307 break; 2308 } 2309 msleep(10); 2310 } 2311 2312 /* reset port resource registrations */ 2313 rc = lpfc_selective_reset(phba); 2314 phba->pport->fc_myDID = 0; 2315 2316 loopback_mode_end_exit: 2317 /* make return code available to userspace */ 2318 bsg_reply->result = rc; 2319 /* complete the job back to userspace if no error */ 2320 if (rc == 0) 2321 bsg_job_done(job, bsg_reply->result, 2322 bsg_reply->reply_payload_rcv_len); 2323 return rc; 2324 } 2325 2326 /** 2327 * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test 2328 * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST 2329 * 2330 * This function is to perform SLI4 diag link test request from the user 2331 * applicaiton. 2332 */ 2333 static int 2334 lpfc_sli4_bsg_link_diag_test(struct bsg_job *job) 2335 { 2336 struct fc_bsg_request *bsg_request = job->request; 2337 struct fc_bsg_reply *bsg_reply = job->reply; 2338 struct Scsi_Host *shost; 2339 struct lpfc_vport *vport; 2340 struct lpfc_hba *phba; 2341 LPFC_MBOXQ_t *pmboxq; 2342 struct sli4_link_diag *link_diag_test_cmd; 2343 uint32_t req_len, alloc_len; 2344 struct lpfc_mbx_run_link_diag_test *run_link_diag_test; 2345 union lpfc_sli4_cfg_shdr *shdr; 2346 uint32_t shdr_status, shdr_add_status; 2347 struct diag_status *diag_status_reply; 2348 int mbxstatus, rc = -ENODEV, rc1 = 0; 2349 2350 shost = fc_bsg_to_shost(job); 2351 if (!shost) 2352 goto job_error; 2353 2354 vport = shost_priv(shost); 2355 if (!vport) 2356 goto job_error; 2357 2358 phba = vport->phba; 2359 if (!phba) 2360 goto job_error; 2361 2362 2363 if (phba->sli_rev < LPFC_SLI_REV4) 2364 goto job_error; 2365 2366 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 2367 LPFC_SLI_INTF_IF_TYPE_2) 2368 goto job_error; 2369 2370 if (job->request_len < sizeof(struct fc_bsg_request) + 2371 sizeof(struct sli4_link_diag)) { 2372 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2373 "3013 Received LINK DIAG TEST request " 2374 " size:%d below the minimum size:%d\n", 2375 job->request_len, 2376 (int)(sizeof(struct fc_bsg_request) + 2377 sizeof(struct sli4_link_diag))); 2378 rc = -EINVAL; 2379 goto job_error; 2380 } 2381 2382 rc = lpfc_bsg_diag_mode_enter(phba); 2383 if (rc) 2384 goto job_error; 2385 2386 link_diag_test_cmd = (struct sli4_link_diag *) 2387 bsg_request->rqst_data.h_vendor.vendor_cmd; 2388 2389 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1); 2390 2391 if (rc) 2392 goto job_error; 2393 2394 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2395 if (!pmboxq) { 2396 rc = -ENOMEM; 2397 goto link_diag_test_exit; 2398 } 2399 2400 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) - 2401 sizeof(struct lpfc_sli4_cfg_mhdr)); 2402 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 2403 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE, 2404 req_len, LPFC_SLI4_MBX_EMBED); 2405 if (alloc_len != req_len) { 2406 rc = -ENOMEM; 2407 goto link_diag_test_exit; 2408 } 2409 2410 run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test; 2411 bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req, 2412 phba->sli4_hba.lnk_info.lnk_no); 2413 bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req, 2414 phba->sli4_hba.lnk_info.lnk_tp); 2415 bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req, 2416 link_diag_test_cmd->test_id); 2417 bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req, 2418 link_diag_test_cmd->loops); 2419 bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req, 2420 link_diag_test_cmd->test_version); 2421 bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req, 2422 link_diag_test_cmd->error_action); 2423 2424 mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 2425 2426 shdr = (union lpfc_sli4_cfg_shdr *) 2427 &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr; 2428 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 2429 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 2430 if (shdr_status || shdr_add_status || mbxstatus) { 2431 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 2432 "3010 Run link diag test mailbox failed with " 2433 "mbx_status x%x status x%x, add_status x%x\n", 2434 mbxstatus, shdr_status, shdr_add_status); 2435 } 2436 2437 diag_status_reply = (struct diag_status *) 2438 bsg_reply->reply_data.vendor_reply.vendor_rsp; 2439 2440 if (job->reply_len < sizeof(*bsg_reply) + sizeof(*diag_status_reply)) { 2441 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2442 "3012 Received Run link diag test reply " 2443 "below minimum size (%d): reply_len:%d\n", 2444 (int)(sizeof(*bsg_reply) + 2445 sizeof(*diag_status_reply)), 2446 job->reply_len); 2447 rc = -EINVAL; 2448 goto job_error; 2449 } 2450 2451 diag_status_reply->mbox_status = mbxstatus; 2452 diag_status_reply->shdr_status = shdr_status; 2453 diag_status_reply->shdr_add_status = shdr_add_status; 2454 2455 link_diag_test_exit: 2456 rc1 = lpfc_sli4_bsg_set_link_diag_state(phba, 0); 2457 2458 if (pmboxq) 2459 mempool_free(pmboxq, phba->mbox_mem_pool); 2460 2461 lpfc_bsg_diag_mode_exit(phba); 2462 2463 job_error: 2464 /* make error code available to userspace */ 2465 if (rc1 && !rc) 2466 rc = rc1; 2467 bsg_reply->result = rc; 2468 /* complete the job back to userspace if no error */ 2469 if (rc == 0) 2470 bsg_job_done(job, bsg_reply->result, 2471 bsg_reply->reply_payload_rcv_len); 2472 return rc; 2473 } 2474 2475 /** 2476 * lpfcdiag_loop_self_reg - obtains a remote port login id 2477 * @phba: Pointer to HBA context object 2478 * @rpi: Pointer to a remote port login id 2479 * 2480 * This function obtains a remote port login id so the diag loopback test 2481 * can send and receive its own unsolicited CT command. 2482 **/ 2483 static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi) 2484 { 2485 LPFC_MBOXQ_t *mbox; 2486 struct lpfc_dmabuf *dmabuff; 2487 int status; 2488 2489 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2490 if (!mbox) 2491 return -ENOMEM; 2492 2493 if (phba->sli_rev < LPFC_SLI_REV4) 2494 status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID, 2495 (uint8_t *)&phba->pport->fc_sparam, 2496 mbox, *rpi); 2497 else { 2498 *rpi = lpfc_sli4_alloc_rpi(phba); 2499 if (*rpi == LPFC_RPI_ALLOC_ERROR) { 2500 mempool_free(mbox, phba->mbox_mem_pool); 2501 return -EBUSY; 2502 } 2503 status = lpfc_reg_rpi(phba, phba->pport->vpi, 2504 phba->pport->fc_myDID, 2505 (uint8_t *)&phba->pport->fc_sparam, 2506 mbox, *rpi); 2507 } 2508 2509 if (status) { 2510 mempool_free(mbox, phba->mbox_mem_pool); 2511 if (phba->sli_rev == LPFC_SLI_REV4) 2512 lpfc_sli4_free_rpi(phba, *rpi); 2513 return -ENOMEM; 2514 } 2515 2516 dmabuff = mbox->ctx_buf; 2517 mbox->ctx_buf = NULL; 2518 mbox->ctx_ndlp = NULL; 2519 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 2520 2521 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) { 2522 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys); 2523 kfree(dmabuff); 2524 if (status != MBX_TIMEOUT) 2525 mempool_free(mbox, phba->mbox_mem_pool); 2526 if (phba->sli_rev == LPFC_SLI_REV4) 2527 lpfc_sli4_free_rpi(phba, *rpi); 2528 return -ENODEV; 2529 } 2530 2531 if (phba->sli_rev < LPFC_SLI_REV4) 2532 *rpi = mbox->u.mb.un.varWords[0]; 2533 2534 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys); 2535 kfree(dmabuff); 2536 mempool_free(mbox, phba->mbox_mem_pool); 2537 return 0; 2538 } 2539 2540 /** 2541 * lpfcdiag_loop_self_unreg - unregs from the rpi 2542 * @phba: Pointer to HBA context object 2543 * @rpi: Remote port login id 2544 * 2545 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg 2546 **/ 2547 static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi) 2548 { 2549 LPFC_MBOXQ_t *mbox; 2550 int status; 2551 2552 /* Allocate mboxq structure */ 2553 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2554 if (mbox == NULL) 2555 return -ENOMEM; 2556 2557 if (phba->sli_rev < LPFC_SLI_REV4) 2558 lpfc_unreg_login(phba, 0, rpi, mbox); 2559 else 2560 lpfc_unreg_login(phba, phba->pport->vpi, 2561 phba->sli4_hba.rpi_ids[rpi], mbox); 2562 2563 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 2564 2565 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) { 2566 if (status != MBX_TIMEOUT) 2567 mempool_free(mbox, phba->mbox_mem_pool); 2568 return -EIO; 2569 } 2570 mempool_free(mbox, phba->mbox_mem_pool); 2571 if (phba->sli_rev == LPFC_SLI_REV4) 2572 lpfc_sli4_free_rpi(phba, rpi); 2573 return 0; 2574 } 2575 2576 /** 2577 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids 2578 * @phba: Pointer to HBA context object 2579 * @rpi: Remote port login id 2580 * @txxri: Pointer to transmit exchange id 2581 * @rxxri: Pointer to response exchabge id 2582 * 2583 * This function obtains the transmit and receive ids required to send 2584 * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp 2585 * flags are used to the unsolicited response handler is able to process 2586 * the ct command sent on the same port. 2587 **/ 2588 static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, 2589 uint16_t *txxri, uint16_t * rxxri) 2590 { 2591 struct lpfc_bsg_event *evt; 2592 struct lpfc_iocbq *cmdiocbq, *rspiocbq; 2593 struct lpfc_dmabuf *dmabuf; 2594 struct ulp_bde64 *bpl = NULL; 2595 struct lpfc_sli_ct_request *ctreq = NULL; 2596 int ret_val = 0; 2597 int time_left; 2598 int iocb_stat = IOCB_SUCCESS; 2599 unsigned long flags; 2600 u32 status; 2601 2602 *txxri = 0; 2603 *rxxri = 0; 2604 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid, 2605 SLI_CT_ELX_LOOPBACK); 2606 if (!evt) 2607 return -ENOMEM; 2608 2609 spin_lock_irqsave(&phba->ct_ev_lock, flags); 2610 list_add(&evt->node, &phba->ct_ev_waiters); 2611 lpfc_bsg_event_ref(evt); 2612 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2613 2614 cmdiocbq = lpfc_sli_get_iocbq(phba); 2615 rspiocbq = lpfc_sli_get_iocbq(phba); 2616 2617 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2618 if (dmabuf) { 2619 dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys); 2620 if (dmabuf->virt) { 2621 INIT_LIST_HEAD(&dmabuf->list); 2622 bpl = (struct ulp_bde64 *) dmabuf->virt; 2623 memset(bpl, 0, sizeof(*bpl)); 2624 ctreq = (struct lpfc_sli_ct_request *)(bpl + 1); 2625 bpl->addrHigh = 2626 le32_to_cpu(putPaddrHigh(dmabuf->phys + 2627 sizeof(*bpl))); 2628 bpl->addrLow = 2629 le32_to_cpu(putPaddrLow(dmabuf->phys + 2630 sizeof(*bpl))); 2631 bpl->tus.f.bdeFlags = 0; 2632 bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ; 2633 bpl->tus.w = le32_to_cpu(bpl->tus.w); 2634 } 2635 } 2636 2637 if (cmdiocbq == NULL || rspiocbq == NULL || 2638 dmabuf == NULL || bpl == NULL || ctreq == NULL || 2639 dmabuf->virt == NULL) { 2640 ret_val = -ENOMEM; 2641 goto err_get_xri_exit; 2642 } 2643 2644 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ); 2645 2646 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; 2647 ctreq->RevisionId.bits.InId = 0; 2648 ctreq->FsType = SLI_CT_ELX_LOOPBACK; 2649 ctreq->FsSubType = 0; 2650 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP; 2651 ctreq->CommandResponse.bits.Size = 0; 2652 2653 cmdiocbq->bpl_dmabuf = dmabuf; 2654 cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC; 2655 cmdiocbq->vport = phba->pport; 2656 cmdiocbq->cmd_cmpl = NULL; 2657 2658 lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, dmabuf, rpi, 0, 1, 2659 FC_RCTL_DD_SOL_CTL, 0, CMD_XMIT_SEQUENCE64_CR); 2660 2661 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, 2662 rspiocbq, (phba->fc_ratov * 2) 2663 + LPFC_DRVR_TIMEOUT); 2664 2665 status = get_job_ulpstatus(phba, rspiocbq); 2666 if (iocb_stat != IOCB_SUCCESS || status != IOCB_SUCCESS) { 2667 ret_val = -EIO; 2668 goto err_get_xri_exit; 2669 } 2670 *txxri = get_job_ulpcontext(phba, rspiocbq); 2671 2672 evt->waiting = 1; 2673 evt->wait_time_stamp = jiffies; 2674 time_left = wait_event_interruptible_timeout( 2675 evt->wq, !list_empty(&evt->events_to_see), 2676 msecs_to_jiffies(1000 * 2677 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); 2678 if (list_empty(&evt->events_to_see)) 2679 ret_val = (time_left) ? -EINTR : -ETIMEDOUT; 2680 else { 2681 spin_lock_irqsave(&phba->ct_ev_lock, flags); 2682 list_move(evt->events_to_see.prev, &evt->events_to_get); 2683 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2684 *rxxri = (list_entry(evt->events_to_get.prev, 2685 typeof(struct event_data), 2686 node))->immed_dat; 2687 } 2688 evt->waiting = 0; 2689 2690 err_get_xri_exit: 2691 spin_lock_irqsave(&phba->ct_ev_lock, flags); 2692 lpfc_bsg_event_unref(evt); /* release ref */ 2693 lpfc_bsg_event_unref(evt); /* delete */ 2694 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2695 2696 if (dmabuf) { 2697 if (dmabuf->virt) 2698 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); 2699 kfree(dmabuf); 2700 } 2701 2702 if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT)) 2703 lpfc_sli_release_iocbq(phba, cmdiocbq); 2704 if (rspiocbq) 2705 lpfc_sli_release_iocbq(phba, rspiocbq); 2706 return ret_val; 2707 } 2708 2709 /** 2710 * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffers 2711 * @phba: Pointer to HBA context object 2712 * 2713 * This function allocates BSG_MBOX_SIZE (4KB) page size dma buffer and 2714 * returns the pointer to the buffer. 2715 **/ 2716 static struct lpfc_dmabuf * 2717 lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba) 2718 { 2719 struct lpfc_dmabuf *dmabuf; 2720 struct pci_dev *pcidev = phba->pcidev; 2721 2722 /* allocate dma buffer struct */ 2723 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2724 if (!dmabuf) 2725 return NULL; 2726 2727 INIT_LIST_HEAD(&dmabuf->list); 2728 2729 /* now, allocate dma buffer */ 2730 dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE, 2731 &(dmabuf->phys), GFP_KERNEL); 2732 2733 if (!dmabuf->virt) { 2734 kfree(dmabuf); 2735 return NULL; 2736 } 2737 2738 return dmabuf; 2739 } 2740 2741 /** 2742 * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer 2743 * @phba: Pointer to HBA context object. 2744 * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor. 2745 * 2746 * This routine just simply frees a dma buffer and its associated buffer 2747 * descriptor referred by @dmabuf. 2748 **/ 2749 static void 2750 lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf) 2751 { 2752 struct pci_dev *pcidev = phba->pcidev; 2753 2754 if (!dmabuf) 2755 return; 2756 2757 if (dmabuf->virt) 2758 dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE, 2759 dmabuf->virt, dmabuf->phys); 2760 kfree(dmabuf); 2761 return; 2762 } 2763 2764 /** 2765 * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers 2766 * @phba: Pointer to HBA context object. 2767 * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs. 2768 * 2769 * This routine just simply frees all dma buffers and their associated buffer 2770 * descriptors referred by @dmabuf_list. 2771 **/ 2772 static void 2773 lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba, 2774 struct list_head *dmabuf_list) 2775 { 2776 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 2777 2778 if (list_empty(dmabuf_list)) 2779 return; 2780 2781 list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) { 2782 list_del_init(&dmabuf->list); 2783 lpfc_bsg_dma_page_free(phba, dmabuf); 2784 } 2785 return; 2786 } 2787 2788 /** 2789 * diag_cmd_data_alloc - fills in a bde struct with dma buffers 2790 * @phba: Pointer to HBA context object 2791 * @bpl: Pointer to 64 bit bde structure 2792 * @size: Number of bytes to process 2793 * @nocopydata: Flag to copy user data into the allocated buffer 2794 * 2795 * This function allocates page size buffers and populates an lpfc_dmabufext. 2796 * If allowed the user data pointed to with indataptr is copied into the kernel 2797 * memory. The chained list of page size buffers is returned. 2798 **/ 2799 static struct lpfc_dmabufext * 2800 diag_cmd_data_alloc(struct lpfc_hba *phba, 2801 struct ulp_bde64 *bpl, uint32_t size, 2802 int nocopydata) 2803 { 2804 struct lpfc_dmabufext *mlist = NULL; 2805 struct lpfc_dmabufext *dmp; 2806 int cnt, offset = 0, i = 0; 2807 struct pci_dev *pcidev; 2808 2809 pcidev = phba->pcidev; 2810 2811 while (size) { 2812 /* We get chunks of 4K */ 2813 if (size > BUF_SZ_4K) 2814 cnt = BUF_SZ_4K; 2815 else 2816 cnt = size; 2817 2818 /* allocate struct lpfc_dmabufext buffer header */ 2819 dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL); 2820 if (!dmp) 2821 goto out; 2822 2823 INIT_LIST_HEAD(&dmp->dma.list); 2824 2825 /* Queue it to a linked list */ 2826 if (mlist) 2827 list_add_tail(&dmp->dma.list, &mlist->dma.list); 2828 else 2829 mlist = dmp; 2830 2831 /* allocate buffer */ 2832 dmp->dma.virt = dma_alloc_coherent(&pcidev->dev, 2833 cnt, 2834 &(dmp->dma.phys), 2835 GFP_KERNEL); 2836 2837 if (!dmp->dma.virt) 2838 goto out; 2839 2840 dmp->size = cnt; 2841 2842 if (nocopydata) { 2843 bpl->tus.f.bdeFlags = 0; 2844 } else { 2845 memset((uint8_t *)dmp->dma.virt, 0, cnt); 2846 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 2847 } 2848 2849 /* build buffer ptr list for IOCB */ 2850 bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys)); 2851 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys)); 2852 bpl->tus.f.bdeSize = (ushort) cnt; 2853 bpl->tus.w = le32_to_cpu(bpl->tus.w); 2854 bpl++; 2855 2856 i++; 2857 offset += cnt; 2858 size -= cnt; 2859 } 2860 2861 if (mlist) { 2862 mlist->flag = i; 2863 return mlist; 2864 } 2865 out: 2866 diag_cmd_data_free(phba, mlist); 2867 return NULL; 2868 } 2869 2870 /** 2871 * lpfcdiag_sli3_loop_post_rxbufs - post the receive buffers for an unsol CT cmd 2872 * @phba: Pointer to HBA context object 2873 * @rxxri: Receive exchange id 2874 * @len: Number of data bytes 2875 * 2876 * This function allocates and posts a data buffer of sufficient size to receive 2877 * an unsolicited CT command. 2878 **/ 2879 static int lpfcdiag_sli3_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, 2880 size_t len) 2881 { 2882 struct lpfc_sli_ring *pring; 2883 struct lpfc_iocbq *cmdiocbq; 2884 IOCB_t *cmd = NULL; 2885 struct list_head head, *curr, *next; 2886 struct lpfc_dmabuf *rxbmp; 2887 struct lpfc_dmabuf *dmp; 2888 struct lpfc_dmabuf *mp[2] = {NULL, NULL}; 2889 struct ulp_bde64 *rxbpl = NULL; 2890 uint32_t num_bde; 2891 struct lpfc_dmabufext *rxbuffer = NULL; 2892 int ret_val = 0; 2893 int iocb_stat; 2894 int i = 0; 2895 2896 pring = lpfc_phba_elsring(phba); 2897 2898 cmdiocbq = lpfc_sli_get_iocbq(phba); 2899 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2900 if (rxbmp != NULL) { 2901 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); 2902 if (rxbmp->virt) { 2903 INIT_LIST_HEAD(&rxbmp->list); 2904 rxbpl = (struct ulp_bde64 *) rxbmp->virt; 2905 rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0); 2906 } 2907 } 2908 2909 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer || !pring) { 2910 ret_val = -ENOMEM; 2911 goto err_post_rxbufs_exit; 2912 } 2913 2914 /* Queue buffers for the receive exchange */ 2915 num_bde = (uint32_t)rxbuffer->flag; 2916 dmp = &rxbuffer->dma; 2917 cmd = &cmdiocbq->iocb; 2918 i = 0; 2919 2920 INIT_LIST_HEAD(&head); 2921 list_add_tail(&head, &dmp->list); 2922 list_for_each_safe(curr, next, &head) { 2923 mp[i] = list_entry(curr, struct lpfc_dmabuf, list); 2924 list_del(curr); 2925 2926 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2927 mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba); 2928 cmd->un.quexri64cx.buff.bde.addrHigh = 2929 putPaddrHigh(mp[i]->phys); 2930 cmd->un.quexri64cx.buff.bde.addrLow = 2931 putPaddrLow(mp[i]->phys); 2932 cmd->un.quexri64cx.buff.bde.tus.f.bdeSize = 2933 ((struct lpfc_dmabufext *)mp[i])->size; 2934 cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag; 2935 cmd->ulpCommand = CMD_QUE_XRI64_CX; 2936 cmd->ulpPU = 0; 2937 cmd->ulpLe = 1; 2938 cmd->ulpBdeCount = 1; 2939 cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0; 2940 2941 } else { 2942 cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys); 2943 cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys); 2944 cmd->un.cont64[i].tus.f.bdeSize = 2945 ((struct lpfc_dmabufext *)mp[i])->size; 2946 cmd->ulpBdeCount = ++i; 2947 2948 if ((--num_bde > 0) && (i < 2)) 2949 continue; 2950 2951 cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX; 2952 cmd->ulpLe = 1; 2953 } 2954 2955 cmd->ulpClass = CLASS3; 2956 cmd->ulpContext = rxxri; 2957 2958 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 2959 0); 2960 if (iocb_stat == IOCB_ERROR) { 2961 diag_cmd_data_free(phba, 2962 (struct lpfc_dmabufext *)mp[0]); 2963 if (mp[1]) 2964 diag_cmd_data_free(phba, 2965 (struct lpfc_dmabufext *)mp[1]); 2966 dmp = list_entry(next, struct lpfc_dmabuf, list); 2967 ret_val = -EIO; 2968 goto err_post_rxbufs_exit; 2969 } 2970 2971 lpfc_sli_ringpostbuf_put(phba, pring, mp[0]); 2972 if (mp[1]) { 2973 lpfc_sli_ringpostbuf_put(phba, pring, mp[1]); 2974 mp[1] = NULL; 2975 } 2976 2977 /* The iocb was freed by lpfc_sli_issue_iocb */ 2978 cmdiocbq = lpfc_sli_get_iocbq(phba); 2979 if (!cmdiocbq) { 2980 dmp = list_entry(next, struct lpfc_dmabuf, list); 2981 ret_val = -EIO; 2982 goto err_post_rxbufs_exit; 2983 } 2984 cmd = &cmdiocbq->iocb; 2985 i = 0; 2986 } 2987 list_del(&head); 2988 2989 err_post_rxbufs_exit: 2990 2991 if (rxbmp) { 2992 if (rxbmp->virt) 2993 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys); 2994 kfree(rxbmp); 2995 } 2996 2997 if (cmdiocbq) 2998 lpfc_sli_release_iocbq(phba, cmdiocbq); 2999 return ret_val; 3000 } 3001 3002 /** 3003 * lpfc_bsg_diag_loopback_run - run loopback on a port by issue ct cmd to itself 3004 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job 3005 * 3006 * This function receives a user data buffer to be transmitted and received on 3007 * the same port, the link must be up and in loopback mode prior 3008 * to being called. 3009 * 1. A kernel buffer is allocated to copy the user data into. 3010 * 2. The port registers with "itself". 3011 * 3. The transmit and receive exchange ids are obtained. 3012 * 4. The receive exchange id is posted. 3013 * 5. A new els loopback event is created. 3014 * 6. The command and response iocbs are allocated. 3015 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to looppback. 3016 * 3017 * This function is meant to be called n times while the port is in loopback 3018 * so it is the apps responsibility to issue a reset to take the port out 3019 * of loopback mode. 3020 **/ 3021 static int 3022 lpfc_bsg_diag_loopback_run(struct bsg_job *job) 3023 { 3024 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); 3025 struct fc_bsg_reply *bsg_reply = job->reply; 3026 struct lpfc_hba *phba = vport->phba; 3027 struct lpfc_bsg_event *evt; 3028 struct event_data *evdat; 3029 struct lpfc_sli *psli = &phba->sli; 3030 uint32_t size; 3031 uint32_t full_size; 3032 size_t segment_len = 0, segment_offset = 0, current_offset = 0; 3033 uint16_t rpi = 0; 3034 struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL; 3035 union lpfc_wqe128 *cmdwqe, *rspwqe; 3036 struct lpfc_sli_ct_request *ctreq; 3037 struct lpfc_dmabuf *txbmp; 3038 struct ulp_bde64 *txbpl = NULL; 3039 struct lpfc_dmabufext *txbuffer = NULL; 3040 struct list_head head; 3041 struct lpfc_dmabuf *curr; 3042 uint16_t txxri = 0, rxxri; 3043 uint32_t num_bde; 3044 uint8_t *ptr = NULL, *rx_databuf = NULL; 3045 int rc = 0; 3046 int time_left; 3047 int iocb_stat = IOCB_SUCCESS; 3048 unsigned long flags; 3049 void *dataout = NULL; 3050 uint32_t total_mem; 3051 3052 /* in case no data is returned return just the return code */ 3053 bsg_reply->reply_payload_rcv_len = 0; 3054 3055 if (job->request_len < 3056 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) { 3057 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3058 "2739 Received DIAG TEST request below minimum " 3059 "size\n"); 3060 rc = -EINVAL; 3061 goto loopback_test_exit; 3062 } 3063 3064 if (job->request_payload.payload_len != 3065 job->reply_payload.payload_len) { 3066 rc = -EINVAL; 3067 goto loopback_test_exit; 3068 } 3069 3070 if ((phba->link_state == LPFC_HBA_ERROR) || 3071 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) || 3072 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) { 3073 rc = -EACCES; 3074 goto loopback_test_exit; 3075 } 3076 3077 if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) { 3078 rc = -EACCES; 3079 goto loopback_test_exit; 3080 } 3081 3082 size = job->request_payload.payload_len; 3083 full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */ 3084 3085 if ((size == 0) || (size > 80 * BUF_SZ_4K)) { 3086 rc = -ERANGE; 3087 goto loopback_test_exit; 3088 } 3089 3090 if (full_size >= BUF_SZ_4K) { 3091 /* 3092 * Allocate memory for ioctl data. If buffer is bigger than 64k, 3093 * then we allocate 64k and re-use that buffer over and over to 3094 * xfer the whole block. This is because Linux kernel has a 3095 * problem allocating more than 120k of kernel space memory. Saw 3096 * problem with GET_FCPTARGETMAPPING... 3097 */ 3098 if (size <= (64 * 1024)) 3099 total_mem = full_size; 3100 else 3101 total_mem = 64 * 1024; 3102 } else 3103 /* Allocate memory for ioctl data */ 3104 total_mem = BUF_SZ_4K; 3105 3106 dataout = kmalloc(total_mem, GFP_KERNEL); 3107 if (dataout == NULL) { 3108 rc = -ENOMEM; 3109 goto loopback_test_exit; 3110 } 3111 3112 ptr = dataout; 3113 ptr += ELX_LOOPBACK_HEADER_SZ; 3114 sg_copy_to_buffer(job->request_payload.sg_list, 3115 job->request_payload.sg_cnt, 3116 ptr, size); 3117 rc = lpfcdiag_loop_self_reg(phba, &rpi); 3118 if (rc) 3119 goto loopback_test_exit; 3120 3121 if (phba->sli_rev < LPFC_SLI_REV4) { 3122 rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri); 3123 if (rc) { 3124 lpfcdiag_loop_self_unreg(phba, rpi); 3125 goto loopback_test_exit; 3126 } 3127 3128 rc = lpfcdiag_sli3_loop_post_rxbufs(phba, rxxri, full_size); 3129 if (rc) { 3130 lpfcdiag_loop_self_unreg(phba, rpi); 3131 goto loopback_test_exit; 3132 } 3133 } 3134 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid, 3135 SLI_CT_ELX_LOOPBACK); 3136 if (!evt) { 3137 lpfcdiag_loop_self_unreg(phba, rpi); 3138 rc = -ENOMEM; 3139 goto loopback_test_exit; 3140 } 3141 3142 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3143 list_add(&evt->node, &phba->ct_ev_waiters); 3144 lpfc_bsg_event_ref(evt); 3145 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3146 3147 cmdiocbq = lpfc_sli_get_iocbq(phba); 3148 if (phba->sli_rev < LPFC_SLI_REV4) 3149 rspiocbq = lpfc_sli_get_iocbq(phba); 3150 txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 3151 3152 if (txbmp) { 3153 txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys); 3154 if (txbmp->virt) { 3155 INIT_LIST_HEAD(&txbmp->list); 3156 txbpl = (struct ulp_bde64 *) txbmp->virt; 3157 txbuffer = diag_cmd_data_alloc(phba, 3158 txbpl, full_size, 0); 3159 } 3160 } 3161 3162 if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) { 3163 rc = -ENOMEM; 3164 goto err_loopback_test_exit; 3165 } 3166 if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) { 3167 rc = -ENOMEM; 3168 goto err_loopback_test_exit; 3169 } 3170 3171 cmdwqe = &cmdiocbq->wqe; 3172 memset(cmdwqe, 0, sizeof(*cmdwqe)); 3173 if (phba->sli_rev < LPFC_SLI_REV4) { 3174 rspwqe = &rspiocbq->wqe; 3175 memset(rspwqe, 0, sizeof(*rspwqe)); 3176 } 3177 3178 INIT_LIST_HEAD(&head); 3179 list_add_tail(&head, &txbuffer->dma.list); 3180 list_for_each_entry(curr, &head, list) { 3181 segment_len = ((struct lpfc_dmabufext *)curr)->size; 3182 if (current_offset == 0) { 3183 ctreq = curr->virt; 3184 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ); 3185 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; 3186 ctreq->RevisionId.bits.InId = 0; 3187 ctreq->FsType = SLI_CT_ELX_LOOPBACK; 3188 ctreq->FsSubType = 0; 3189 ctreq->CommandResponse.bits.CmdRsp = cpu_to_be16(ELX_LOOPBACK_DATA); 3190 ctreq->CommandResponse.bits.Size = cpu_to_be16(size); 3191 segment_offset = ELX_LOOPBACK_HEADER_SZ; 3192 } else 3193 segment_offset = 0; 3194 3195 BUG_ON(segment_offset >= segment_len); 3196 memcpy(curr->virt + segment_offset, 3197 ptr + current_offset, 3198 segment_len - segment_offset); 3199 3200 current_offset += segment_len - segment_offset; 3201 BUG_ON(current_offset > size); 3202 } 3203 list_del(&head); 3204 3205 /* Build the XMIT_SEQUENCE iocb */ 3206 num_bde = (uint32_t)txbuffer->flag; 3207 3208 cmdiocbq->num_bdes = num_bde; 3209 cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC; 3210 cmdiocbq->cmd_flag |= LPFC_IO_LOOPBACK; 3211 if (phba->cfg_vmid_app_header) 3212 cmdiocbq->cmd_flag |= LPFC_IO_VMID; 3213 3214 cmdiocbq->vport = phba->pport; 3215 cmdiocbq->cmd_cmpl = NULL; 3216 cmdiocbq->bpl_dmabuf = txbmp; 3217 3218 if (phba->sli_rev < LPFC_SLI_REV4) { 3219 lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp, 0, txxri, 3220 num_bde, FC_RCTL_DD_UNSOL_CTL, 1, 3221 CMD_XMIT_SEQUENCE64_CX); 3222 3223 } else { 3224 lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp, 3225 phba->sli4_hba.rpi_ids[rpi], 0xffff, 3226 full_size, FC_RCTL_DD_UNSOL_CTL, 1, 3227 CMD_XMIT_SEQUENCE64_WQE); 3228 cmdiocbq->sli4_xritag = NO_XRI; 3229 } 3230 3231 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, 3232 rspiocbq, (phba->fc_ratov * 2) + 3233 LPFC_DRVR_TIMEOUT); 3234 if (iocb_stat != IOCB_SUCCESS || 3235 (phba->sli_rev < LPFC_SLI_REV4 && 3236 (get_job_ulpstatus(phba, rspiocbq) != IOSTAT_SUCCESS))) { 3237 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3238 "3126 Failed loopback test issue iocb: " 3239 "iocb_stat:x%x\n", iocb_stat); 3240 rc = -EIO; 3241 goto err_loopback_test_exit; 3242 } 3243 3244 evt->waiting = 1; 3245 time_left = wait_event_interruptible_timeout( 3246 evt->wq, !list_empty(&evt->events_to_see), 3247 msecs_to_jiffies(1000 * 3248 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); 3249 evt->waiting = 0; 3250 if (list_empty(&evt->events_to_see)) { 3251 rc = (time_left) ? -EINTR : -ETIMEDOUT; 3252 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3253 "3125 Not receiving unsolicited event, " 3254 "rc:x%x\n", rc); 3255 } else { 3256 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3257 list_move(evt->events_to_see.prev, &evt->events_to_get); 3258 evdat = list_entry(evt->events_to_get.prev, 3259 typeof(*evdat), node); 3260 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3261 rx_databuf = evdat->data; 3262 if (evdat->len != full_size) { 3263 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3264 "1603 Loopback test did not receive expected " 3265 "data length. actual length 0x%x expected " 3266 "length 0x%x\n", 3267 evdat->len, full_size); 3268 rc = -EIO; 3269 } else if (rx_databuf == NULL) 3270 rc = -EIO; 3271 else { 3272 rc = IOCB_SUCCESS; 3273 /* skip over elx loopback header */ 3274 rx_databuf += ELX_LOOPBACK_HEADER_SZ; 3275 bsg_reply->reply_payload_rcv_len = 3276 sg_copy_from_buffer(job->reply_payload.sg_list, 3277 job->reply_payload.sg_cnt, 3278 rx_databuf, size); 3279 bsg_reply->reply_payload_rcv_len = size; 3280 } 3281 } 3282 3283 err_loopback_test_exit: 3284 lpfcdiag_loop_self_unreg(phba, rpi); 3285 3286 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3287 lpfc_bsg_event_unref(evt); /* release ref */ 3288 lpfc_bsg_event_unref(evt); /* delete */ 3289 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3290 3291 if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT)) 3292 lpfc_sli_release_iocbq(phba, cmdiocbq); 3293 3294 if (rspiocbq != NULL) 3295 lpfc_sli_release_iocbq(phba, rspiocbq); 3296 3297 if (txbmp != NULL) { 3298 if (txbpl != NULL) { 3299 if (txbuffer != NULL) 3300 diag_cmd_data_free(phba, txbuffer); 3301 lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys); 3302 } 3303 kfree(txbmp); 3304 } 3305 3306 loopback_test_exit: 3307 kfree(dataout); 3308 /* make error code available to userspace */ 3309 bsg_reply->result = rc; 3310 job->dd_data = NULL; 3311 /* complete the job back to userspace if no error */ 3312 if (rc == IOCB_SUCCESS) 3313 bsg_job_done(job, bsg_reply->result, 3314 bsg_reply->reply_payload_rcv_len); 3315 return rc; 3316 } 3317 3318 /** 3319 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command 3320 * @job: GET_DFC_REV fc_bsg_job 3321 **/ 3322 static int 3323 lpfc_bsg_get_dfc_rev(struct bsg_job *job) 3324 { 3325 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); 3326 struct fc_bsg_reply *bsg_reply = job->reply; 3327 struct lpfc_hba *phba = vport->phba; 3328 struct get_mgmt_rev_reply *event_reply; 3329 int rc = 0; 3330 3331 if (job->request_len < 3332 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) { 3333 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3334 "2740 Received GET_DFC_REV request below " 3335 "minimum size\n"); 3336 rc = -EINVAL; 3337 goto job_error; 3338 } 3339 3340 event_reply = (struct get_mgmt_rev_reply *) 3341 bsg_reply->reply_data.vendor_reply.vendor_rsp; 3342 3343 if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) { 3344 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3345 "2741 Received GET_DFC_REV reply below " 3346 "minimum size\n"); 3347 rc = -EINVAL; 3348 goto job_error; 3349 } 3350 3351 event_reply->info.a_Major = MANAGEMENT_MAJOR_REV; 3352 event_reply->info.a_Minor = MANAGEMENT_MINOR_REV; 3353 job_error: 3354 bsg_reply->result = rc; 3355 if (rc == 0) 3356 bsg_job_done(job, bsg_reply->result, 3357 bsg_reply->reply_payload_rcv_len); 3358 return rc; 3359 } 3360 3361 /** 3362 * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler 3363 * @phba: Pointer to HBA context object. 3364 * @pmboxq: Pointer to mailbox command. 3365 * 3366 * This is completion handler function for mailbox commands issued from 3367 * lpfc_bsg_issue_mbox function. This function is called by the 3368 * mailbox event handler function with no lock held. This function 3369 * will wake up thread waiting on the wait queue pointed by dd_data 3370 * of the mailbox. 3371 **/ 3372 static void 3373 lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3374 { 3375 struct bsg_job_data *dd_data; 3376 struct fc_bsg_reply *bsg_reply; 3377 struct bsg_job *job; 3378 uint32_t size; 3379 unsigned long flags; 3380 uint8_t *pmb, *pmb_buf; 3381 3382 dd_data = pmboxq->ctx_u.dd_data; 3383 3384 /* 3385 * The outgoing buffer is readily referred from the dma buffer, 3386 * just need to get header part from mailboxq structure. 3387 */ 3388 pmb = (uint8_t *)&pmboxq->u.mb; 3389 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb; 3390 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t)); 3391 3392 /* Determine if job has been aborted */ 3393 3394 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3395 job = dd_data->set_job; 3396 if (job) { 3397 /* Prevent timeout handling from trying to abort job */ 3398 job->dd_data = NULL; 3399 } 3400 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3401 3402 /* Copy the mailbox data to the job if it is still active */ 3403 3404 if (job) { 3405 bsg_reply = job->reply; 3406 size = job->reply_payload.payload_len; 3407 bsg_reply->reply_payload_rcv_len = 3408 sg_copy_from_buffer(job->reply_payload.sg_list, 3409 job->reply_payload.sg_cnt, 3410 pmb_buf, size); 3411 } 3412 3413 dd_data->set_job = NULL; 3414 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool); 3415 lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers); 3416 kfree(dd_data); 3417 3418 /* Complete the job if the job is still active */ 3419 3420 if (job) { 3421 bsg_reply->result = 0; 3422 bsg_job_done(job, bsg_reply->result, 3423 bsg_reply->reply_payload_rcv_len); 3424 } 3425 return; 3426 } 3427 3428 /** 3429 * lpfc_bsg_check_cmd_access - test for a supported mailbox command 3430 * @phba: Pointer to HBA context object. 3431 * @mb: Pointer to a mailbox object. 3432 * @vport: Pointer to a vport object. 3433 * 3434 * Some commands require the port to be offline, some may not be called from 3435 * the application. 3436 **/ 3437 static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba, 3438 MAILBOX_t *mb, struct lpfc_vport *vport) 3439 { 3440 /* return negative error values for bsg job */ 3441 switch (mb->mbxCommand) { 3442 /* Offline only */ 3443 case MBX_INIT_LINK: 3444 case MBX_DOWN_LINK: 3445 case MBX_CONFIG_LINK: 3446 case MBX_CONFIG_RING: 3447 case MBX_RESET_RING: 3448 case MBX_UNREG_LOGIN: 3449 case MBX_CLEAR_LA: 3450 case MBX_DUMP_CONTEXT: 3451 case MBX_RUN_DIAGS: 3452 case MBX_RESTART: 3453 case MBX_SET_MASK: 3454 if (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) { 3455 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3456 "2743 Command 0x%x is illegal in on-line " 3457 "state\n", 3458 mb->mbxCommand); 3459 return -EPERM; 3460 } 3461 break; 3462 case MBX_WRITE_NV: 3463 case MBX_WRITE_VPARMS: 3464 case MBX_LOAD_SM: 3465 case MBX_READ_NV: 3466 case MBX_READ_CONFIG: 3467 case MBX_READ_RCONFIG: 3468 case MBX_READ_STATUS: 3469 case MBX_READ_XRI: 3470 case MBX_READ_REV: 3471 case MBX_READ_LNK_STAT: 3472 case MBX_DUMP_MEMORY: 3473 case MBX_DOWN_LOAD: 3474 case MBX_UPDATE_CFG: 3475 case MBX_KILL_BOARD: 3476 case MBX_READ_TOPOLOGY: 3477 case MBX_LOAD_AREA: 3478 case MBX_LOAD_EXP_ROM: 3479 case MBX_BEACON: 3480 case MBX_DEL_LD_ENTRY: 3481 case MBX_SET_DEBUG: 3482 case MBX_WRITE_WWN: 3483 case MBX_SLI4_CONFIG: 3484 case MBX_READ_EVENT_LOG: 3485 case MBX_READ_EVENT_LOG_STATUS: 3486 case MBX_WRITE_EVENT_LOG: 3487 case MBX_PORT_CAPABILITIES: 3488 case MBX_PORT_IOV_CONTROL: 3489 case MBX_RUN_BIU_DIAG64: 3490 break; 3491 case MBX_SET_VARIABLE: 3492 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3493 "1226 mbox: set_variable 0x%x, 0x%x\n", 3494 mb->un.varWords[0], 3495 mb->un.varWords[1]); 3496 break; 3497 case MBX_READ_SPARM64: 3498 case MBX_REG_LOGIN: 3499 case MBX_REG_LOGIN64: 3500 case MBX_CONFIG_PORT: 3501 case MBX_RUN_BIU_DIAG: 3502 default: 3503 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3504 "2742 Unknown Command 0x%x\n", 3505 mb->mbxCommand); 3506 return -EPERM; 3507 } 3508 3509 return 0; /* ok */ 3510 } 3511 3512 /** 3513 * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session 3514 * @phba: Pointer to HBA context object. 3515 * 3516 * This is routine clean up and reset BSG handling of multi-buffer mbox 3517 * command session. 3518 **/ 3519 static void 3520 lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba) 3521 { 3522 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) 3523 return; 3524 3525 /* free all memory, including dma buffers */ 3526 lpfc_bsg_dma_page_list_free(phba, 3527 &phba->mbox_ext_buf_ctx.ext_dmabuf_list); 3528 lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf); 3529 /* multi-buffer write mailbox command pass-through complete */ 3530 memset((char *)&phba->mbox_ext_buf_ctx, 0, 3531 sizeof(struct lpfc_mbox_ext_buf_ctx)); 3532 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 3533 3534 return; 3535 } 3536 3537 /** 3538 * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl 3539 * @phba: Pointer to HBA context object. 3540 * @pmboxq: Pointer to mailbox command. 3541 * 3542 * This is routine handles BSG job for mailbox commands completions with 3543 * multiple external buffers. 3544 **/ 3545 static struct bsg_job * 3546 lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3547 { 3548 struct bsg_job_data *dd_data; 3549 struct bsg_job *job; 3550 struct fc_bsg_reply *bsg_reply; 3551 uint8_t *pmb, *pmb_buf; 3552 unsigned long flags; 3553 uint32_t size; 3554 int rc = 0; 3555 struct lpfc_dmabuf *dmabuf; 3556 struct lpfc_sli_config_mbox *sli_cfg_mbx; 3557 uint8_t *pmbx; 3558 3559 dd_data = pmboxq->ctx_u.dd_data; 3560 3561 /* Determine if job has been aborted */ 3562 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3563 job = dd_data->set_job; 3564 if (job) { 3565 bsg_reply = job->reply; 3566 /* Prevent timeout handling from trying to abort job */ 3567 job->dd_data = NULL; 3568 } 3569 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3570 3571 /* 3572 * The outgoing buffer is readily referred from the dma buffer, 3573 * just need to get header part from mailboxq structure. 3574 */ 3575 3576 pmb = (uint8_t *)&pmboxq->u.mb; 3577 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb; 3578 /* Copy the byte swapped response mailbox back to the user */ 3579 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t)); 3580 /* if there is any non-embedded extended data copy that too */ 3581 dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf; 3582 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; 3583 if (!bsg_bf_get(lpfc_mbox_hdr_emb, 3584 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) { 3585 pmbx = (uint8_t *)dmabuf->virt; 3586 /* byte swap the extended data following the mailbox command */ 3587 lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)], 3588 &pmbx[sizeof(MAILBOX_t)], 3589 sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len); 3590 } 3591 3592 /* Complete the job if the job is still active */ 3593 3594 if (job) { 3595 size = job->reply_payload.payload_len; 3596 bsg_reply->reply_payload_rcv_len = 3597 sg_copy_from_buffer(job->reply_payload.sg_list, 3598 job->reply_payload.sg_cnt, 3599 pmb_buf, size); 3600 3601 /* result for successful */ 3602 bsg_reply->result = 0; 3603 3604 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3605 "2937 SLI_CONFIG ext-buffer mailbox command " 3606 "(x%x/x%x) complete bsg job done, bsize:%d\n", 3607 phba->mbox_ext_buf_ctx.nembType, 3608 phba->mbox_ext_buf_ctx.mboxType, size); 3609 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, 3610 phba->mbox_ext_buf_ctx.nembType, 3611 phba->mbox_ext_buf_ctx.mboxType, 3612 dma_ebuf, sta_pos_addr, 3613 phba->mbox_ext_buf_ctx.mbx_dmabuf, 0); 3614 } else { 3615 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3616 "2938 SLI_CONFIG ext-buffer mailbox " 3617 "command (x%x/x%x) failure, rc:x%x\n", 3618 phba->mbox_ext_buf_ctx.nembType, 3619 phba->mbox_ext_buf_ctx.mboxType, rc); 3620 } 3621 3622 3623 /* state change */ 3624 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE; 3625 kfree(dd_data); 3626 return job; 3627 } 3628 3629 /** 3630 * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox 3631 * @phba: Pointer to HBA context object. 3632 * @pmboxq: Pointer to mailbox command. 3633 * 3634 * This is completion handler function for mailbox read commands with multiple 3635 * external buffers. 3636 **/ 3637 static void 3638 lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3639 { 3640 struct bsg_job *job; 3641 struct fc_bsg_reply *bsg_reply; 3642 3643 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); 3644 3645 /* handle the BSG job with mailbox command */ 3646 if (!job) 3647 pmboxq->u.mb.mbxStatus = MBXERR_ERROR; 3648 3649 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3650 "2939 SLI_CONFIG ext-buffer rd mailbox command " 3651 "complete, ctxState:x%x, mbxStatus:x%x\n", 3652 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); 3653 3654 if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1) 3655 lpfc_bsg_mbox_ext_session_reset(phba); 3656 3657 /* free base driver mailbox structure memory */ 3658 mempool_free(pmboxq, phba->mbox_mem_pool); 3659 3660 /* if the job is still active, call job done */ 3661 if (job) { 3662 bsg_reply = job->reply; 3663 bsg_job_done(job, bsg_reply->result, 3664 bsg_reply->reply_payload_rcv_len); 3665 } 3666 return; 3667 } 3668 3669 /** 3670 * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox 3671 * @phba: Pointer to HBA context object. 3672 * @pmboxq: Pointer to mailbox command. 3673 * 3674 * This is completion handler function for mailbox write commands with multiple 3675 * external buffers. 3676 **/ 3677 static void 3678 lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3679 { 3680 struct bsg_job *job; 3681 struct fc_bsg_reply *bsg_reply; 3682 3683 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); 3684 3685 /* handle the BSG job with the mailbox command */ 3686 if (!job) 3687 pmboxq->u.mb.mbxStatus = MBXERR_ERROR; 3688 3689 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3690 "2940 SLI_CONFIG ext-buffer wr mailbox command " 3691 "complete, ctxState:x%x, mbxStatus:x%x\n", 3692 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); 3693 3694 /* free all memory, including dma buffers */ 3695 mempool_free(pmboxq, phba->mbox_mem_pool); 3696 lpfc_bsg_mbox_ext_session_reset(phba); 3697 3698 /* if the job is still active, call job done */ 3699 if (job) { 3700 bsg_reply = job->reply; 3701 bsg_job_done(job, bsg_reply->result, 3702 bsg_reply->reply_payload_rcv_len); 3703 } 3704 3705 return; 3706 } 3707 3708 static void 3709 lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp, 3710 uint32_t index, struct lpfc_dmabuf *mbx_dmabuf, 3711 struct lpfc_dmabuf *ext_dmabuf) 3712 { 3713 struct lpfc_sli_config_mbox *sli_cfg_mbx; 3714 3715 /* pointer to the start of mailbox command */ 3716 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt; 3717 3718 if (nemb_tp == nemb_mse) { 3719 if (index == 0) { 3720 sli_cfg_mbx->un.sli_config_emb0_subsys. 3721 mse[index].pa_hi = 3722 putPaddrHigh(mbx_dmabuf->phys + 3723 sizeof(MAILBOX_t)); 3724 sli_cfg_mbx->un.sli_config_emb0_subsys. 3725 mse[index].pa_lo = 3726 putPaddrLow(mbx_dmabuf->phys + 3727 sizeof(MAILBOX_t)); 3728 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3729 "2943 SLI_CONFIG(mse)[%d], " 3730 "bufLen:%d, addrHi:x%x, addrLo:x%x\n", 3731 index, 3732 sli_cfg_mbx->un.sli_config_emb0_subsys. 3733 mse[index].buf_len, 3734 sli_cfg_mbx->un.sli_config_emb0_subsys. 3735 mse[index].pa_hi, 3736 sli_cfg_mbx->un.sli_config_emb0_subsys. 3737 mse[index].pa_lo); 3738 } else { 3739 sli_cfg_mbx->un.sli_config_emb0_subsys. 3740 mse[index].pa_hi = 3741 putPaddrHigh(ext_dmabuf->phys); 3742 sli_cfg_mbx->un.sli_config_emb0_subsys. 3743 mse[index].pa_lo = 3744 putPaddrLow(ext_dmabuf->phys); 3745 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3746 "2944 SLI_CONFIG(mse)[%d], " 3747 "bufLen:%d, addrHi:x%x, addrLo:x%x\n", 3748 index, 3749 sli_cfg_mbx->un.sli_config_emb0_subsys. 3750 mse[index].buf_len, 3751 sli_cfg_mbx->un.sli_config_emb0_subsys. 3752 mse[index].pa_hi, 3753 sli_cfg_mbx->un.sli_config_emb0_subsys. 3754 mse[index].pa_lo); 3755 } 3756 } else { 3757 if (index == 0) { 3758 sli_cfg_mbx->un.sli_config_emb1_subsys. 3759 hbd[index].pa_hi = 3760 putPaddrHigh(mbx_dmabuf->phys + 3761 sizeof(MAILBOX_t)); 3762 sli_cfg_mbx->un.sli_config_emb1_subsys. 3763 hbd[index].pa_lo = 3764 putPaddrLow(mbx_dmabuf->phys + 3765 sizeof(MAILBOX_t)); 3766 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3767 "3007 SLI_CONFIG(hbd)[%d], " 3768 "bufLen:%d, addrHi:x%x, addrLo:x%x\n", 3769 index, 3770 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len, 3771 &sli_cfg_mbx->un. 3772 sli_config_emb1_subsys.hbd[index]), 3773 sli_cfg_mbx->un.sli_config_emb1_subsys. 3774 hbd[index].pa_hi, 3775 sli_cfg_mbx->un.sli_config_emb1_subsys. 3776 hbd[index].pa_lo); 3777 3778 } else { 3779 sli_cfg_mbx->un.sli_config_emb1_subsys. 3780 hbd[index].pa_hi = 3781 putPaddrHigh(ext_dmabuf->phys); 3782 sli_cfg_mbx->un.sli_config_emb1_subsys. 3783 hbd[index].pa_lo = 3784 putPaddrLow(ext_dmabuf->phys); 3785 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3786 "3008 SLI_CONFIG(hbd)[%d], " 3787 "bufLen:%d, addrHi:x%x, addrLo:x%x\n", 3788 index, 3789 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len, 3790 &sli_cfg_mbx->un. 3791 sli_config_emb1_subsys.hbd[index]), 3792 sli_cfg_mbx->un.sli_config_emb1_subsys. 3793 hbd[index].pa_hi, 3794 sli_cfg_mbx->un.sli_config_emb1_subsys. 3795 hbd[index].pa_lo); 3796 } 3797 } 3798 return; 3799 } 3800 3801 /** 3802 * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read 3803 * @phba: Pointer to HBA context object. 3804 * @job: Pointer to the job object. 3805 * @nemb_tp: Enumerate of non-embedded mailbox command type. 3806 * @dmabuf: Pointer to a DMA buffer descriptor. 3807 * 3808 * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with 3809 * non-embedded external buffers. 3810 **/ 3811 static int 3812 lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job, 3813 enum nemb_type nemb_tp, 3814 struct lpfc_dmabuf *dmabuf) 3815 { 3816 struct fc_bsg_request *bsg_request = job->request; 3817 struct lpfc_sli_config_mbox *sli_cfg_mbx; 3818 struct dfc_mbox_req *mbox_req; 3819 struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf; 3820 uint32_t ext_buf_cnt, ext_buf_index; 3821 struct lpfc_dmabuf *ext_dmabuf = NULL; 3822 struct bsg_job_data *dd_data = NULL; 3823 LPFC_MBOXQ_t *pmboxq = NULL; 3824 MAILBOX_t *pmb; 3825 uint8_t *pmbx; 3826 int rc, i; 3827 3828 mbox_req = 3829 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd; 3830 3831 /* pointer to the start of mailbox command */ 3832 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; 3833 3834 if (nemb_tp == nemb_mse) { 3835 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt, 3836 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr); 3837 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) { 3838 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3839 "2945 Handled SLI_CONFIG(mse) rd, " 3840 "ext_buf_cnt(%d) out of range(%d)\n", 3841 ext_buf_cnt, 3842 LPFC_MBX_SLI_CONFIG_MAX_MSE); 3843 rc = -ERANGE; 3844 goto job_error; 3845 } 3846 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3847 "2941 Handled SLI_CONFIG(mse) rd, " 3848 "ext_buf_cnt:%d\n", ext_buf_cnt); 3849 } else { 3850 /* sanity check on interface type for support */ 3851 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 3852 LPFC_SLI_INTF_IF_TYPE_2) { 3853 rc = -ENODEV; 3854 goto job_error; 3855 } 3856 /* nemb_tp == nemb_hbd */ 3857 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count; 3858 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) { 3859 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3860 "2946 Handled SLI_CONFIG(hbd) rd, " 3861 "ext_buf_cnt(%d) out of range(%d)\n", 3862 ext_buf_cnt, 3863 LPFC_MBX_SLI_CONFIG_MAX_HBD); 3864 rc = -ERANGE; 3865 goto job_error; 3866 } 3867 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3868 "2942 Handled SLI_CONFIG(hbd) rd, " 3869 "ext_buf_cnt:%d\n", ext_buf_cnt); 3870 } 3871 3872 /* before dma descriptor setup */ 3873 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox, 3874 sta_pre_addr, dmabuf, ext_buf_cnt); 3875 3876 /* reject non-embedded mailbox command with none external buffer */ 3877 if (ext_buf_cnt == 0) { 3878 rc = -EPERM; 3879 goto job_error; 3880 } else if (ext_buf_cnt > 1) { 3881 /* additional external read buffers */ 3882 for (i = 1; i < ext_buf_cnt; i++) { 3883 ext_dmabuf = lpfc_bsg_dma_page_alloc(phba); 3884 if (!ext_dmabuf) { 3885 rc = -ENOMEM; 3886 goto job_error; 3887 } 3888 list_add_tail(&ext_dmabuf->list, 3889 &phba->mbox_ext_buf_ctx.ext_dmabuf_list); 3890 } 3891 } 3892 3893 /* bsg tracking structure */ 3894 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 3895 if (!dd_data) { 3896 rc = -ENOMEM; 3897 goto job_error; 3898 } 3899 3900 /* mailbox command structure for base driver */ 3901 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3902 if (!pmboxq) { 3903 rc = -ENOMEM; 3904 goto job_error; 3905 } 3906 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 3907 3908 /* for the first external buffer */ 3909 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf); 3910 3911 /* for the rest of external buffer descriptors if any */ 3912 if (ext_buf_cnt > 1) { 3913 ext_buf_index = 1; 3914 list_for_each_entry_safe(curr_dmabuf, next_dmabuf, 3915 &phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) { 3916 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 3917 ext_buf_index, dmabuf, 3918 curr_dmabuf); 3919 ext_buf_index++; 3920 } 3921 } 3922 3923 /* after dma descriptor setup */ 3924 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox, 3925 sta_pos_addr, dmabuf, ext_buf_cnt); 3926 3927 /* construct base driver mbox command */ 3928 pmb = &pmboxq->u.mb; 3929 pmbx = (uint8_t *)dmabuf->virt; 3930 memcpy(pmb, pmbx, sizeof(*pmb)); 3931 pmb->mbxOwner = OWN_HOST; 3932 pmboxq->vport = phba->pport; 3933 3934 /* multi-buffer handling context */ 3935 phba->mbox_ext_buf_ctx.nembType = nemb_tp; 3936 phba->mbox_ext_buf_ctx.mboxType = mbox_rd; 3937 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt; 3938 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag; 3939 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum; 3940 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf; 3941 3942 /* callback for multi-buffer read mailbox command */ 3943 pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl; 3944 3945 /* context fields to callback function */ 3946 pmboxq->ctx_u.dd_data = dd_data; 3947 dd_data->type = TYPE_MBOX; 3948 dd_data->set_job = job; 3949 dd_data->context_un.mbox.pmboxq = pmboxq; 3950 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx; 3951 job->dd_data = dd_data; 3952 3953 /* state change */ 3954 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT; 3955 3956 /* 3957 * Non-embedded mailbox subcommand data gets byte swapped here because 3958 * the lower level driver code only does the first 64 mailbox words. 3959 */ 3960 if ((!bsg_bf_get(lpfc_mbox_hdr_emb, 3961 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) && 3962 (nemb_tp == nemb_mse)) 3963 lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)], 3964 &pmbx[sizeof(MAILBOX_t)], 3965 sli_cfg_mbx->un.sli_config_emb0_subsys. 3966 mse[0].buf_len); 3967 3968 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 3969 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) { 3970 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3971 "2947 Issued SLI_CONFIG ext-buffer " 3972 "mailbox command, rc:x%x\n", rc); 3973 return SLI_CONFIG_HANDLED; 3974 } 3975 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3976 "2948 Failed to issue SLI_CONFIG ext-buffer " 3977 "mailbox command, rc:x%x\n", rc); 3978 rc = -EPIPE; 3979 3980 job_error: 3981 if (pmboxq) 3982 mempool_free(pmboxq, phba->mbox_mem_pool); 3983 lpfc_bsg_dma_page_list_free(phba, 3984 &phba->mbox_ext_buf_ctx.ext_dmabuf_list); 3985 kfree(dd_data); 3986 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE; 3987 return rc; 3988 } 3989 3990 /** 3991 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write 3992 * @phba: Pointer to HBA context object. 3993 * @job: Pointer to the job object. 3994 * @nemb_tp: Enumerate of non-embedded mailbox command type. 3995 * @dmabuf: Pointer to a DMA buffer descriptor. 3996 * 3997 * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with 3998 * non-embedded external buffers. 3999 **/ 4000 static int 4001 lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job, 4002 enum nemb_type nemb_tp, 4003 struct lpfc_dmabuf *dmabuf) 4004 { 4005 struct fc_bsg_request *bsg_request = job->request; 4006 struct fc_bsg_reply *bsg_reply = job->reply; 4007 struct dfc_mbox_req *mbox_req; 4008 struct lpfc_sli_config_mbox *sli_cfg_mbx; 4009 uint32_t ext_buf_cnt; 4010 struct bsg_job_data *dd_data = NULL; 4011 LPFC_MBOXQ_t *pmboxq = NULL; 4012 MAILBOX_t *pmb; 4013 uint8_t *mbx; 4014 int rc = SLI_CONFIG_NOT_HANDLED, i; 4015 4016 mbox_req = 4017 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd; 4018 4019 /* pointer to the start of mailbox command */ 4020 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; 4021 4022 if (nemb_tp == nemb_mse) { 4023 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt, 4024 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr); 4025 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) { 4026 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 4027 "2953 Failed SLI_CONFIG(mse) wr, " 4028 "ext_buf_cnt(%d) out of range(%d)\n", 4029 ext_buf_cnt, 4030 LPFC_MBX_SLI_CONFIG_MAX_MSE); 4031 return -ERANGE; 4032 } 4033 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4034 "2949 Handled SLI_CONFIG(mse) wr, " 4035 "ext_buf_cnt:%d\n", ext_buf_cnt); 4036 } else { 4037 /* sanity check on interface type for support */ 4038 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 4039 LPFC_SLI_INTF_IF_TYPE_2) 4040 return -ENODEV; 4041 /* nemb_tp == nemb_hbd */ 4042 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count; 4043 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) { 4044 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 4045 "2954 Failed SLI_CONFIG(hbd) wr, " 4046 "ext_buf_cnt(%d) out of range(%d)\n", 4047 ext_buf_cnt, 4048 LPFC_MBX_SLI_CONFIG_MAX_HBD); 4049 return -ERANGE; 4050 } 4051 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4052 "2950 Handled SLI_CONFIG(hbd) wr, " 4053 "ext_buf_cnt:%d\n", ext_buf_cnt); 4054 } 4055 4056 /* before dma buffer descriptor setup */ 4057 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox, 4058 sta_pre_addr, dmabuf, ext_buf_cnt); 4059 4060 if (ext_buf_cnt == 0) 4061 return -EPERM; 4062 4063 /* for the first external buffer */ 4064 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf); 4065 4066 /* after dma descriptor setup */ 4067 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox, 4068 sta_pos_addr, dmabuf, ext_buf_cnt); 4069 4070 /* log for looking forward */ 4071 for (i = 1; i < ext_buf_cnt; i++) { 4072 if (nemb_tp == nemb_mse) 4073 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4074 "2951 SLI_CONFIG(mse), buf[%d]-length:%d\n", 4075 i, sli_cfg_mbx->un.sli_config_emb0_subsys. 4076 mse[i].buf_len); 4077 else 4078 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4079 "2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n", 4080 i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len, 4081 &sli_cfg_mbx->un.sli_config_emb1_subsys. 4082 hbd[i])); 4083 } 4084 4085 /* multi-buffer handling context */ 4086 phba->mbox_ext_buf_ctx.nembType = nemb_tp; 4087 phba->mbox_ext_buf_ctx.mboxType = mbox_wr; 4088 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt; 4089 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag; 4090 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum; 4091 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf; 4092 4093 if (ext_buf_cnt == 1) { 4094 /* bsg tracking structure */ 4095 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 4096 if (!dd_data) { 4097 rc = -ENOMEM; 4098 goto job_error; 4099 } 4100 4101 /* mailbox command structure for base driver */ 4102 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4103 if (!pmboxq) { 4104 rc = -ENOMEM; 4105 goto job_error; 4106 } 4107 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 4108 pmb = &pmboxq->u.mb; 4109 mbx = (uint8_t *)dmabuf->virt; 4110 memcpy(pmb, mbx, sizeof(*pmb)); 4111 pmb->mbxOwner = OWN_HOST; 4112 pmboxq->vport = phba->pport; 4113 4114 /* callback for multi-buffer read mailbox command */ 4115 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl; 4116 4117 /* context fields to callback function */ 4118 pmboxq->ctx_u.dd_data = dd_data; 4119 dd_data->type = TYPE_MBOX; 4120 dd_data->set_job = job; 4121 dd_data->context_un.mbox.pmboxq = pmboxq; 4122 dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx; 4123 job->dd_data = dd_data; 4124 4125 /* state change */ 4126 4127 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT; 4128 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 4129 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) { 4130 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4131 "2955 Issued SLI_CONFIG ext-buffer " 4132 "mailbox command, rc:x%x\n", rc); 4133 return SLI_CONFIG_HANDLED; 4134 } 4135 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 4136 "2956 Failed to issue SLI_CONFIG ext-buffer " 4137 "mailbox command, rc:x%x\n", rc); 4138 rc = -EPIPE; 4139 goto job_error; 4140 } 4141 4142 /* wait for additional external buffers */ 4143 4144 bsg_reply->result = 0; 4145 bsg_job_done(job, bsg_reply->result, 4146 bsg_reply->reply_payload_rcv_len); 4147 return SLI_CONFIG_HANDLED; 4148 4149 job_error: 4150 if (pmboxq) 4151 mempool_free(pmboxq, phba->mbox_mem_pool); 4152 kfree(dd_data); 4153 4154 return rc; 4155 } 4156 4157 /** 4158 * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer 4159 * @phba: Pointer to HBA context object. 4160 * @job: Pointer to the job object. 4161 * @dmabuf: Pointer to a DMA buffer descriptor. 4162 * 4163 * This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded 4164 * external buffers, including both 0x9B with non-embedded MSEs and 0x9B 4165 * with embedded subsystem 0x1 and opcodes with external HBDs. 4166 **/ 4167 static int 4168 lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job, 4169 struct lpfc_dmabuf *dmabuf) 4170 { 4171 struct lpfc_sli_config_mbox *sli_cfg_mbx; 4172 uint32_t subsys; 4173 uint32_t opcode; 4174 int rc = SLI_CONFIG_NOT_HANDLED; 4175 4176 /* state change on new multi-buffer pass-through mailbox command */ 4177 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST; 4178 4179 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; 4180 4181 if (!bsg_bf_get(lpfc_mbox_hdr_emb, 4182 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) { 4183 subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys, 4184 &sli_cfg_mbx->un.sli_config_emb0_subsys); 4185 opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode, 4186 &sli_cfg_mbx->un.sli_config_emb0_subsys); 4187 if (subsys == SLI_CONFIG_SUBSYS_FCOE) { 4188 switch (opcode) { 4189 case FCOE_OPCODE_READ_FCF: 4190 case FCOE_OPCODE_GET_DPORT_RESULTS: 4191 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4192 "2957 Handled SLI_CONFIG " 4193 "subsys_fcoe, opcode:x%x\n", 4194 opcode); 4195 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job, 4196 nemb_mse, dmabuf); 4197 break; 4198 case FCOE_OPCODE_ADD_FCF: 4199 case FCOE_OPCODE_SET_DPORT_MODE: 4200 case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE: 4201 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4202 "2958 Handled SLI_CONFIG " 4203 "subsys_fcoe, opcode:x%x\n", 4204 opcode); 4205 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job, 4206 nemb_mse, dmabuf); 4207 break; 4208 default: 4209 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4210 "2959 Reject SLI_CONFIG " 4211 "subsys_fcoe, opcode:x%x\n", 4212 opcode); 4213 rc = -EPERM; 4214 break; 4215 } 4216 } else if (subsys == SLI_CONFIG_SUBSYS_COMN) { 4217 switch (opcode) { 4218 case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES: 4219 case COMN_OPCODE_GET_CNTL_ATTRIBUTES: 4220 case COMN_OPCODE_GET_PROFILE_CONFIG: 4221 case COMN_OPCODE_SET_FEATURES: 4222 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4223 "3106 Handled SLI_CONFIG " 4224 "subsys_comn, opcode:x%x\n", 4225 opcode); 4226 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job, 4227 nemb_mse, dmabuf); 4228 break; 4229 default: 4230 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4231 "3107 Reject SLI_CONFIG " 4232 "subsys_comn, opcode:x%x\n", 4233 opcode); 4234 rc = -EPERM; 4235 break; 4236 } 4237 } else { 4238 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4239 "2977 Reject SLI_CONFIG " 4240 "subsys:x%d, opcode:x%x\n", 4241 subsys, opcode); 4242 rc = -EPERM; 4243 } 4244 } else { 4245 subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys, 4246 &sli_cfg_mbx->un.sli_config_emb1_subsys); 4247 opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode, 4248 &sli_cfg_mbx->un.sli_config_emb1_subsys); 4249 if (subsys == SLI_CONFIG_SUBSYS_COMN) { 4250 switch (opcode) { 4251 case COMN_OPCODE_READ_OBJECT: 4252 case COMN_OPCODE_READ_OBJECT_LIST: 4253 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4254 "2960 Handled SLI_CONFIG " 4255 "subsys_comn, opcode:x%x\n", 4256 opcode); 4257 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job, 4258 nemb_hbd, dmabuf); 4259 break; 4260 case COMN_OPCODE_WRITE_OBJECT: 4261 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4262 "2961 Handled SLI_CONFIG " 4263 "subsys_comn, opcode:x%x\n", 4264 opcode); 4265 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job, 4266 nemb_hbd, dmabuf); 4267 break; 4268 default: 4269 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4270 "2962 Not handled SLI_CONFIG " 4271 "subsys_comn, opcode:x%x\n", 4272 opcode); 4273 rc = SLI_CONFIG_NOT_HANDLED; 4274 break; 4275 } 4276 } else { 4277 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4278 "2978 Not handled SLI_CONFIG " 4279 "subsys:x%d, opcode:x%x\n", 4280 subsys, opcode); 4281 rc = SLI_CONFIG_NOT_HANDLED; 4282 } 4283 } 4284 4285 /* state reset on not handled new multi-buffer mailbox command */ 4286 if (rc != SLI_CONFIG_HANDLED) 4287 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE; 4288 4289 return rc; 4290 } 4291 4292 /** 4293 * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers 4294 * @phba: Pointer to HBA context object. 4295 * 4296 * This routine is for requesting to abort a pass-through mailbox command with 4297 * multiple external buffers due to error condition. 4298 **/ 4299 static void 4300 lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba) 4301 { 4302 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT) 4303 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS; 4304 else 4305 lpfc_bsg_mbox_ext_session_reset(phba); 4306 return; 4307 } 4308 4309 /** 4310 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer 4311 * @phba: Pointer to HBA context object. 4312 * @job: Pointer to the job object. 4313 * 4314 * This routine extracts the next mailbox read external buffer back to 4315 * user space through BSG. 4316 **/ 4317 static int 4318 lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job) 4319 { 4320 struct fc_bsg_reply *bsg_reply = job->reply; 4321 struct lpfc_sli_config_mbox *sli_cfg_mbx; 4322 struct lpfc_dmabuf *dmabuf; 4323 uint8_t *pbuf; 4324 uint32_t size; 4325 uint32_t index; 4326 4327 index = phba->mbox_ext_buf_ctx.seqNum; 4328 phba->mbox_ext_buf_ctx.seqNum++; 4329 4330 sli_cfg_mbx = (struct lpfc_sli_config_mbox *) 4331 phba->mbox_ext_buf_ctx.mbx_dmabuf->virt; 4332 4333 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) { 4334 size = bsg_bf_get(lpfc_mbox_sli_config_mse_len, 4335 &sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]); 4336 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4337 "2963 SLI_CONFIG (mse) ext-buffer rd get " 4338 "buffer[%d], size:%d\n", index, size); 4339 } else { 4340 size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len, 4341 &sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]); 4342 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4343 "2964 SLI_CONFIG (hbd) ext-buffer rd get " 4344 "buffer[%d], size:%d\n", index, size); 4345 } 4346 if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list)) 4347 return -EPIPE; 4348 dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list, 4349 struct lpfc_dmabuf, list); 4350 list_del_init(&dmabuf->list); 4351 4352 /* after dma buffer descriptor setup */ 4353 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType, 4354 mbox_rd, dma_ebuf, sta_pos_addr, 4355 dmabuf, index); 4356 4357 pbuf = (uint8_t *)dmabuf->virt; 4358 bsg_reply->reply_payload_rcv_len = 4359 sg_copy_from_buffer(job->reply_payload.sg_list, 4360 job->reply_payload.sg_cnt, 4361 pbuf, size); 4362 4363 lpfc_bsg_dma_page_free(phba, dmabuf); 4364 4365 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) { 4366 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4367 "2965 SLI_CONFIG (hbd) ext-buffer rd mbox " 4368 "command session done\n"); 4369 lpfc_bsg_mbox_ext_session_reset(phba); 4370 } 4371 4372 bsg_reply->result = 0; 4373 bsg_job_done(job, bsg_reply->result, 4374 bsg_reply->reply_payload_rcv_len); 4375 4376 return SLI_CONFIG_HANDLED; 4377 } 4378 4379 /** 4380 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer 4381 * @phba: Pointer to HBA context object. 4382 * @job: Pointer to the job object. 4383 * @dmabuf: Pointer to a DMA buffer descriptor. 4384 * 4385 * This routine sets up the next mailbox read external buffer obtained 4386 * from user space through BSG. 4387 **/ 4388 static int 4389 lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, 4390 struct lpfc_dmabuf *dmabuf) 4391 { 4392 struct fc_bsg_reply *bsg_reply = job->reply; 4393 struct bsg_job_data *dd_data = NULL; 4394 LPFC_MBOXQ_t *pmboxq = NULL; 4395 MAILBOX_t *pmb; 4396 enum nemb_type nemb_tp; 4397 uint8_t *pbuf; 4398 uint32_t size; 4399 uint32_t index; 4400 int rc; 4401 4402 index = phba->mbox_ext_buf_ctx.seqNum; 4403 phba->mbox_ext_buf_ctx.seqNum++; 4404 nemb_tp = phba->mbox_ext_buf_ctx.nembType; 4405 4406 pbuf = (uint8_t *)dmabuf->virt; 4407 size = job->request_payload.payload_len; 4408 sg_copy_to_buffer(job->request_payload.sg_list, 4409 job->request_payload.sg_cnt, 4410 pbuf, size); 4411 4412 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) { 4413 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4414 "2966 SLI_CONFIG (mse) ext-buffer wr set " 4415 "buffer[%d], size:%d\n", 4416 phba->mbox_ext_buf_ctx.seqNum, size); 4417 4418 } else { 4419 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4420 "2967 SLI_CONFIG (hbd) ext-buffer wr set " 4421 "buffer[%d], size:%d\n", 4422 phba->mbox_ext_buf_ctx.seqNum, size); 4423 4424 } 4425 4426 /* set up external buffer descriptor and add to external buffer list */ 4427 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index, 4428 phba->mbox_ext_buf_ctx.mbx_dmabuf, 4429 dmabuf); 4430 list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list); 4431 4432 /* after write dma buffer */ 4433 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType, 4434 mbox_wr, dma_ebuf, sta_pos_addr, 4435 dmabuf, index); 4436 4437 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) { 4438 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4439 "2968 SLI_CONFIG ext-buffer wr all %d " 4440 "ebuffers received\n", 4441 phba->mbox_ext_buf_ctx.numBuf); 4442 4443 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 4444 if (!dd_data) { 4445 rc = -ENOMEM; 4446 goto job_error; 4447 } 4448 4449 /* mailbox command structure for base driver */ 4450 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4451 if (!pmboxq) { 4452 rc = -ENOMEM; 4453 goto job_error; 4454 } 4455 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 4456 pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt; 4457 pmb = &pmboxq->u.mb; 4458 memcpy(pmb, pbuf, sizeof(*pmb)); 4459 pmb->mbxOwner = OWN_HOST; 4460 pmboxq->vport = phba->pport; 4461 4462 /* callback for multi-buffer write mailbox command */ 4463 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl; 4464 4465 /* context fields to callback function */ 4466 pmboxq->ctx_u.dd_data = dd_data; 4467 dd_data->type = TYPE_MBOX; 4468 dd_data->set_job = job; 4469 dd_data->context_un.mbox.pmboxq = pmboxq; 4470 dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf; 4471 job->dd_data = dd_data; 4472 4473 /* state change */ 4474 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT; 4475 4476 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 4477 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) { 4478 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4479 "2969 Issued SLI_CONFIG ext-buffer " 4480 "mailbox command, rc:x%x\n", rc); 4481 return SLI_CONFIG_HANDLED; 4482 } 4483 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 4484 "2970 Failed to issue SLI_CONFIG ext-buffer " 4485 "mailbox command, rc:x%x\n", rc); 4486 rc = -EPIPE; 4487 goto job_error; 4488 } 4489 4490 /* wait for additional external buffers */ 4491 bsg_reply->result = 0; 4492 bsg_job_done(job, bsg_reply->result, 4493 bsg_reply->reply_payload_rcv_len); 4494 return SLI_CONFIG_HANDLED; 4495 4496 job_error: 4497 if (pmboxq) 4498 mempool_free(pmboxq, phba->mbox_mem_pool); 4499 lpfc_bsg_dma_page_free(phba, dmabuf); 4500 kfree(dd_data); 4501 4502 return rc; 4503 } 4504 4505 /** 4506 * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd 4507 * @phba: Pointer to HBA context object. 4508 * @job: Pointer to the job object. 4509 * @dmabuf: Pointer to a DMA buffer descriptor. 4510 * 4511 * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox 4512 * command with multiple non-embedded external buffers. 4513 **/ 4514 static int 4515 lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job, 4516 struct lpfc_dmabuf *dmabuf) 4517 { 4518 int rc; 4519 4520 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4521 "2971 SLI_CONFIG buffer (type:x%x)\n", 4522 phba->mbox_ext_buf_ctx.mboxType); 4523 4524 if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) { 4525 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) { 4526 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 4527 "2972 SLI_CONFIG rd buffer state " 4528 "mismatch:x%x\n", 4529 phba->mbox_ext_buf_ctx.state); 4530 lpfc_bsg_mbox_ext_abort(phba); 4531 return -EPIPE; 4532 } 4533 rc = lpfc_bsg_read_ebuf_get(phba, job); 4534 if (rc == SLI_CONFIG_HANDLED) 4535 lpfc_bsg_dma_page_free(phba, dmabuf); 4536 } else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */ 4537 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) { 4538 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 4539 "2973 SLI_CONFIG wr buffer state " 4540 "mismatch:x%x\n", 4541 phba->mbox_ext_buf_ctx.state); 4542 lpfc_bsg_mbox_ext_abort(phba); 4543 return -EPIPE; 4544 } 4545 rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf); 4546 } 4547 return rc; 4548 } 4549 4550 /** 4551 * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer 4552 * @phba: Pointer to HBA context object. 4553 * @job: Pointer to the job object. 4554 * @dmabuf: Pointer to a DMA buffer descriptor. 4555 * 4556 * This routine checks and handles non-embedded multi-buffer SLI_CONFIG 4557 * (0x9B) mailbox commands and external buffers. 4558 **/ 4559 static int 4560 lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job, 4561 struct lpfc_dmabuf *dmabuf) 4562 { 4563 struct fc_bsg_request *bsg_request = job->request; 4564 struct dfc_mbox_req *mbox_req; 4565 int rc = SLI_CONFIG_NOT_HANDLED; 4566 4567 mbox_req = 4568 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd; 4569 4570 /* mbox command with/without single external buffer */ 4571 if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0) 4572 return rc; 4573 4574 /* mbox command and first external buffer */ 4575 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) { 4576 if (mbox_req->extSeqNum == 1) { 4577 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4578 "2974 SLI_CONFIG mailbox: tag:%d, " 4579 "seq:%d\n", mbox_req->extMboxTag, 4580 mbox_req->extSeqNum); 4581 rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf); 4582 return rc; 4583 } else 4584 goto sli_cfg_ext_error; 4585 } 4586 4587 /* 4588 * handle additional external buffers 4589 */ 4590 4591 /* check broken pipe conditions */ 4592 if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag) 4593 goto sli_cfg_ext_error; 4594 if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf) 4595 goto sli_cfg_ext_error; 4596 if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1) 4597 goto sli_cfg_ext_error; 4598 4599 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4600 "2975 SLI_CONFIG mailbox external buffer: " 4601 "extSta:x%x, tag:%d, seq:%d\n", 4602 phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag, 4603 mbox_req->extSeqNum); 4604 rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf); 4605 return rc; 4606 4607 sli_cfg_ext_error: 4608 /* all other cases, broken pipe */ 4609 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 4610 "2976 SLI_CONFIG mailbox broken pipe: " 4611 "ctxSta:x%x, ctxNumBuf:%d " 4612 "ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n", 4613 phba->mbox_ext_buf_ctx.state, 4614 phba->mbox_ext_buf_ctx.numBuf, 4615 phba->mbox_ext_buf_ctx.mbxTag, 4616 phba->mbox_ext_buf_ctx.seqNum, 4617 mbox_req->extMboxTag, mbox_req->extSeqNum); 4618 4619 lpfc_bsg_mbox_ext_session_reset(phba); 4620 4621 return -EPIPE; 4622 } 4623 4624 /** 4625 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app 4626 * @phba: Pointer to HBA context object. 4627 * @job: Pointer to the job object. 4628 * @vport: Pointer to a vport object. 4629 * 4630 * Allocate a tracking object, mailbox command memory, get a mailbox 4631 * from the mailbox pool, copy the caller mailbox command. 4632 * 4633 * If offline and the sli is active we need to poll for the command (port is 4634 * being reset) and complete the job, otherwise issue the mailbox command and 4635 * let our completion handler finish the command. 4636 **/ 4637 static int 4638 lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job, 4639 struct lpfc_vport *vport) 4640 { 4641 struct fc_bsg_request *bsg_request = job->request; 4642 struct fc_bsg_reply *bsg_reply = job->reply; 4643 LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */ 4644 MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */ 4645 /* a 4k buffer to hold the mb and extended data from/to the bsg */ 4646 uint8_t *pmbx = NULL; 4647 struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */ 4648 struct lpfc_dmabuf *dmabuf = NULL; 4649 struct dfc_mbox_req *mbox_req; 4650 struct READ_EVENT_LOG_VAR *rdEventLog; 4651 uint32_t transmit_length, receive_length, mode; 4652 struct lpfc_mbx_sli4_config *sli4_config; 4653 struct lpfc_mbx_nembed_cmd *nembed_sge; 4654 struct ulp_bde64 *bde; 4655 uint8_t *ext = NULL; 4656 int rc = 0; 4657 uint8_t *from; 4658 uint32_t size; 4659 4660 /* in case no data is transferred */ 4661 bsg_reply->reply_payload_rcv_len = 0; 4662 4663 /* sanity check to protect driver */ 4664 if (job->reply_payload.payload_len > BSG_MBOX_SIZE || 4665 job->request_payload.payload_len > BSG_MBOX_SIZE) { 4666 rc = -ERANGE; 4667 goto job_done; 4668 } 4669 4670 /* 4671 * Don't allow mailbox commands to be sent when blocked or when in 4672 * the middle of discovery 4673 */ 4674 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { 4675 rc = -EAGAIN; 4676 goto job_done; 4677 } 4678 4679 mbox_req = 4680 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd; 4681 4682 /* check if requested extended data lengths are valid */ 4683 if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) || 4684 (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) { 4685 rc = -ERANGE; 4686 goto job_done; 4687 } 4688 4689 dmabuf = lpfc_bsg_dma_page_alloc(phba); 4690 if (!dmabuf || !dmabuf->virt) { 4691 rc = -ENOMEM; 4692 goto job_done; 4693 } 4694 4695 /* Get the mailbox command or external buffer from BSG */ 4696 pmbx = (uint8_t *)dmabuf->virt; 4697 size = job->request_payload.payload_len; 4698 sg_copy_to_buffer(job->request_payload.sg_list, 4699 job->request_payload.sg_cnt, pmbx, size); 4700 4701 /* Handle possible SLI_CONFIG with non-embedded payloads */ 4702 if (phba->sli_rev == LPFC_SLI_REV4) { 4703 rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf); 4704 if (rc == SLI_CONFIG_HANDLED) 4705 goto job_cont; 4706 if (rc) 4707 goto job_done; 4708 /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */ 4709 } 4710 4711 rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport); 4712 if (rc != 0) 4713 goto job_done; /* must be negative */ 4714 4715 /* allocate our bsg tracking structure */ 4716 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 4717 if (!dd_data) { 4718 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 4719 "2727 Failed allocation of dd_data\n"); 4720 rc = -ENOMEM; 4721 goto job_done; 4722 } 4723 4724 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4725 if (!pmboxq) { 4726 rc = -ENOMEM; 4727 goto job_done; 4728 } 4729 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 4730 4731 pmb = &pmboxq->u.mb; 4732 memcpy(pmb, pmbx, sizeof(*pmb)); 4733 pmb->mbxOwner = OWN_HOST; 4734 pmboxq->vport = vport; 4735 4736 /* If HBA encountered an error attention, allow only DUMP 4737 * or RESTART mailbox commands until the HBA is restarted. 4738 */ 4739 if (phba->pport->stopped && 4740 pmb->mbxCommand != MBX_DUMP_MEMORY && 4741 pmb->mbxCommand != MBX_RESTART && 4742 pmb->mbxCommand != MBX_WRITE_VPARMS && 4743 pmb->mbxCommand != MBX_WRITE_WWN) 4744 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 4745 "2797 mbox: Issued mailbox cmd " 4746 "0x%x while in stopped state.\n", 4747 pmb->mbxCommand); 4748 4749 /* extended mailbox commands will need an extended buffer */ 4750 if (mbox_req->inExtWLen || mbox_req->outExtWLen) { 4751 from = pmbx; 4752 ext = from + sizeof(MAILBOX_t); 4753 pmboxq->ext_buf = ext; 4754 pmboxq->in_ext_byte_len = 4755 mbox_req->inExtWLen * sizeof(uint32_t); 4756 pmboxq->out_ext_byte_len = 4757 mbox_req->outExtWLen * sizeof(uint32_t); 4758 pmboxq->mbox_offset_word = mbox_req->mbOffset; 4759 } 4760 4761 /* biu diag will need a kernel buffer to transfer the data 4762 * allocate our own buffer and setup the mailbox command to 4763 * use ours 4764 */ 4765 if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) { 4766 transmit_length = pmb->un.varWords[1]; 4767 receive_length = pmb->un.varWords[4]; 4768 /* transmit length cannot be greater than receive length or 4769 * mailbox extension size 4770 */ 4771 if ((transmit_length > receive_length) || 4772 (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) { 4773 rc = -ERANGE; 4774 goto job_done; 4775 } 4776 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh = 4777 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)); 4778 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow = 4779 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)); 4780 4781 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh = 4782 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t) 4783 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize); 4784 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow = 4785 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t) 4786 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize); 4787 } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) { 4788 rdEventLog = &pmb->un.varRdEventLog; 4789 receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize; 4790 mode = bf_get(lpfc_event_log, rdEventLog); 4791 4792 /* receive length cannot be greater than mailbox 4793 * extension size 4794 */ 4795 if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) { 4796 rc = -ERANGE; 4797 goto job_done; 4798 } 4799 4800 /* mode zero uses a bde like biu diags command */ 4801 if (mode == 0) { 4802 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys 4803 + sizeof(MAILBOX_t)); 4804 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys 4805 + sizeof(MAILBOX_t)); 4806 } 4807 } else if (phba->sli_rev == LPFC_SLI_REV4) { 4808 /* Let type 4 (well known data) through because the data is 4809 * returned in varwords[4-8] 4810 * otherwise check the recieve length and fetch the buffer addr 4811 */ 4812 if ((pmb->mbxCommand == MBX_DUMP_MEMORY) && 4813 (pmb->un.varDmp.type != DMP_WELL_KNOWN)) { 4814 /* rebuild the command for sli4 using our own buffers 4815 * like we do for biu diags 4816 */ 4817 receive_length = pmb->un.varWords[2]; 4818 /* receive length cannot be greater than mailbox 4819 * extension size 4820 */ 4821 if (receive_length == 0) { 4822 rc = -ERANGE; 4823 goto job_done; 4824 } 4825 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys 4826 + sizeof(MAILBOX_t)); 4827 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys 4828 + sizeof(MAILBOX_t)); 4829 } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) && 4830 pmb->un.varUpdateCfg.co) { 4831 bde = (struct ulp_bde64 *)&pmb->un.varWords[4]; 4832 4833 /* bde size cannot be greater than mailbox ext size */ 4834 if (bde->tus.f.bdeSize > 4835 BSG_MBOX_SIZE - sizeof(MAILBOX_t)) { 4836 rc = -ERANGE; 4837 goto job_done; 4838 } 4839 bde->addrHigh = putPaddrHigh(dmabuf->phys 4840 + sizeof(MAILBOX_t)); 4841 bde->addrLow = putPaddrLow(dmabuf->phys 4842 + sizeof(MAILBOX_t)); 4843 } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) { 4844 /* Handling non-embedded SLI_CONFIG mailbox command */ 4845 sli4_config = &pmboxq->u.mqe.un.sli4_config; 4846 if (!bf_get(lpfc_mbox_hdr_emb, 4847 &sli4_config->header.cfg_mhdr)) { 4848 /* rebuild the command for sli4 using our 4849 * own buffers like we do for biu diags 4850 */ 4851 nembed_sge = (struct lpfc_mbx_nembed_cmd *) 4852 &pmb->un.varWords[0]; 4853 receive_length = nembed_sge->sge[0].length; 4854 4855 /* receive length cannot be greater than 4856 * mailbox extension size 4857 */ 4858 if ((receive_length == 0) || 4859 (receive_length > 4860 BSG_MBOX_SIZE - sizeof(MAILBOX_t))) { 4861 rc = -ERANGE; 4862 goto job_done; 4863 } 4864 4865 nembed_sge->sge[0].pa_hi = 4866 putPaddrHigh(dmabuf->phys 4867 + sizeof(MAILBOX_t)); 4868 nembed_sge->sge[0].pa_lo = 4869 putPaddrLow(dmabuf->phys 4870 + sizeof(MAILBOX_t)); 4871 } 4872 } 4873 } 4874 4875 dd_data->context_un.mbox.dmabuffers = dmabuf; 4876 4877 /* setup wake call as IOCB callback */ 4878 pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl; 4879 4880 /* setup context field to pass wait_queue pointer to wake function */ 4881 pmboxq->ctx_u.dd_data = dd_data; 4882 dd_data->type = TYPE_MBOX; 4883 dd_data->set_job = job; 4884 dd_data->context_un.mbox.pmboxq = pmboxq; 4885 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx; 4886 dd_data->context_un.mbox.ext = ext; 4887 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset; 4888 dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen; 4889 dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen; 4890 job->dd_data = dd_data; 4891 4892 if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag) || 4893 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) { 4894 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 4895 if (rc != MBX_SUCCESS) { 4896 rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV; 4897 goto job_done; 4898 } 4899 4900 /* job finished, copy the data */ 4901 memcpy(pmbx, pmb, sizeof(*pmb)); 4902 bsg_reply->reply_payload_rcv_len = 4903 sg_copy_from_buffer(job->reply_payload.sg_list, 4904 job->reply_payload.sg_cnt, 4905 pmbx, size); 4906 /* not waiting mbox already done */ 4907 rc = 0; 4908 goto job_done; 4909 } 4910 4911 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 4912 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) 4913 return 1; /* job started */ 4914 4915 job_done: 4916 /* common exit for error or job completed inline */ 4917 if (pmboxq) 4918 mempool_free(pmboxq, phba->mbox_mem_pool); 4919 lpfc_bsg_dma_page_free(phba, dmabuf); 4920 kfree(dd_data); 4921 4922 job_cont: 4923 return rc; 4924 } 4925 4926 /** 4927 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command 4928 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX. 4929 **/ 4930 static int 4931 lpfc_bsg_mbox_cmd(struct bsg_job *job) 4932 { 4933 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); 4934 struct fc_bsg_request *bsg_request = job->request; 4935 struct fc_bsg_reply *bsg_reply = job->reply; 4936 struct lpfc_hba *phba = vport->phba; 4937 struct dfc_mbox_req *mbox_req; 4938 int rc = 0; 4939 4940 /* mix-and-match backward compatibility */ 4941 bsg_reply->reply_payload_rcv_len = 0; 4942 if (job->request_len < 4943 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) { 4944 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4945 "2737 Mix-and-match backward compatibility " 4946 "between MBOX_REQ old size:%d and " 4947 "new request size:%d\n", 4948 (int)(job->request_len - 4949 sizeof(struct fc_bsg_request)), 4950 (int)sizeof(struct dfc_mbox_req)); 4951 mbox_req = (struct dfc_mbox_req *) 4952 bsg_request->rqst_data.h_vendor.vendor_cmd; 4953 mbox_req->extMboxTag = 0; 4954 mbox_req->extSeqNum = 0; 4955 } 4956 4957 rc = lpfc_bsg_issue_mbox(phba, job, vport); 4958 4959 if (rc == 0) { 4960 /* job done */ 4961 bsg_reply->result = 0; 4962 job->dd_data = NULL; 4963 bsg_job_done(job, bsg_reply->result, 4964 bsg_reply->reply_payload_rcv_len); 4965 } else if (rc == 1) 4966 /* job submitted, will complete later*/ 4967 rc = 0; /* return zero, no error */ 4968 else { 4969 /* some error occurred */ 4970 bsg_reply->result = rc; 4971 job->dd_data = NULL; 4972 } 4973 4974 return rc; 4975 } 4976 4977 static int 4978 lpfc_forced_link_speed(struct bsg_job *job) 4979 { 4980 struct Scsi_Host *shost = fc_bsg_to_shost(job); 4981 struct lpfc_vport *vport = shost_priv(shost); 4982 struct lpfc_hba *phba = vport->phba; 4983 struct fc_bsg_reply *bsg_reply = job->reply; 4984 struct forced_link_speed_support_reply *forced_reply; 4985 int rc = 0; 4986 4987 if (job->request_len < 4988 sizeof(struct fc_bsg_request) + 4989 sizeof(struct get_forced_link_speed_support)) { 4990 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 4991 "0048 Received FORCED_LINK_SPEED request " 4992 "below minimum size\n"); 4993 rc = -EINVAL; 4994 goto job_error; 4995 } 4996 4997 forced_reply = (struct forced_link_speed_support_reply *) 4998 bsg_reply->reply_data.vendor_reply.vendor_rsp; 4999 5000 if (job->reply_len < sizeof(*bsg_reply) + sizeof(*forced_reply)) { 5001 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 5002 "0049 Received FORCED_LINK_SPEED reply below " 5003 "minimum size\n"); 5004 rc = -EINVAL; 5005 goto job_error; 5006 } 5007 5008 forced_reply->supported = test_bit(HBA_FORCED_LINK_SPEED, 5009 &phba->hba_flag) 5010 ? LPFC_FORCED_LINK_SPEED_SUPPORTED 5011 : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED; 5012 job_error: 5013 bsg_reply->result = rc; 5014 if (rc == 0) 5015 bsg_job_done(job, bsg_reply->result, 5016 bsg_reply->reply_payload_rcv_len); 5017 return rc; 5018 } 5019 5020 /** 5021 * lpfc_check_fwlog_support: Check FW log support on the adapter 5022 * @phba: Pointer to HBA context object. 5023 * 5024 * Check if FW Logging support by the adapter 5025 **/ 5026 int 5027 lpfc_check_fwlog_support(struct lpfc_hba *phba) 5028 { 5029 struct lpfc_ras_fwlog *ras_fwlog = NULL; 5030 5031 ras_fwlog = &phba->ras_fwlog; 5032 5033 if (!ras_fwlog->ras_hwsupport) 5034 return -EACCES; 5035 else if (!ras_fwlog->ras_enabled) 5036 return -EPERM; 5037 else 5038 return 0; 5039 } 5040 5041 /** 5042 * lpfc_bsg_get_ras_config: Get RAS configuration settings 5043 * @job: fc_bsg_job to handle 5044 * 5045 * Get RAS configuration values set. 5046 **/ 5047 static int 5048 lpfc_bsg_get_ras_config(struct bsg_job *job) 5049 { 5050 struct Scsi_Host *shost = fc_bsg_to_shost(job); 5051 struct lpfc_vport *vport = shost_priv(shost); 5052 struct fc_bsg_reply *bsg_reply = job->reply; 5053 struct lpfc_hba *phba = vport->phba; 5054 struct lpfc_bsg_get_ras_config_reply *ras_reply; 5055 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 5056 int rc = 0; 5057 5058 if (job->request_len < 5059 sizeof(struct fc_bsg_request) + 5060 sizeof(struct lpfc_bsg_ras_req)) { 5061 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 5062 "6192 FW_LOG request received " 5063 "below minimum size\n"); 5064 rc = -EINVAL; 5065 goto ras_job_error; 5066 } 5067 5068 /* Check FW log status */ 5069 rc = lpfc_check_fwlog_support(phba); 5070 if (rc) 5071 goto ras_job_error; 5072 5073 ras_reply = (struct lpfc_bsg_get_ras_config_reply *) 5074 bsg_reply->reply_data.vendor_reply.vendor_rsp; 5075 5076 /* Current logging state */ 5077 spin_lock_irq(&phba->ras_fwlog_lock); 5078 if (ras_fwlog->state == ACTIVE) 5079 ras_reply->state = LPFC_RASLOG_STATE_RUNNING; 5080 else 5081 ras_reply->state = LPFC_RASLOG_STATE_STOPPED; 5082 spin_unlock_irq(&phba->ras_fwlog_lock); 5083 5084 ras_reply->log_level = phba->ras_fwlog.fw_loglevel; 5085 ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize; 5086 5087 ras_job_error: 5088 /* make error code available to userspace */ 5089 bsg_reply->result = rc; 5090 5091 /* complete the job back to userspace */ 5092 if (!rc) 5093 bsg_job_done(job, bsg_reply->result, 5094 bsg_reply->reply_payload_rcv_len); 5095 return rc; 5096 } 5097 5098 /** 5099 * lpfc_bsg_set_ras_config: Set FW logging parameters 5100 * @job: fc_bsg_job to handle 5101 * 5102 * Set log-level parameters for FW-logging in host memory 5103 **/ 5104 static int 5105 lpfc_bsg_set_ras_config(struct bsg_job *job) 5106 { 5107 struct Scsi_Host *shost = fc_bsg_to_shost(job); 5108 struct lpfc_vport *vport = shost_priv(shost); 5109 struct lpfc_hba *phba = vport->phba; 5110 struct lpfc_bsg_set_ras_config_req *ras_req; 5111 struct fc_bsg_request *bsg_request = job->request; 5112 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 5113 struct fc_bsg_reply *bsg_reply = job->reply; 5114 uint8_t action = 0, log_level = 0; 5115 int rc = 0, action_status = 0; 5116 5117 if (job->request_len < 5118 sizeof(struct fc_bsg_request) + 5119 sizeof(struct lpfc_bsg_set_ras_config_req)) { 5120 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 5121 "6182 Received RAS_LOG request " 5122 "below minimum size\n"); 5123 rc = -EINVAL; 5124 goto ras_job_error; 5125 } 5126 5127 /* Check FW log status */ 5128 rc = lpfc_check_fwlog_support(phba); 5129 if (rc) 5130 goto ras_job_error; 5131 5132 ras_req = (struct lpfc_bsg_set_ras_config_req *) 5133 bsg_request->rqst_data.h_vendor.vendor_cmd; 5134 action = ras_req->action; 5135 log_level = ras_req->log_level; 5136 5137 if (action == LPFC_RASACTION_STOP_LOGGING) { 5138 /* Check if already disabled */ 5139 spin_lock_irq(&phba->ras_fwlog_lock); 5140 if (ras_fwlog->state != ACTIVE) { 5141 spin_unlock_irq(&phba->ras_fwlog_lock); 5142 rc = -ESRCH; 5143 goto ras_job_error; 5144 } 5145 spin_unlock_irq(&phba->ras_fwlog_lock); 5146 5147 /* Disable logging */ 5148 lpfc_ras_stop_fwlog(phba); 5149 } else { 5150 /*action = LPFC_RASACTION_START_LOGGING*/ 5151 5152 /* Even though FW-logging is active re-initialize 5153 * FW-logging with new log-level. Return status 5154 * "Logging already Running" to caller. 5155 **/ 5156 spin_lock_irq(&phba->ras_fwlog_lock); 5157 if (ras_fwlog->state != INACTIVE) 5158 action_status = -EINPROGRESS; 5159 spin_unlock_irq(&phba->ras_fwlog_lock); 5160 5161 /* Enable logging */ 5162 rc = lpfc_sli4_ras_fwlog_init(phba, log_level, 5163 LPFC_RAS_ENABLE_LOGGING); 5164 if (rc) { 5165 rc = -EINVAL; 5166 goto ras_job_error; 5167 } 5168 5169 /* Check if FW-logging is re-initialized */ 5170 if (action_status == -EINPROGRESS) 5171 rc = action_status; 5172 } 5173 ras_job_error: 5174 /* make error code available to userspace */ 5175 bsg_reply->result = rc; 5176 5177 /* complete the job back to userspace */ 5178 if (!rc) 5179 bsg_job_done(job, bsg_reply->result, 5180 bsg_reply->reply_payload_rcv_len); 5181 5182 return rc; 5183 } 5184 5185 /** 5186 * lpfc_bsg_get_ras_lwpd: Get log write position data 5187 * @job: fc_bsg_job to handle 5188 * 5189 * Get Offset/Wrap count of the log message written 5190 * in host memory 5191 **/ 5192 static int 5193 lpfc_bsg_get_ras_lwpd(struct bsg_job *job) 5194 { 5195 struct Scsi_Host *shost = fc_bsg_to_shost(job); 5196 struct lpfc_vport *vport = shost_priv(shost); 5197 struct lpfc_bsg_get_ras_lwpd *ras_reply; 5198 struct lpfc_hba *phba = vport->phba; 5199 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 5200 struct fc_bsg_reply *bsg_reply = job->reply; 5201 u32 *lwpd_ptr = NULL; 5202 int rc = 0; 5203 5204 rc = lpfc_check_fwlog_support(phba); 5205 if (rc) 5206 goto ras_job_error; 5207 5208 if (job->request_len < 5209 sizeof(struct fc_bsg_request) + 5210 sizeof(struct lpfc_bsg_ras_req)) { 5211 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 5212 "6183 Received RAS_LOG request " 5213 "below minimum size\n"); 5214 rc = -EINVAL; 5215 goto ras_job_error; 5216 } 5217 5218 ras_reply = (struct lpfc_bsg_get_ras_lwpd *) 5219 bsg_reply->reply_data.vendor_reply.vendor_rsp; 5220 5221 if (!ras_fwlog->lwpd.virt) { 5222 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 5223 "6193 Restart FW Logging\n"); 5224 rc = -EINVAL; 5225 goto ras_job_error; 5226 } 5227 5228 /* Get lwpd offset */ 5229 lwpd_ptr = (uint32_t *)(ras_fwlog->lwpd.virt); 5230 ras_reply->offset = be32_to_cpu(*lwpd_ptr & 0xffffffff); 5231 5232 /* Get wrap count */ 5233 ras_reply->wrap_count = be32_to_cpu(*(++lwpd_ptr) & 0xffffffff); 5234 5235 ras_job_error: 5236 /* make error code available to userspace */ 5237 bsg_reply->result = rc; 5238 5239 /* complete the job back to userspace */ 5240 if (!rc) 5241 bsg_job_done(job, bsg_reply->result, 5242 bsg_reply->reply_payload_rcv_len); 5243 5244 return rc; 5245 } 5246 5247 /** 5248 * lpfc_bsg_get_ras_fwlog: Read FW log 5249 * @job: fc_bsg_job to handle 5250 * 5251 * Copy the FW log into the passed buffer. 5252 **/ 5253 static int 5254 lpfc_bsg_get_ras_fwlog(struct bsg_job *job) 5255 { 5256 struct Scsi_Host *shost = fc_bsg_to_shost(job); 5257 struct lpfc_vport *vport = shost_priv(shost); 5258 struct lpfc_hba *phba = vport->phba; 5259 struct fc_bsg_request *bsg_request = job->request; 5260 struct fc_bsg_reply *bsg_reply = job->reply; 5261 struct lpfc_bsg_get_fwlog_req *ras_req; 5262 u32 rd_offset, rd_index, offset; 5263 void *src, *fwlog_buff; 5264 struct lpfc_ras_fwlog *ras_fwlog = NULL; 5265 struct lpfc_dmabuf *dmabuf, *next; 5266 int rc = 0; 5267 5268 ras_fwlog = &phba->ras_fwlog; 5269 5270 rc = lpfc_check_fwlog_support(phba); 5271 if (rc) 5272 goto ras_job_error; 5273 5274 /* Logging to be stopped before reading */ 5275 spin_lock_irq(&phba->ras_fwlog_lock); 5276 if (ras_fwlog->state == ACTIVE) { 5277 spin_unlock_irq(&phba->ras_fwlog_lock); 5278 rc = -EINPROGRESS; 5279 goto ras_job_error; 5280 } 5281 spin_unlock_irq(&phba->ras_fwlog_lock); 5282 5283 if (job->request_len < 5284 sizeof(struct fc_bsg_request) + 5285 sizeof(struct lpfc_bsg_get_fwlog_req)) { 5286 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 5287 "6184 Received RAS_LOG request " 5288 "below minimum size\n"); 5289 rc = -EINVAL; 5290 goto ras_job_error; 5291 } 5292 5293 ras_req = (struct lpfc_bsg_get_fwlog_req *) 5294 bsg_request->rqst_data.h_vendor.vendor_cmd; 5295 rd_offset = ras_req->read_offset; 5296 5297 /* Allocate memory to read fw log*/ 5298 fwlog_buff = vmalloc(ras_req->read_size); 5299 if (!fwlog_buff) { 5300 rc = -ENOMEM; 5301 goto ras_job_error; 5302 } 5303 5304 rd_index = (rd_offset / LPFC_RAS_MAX_ENTRY_SIZE); 5305 offset = (rd_offset % LPFC_RAS_MAX_ENTRY_SIZE); 5306 5307 list_for_each_entry_safe(dmabuf, next, 5308 &ras_fwlog->fwlog_buff_list, list) { 5309 5310 if (dmabuf->buffer_tag < rd_index) 5311 continue; 5312 5313 src = dmabuf->virt + offset; 5314 memcpy(fwlog_buff, src, ras_req->read_size); 5315 break; 5316 } 5317 5318 bsg_reply->reply_payload_rcv_len = 5319 sg_copy_from_buffer(job->reply_payload.sg_list, 5320 job->reply_payload.sg_cnt, 5321 fwlog_buff, ras_req->read_size); 5322 5323 vfree(fwlog_buff); 5324 5325 ras_job_error: 5326 bsg_reply->result = rc; 5327 if (!rc) 5328 bsg_job_done(job, bsg_reply->result, 5329 bsg_reply->reply_payload_rcv_len); 5330 5331 return rc; 5332 } 5333 5334 static int 5335 lpfc_get_trunk_info(struct bsg_job *job) 5336 { 5337 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); 5338 struct lpfc_hba *phba = vport->phba; 5339 struct fc_bsg_reply *bsg_reply = job->reply; 5340 struct lpfc_trunk_info *event_reply; 5341 int rc = 0; 5342 5343 if (job->request_len < 5344 sizeof(struct fc_bsg_request) + sizeof(struct get_trunk_info_req)) { 5345 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 5346 "2744 Received GET TRUNK _INFO request below " 5347 "minimum size\n"); 5348 rc = -EINVAL; 5349 goto job_error; 5350 } 5351 5352 event_reply = (struct lpfc_trunk_info *) 5353 bsg_reply->reply_data.vendor_reply.vendor_rsp; 5354 5355 if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) { 5356 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 5357 "2728 Received GET TRUNK _INFO reply below " 5358 "minimum size\n"); 5359 rc = -EINVAL; 5360 goto job_error; 5361 } 5362 if (event_reply == NULL) { 5363 rc = -EINVAL; 5364 goto job_error; 5365 } 5366 5367 bsg_bf_set(lpfc_trunk_info_link_status, event_reply, 5368 (phba->link_state >= LPFC_LINK_UP) ? 1 : 0); 5369 5370 bsg_bf_set(lpfc_trunk_info_trunk_active0, event_reply, 5371 (phba->trunk_link.link0.state == LPFC_LINK_UP) ? 1 : 0); 5372 5373 bsg_bf_set(lpfc_trunk_info_trunk_active1, event_reply, 5374 (phba->trunk_link.link1.state == LPFC_LINK_UP) ? 1 : 0); 5375 5376 bsg_bf_set(lpfc_trunk_info_trunk_active2, event_reply, 5377 (phba->trunk_link.link2.state == LPFC_LINK_UP) ? 1 : 0); 5378 5379 bsg_bf_set(lpfc_trunk_info_trunk_active3, event_reply, 5380 (phba->trunk_link.link3.state == LPFC_LINK_UP) ? 1 : 0); 5381 5382 bsg_bf_set(lpfc_trunk_info_trunk_config0, event_reply, 5383 bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba)); 5384 5385 bsg_bf_set(lpfc_trunk_info_trunk_config1, event_reply, 5386 bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba)); 5387 5388 bsg_bf_set(lpfc_trunk_info_trunk_config2, event_reply, 5389 bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba)); 5390 5391 bsg_bf_set(lpfc_trunk_info_trunk_config3, event_reply, 5392 bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba)); 5393 5394 event_reply->port_speed = phba->sli4_hba.link_state.speed / 1000; 5395 event_reply->logical_speed = 5396 phba->sli4_hba.link_state.logical_speed / 1000; 5397 job_error: 5398 bsg_reply->result = rc; 5399 if (!rc) 5400 bsg_job_done(job, bsg_reply->result, 5401 bsg_reply->reply_payload_rcv_len); 5402 return rc; 5403 5404 } 5405 5406 static int 5407 lpfc_get_cgnbuf_info(struct bsg_job *job) 5408 { 5409 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); 5410 struct lpfc_hba *phba = vport->phba; 5411 struct fc_bsg_request *bsg_request = job->request; 5412 struct fc_bsg_reply *bsg_reply = job->reply; 5413 struct get_cgnbuf_info_req *cgnbuf_req; 5414 struct lpfc_cgn_info *cp; 5415 uint8_t *cgn_buff; 5416 size_t size, cinfosz; 5417 int rc = 0; 5418 5419 if (job->request_len < sizeof(struct fc_bsg_request) + 5420 sizeof(struct get_cgnbuf_info_req)) { 5421 rc = -ENOMEM; 5422 goto job_exit; 5423 } 5424 5425 if (!phba->sli4_hba.pc_sli4_params.cmf) { 5426 rc = -ENOENT; 5427 goto job_exit; 5428 } 5429 5430 if (!phba->cgn_i || !phba->cgn_i->virt) { 5431 rc = -ENOENT; 5432 goto job_exit; 5433 } 5434 5435 cp = phba->cgn_i->virt; 5436 if (cp->cgn_info_version < LPFC_CGN_INFO_V3) { 5437 rc = -EPERM; 5438 goto job_exit; 5439 } 5440 5441 cgnbuf_req = (struct get_cgnbuf_info_req *) 5442 bsg_request->rqst_data.h_vendor.vendor_cmd; 5443 5444 /* For reset or size == 0 */ 5445 bsg_reply->reply_payload_rcv_len = 0; 5446 5447 if (cgnbuf_req->reset == LPFC_BSG_CGN_RESET_STAT) { 5448 lpfc_init_congestion_stat(phba); 5449 goto job_exit; 5450 } 5451 5452 /* We don't want to include the CRC at the end */ 5453 cinfosz = sizeof(struct lpfc_cgn_info) - sizeof(uint32_t); 5454 5455 size = cgnbuf_req->read_size; 5456 if (!size) 5457 goto job_exit; 5458 5459 if (size < cinfosz) { 5460 /* Just copy back what we can */ 5461 cinfosz = size; 5462 rc = -E2BIG; 5463 } 5464 5465 /* Allocate memory to read congestion info */ 5466 cgn_buff = vmalloc(cinfosz); 5467 if (!cgn_buff) { 5468 rc = -ENOMEM; 5469 goto job_exit; 5470 } 5471 5472 memcpy(cgn_buff, cp, cinfosz); 5473 5474 bsg_reply->reply_payload_rcv_len = 5475 sg_copy_from_buffer(job->reply_payload.sg_list, 5476 job->reply_payload.sg_cnt, 5477 cgn_buff, cinfosz); 5478 5479 vfree(cgn_buff); 5480 5481 job_exit: 5482 bsg_reply->result = rc; 5483 if (!rc) 5484 bsg_job_done(job, bsg_reply->result, 5485 bsg_reply->reply_payload_rcv_len); 5486 else 5487 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 5488 "2724 GET CGNBUF error: %d\n", rc); 5489 return rc; 5490 } 5491 5492 /** 5493 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job 5494 * @job: fc_bsg_job to handle 5495 **/ 5496 static int 5497 lpfc_bsg_hst_vendor(struct bsg_job *job) 5498 { 5499 struct fc_bsg_request *bsg_request = job->request; 5500 struct fc_bsg_reply *bsg_reply = job->reply; 5501 int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0]; 5502 int rc; 5503 5504 switch (command) { 5505 case LPFC_BSG_VENDOR_SET_CT_EVENT: 5506 rc = lpfc_bsg_hba_set_event(job); 5507 break; 5508 case LPFC_BSG_VENDOR_GET_CT_EVENT: 5509 rc = lpfc_bsg_hba_get_event(job); 5510 break; 5511 case LPFC_BSG_VENDOR_SEND_MGMT_RESP: 5512 rc = lpfc_bsg_send_mgmt_rsp(job); 5513 break; 5514 case LPFC_BSG_VENDOR_DIAG_MODE: 5515 rc = lpfc_bsg_diag_loopback_mode(job); 5516 break; 5517 case LPFC_BSG_VENDOR_DIAG_MODE_END: 5518 rc = lpfc_sli4_bsg_diag_mode_end(job); 5519 break; 5520 case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK: 5521 rc = lpfc_bsg_diag_loopback_run(job); 5522 break; 5523 case LPFC_BSG_VENDOR_LINK_DIAG_TEST: 5524 rc = lpfc_sli4_bsg_link_diag_test(job); 5525 break; 5526 case LPFC_BSG_VENDOR_GET_MGMT_REV: 5527 rc = lpfc_bsg_get_dfc_rev(job); 5528 break; 5529 case LPFC_BSG_VENDOR_MBOX: 5530 rc = lpfc_bsg_mbox_cmd(job); 5531 break; 5532 case LPFC_BSG_VENDOR_FORCED_LINK_SPEED: 5533 rc = lpfc_forced_link_speed(job); 5534 break; 5535 case LPFC_BSG_VENDOR_RAS_GET_LWPD: 5536 rc = lpfc_bsg_get_ras_lwpd(job); 5537 break; 5538 case LPFC_BSG_VENDOR_RAS_GET_FWLOG: 5539 rc = lpfc_bsg_get_ras_fwlog(job); 5540 break; 5541 case LPFC_BSG_VENDOR_RAS_GET_CONFIG: 5542 rc = lpfc_bsg_get_ras_config(job); 5543 break; 5544 case LPFC_BSG_VENDOR_RAS_SET_CONFIG: 5545 rc = lpfc_bsg_set_ras_config(job); 5546 break; 5547 case LPFC_BSG_VENDOR_GET_TRUNK_INFO: 5548 rc = lpfc_get_trunk_info(job); 5549 break; 5550 case LPFC_BSG_VENDOR_GET_CGNBUF_INFO: 5551 rc = lpfc_get_cgnbuf_info(job); 5552 break; 5553 default: 5554 rc = -EINVAL; 5555 bsg_reply->reply_payload_rcv_len = 0; 5556 /* make error code available to userspace */ 5557 bsg_reply->result = rc; 5558 break; 5559 } 5560 5561 return rc; 5562 } 5563 5564 /** 5565 * lpfc_bsg_request - handle a bsg request from the FC transport 5566 * @job: bsg_job to handle 5567 **/ 5568 int 5569 lpfc_bsg_request(struct bsg_job *job) 5570 { 5571 struct fc_bsg_request *bsg_request = job->request; 5572 struct fc_bsg_reply *bsg_reply = job->reply; 5573 uint32_t msgcode; 5574 int rc; 5575 5576 msgcode = bsg_request->msgcode; 5577 switch (msgcode) { 5578 case FC_BSG_HST_VENDOR: 5579 rc = lpfc_bsg_hst_vendor(job); 5580 break; 5581 case FC_BSG_RPT_ELS: 5582 rc = lpfc_bsg_rport_els(job); 5583 break; 5584 case FC_BSG_RPT_CT: 5585 rc = lpfc_bsg_send_mgmt_cmd(job); 5586 break; 5587 default: 5588 rc = -EINVAL; 5589 bsg_reply->reply_payload_rcv_len = 0; 5590 /* make error code available to userspace */ 5591 bsg_reply->result = rc; 5592 break; 5593 } 5594 5595 return rc; 5596 } 5597 5598 /** 5599 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport 5600 * @job: bsg_job that has timed out 5601 * 5602 * This function just aborts the job's IOCB. The aborted IOCB will return to 5603 * the waiting function which will handle passing the error back to userspace 5604 **/ 5605 int 5606 lpfc_bsg_timeout(struct bsg_job *job) 5607 { 5608 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); 5609 struct lpfc_hba *phba = vport->phba; 5610 struct lpfc_iocbq *cmdiocb; 5611 struct lpfc_sli_ring *pring; 5612 struct bsg_job_data *dd_data; 5613 unsigned long flags; 5614 int rc = 0; 5615 LIST_HEAD(completions); 5616 struct lpfc_iocbq *check_iocb, *next_iocb; 5617 5618 pring = lpfc_phba_elsring(phba); 5619 if (unlikely(!pring)) 5620 return -EIO; 5621 5622 /* if job's driver data is NULL, the command completed or is in the 5623 * the process of completing. In this case, return status to request 5624 * so the timeout is retried. This avoids double completion issues 5625 * and the request will be pulled off the timer queue when the 5626 * command's completion handler executes. Otherwise, prevent the 5627 * command's completion handler from executing the job done callback 5628 * and continue processing to abort the outstanding the command. 5629 */ 5630 5631 spin_lock_irqsave(&phba->ct_ev_lock, flags); 5632 dd_data = (struct bsg_job_data *)job->dd_data; 5633 if (dd_data) { 5634 dd_data->set_job = NULL; 5635 job->dd_data = NULL; 5636 } else { 5637 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5638 return -EAGAIN; 5639 } 5640 5641 switch (dd_data->type) { 5642 case TYPE_IOCB: 5643 /* Check to see if IOCB was issued to the port or not. If not, 5644 * remove it from the txq queue and call cancel iocbs. 5645 * Otherwise, call abort iotag 5646 */ 5647 cmdiocb = dd_data->context_un.iocb.cmdiocbq; 5648 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5649 5650 spin_lock_irqsave(&phba->hbalock, flags); 5651 /* make sure the I/O abort window is still open */ 5652 if (!(cmdiocb->cmd_flag & LPFC_IO_CMD_OUTSTANDING)) { 5653 spin_unlock_irqrestore(&phba->hbalock, flags); 5654 return -EAGAIN; 5655 } 5656 list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, 5657 list) { 5658 if (check_iocb == cmdiocb) { 5659 list_move_tail(&check_iocb->list, &completions); 5660 break; 5661 } 5662 } 5663 if (list_empty(&completions)) 5664 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL); 5665 spin_unlock_irqrestore(&phba->hbalock, flags); 5666 if (!list_empty(&completions)) { 5667 lpfc_sli_cancel_iocbs(phba, &completions, 5668 IOSTAT_LOCAL_REJECT, 5669 IOERR_SLI_ABORTED); 5670 } 5671 break; 5672 5673 case TYPE_EVT: 5674 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5675 break; 5676 5677 case TYPE_MBOX: 5678 /* Update the ext buf ctx state if needed */ 5679 5680 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT) 5681 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS; 5682 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5683 break; 5684 default: 5685 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5686 break; 5687 } 5688 5689 /* scsi transport fc fc_bsg_job_timeout expects a zero return code, 5690 * otherwise an error message will be displayed on the console 5691 * so always return success (zero) 5692 */ 5693 return rc; 5694 } 5695