/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
				    struct lpfc_cqe *);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = q->qe[q->host_index].wqe;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->entry_repost))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
	bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
	readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */

	return 0;
}

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

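/*
 * Illustrative note (not part of the original driver text): the queue
 * routines above and below all share one circular-buffer convention.
 * host_index is the slot the host will produce into next and hba_index
 * is the last slot the HBA is known to have consumed, so a queue is
 * treated as full when advancing the producer would collide with the
 * consumer:
 *
 *	next = (q->host_index + 1) % q->entry_count;
 *	if (next == q->hba_index)
 *		the queue is full and the put routines return an error
 *
 * With entry_count = 4, host_index = 3 and hba_index = 0, next wraps to 0
 * and the put fails even though one slot is still empty; that sacrificed
 * slot is what lets "full" and "empty" be distinguished without a count.
 */
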
/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = q->qe[q->host_index].mqe;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA, then this routine will return
 * NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (!bf_get_le32(lpfc_eqe_valid, eqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
		return NULL;

	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return eqe;
}

/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each event queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to
 * indicate that the host has finished processing the entries. The @arm
 * parameter indicates that the queue should be rearmed when ringing the
 * doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_eqe = q->qe[q->host_index].eqe;
		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA, then this routine will return
 * NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;

	/* If the next CQE is not valid then we are done */
	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
		return NULL;

	cqe = q->qe[q->hba_index].cqe;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return cqe;
}

/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to
 * indicate that the host has finished processing the entries. The @arm
 * parameter indicates that the queue should be rearmed when ringing the
 * doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the header queue.
 * @drqe: The Data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next available
 * entries on the header and data Receive Queues. This function will then ring
 * the Receive Queue Doorbell to signal the HBA to start processing the
 * Receive Queue Entries. This function returns the index that the rqe was
 * copied to if successful. If no entries are available on @hq then this
 * function will return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	put_index = hq->host_index;
	temp_hrqe = hq->qe[hq->host_index].rqe;
	temp_drqe = dq->qe[dq->host_index].rqe;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq->host_index != dq->host_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
	dq->host_index = ((dq->host_index + 1) % dq->entry_count);

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->entry_repost)) {
		doorbell.word0 = 0;
		bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
		       hq->entry_repost);
		bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
		writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
	}
	return put_index;
}

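/*
 * Illustrative note (not original driver text): SLI4 receive buffers are
 * posted in header/data pairs, so lpfc_sli4_rq_put() only accepts queues
 * whose host indexes move in lockstep, and it batches doorbell writes by
 * ringing the RQ doorbell once every hq->entry_repost postings rather
 * than once per entry. A caller sketch, assuming hrqe and drqe already
 * hold the DMA addresses of one header/data buffer pair:
 *
 *	rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
 *			      &hrqe, &drqe);
 *	if (rc < 0)
 *		no buffer was posted, take the error path
 *	else
 *		rc is the put index, usable as the buffer tag
 *
 * This mirrors how lpfc_sli_hbq_to_firmware_s4() further down stores the
 * returned index in hbq_buf->tag.
 */
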
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->cmdringaddr) +
			   pring->cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->rspringaddr) +
			   pring->rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object, else it returns NULL.
 **/
static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * __lpfc_set_rrq_active - set RRQ active bit in the ndlp's xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function is called with hbalock held.
 * The active bit is set in the ndlp's active rrq xri_bitmap. Allocates an
 * rrq struct and adds it to the active_rrq_list.
 *
 * returns 0 if an rrq slot was set up for this xri
 *         < 0 if rrq memory could not be obtained or a parameter was invalid.
 **/
static int
__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		      uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	struct lpfc_node_rrq *rrq;
	int empty;
	uint32_t did = 0;


	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}
	did = ndlp->nlp_DID;

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
		goto out;

	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (rrq) {
		rrq->send_rrq = send_rrq;
		rrq->xritag = xritag;
		rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
		rrq->ndlp = ndlp;
		rrq->nlp_DID = ndlp->nlp_DID;
		rrq->vport = ndlp->vport;
		rrq->rxid = rxid;
		empty = list_empty(&phba->active_rrq_list);
		rrq->send_rrq = send_rrq;
		list_add_tail(&rrq->list, &phba->active_rrq_list);
		if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) {
			phba->hba_flag |= HBA_RRQ_ACTIVE;
			if (empty)
				lpfc_worker_wake_up(phba);
		}
		return 0;
	}
out:
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, did, send_rrq);
	return -EINVAL;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

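/*
 * Illustrative summary (not original driver text) of the RRQ life cycle
 * implemented by the routines above and below: when an exchange must not
 * reuse its XRI for a full RA_TOV, __lpfc_set_rrq_active() sets the XRI's
 * bit in the node's xri_bitmap and queues an lpfc_node_rrq whose stop time
 * is roughly fc_ratov + 1 seconds away; lpfc_handle_rrq_active() later
 * walks active_rrq_list, sends the RRQ ELS where requested, and
 * lpfc_clr_rrq_active() clears the bit and returns the entry to
 * phba->rrq_pool. __lpfc_sli_get_sglq() consults the same bitmap so an
 * XRI with a pending RRQ is skipped when allocating an SGL for that node.
 */
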
/**
 * lpfc_handle_rrq_active - Checks if an RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function takes the hbalock. This function
 * checks if stop_time (ratov from setting rrq active) has
 * been reached; if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + HZ * (phba->fc_ratov + 1);
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if (!list_empty(&phba->active_rrq_list))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq)
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		else if (lpfc_send_rrq(phba, rrq)) {
			/* if the rrq could not be sent, clear it here;
			 * otherwise the RRQ completion handler will clear
			 * the bit in the xri_bitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The targets DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQs for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_nodelist structure.
 *
 * If @ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If @ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_cleanup_wt_rrqs - Remove all rrqs from the active list.
 * @phba: Pointer to HBA context object.
 *
 * Remove all rrqs from the phba->active_rrq_list and free them by
 * calling lpfc_clr_rrq_active.
 *
 **/
void
lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + HZ * (phba->fc_ratov * 2);
	list_splice_init(&phba->active_rrq_list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
	if (!list_empty(&phba->active_rrq_list))
		mod_timer(&phba->rrq_tmr, next_time);
}


/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Targets nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 if the rrq was activated for this xri
 *         < 0 if no memory was available or the ndlp was invalid.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	int ret;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	ret = __lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}

/**
 * __lpfc_sli_get_sglq - Allocates a sglq object from the sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with hbalock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty the allocation is successful and it returns a pointer
 * to the newly allocated sglq object, else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->iocb_flag & LPFC_IO_LIBDFC))
		ndlp = piocbq->context_un.ndlp;
	else
		ndlp = piocbq->context1;

	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			return NULL;
		if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.abts_sgl_list_lock, iflag);
		} else {
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_sgl_list);

			/* Check if TXQ queue needs to be serviced */
			if (pring->txq_cnt)
				lpfc_worker_wake_up(phba);
		}
	}


	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}


/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);

		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}

/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}

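/*
 * Illustrative usage sketch (not original driver text): the ring event
 * handlers use the type returned above to pick a disposition for each
 * completed entry, roughly along these lines (the CMD_IOCB_MASK masking
 * is an assumption here; the exact caller code lives outside this excerpt):
 *
 *	switch (lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK)) {
 *	case LPFC_SOL_IOCB:
 *		match the response to a command on the txcmplq by iotag
 *		break;
 *	case LPFC_UNSOL_IOCB:
 *		hand the received sequence to the registered ULP handler
 *		break;
 *	case LPFC_ABORT_IOCB:
 *	case LPFC_UNKNOWN_IOCB:
 *		log or drop the entry
 *		break;
 *	}
 */
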
/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_Q;
	pring->txcmplq_cnt++;
	if (pring->txcmplq_cnt > pring->txcmplq_max)
		pring->txcmplq_max = pring->txcmplq_cnt;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		if (!piocb->vport)
			BUG();
		else
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies + HZ * (phba->fc_ratov << 1));
	}


	return 0;
}

/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get the next
 * iocb in the txq of the given ring. If there is any iocb in
 * the txq, the function returns the first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	if (cmd_iocb != NULL)
		pring->txq_cnt--;
	return cmd_iocb;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * the iocb slot returned by the function is not guaranteed to be available.
 * The function returns a pointer to the next available iocb slot if there
 * is an available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->numCiocb;

	if ((pring->next_cmdidx == pring->cmdidx) &&
	    (++pring->next_cmdidx >= max_cmd_idx))
		pring->next_cmdidx = 0;

	if (unlikely(pring->local_getidx == pring->next_cmdidx)) {

		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->local_getidx, max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->local_getidx == pring->next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}

/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kzalloc(new_len * sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}

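/*
 * Illustrative note (not original driver text): iotag 0 is reserved as
 * "no tag", so a completion can be matched back to its command with a
 * plain array lookup, conceptually:
 *
 *	if (iotag != 0 && iotag <= psli->last_iotag)
 *		cmdiocb = psli->iocbq_lookup[iotag];
 *
 * When the table is exhausted, lpfc_sli_next_iotag() grows it by
 * LPFC_IOCBQ_LOOKUP_INCREMENT entries with the hbalock dropped during
 * the allocation and rechecks the length under the lock before swapping
 * the arrays, which is why the "highly improbable case" branch exists.
 */
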
1490 */ 1491 if (nextiocb->iocb_cmpl) 1492 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 1493 else 1494 __lpfc_sli_release_iocbq(phba, nextiocb); 1495 1496 /* 1497 * Let the HBA know what IOCB slot will be the next one the 1498 * driver will put a command into. 1499 */ 1500 pring->cmdidx = pring->next_cmdidx; 1501 writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); 1502 } 1503 1504 /** 1505 * lpfc_sli_update_full_ring - Update the chip attention register 1506 * @phba: Pointer to HBA context object. 1507 * @pring: Pointer to driver SLI ring object. 1508 * 1509 * The caller is not required to hold any lock for calling this function. 1510 * This function updates the chip attention bits for the ring to inform firmware 1511 * that there are pending work to be done for this ring and requests an 1512 * interrupt when there is space available in the ring. This function is 1513 * called when the driver is unable to post more iocbs to the ring due 1514 * to unavailability of space in the ring. 1515 **/ 1516 static void 1517 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1518 { 1519 int ringno = pring->ringno; 1520 1521 pring->flag |= LPFC_CALL_RING_AVAILABLE; 1522 1523 wmb(); 1524 1525 /* 1526 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register. 1527 * The HBA will tell us when an IOCB entry is available. 1528 */ 1529 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr); 1530 readl(phba->CAregaddr); /* flush */ 1531 1532 pring->stats.iocb_cmd_full++; 1533 } 1534 1535 /** 1536 * lpfc_sli_update_ring - Update chip attention register 1537 * @phba: Pointer to HBA context object. 1538 * @pring: Pointer to driver SLI ring object. 1539 * 1540 * This function updates the chip attention register bit for the 1541 * given ring to inform HBA that there is more work to be done 1542 * in this ring. The caller is not required to hold any lock. 1543 **/ 1544 static void 1545 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1546 { 1547 int ringno = pring->ringno; 1548 1549 /* 1550 * Tell the HBA that there is work to do in this ring. 1551 */ 1552 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) { 1553 wmb(); 1554 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); 1555 readl(phba->CAregaddr); /* flush */ 1556 } 1557 } 1558 1559 /** 1560 * lpfc_sli_resume_iocb - Process iocbs in the txq 1561 * @phba: Pointer to HBA context object. 1562 * @pring: Pointer to driver SLI ring object. 1563 * 1564 * This function is called with hbalock held to post pending iocbs 1565 * in the txq to the firmware. This function is called when driver 1566 * detects space available in the ring. 1567 **/ 1568 static void 1569 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1570 { 1571 IOCB_t *iocb; 1572 struct lpfc_iocbq *nextiocb; 1573 1574 /* 1575 * Check to see if: 1576 * (a) there is anything on the txq to send 1577 * (b) link is up 1578 * (c) link attention events can be processed (fcp ring only) 1579 * (d) IOCB processing is not blocked by the outstanding mbox command. 
1580 */ 1581 if (pring->txq_cnt && 1582 lpfc_is_link_up(phba) && 1583 (pring->ringno != phba->sli.fcp_ring || 1584 phba->sli.sli_flag & LPFC_PROCESS_LA)) { 1585 1586 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 1587 (nextiocb = lpfc_sli_ringtx_get(phba, pring))) 1588 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 1589 1590 if (iocb) 1591 lpfc_sli_update_ring(phba, pring); 1592 else 1593 lpfc_sli_update_full_ring(phba, pring); 1594 } 1595 1596 return; 1597 } 1598 1599 /** 1600 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ 1601 * @phba: Pointer to HBA context object. 1602 * @hbqno: HBQ number. 1603 * 1604 * This function is called with hbalock held to get the next 1605 * available slot for the given HBQ. If there is free slot 1606 * available for the HBQ it will return pointer to the next available 1607 * HBQ entry else it will return NULL. 1608 **/ 1609 static struct lpfc_hbq_entry * 1610 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) 1611 { 1612 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1613 1614 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && 1615 ++hbqp->next_hbqPutIdx >= hbqp->entry_count) 1616 hbqp->next_hbqPutIdx = 0; 1617 1618 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { 1619 uint32_t raw_index = phba->hbq_get[hbqno]; 1620 uint32_t getidx = le32_to_cpu(raw_index); 1621 1622 hbqp->local_hbqGetIdx = getidx; 1623 1624 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { 1625 lpfc_printf_log(phba, KERN_ERR, 1626 LOG_SLI | LOG_VPORT, 1627 "1802 HBQ %d: local_hbqGetIdx " 1628 "%u is > than hbqp->entry_count %u\n", 1629 hbqno, hbqp->local_hbqGetIdx, 1630 hbqp->entry_count); 1631 1632 phba->link_state = LPFC_HBA_ERROR; 1633 return NULL; 1634 } 1635 1636 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) 1637 return NULL; 1638 } 1639 1640 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt + 1641 hbqp->hbqPutIdx; 1642 } 1643 1644 /** 1645 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers 1646 * @phba: Pointer to HBA context object. 1647 * 1648 * This function is called with no lock held to free all the 1649 * hbq buffers while uninitializing the SLI interface. It also 1650 * frees the HBQ buffers returned by the firmware but not yet 1651 * processed by the upper layers. 
1652 **/ 1653 void 1654 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) 1655 { 1656 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 1657 struct hbq_dmabuf *hbq_buf; 1658 unsigned long flags; 1659 int i, hbq_count; 1660 uint32_t hbqno; 1661 1662 hbq_count = lpfc_sli_hbq_count(); 1663 /* Return all memory used by all HBQs */ 1664 spin_lock_irqsave(&phba->hbalock, flags); 1665 for (i = 0; i < hbq_count; ++i) { 1666 list_for_each_entry_safe(dmabuf, next_dmabuf, 1667 &phba->hbqs[i].hbq_buffer_list, list) { 1668 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1669 list_del(&hbq_buf->dbuf.list); 1670 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); 1671 } 1672 phba->hbqs[i].buffer_count = 0; 1673 } 1674 /* Return all HBQ buffer that are in-fly */ 1675 list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list, 1676 list) { 1677 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1678 list_del(&hbq_buf->dbuf.list); 1679 if (hbq_buf->tag == -1) { 1680 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) 1681 (phba, hbq_buf); 1682 } else { 1683 hbqno = hbq_buf->tag >> 16; 1684 if (hbqno >= LPFC_MAX_HBQS) 1685 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) 1686 (phba, hbq_buf); 1687 else 1688 (phba->hbqs[hbqno].hbq_free_buffer)(phba, 1689 hbq_buf); 1690 } 1691 } 1692 1693 /* Mark the HBQs not in use */ 1694 phba->hbq_in_use = 0; 1695 spin_unlock_irqrestore(&phba->hbalock, flags); 1696 } 1697 1698 /** 1699 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware 1700 * @phba: Pointer to HBA context object. 1701 * @hbqno: HBQ number. 1702 * @hbq_buf: Pointer to HBQ buffer. 1703 * 1704 * This function is called with the hbalock held to post a 1705 * hbq buffer to the firmware. If the function finds an empty 1706 * slot in the HBQ, it will post the buffer. The function will return 1707 * pointer to the hbq entry if it successfully post the buffer 1708 * else it will return NULL. 1709 **/ 1710 static int 1711 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 1712 struct hbq_dmabuf *hbq_buf) 1713 { 1714 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf); 1715 } 1716 1717 /** 1718 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware 1719 * @phba: Pointer to HBA context object. 1720 * @hbqno: HBQ number. 1721 * @hbq_buf: Pointer to HBQ buffer. 1722 * 1723 * This function is called with the hbalock held to post a hbq buffer to the 1724 * firmware. If the function finds an empty slot in the HBQ, it will post the 1725 * buffer and place it on the hbq_buffer_list. The function will return zero if 1726 * it successfully post the buffer else it will return an error. 
1727 **/ 1728 static int 1729 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, 1730 struct hbq_dmabuf *hbq_buf) 1731 { 1732 struct lpfc_hbq_entry *hbqe; 1733 dma_addr_t physaddr = hbq_buf->dbuf.phys; 1734 1735 /* Get next HBQ entry slot to use */ 1736 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); 1737 if (hbqe) { 1738 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1739 1740 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 1741 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr)); 1742 hbqe->bde.tus.f.bdeSize = hbq_buf->size; 1743 hbqe->bde.tus.f.bdeFlags = 0; 1744 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w); 1745 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag); 1746 /* Sync SLIM */ 1747 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx; 1748 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno); 1749 /* flush */ 1750 readl(phba->hbq_put + hbqno); 1751 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); 1752 return 0; 1753 } else 1754 return -ENOMEM; 1755 } 1756 1757 /** 1758 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware 1759 * @phba: Pointer to HBA context object. 1760 * @hbqno: HBQ number. 1761 * @hbq_buf: Pointer to HBQ buffer. 1762 * 1763 * This function is called with the hbalock held to post an RQE to the SLI4 1764 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to 1765 * the hbq_buffer_list and return zero, otherwise it will return an error. 1766 **/ 1767 static int 1768 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, 1769 struct hbq_dmabuf *hbq_buf) 1770 { 1771 int rc; 1772 struct lpfc_rqe hrqe; 1773 struct lpfc_rqe drqe; 1774 1775 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); 1776 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); 1777 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); 1778 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys); 1779 rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 1780 &hrqe, &drqe); 1781 if (rc < 0) 1782 return rc; 1783 hbq_buf->tag = rc; 1784 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list); 1785 return 0; 1786 } 1787 1788 /* HBQ for ELS and CT traffic. */ 1789 static struct lpfc_hbq_init lpfc_els_hbq = { 1790 .rn = 1, 1791 .entry_count = 256, 1792 .mask_count = 0, 1793 .profile = 0, 1794 .ring_mask = (1 << LPFC_ELS_RING), 1795 .buffer_count = 0, 1796 .init_count = 40, 1797 .add_count = 40, 1798 }; 1799 1800 /* HBQ for the extra ring if needed */ 1801 static struct lpfc_hbq_init lpfc_extra_hbq = { 1802 .rn = 1, 1803 .entry_count = 200, 1804 .mask_count = 0, 1805 .profile = 0, 1806 .ring_mask = (1 << LPFC_EXTRA_RING), 1807 .buffer_count = 0, 1808 .init_count = 0, 1809 .add_count = 5, 1810 }; 1811 1812 /* Array of HBQs */ 1813 struct lpfc_hbq_init *lpfc_hbq_defs[] = { 1814 &lpfc_els_hbq, 1815 &lpfc_extra_hbq, 1816 }; 1817 1818 /** 1819 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ 1820 * @phba: Pointer to HBA context object. 1821 * @hbqno: HBQ number. 1822 * @count: Number of HBQ buffers to be posted. 1823 * 1824 * This function is called with no lock held to post more hbq buffers to the 1825 * given HBQ. The function returns the number of HBQ buffers successfully 1826 * posted. 
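 *
 * Each buffer handed to the firmware is tagged before posting; as in the
 * body below, the HBQ number sits in the upper 16 bits and the buffer
 * index in the lower 16 bits, which lpfc_sli_hbqbuf_find() later decodes
 * with (tag >> 16):
 *
 *     hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | (hbqno << 16));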
1827 **/ 1828 static int 1829 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) 1830 { 1831 uint32_t i, posted = 0; 1832 unsigned long flags; 1833 struct hbq_dmabuf *hbq_buffer; 1834 LIST_HEAD(hbq_buf_list); 1835 if (!phba->hbqs[hbqno].hbq_alloc_buffer) 1836 return 0; 1837 1838 if ((phba->hbqs[hbqno].buffer_count + count) > 1839 lpfc_hbq_defs[hbqno]->entry_count) 1840 count = lpfc_hbq_defs[hbqno]->entry_count - 1841 phba->hbqs[hbqno].buffer_count; 1842 if (!count) 1843 return 0; 1844 /* Allocate HBQ entries */ 1845 for (i = 0; i < count; i++) { 1846 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 1847 if (!hbq_buffer) 1848 break; 1849 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list); 1850 } 1851 /* Check whether HBQ is still in use */ 1852 spin_lock_irqsave(&phba->hbalock, flags); 1853 if (!phba->hbq_in_use) 1854 goto err; 1855 while (!list_empty(&hbq_buf_list)) { 1856 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 1857 dbuf.list); 1858 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 1859 (hbqno << 16)); 1860 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1861 phba->hbqs[hbqno].buffer_count++; 1862 posted++; 1863 } else 1864 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1865 } 1866 spin_unlock_irqrestore(&phba->hbalock, flags); 1867 return posted; 1868 err: 1869 spin_unlock_irqrestore(&phba->hbalock, flags); 1870 while (!list_empty(&hbq_buf_list)) { 1871 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 1872 dbuf.list); 1873 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1874 } 1875 return 0; 1876 } 1877 1878 /** 1879 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware 1880 * @phba: Pointer to HBA context object. 1881 * @qno: HBQ number. 1882 * 1883 * This function posts more buffers to the HBQ. This function 1884 * is called with no lock held. The function returns the number of HBQ entries 1885 * successfully allocated. 1886 **/ 1887 int 1888 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 1889 { 1890 if (phba->sli_rev == LPFC_SLI_REV4) 1891 return 0; 1892 else 1893 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1894 lpfc_hbq_defs[qno]->add_count); 1895 } 1896 1897 /** 1898 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ 1899 * @phba: Pointer to HBA context object. 1900 * @qno: HBQ queue number. 1901 * 1902 * This function is called from SLI initialization code path with 1903 * no lock held to post initial HBQ buffers to firmware. The 1904 * function returns the number of HBQ entries successfully allocated. 1905 **/ 1906 static int 1907 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) 1908 { 1909 if (phba->sli_rev == LPFC_SLI_REV4) 1910 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1911 lpfc_hbq_defs[qno]->entry_count); 1912 else 1913 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1914 lpfc_hbq_defs[qno]->init_count); 1915 } 1916 1917 /** 1918 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list 1919 * @phba: Pointer to HBA context object. 1920 * @hbqno: HBQ number. 1921 * 1922 * This function removes the first hbq buffer on an hbq list and returns a 1923 * pointer to that buffer. If it finds no buffers on the list it returns NULL. 
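 *
 * The list passed in holds struct lpfc_dmabuf entries (the dbuf member of
 * struct hbq_dmabuf), such as a per-HBQ buffer list; a hypothetical call
 * could look like:
 *
 *     hbq_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[hbqno].hbq_buffer_list);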
1924 **/ 1925 static struct hbq_dmabuf * 1926 lpfc_sli_hbqbuf_get(struct list_head *rb_list) 1927 { 1928 struct lpfc_dmabuf *d_buf; 1929 1930 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list); 1931 if (!d_buf) 1932 return NULL; 1933 return container_of(d_buf, struct hbq_dmabuf, dbuf); 1934 } 1935 1936 /** 1937 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag 1938 * @phba: Pointer to HBA context object. 1939 * @tag: Tag of the hbq buffer. 1940 * 1941 * This function is called with hbalock held. This function searches 1942 * for the hbq buffer associated with the given tag in the hbq buffer 1943 * list. If it finds the hbq buffer, it returns the hbq_buffer other wise 1944 * it returns NULL. 1945 **/ 1946 static struct hbq_dmabuf * 1947 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) 1948 { 1949 struct lpfc_dmabuf *d_buf; 1950 struct hbq_dmabuf *hbq_buf; 1951 uint32_t hbqno; 1952 1953 hbqno = tag >> 16; 1954 if (hbqno >= LPFC_MAX_HBQS) 1955 return NULL; 1956 1957 spin_lock_irq(&phba->hbalock); 1958 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { 1959 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 1960 if (hbq_buf->tag == tag) { 1961 spin_unlock_irq(&phba->hbalock); 1962 return hbq_buf; 1963 } 1964 } 1965 spin_unlock_irq(&phba->hbalock); 1966 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 1967 "1803 Bad hbq tag. Data: x%x x%x\n", 1968 tag, phba->hbqs[tag >> 16].buffer_count); 1969 return NULL; 1970 } 1971 1972 /** 1973 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware 1974 * @phba: Pointer to HBA context object. 1975 * @hbq_buffer: Pointer to HBQ buffer. 1976 * 1977 * This function is called with hbalock. This function gives back 1978 * the hbq buffer to firmware. If the HBQ does not have space to 1979 * post the buffer, it will free the buffer. 1980 **/ 1981 void 1982 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) 1983 { 1984 uint32_t hbqno; 1985 1986 if (hbq_buffer) { 1987 hbqno = hbq_buffer->tag >> 16; 1988 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) 1989 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1990 } 1991 } 1992 1993 /** 1994 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox 1995 * @mbxCommand: mailbox command code. 1996 * 1997 * This function is called by the mailbox event handler function to verify 1998 * that the completed mailbox command is a legitimate mailbox command. If the 1999 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN 2000 * and the mailbox event handler will take the HBA offline. 
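 *
 * The mailbox event handler in this file uses it as a fatal-error check,
 * along the lines of:
 *
 *     if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == MBX_SHUTDOWN) {
 *             phba->link_state = LPFC_HBA_ERROR;
 *             ...
 *     }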
2001 **/ 2002 static int 2003 lpfc_sli_chk_mbx_command(uint8_t mbxCommand) 2004 { 2005 uint8_t ret; 2006 2007 switch (mbxCommand) { 2008 case MBX_LOAD_SM: 2009 case MBX_READ_NV: 2010 case MBX_WRITE_NV: 2011 case MBX_WRITE_VPARMS: 2012 case MBX_RUN_BIU_DIAG: 2013 case MBX_INIT_LINK: 2014 case MBX_DOWN_LINK: 2015 case MBX_CONFIG_LINK: 2016 case MBX_CONFIG_RING: 2017 case MBX_RESET_RING: 2018 case MBX_READ_CONFIG: 2019 case MBX_READ_RCONFIG: 2020 case MBX_READ_SPARM: 2021 case MBX_READ_STATUS: 2022 case MBX_READ_RPI: 2023 case MBX_READ_XRI: 2024 case MBX_READ_REV: 2025 case MBX_READ_LNK_STAT: 2026 case MBX_REG_LOGIN: 2027 case MBX_UNREG_LOGIN: 2028 case MBX_CLEAR_LA: 2029 case MBX_DUMP_MEMORY: 2030 case MBX_DUMP_CONTEXT: 2031 case MBX_RUN_DIAGS: 2032 case MBX_RESTART: 2033 case MBX_UPDATE_CFG: 2034 case MBX_DOWN_LOAD: 2035 case MBX_DEL_LD_ENTRY: 2036 case MBX_RUN_PROGRAM: 2037 case MBX_SET_MASK: 2038 case MBX_SET_VARIABLE: 2039 case MBX_UNREG_D_ID: 2040 case MBX_KILL_BOARD: 2041 case MBX_CONFIG_FARP: 2042 case MBX_BEACON: 2043 case MBX_LOAD_AREA: 2044 case MBX_RUN_BIU_DIAG64: 2045 case MBX_CONFIG_PORT: 2046 case MBX_READ_SPARM64: 2047 case MBX_READ_RPI64: 2048 case MBX_REG_LOGIN64: 2049 case MBX_READ_TOPOLOGY: 2050 case MBX_WRITE_WWN: 2051 case MBX_SET_DEBUG: 2052 case MBX_LOAD_EXP_ROM: 2053 case MBX_ASYNCEVT_ENABLE: 2054 case MBX_REG_VPI: 2055 case MBX_UNREG_VPI: 2056 case MBX_HEARTBEAT: 2057 case MBX_PORT_CAPABILITIES: 2058 case MBX_PORT_IOV_CONTROL: 2059 case MBX_SLI4_CONFIG: 2060 case MBX_SLI4_REQ_FTRS: 2061 case MBX_REG_FCFI: 2062 case MBX_UNREG_FCFI: 2063 case MBX_REG_VFI: 2064 case MBX_UNREG_VFI: 2065 case MBX_INIT_VPI: 2066 case MBX_INIT_VFI: 2067 case MBX_RESUME_RPI: 2068 case MBX_READ_EVENT_LOG_STATUS: 2069 case MBX_READ_EVENT_LOG: 2070 case MBX_SECURITY_MGMT: 2071 case MBX_AUTH_PORT: 2072 ret = mbxCommand; 2073 break; 2074 default: 2075 ret = MBX_SHUTDOWN; 2076 break; 2077 } 2078 return ret; 2079 } 2080 2081 /** 2082 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler 2083 * @phba: Pointer to HBA context object. 2084 * @pmboxq: Pointer to mailbox command. 2085 * 2086 * This is completion handler function for mailbox commands issued from 2087 * lpfc_sli_issue_mbox_wait function. This function is called by the 2088 * mailbox event handler function with no lock held. This function 2089 * will wake up thread waiting on the wait queue pointed by context1 2090 * of the mailbox. 2091 **/ 2092 void 2093 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 2094 { 2095 wait_queue_head_t *pdone_q; 2096 unsigned long drvr_flag; 2097 2098 /* 2099 * If pdone_q is empty, the driver thread gave up waiting and 2100 * continued running. 2101 */ 2102 pmboxq->mbox_flag |= LPFC_MBX_WAKE; 2103 spin_lock_irqsave(&phba->hbalock, drvr_flag); 2104 pdone_q = (wait_queue_head_t *) pmboxq->context1; 2105 if (pdone_q) 2106 wake_up_interruptible(pdone_q); 2107 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2108 return; 2109 } 2110 2111 2112 /** 2113 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler 2114 * @phba: Pointer to HBA context object. 2115 * @pmb: Pointer to mailbox object. 2116 * 2117 * This function is the default mailbox completion handler. It 2118 * frees the memory resources associated with the completed mailbox 2119 * command. If the completed command is a REG_LOGIN mailbox command, 2120 * this function will issue a UREG_LOGIN to re-claim the RPI. 
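 *
 * A mailbox that needs no special completion handling is typically queued
 * with this handler installed, as the RPI cleanup path in the body itself
 * does:
 *
 *     pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *     rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);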
2121 **/ 2122 void 2123 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2124 { 2125 struct lpfc_vport *vport = pmb->vport; 2126 struct lpfc_dmabuf *mp; 2127 struct lpfc_nodelist *ndlp; 2128 struct Scsi_Host *shost; 2129 uint16_t rpi, vpi; 2130 int rc; 2131 2132 mp = (struct lpfc_dmabuf *) (pmb->context1); 2133 2134 if (mp) { 2135 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2136 kfree(mp); 2137 } 2138 2139 /* 2140 * If a REG_LOGIN succeeded after node is destroyed or node 2141 * is in re-discovery driver need to cleanup the RPI. 2142 */ 2143 if (!(phba->pport->load_flag & FC_UNLOADING) && 2144 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 && 2145 !pmb->u.mb.mbxStatus) { 2146 rpi = pmb->u.mb.un.varWords[0]; 2147 vpi = pmb->u.mb.un.varRegLogin.vpi; 2148 lpfc_unreg_login(phba, vpi, rpi, pmb); 2149 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2150 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2151 if (rc != MBX_NOT_FINISHED) 2152 return; 2153 } 2154 2155 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && 2156 !(phba->pport->load_flag & FC_UNLOADING) && 2157 !pmb->u.mb.mbxStatus) { 2158 shost = lpfc_shost_from_vport(vport); 2159 spin_lock_irq(shost->host_lock); 2160 vport->vpi_state |= LPFC_VPI_REGISTERED; 2161 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 2162 spin_unlock_irq(shost->host_lock); 2163 } 2164 2165 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 2166 ndlp = (struct lpfc_nodelist *)pmb->context2; 2167 lpfc_nlp_put(ndlp); 2168 pmb->context2 = NULL; 2169 } 2170 2171 /* Check security permission status on INIT_LINK mailbox command */ 2172 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) && 2173 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) 2174 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2175 "2860 SLI authentication is required " 2176 "for INIT_LINK but has not done yet\n"); 2177 2178 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) 2179 lpfc_sli4_mbox_cmd_free(phba, pmb); 2180 else 2181 mempool_free(pmb, phba->mbox_mem_pool); 2182 } 2183 2184 /** 2185 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware 2186 * @phba: Pointer to HBA context object. 2187 * 2188 * This function is called with no lock held. This function processes all 2189 * the completed mailbox commands and gives it to upper layers. The interrupt 2190 * service routine processes mailbox completion interrupt and adds completed 2191 * mailbox commands to the mboxq_cmpl queue and signals the worker thread. 2192 * Worker thread call lpfc_sli_handle_mb_event, which will return the 2193 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This 2194 * function returns the mailbox commands to the upper layer by calling the 2195 * completion handler function of each mailbox. 
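 *
 * In outline, the body below drains the completion queue under the hbalock
 * and then invokes each handler, roughly:
 *
 *     list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
 *     ...
 *     if (pmb->mbox_cmpl)
 *             pmb->mbox_cmpl(phba, pmb);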
2196 **/ 2197 int 2198 lpfc_sli_handle_mb_event(struct lpfc_hba *phba) 2199 { 2200 MAILBOX_t *pmbox; 2201 LPFC_MBOXQ_t *pmb; 2202 int rc; 2203 LIST_HEAD(cmplq); 2204 2205 phba->sli.slistat.mbox_event++; 2206 2207 /* Get all completed mailboxe buffers into the cmplq */ 2208 spin_lock_irq(&phba->hbalock); 2209 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq); 2210 spin_unlock_irq(&phba->hbalock); 2211 2212 /* Get a Mailbox buffer to setup mailbox commands for callback */ 2213 do { 2214 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list); 2215 if (pmb == NULL) 2216 break; 2217 2218 pmbox = &pmb->u.mb; 2219 2220 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 2221 if (pmb->vport) { 2222 lpfc_debugfs_disc_trc(pmb->vport, 2223 LPFC_DISC_TRC_MBOX_VPORT, 2224 "MBOX cmpl vport: cmd:x%x mb:x%x x%x", 2225 (uint32_t)pmbox->mbxCommand, 2226 pmbox->un.varWords[0], 2227 pmbox->un.varWords[1]); 2228 } 2229 else { 2230 lpfc_debugfs_disc_trc(phba->pport, 2231 LPFC_DISC_TRC_MBOX, 2232 "MBOX cmpl: cmd:x%x mb:x%x x%x", 2233 (uint32_t)pmbox->mbxCommand, 2234 pmbox->un.varWords[0], 2235 pmbox->un.varWords[1]); 2236 } 2237 } 2238 2239 /* 2240 * It is a fatal error if unknown mbox command completion. 2241 */ 2242 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == 2243 MBX_SHUTDOWN) { 2244 /* Unknown mailbox command compl */ 2245 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2246 "(%d):0323 Unknown Mailbox command " 2247 "x%x (x%x/x%x) Cmpl\n", 2248 pmb->vport ? pmb->vport->vpi : 0, 2249 pmbox->mbxCommand, 2250 lpfc_sli_config_mbox_subsys_get(phba, 2251 pmb), 2252 lpfc_sli_config_mbox_opcode_get(phba, 2253 pmb)); 2254 phba->link_state = LPFC_HBA_ERROR; 2255 phba->work_hs = HS_FFER3; 2256 lpfc_handle_eratt(phba); 2257 continue; 2258 } 2259 2260 if (pmbox->mbxStatus) { 2261 phba->sli.slistat.mbox_stat_err++; 2262 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 2263 /* Mbox cmd cmpl error - RETRYing */ 2264 lpfc_printf_log(phba, KERN_INFO, 2265 LOG_MBOX | LOG_SLI, 2266 "(%d):0305 Mbox cmd cmpl " 2267 "error - RETRYing Data: x%x " 2268 "(x%x/x%x) x%x x%x x%x\n", 2269 pmb->vport ? pmb->vport->vpi : 0, 2270 pmbox->mbxCommand, 2271 lpfc_sli_config_mbox_subsys_get(phba, 2272 pmb), 2273 lpfc_sli_config_mbox_opcode_get(phba, 2274 pmb), 2275 pmbox->mbxStatus, 2276 pmbox->un.varWords[0], 2277 pmb->vport->port_state); 2278 pmbox->mbxStatus = 0; 2279 pmbox->mbxOwner = OWN_HOST; 2280 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2281 if (rc != MBX_NOT_FINISHED) 2282 continue; 2283 } 2284 } 2285 2286 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 2287 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2288 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " 2289 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 2290 pmb->vport ? pmb->vport->vpi : 0, 2291 pmbox->mbxCommand, 2292 lpfc_sli_config_mbox_subsys_get(phba, pmb), 2293 lpfc_sli_config_mbox_opcode_get(phba, pmb), 2294 pmb->mbox_cmpl, 2295 *((uint32_t *) pmbox), 2296 pmbox->un.varWords[0], 2297 pmbox->un.varWords[1], 2298 pmbox->un.varWords[2], 2299 pmbox->un.varWords[3], 2300 pmbox->un.varWords[4], 2301 pmbox->un.varWords[5], 2302 pmbox->un.varWords[6], 2303 pmbox->un.varWords[7]); 2304 2305 if (pmb->mbox_cmpl) 2306 pmb->mbox_cmpl(phba,pmb); 2307 } while (1); 2308 return 0; 2309 } 2310 2311 /** 2312 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag 2313 * @phba: Pointer to HBA context object. 2314 * @pring: Pointer to driver SLI ring object. 2315 * @tag: buffer tag. 2316 * 2317 * This function is called with no lock held. 
When QUE_BUFTAG_BIT bit 2318 * is set in the tag the buffer is posted for a particular exchange, 2319 * the function will return the buffer without replacing the buffer. 2320 * If the buffer is for unsolicited ELS or CT traffic, this function 2321 * returns the buffer and also posts another buffer to the firmware. 2322 **/ 2323 static struct lpfc_dmabuf * 2324 lpfc_sli_get_buff(struct lpfc_hba *phba, 2325 struct lpfc_sli_ring *pring, 2326 uint32_t tag) 2327 { 2328 struct hbq_dmabuf *hbq_entry; 2329 2330 if (tag & QUE_BUFTAG_BIT) 2331 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); 2332 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); 2333 if (!hbq_entry) 2334 return NULL; 2335 return &hbq_entry->dbuf; 2336 } 2337 2338 /** 2339 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence 2340 * @phba: Pointer to HBA context object. 2341 * @pring: Pointer to driver SLI ring object. 2342 * @saveq: Pointer to the iocbq struct representing the sequence starting frame. 2343 * @fch_r_ctl: the r_ctl for the first frame of the sequence. 2344 * @fch_type: the type for the first frame of the sequence. 2345 * 2346 * This function is called with no lock held. This function uses the r_ctl and 2347 * type of the received sequence to find the correct callback function to call 2348 * to process the sequence. 2349 **/ 2350 static int 2351 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2352 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, 2353 uint32_t fch_type) 2354 { 2355 int i; 2356 2357 /* unSolicited Responses */ 2358 if (pring->prt[0].profile) { 2359 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 2360 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, 2361 saveq); 2362 return 1; 2363 } 2364 /* We must search, based on rctl / type 2365 for the right routine */ 2366 for (i = 0; i < pring->num_mask; i++) { 2367 if ((pring->prt[i].rctl == fch_r_ctl) && 2368 (pring->prt[i].type == fch_type)) { 2369 if (pring->prt[i].lpfc_sli_rcv_unsol_event) 2370 (pring->prt[i].lpfc_sli_rcv_unsol_event) 2371 (phba, pring, saveq); 2372 return 1; 2373 } 2374 } 2375 return 0; 2376 } 2377 2378 /** 2379 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 2380 * @phba: Pointer to HBA context object. 2381 * @pring: Pointer to driver SLI ring object. 2382 * @saveq: Pointer to the unsolicited iocb. 2383 * 2384 * This function is called with no lock held by the ring event handler 2385 * when there is an unsolicited iocb posted to the response ring by the 2386 * firmware. This function gets the buffer associated with the iocbs 2387 * and calls the event handler for the ring. This function handles both 2388 * qring buffers and hbq buffers. 2389 * When the function returns 1 the caller can free the iocb object otherwise 2390 * upper layer functions will free the iocb objects. 
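 *
 * The slow-path handler in this file relies on that return value to decide
 * whether it still owns the iocb, roughly:
 *
 *     rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
 *     if (!rc)
 *             free_saveq = 0; /* upper layer will free it */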
2391 **/ 2392 static int 2393 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2394 struct lpfc_iocbq *saveq) 2395 { 2396 IOCB_t * irsp; 2397 WORD5 * w5p; 2398 uint32_t Rctl, Type; 2399 uint32_t match; 2400 struct lpfc_iocbq *iocbq; 2401 struct lpfc_dmabuf *dmzbuf; 2402 2403 match = 0; 2404 irsp = &(saveq->iocb); 2405 2406 if (irsp->ulpCommand == CMD_ASYNC_STATUS) { 2407 if (pring->lpfc_sli_rcv_async_status) 2408 pring->lpfc_sli_rcv_async_status(phba, pring, saveq); 2409 else 2410 lpfc_printf_log(phba, 2411 KERN_WARNING, 2412 LOG_SLI, 2413 "0316 Ring %d handler: unexpected " 2414 "ASYNC_STATUS iocb received evt_code " 2415 "0x%x\n", 2416 pring->ringno, 2417 irsp->un.asyncstat.evt_code); 2418 return 1; 2419 } 2420 2421 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && 2422 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { 2423 if (irsp->ulpBdeCount > 0) { 2424 dmzbuf = lpfc_sli_get_buff(phba, pring, 2425 irsp->un.ulpWord[3]); 2426 lpfc_in_buf_free(phba, dmzbuf); 2427 } 2428 2429 if (irsp->ulpBdeCount > 1) { 2430 dmzbuf = lpfc_sli_get_buff(phba, pring, 2431 irsp->unsli3.sli3Words[3]); 2432 lpfc_in_buf_free(phba, dmzbuf); 2433 } 2434 2435 if (irsp->ulpBdeCount > 2) { 2436 dmzbuf = lpfc_sli_get_buff(phba, pring, 2437 irsp->unsli3.sli3Words[7]); 2438 lpfc_in_buf_free(phba, dmzbuf); 2439 } 2440 2441 return 1; 2442 } 2443 2444 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2445 if (irsp->ulpBdeCount != 0) { 2446 saveq->context2 = lpfc_sli_get_buff(phba, pring, 2447 irsp->un.ulpWord[3]); 2448 if (!saveq->context2) 2449 lpfc_printf_log(phba, 2450 KERN_ERR, 2451 LOG_SLI, 2452 "0341 Ring %d Cannot find buffer for " 2453 "an unsolicited iocb. tag 0x%x\n", 2454 pring->ringno, 2455 irsp->un.ulpWord[3]); 2456 } 2457 if (irsp->ulpBdeCount == 2) { 2458 saveq->context3 = lpfc_sli_get_buff(phba, pring, 2459 irsp->unsli3.sli3Words[7]); 2460 if (!saveq->context3) 2461 lpfc_printf_log(phba, 2462 KERN_ERR, 2463 LOG_SLI, 2464 "0342 Ring %d Cannot find buffer for an" 2465 " unsolicited iocb. tag 0x%x\n", 2466 pring->ringno, 2467 irsp->unsli3.sli3Words[7]); 2468 } 2469 list_for_each_entry(iocbq, &saveq->list, list) { 2470 irsp = &(iocbq->iocb); 2471 if (irsp->ulpBdeCount != 0) { 2472 iocbq->context2 = lpfc_sli_get_buff(phba, pring, 2473 irsp->un.ulpWord[3]); 2474 if (!iocbq->context2) 2475 lpfc_printf_log(phba, 2476 KERN_ERR, 2477 LOG_SLI, 2478 "0343 Ring %d Cannot find " 2479 "buffer for an unsolicited iocb" 2480 ". tag 0x%x\n", pring->ringno, 2481 irsp->un.ulpWord[3]); 2482 } 2483 if (irsp->ulpBdeCount == 2) { 2484 iocbq->context3 = lpfc_sli_get_buff(phba, pring, 2485 irsp->unsli3.sli3Words[7]); 2486 if (!iocbq->context3) 2487 lpfc_printf_log(phba, 2488 KERN_ERR, 2489 LOG_SLI, 2490 "0344 Ring %d Cannot find " 2491 "buffer for an unsolicited " 2492 "iocb. 
tag 0x%x\n", 2493 pring->ringno, 2494 irsp->unsli3.sli3Words[7]); 2495 } 2496 } 2497 } 2498 if (irsp->ulpBdeCount != 0 && 2499 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || 2500 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { 2501 int found = 0; 2502 2503 /* search continue save q for same XRI */ 2504 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { 2505 if (iocbq->iocb.unsli3.rcvsli3.ox_id == 2506 saveq->iocb.unsli3.rcvsli3.ox_id) { 2507 list_add_tail(&saveq->list, &iocbq->list); 2508 found = 1; 2509 break; 2510 } 2511 } 2512 if (!found) 2513 list_add_tail(&saveq->clist, 2514 &pring->iocb_continue_saveq); 2515 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { 2516 list_del_init(&iocbq->clist); 2517 saveq = iocbq; 2518 irsp = &(saveq->iocb); 2519 } else 2520 return 0; 2521 } 2522 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || 2523 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || 2524 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { 2525 Rctl = FC_RCTL_ELS_REQ; 2526 Type = FC_TYPE_ELS; 2527 } else { 2528 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); 2529 Rctl = w5p->hcsw.Rctl; 2530 Type = w5p->hcsw.Type; 2531 2532 /* Firmware Workaround */ 2533 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 2534 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 2535 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 2536 Rctl = FC_RCTL_ELS_REQ; 2537 Type = FC_TYPE_ELS; 2538 w5p->hcsw.Rctl = Rctl; 2539 w5p->hcsw.Type = Type; 2540 } 2541 } 2542 2543 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) 2544 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2545 "0313 Ring %d handler: unexpected Rctl x%x " 2546 "Type x%x received\n", 2547 pring->ringno, Rctl, Type); 2548 2549 return 1; 2550 } 2551 2552 /** 2553 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb 2554 * @phba: Pointer to HBA context object. 2555 * @pring: Pointer to driver SLI ring object. 2556 * @prspiocb: Pointer to response iocb object. 2557 * 2558 * This function looks up the iocb_lookup table to get the command iocb 2559 * corresponding to the given response iocb using the iotag of the 2560 * response iocb. This function is called with the hbalock held. 2561 * This function returns the command iocb object if it finds the command 2562 * iocb else returns NULL. 2563 **/ 2564 static struct lpfc_iocbq * 2565 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, 2566 struct lpfc_sli_ring *pring, 2567 struct lpfc_iocbq *prspiocb) 2568 { 2569 struct lpfc_iocbq *cmd_iocb = NULL; 2570 uint16_t iotag; 2571 2572 iotag = prspiocb->iocb.ulpIoTag; 2573 2574 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2575 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2576 list_del_init(&cmd_iocb->list); 2577 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) { 2578 pring->txcmplq_cnt--; 2579 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q; 2580 } 2581 return cmd_iocb; 2582 } 2583 2584 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2585 "0317 iotag x%x is out off " 2586 "range: max iotag x%x wd0 x%x\n", 2587 iotag, phba->sli.last_iotag, 2588 *(((uint32_t *) &prspiocb->iocb) + 7)); 2589 return NULL; 2590 } 2591 2592 /** 2593 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag 2594 * @phba: Pointer to HBA context object. 2595 * @pring: Pointer to driver SLI ring object. 2596 * @iotag: IOCB tag. 2597 * 2598 * This function looks up the iocb_lookup table to get the command iocb 2599 * corresponding to the given iotag. This function is called with the 2600 * hbalock held. 
2601 * This function returns the command iocb object if it finds the command 2602 * iocb else returns NULL. 2603 **/ 2604 static struct lpfc_iocbq * 2605 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, 2606 struct lpfc_sli_ring *pring, uint16_t iotag) 2607 { 2608 struct lpfc_iocbq *cmd_iocb; 2609 2610 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2611 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2612 list_del_init(&cmd_iocb->list); 2613 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) { 2614 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q; 2615 pring->txcmplq_cnt--; 2616 } 2617 return cmd_iocb; 2618 } 2619 2620 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2621 "0372 iotag x%x is out off range: max iotag (x%x)\n", 2622 iotag, phba->sli.last_iotag); 2623 return NULL; 2624 } 2625 2626 /** 2627 * lpfc_sli_process_sol_iocb - process solicited iocb completion 2628 * @phba: Pointer to HBA context object. 2629 * @pring: Pointer to driver SLI ring object. 2630 * @saveq: Pointer to the response iocb to be processed. 2631 * 2632 * This function is called by the ring event handler for non-fcp 2633 * rings when there is a new response iocb in the response ring. 2634 * The caller is not required to hold any locks. This function 2635 * gets the command iocb associated with the response iocb and 2636 * calls the completion handler for the command iocb. If there 2637 * is no completion handler, the function will free the resources 2638 * associated with command iocb. If the response iocb is for 2639 * an already aborted command iocb, the status of the completion 2640 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. 2641 * This function always returns 1. 2642 **/ 2643 static int 2644 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2645 struct lpfc_iocbq *saveq) 2646 { 2647 struct lpfc_iocbq *cmdiocbp; 2648 int rc = 1; 2649 unsigned long iflag; 2650 2651 /* Based on the iotag field, get the cmd IOCB from the txcmplq */ 2652 spin_lock_irqsave(&phba->hbalock, iflag); 2653 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 2654 spin_unlock_irqrestore(&phba->hbalock, iflag); 2655 2656 if (cmdiocbp) { 2657 if (cmdiocbp->iocb_cmpl) { 2658 /* 2659 * If an ELS command failed send an event to mgmt 2660 * application. 2661 */ 2662 if (saveq->iocb.ulpStatus && 2663 (pring->ringno == LPFC_ELS_RING) && 2664 (cmdiocbp->iocb.ulpCommand == 2665 CMD_ELS_REQUEST64_CR)) 2666 lpfc_send_els_failure_event(phba, 2667 cmdiocbp, saveq); 2668 2669 /* 2670 * Post all ELS completions to the worker thread. 2671 * All other are passed to the completion callback. 2672 */ 2673 if (pring->ringno == LPFC_ELS_RING) { 2674 if ((phba->sli_rev < LPFC_SLI_REV4) && 2675 (cmdiocbp->iocb_flag & 2676 LPFC_DRIVER_ABORTED)) { 2677 spin_lock_irqsave(&phba->hbalock, 2678 iflag); 2679 cmdiocbp->iocb_flag &= 2680 ~LPFC_DRIVER_ABORTED; 2681 spin_unlock_irqrestore(&phba->hbalock, 2682 iflag); 2683 saveq->iocb.ulpStatus = 2684 IOSTAT_LOCAL_REJECT; 2685 saveq->iocb.un.ulpWord[4] = 2686 IOERR_SLI_ABORTED; 2687 2688 /* Firmware could still be in progress 2689 * of DMAing payload, so don't free data 2690 * buffer till after a hbeat. 
2691 */ 2692 spin_lock_irqsave(&phba->hbalock, 2693 iflag); 2694 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 2695 spin_unlock_irqrestore(&phba->hbalock, 2696 iflag); 2697 } 2698 if (phba->sli_rev == LPFC_SLI_REV4) { 2699 if (saveq->iocb_flag & 2700 LPFC_EXCHANGE_BUSY) { 2701 /* Set cmdiocb flag for the 2702 * exchange busy so sgl (xri) 2703 * will not be released until 2704 * the abort xri is received 2705 * from hba. 2706 */ 2707 spin_lock_irqsave( 2708 &phba->hbalock, iflag); 2709 cmdiocbp->iocb_flag |= 2710 LPFC_EXCHANGE_BUSY; 2711 spin_unlock_irqrestore( 2712 &phba->hbalock, iflag); 2713 } 2714 if (cmdiocbp->iocb_flag & 2715 LPFC_DRIVER_ABORTED) { 2716 /* 2717 * Clear LPFC_DRIVER_ABORTED 2718 * bit in case it was driver 2719 * initiated abort. 2720 */ 2721 spin_lock_irqsave( 2722 &phba->hbalock, iflag); 2723 cmdiocbp->iocb_flag &= 2724 ~LPFC_DRIVER_ABORTED; 2725 spin_unlock_irqrestore( 2726 &phba->hbalock, iflag); 2727 cmdiocbp->iocb.ulpStatus = 2728 IOSTAT_LOCAL_REJECT; 2729 cmdiocbp->iocb.un.ulpWord[4] = 2730 IOERR_ABORT_REQUESTED; 2731 /* 2732 * For SLI4, irsiocb contains 2733 * NO_XRI in sli_xritag, it 2734 * shall not affect releasing 2735 * sgl (xri) process. 2736 */ 2737 saveq->iocb.ulpStatus = 2738 IOSTAT_LOCAL_REJECT; 2739 saveq->iocb.un.ulpWord[4] = 2740 IOERR_SLI_ABORTED; 2741 spin_lock_irqsave( 2742 &phba->hbalock, iflag); 2743 saveq->iocb_flag |= 2744 LPFC_DELAY_MEM_FREE; 2745 spin_unlock_irqrestore( 2746 &phba->hbalock, iflag); 2747 } 2748 } 2749 } 2750 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 2751 } else 2752 lpfc_sli_release_iocbq(phba, cmdiocbp); 2753 } else { 2754 /* 2755 * Unknown initiating command based on the response iotag. 2756 * This could be the case on the ELS ring because of 2757 * lpfc_els_abort(). 2758 */ 2759 if (pring->ringno != LPFC_ELS_RING) { 2760 /* 2761 * Ring <ringno> handler: unexpected completion IoTag 2762 * <IoTag> 2763 */ 2764 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2765 "0322 Ring %d handler: " 2766 "unexpected completion IoTag x%x " 2767 "Data: x%x x%x x%x x%x\n", 2768 pring->ringno, 2769 saveq->iocb.ulpIoTag, 2770 saveq->iocb.ulpStatus, 2771 saveq->iocb.un.ulpWord[4], 2772 saveq->iocb.ulpCommand, 2773 saveq->iocb.ulpContext); 2774 } 2775 } 2776 2777 return rc; 2778 } 2779 2780 /** 2781 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler 2782 * @phba: Pointer to HBA context object. 2783 * @pring: Pointer to driver SLI ring object. 2784 * 2785 * This function is called from the iocb ring event handlers when 2786 * put pointer is ahead of the get pointer for a ring. This function signal 2787 * an error attention condition to the worker thread and the worker 2788 * thread will transition the HBA to offline state. 
2789 **/ 2790 static void 2791 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 2792 { 2793 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 2794 /* 2795 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 2796 * rsp ring <portRspMax> 2797 */ 2798 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2799 "0312 Ring %d handler: portRspPut %d " 2800 "is bigger than rsp ring %d\n", 2801 pring->ringno, le32_to_cpu(pgp->rspPutInx), 2802 pring->numRiocb); 2803 2804 phba->link_state = LPFC_HBA_ERROR; 2805 2806 /* 2807 * All error attention handlers are posted to 2808 * worker thread 2809 */ 2810 phba->work_ha |= HA_ERATT; 2811 phba->work_hs = HS_FFER3; 2812 2813 lpfc_worker_wake_up(phba); 2814 2815 return; 2816 } 2817 2818 /** 2819 * lpfc_poll_eratt - Error attention polling timer timeout handler 2820 * @ptr: Pointer to address of HBA context object. 2821 * 2822 * This function is invoked by the Error Attention polling timer when the 2823 * timer times out. It will check the SLI Error Attention register for 2824 * possible attention events. If so, it will post an Error Attention event 2825 * and wake up worker thread to process it. Otherwise, it will set up the 2826 * Error Attention polling timer for the next poll. 2827 **/ 2828 void lpfc_poll_eratt(unsigned long ptr) 2829 { 2830 struct lpfc_hba *phba; 2831 uint32_t eratt = 0; 2832 2833 phba = (struct lpfc_hba *)ptr; 2834 2835 /* Check chip HA register for error event */ 2836 eratt = lpfc_sli_check_eratt(phba); 2837 2838 if (eratt) 2839 /* Tell the worker thread there is work to do */ 2840 lpfc_worker_wake_up(phba); 2841 else 2842 /* Restart the timer for next eratt poll */ 2843 mod_timer(&phba->eratt_poll, jiffies + 2844 HZ * LPFC_ERATT_POLL_INTERVAL); 2845 return; 2846 } 2847 2848 2849 /** 2850 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring 2851 * @phba: Pointer to HBA context object. 2852 * @pring: Pointer to driver SLI ring object. 2853 * @mask: Host attention register mask for this ring. 2854 * 2855 * This function is called from the interrupt context when there is a ring 2856 * event for the fcp ring. The caller does not hold any lock. 2857 * The function processes each response iocb in the response ring until it 2858 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with 2859 * LE bit set. The function will call the completion handler of the command iocb 2860 * if the response iocb indicates a completion for a command iocb or it is 2861 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 2862 * function if this is an unsolicited iocb. 2863 * This routine presumes LPFC_FCP_RING handling and doesn't bother 2864 * to check it explicitly. 2865 */ 2866 int 2867 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 2868 struct lpfc_sli_ring *pring, uint32_t mask) 2869 { 2870 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 2871 IOCB_t *irsp = NULL; 2872 IOCB_t *entry = NULL; 2873 struct lpfc_iocbq *cmdiocbq = NULL; 2874 struct lpfc_iocbq rspiocbq; 2875 uint32_t status; 2876 uint32_t portRspPut, portRspMax; 2877 int rc = 1; 2878 lpfc_iocb_type type; 2879 unsigned long iflag; 2880 uint32_t rsp_cmpl = 0; 2881 2882 spin_lock_irqsave(&phba->hbalock, iflag); 2883 pring->stats.iocb_event++; 2884 2885 /* 2886 * The next available response entry should never exceed the maximum 2887 * entries. If it does, treat it as an adapter hardware error. 
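 * The put index is read from the little-endian port get/put area, hence
 * the le32_to_cpu() conversion below.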
2888 */ 2889 portRspMax = pring->numRiocb; 2890 portRspPut = le32_to_cpu(pgp->rspPutInx); 2891 if (unlikely(portRspPut >= portRspMax)) { 2892 lpfc_sli_rsp_pointers_error(phba, pring); 2893 spin_unlock_irqrestore(&phba->hbalock, iflag); 2894 return 1; 2895 } 2896 if (phba->fcp_ring_in_use) { 2897 spin_unlock_irqrestore(&phba->hbalock, iflag); 2898 return 1; 2899 } else 2900 phba->fcp_ring_in_use = 1; 2901 2902 rmb(); 2903 while (pring->rspidx != portRspPut) { 2904 /* 2905 * Fetch an entry off the ring and copy it into a local data 2906 * structure. The copy involves a byte-swap since the 2907 * network byte order and pci byte orders are different. 2908 */ 2909 entry = lpfc_resp_iocb(phba, pring); 2910 phba->last_completion_time = jiffies; 2911 2912 if (++pring->rspidx >= portRspMax) 2913 pring->rspidx = 0; 2914 2915 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 2916 (uint32_t *) &rspiocbq.iocb, 2917 phba->iocb_rsp_size); 2918 INIT_LIST_HEAD(&(rspiocbq.list)); 2919 irsp = &rspiocbq.iocb; 2920 2921 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 2922 pring->stats.iocb_rsp++; 2923 rsp_cmpl++; 2924 2925 if (unlikely(irsp->ulpStatus)) { 2926 /* 2927 * If resource errors reported from HBA, reduce 2928 * queuedepths of the SCSI device. 2929 */ 2930 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 2931 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 2932 spin_unlock_irqrestore(&phba->hbalock, iflag); 2933 phba->lpfc_rampdown_queue_depth(phba); 2934 spin_lock_irqsave(&phba->hbalock, iflag); 2935 } 2936 2937 /* Rsp ring <ringno> error: IOCB */ 2938 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2939 "0336 Rsp Ring %d error: IOCB Data: " 2940 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 2941 pring->ringno, 2942 irsp->un.ulpWord[0], 2943 irsp->un.ulpWord[1], 2944 irsp->un.ulpWord[2], 2945 irsp->un.ulpWord[3], 2946 irsp->un.ulpWord[4], 2947 irsp->un.ulpWord[5], 2948 *(uint32_t *)&irsp->un1, 2949 *((uint32_t *)&irsp->un1 + 1)); 2950 } 2951 2952 switch (type) { 2953 case LPFC_ABORT_IOCB: 2954 case LPFC_SOL_IOCB: 2955 /* 2956 * Idle exchange closed via ABTS from port. No iocb 2957 * resources need to be recovered. 2958 */ 2959 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 2960 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 2961 "0333 IOCB cmd 0x%x" 2962 " processed. 
Skipping" 2963 " completion\n", 2964 irsp->ulpCommand); 2965 break; 2966 } 2967 2968 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 2969 &rspiocbq); 2970 if (unlikely(!cmdiocbq)) 2971 break; 2972 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) 2973 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 2974 if (cmdiocbq->iocb_cmpl) { 2975 spin_unlock_irqrestore(&phba->hbalock, iflag); 2976 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 2977 &rspiocbq); 2978 spin_lock_irqsave(&phba->hbalock, iflag); 2979 } 2980 break; 2981 case LPFC_UNSOL_IOCB: 2982 spin_unlock_irqrestore(&phba->hbalock, iflag); 2983 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 2984 spin_lock_irqsave(&phba->hbalock, iflag); 2985 break; 2986 default: 2987 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 2988 char adaptermsg[LPFC_MAX_ADPTMSG]; 2989 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 2990 memcpy(&adaptermsg[0], (uint8_t *) irsp, 2991 MAX_MSG_DATA); 2992 dev_warn(&((phba->pcidev)->dev), 2993 "lpfc%d: %s\n", 2994 phba->brd_no, adaptermsg); 2995 } else { 2996 /* Unknown IOCB command */ 2997 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2998 "0334 Unknown IOCB command " 2999 "Data: x%x, x%x x%x x%x x%x\n", 3000 type, irsp->ulpCommand, 3001 irsp->ulpStatus, 3002 irsp->ulpIoTag, 3003 irsp->ulpContext); 3004 } 3005 break; 3006 } 3007 3008 /* 3009 * The response IOCB has been processed. Update the ring 3010 * pointer in SLIM. If the port response put pointer has not 3011 * been updated, sync the pgp->rspPutInx and fetch the new port 3012 * response put pointer. 3013 */ 3014 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 3015 3016 if (pring->rspidx == portRspPut) 3017 portRspPut = le32_to_cpu(pgp->rspPutInx); 3018 } 3019 3020 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) { 3021 pring->stats.iocb_rsp_full++; 3022 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3023 writel(status, phba->CAregaddr); 3024 readl(phba->CAregaddr); 3025 } 3026 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3027 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3028 pring->stats.iocb_cmd_empty++; 3029 3030 /* Force update of the local copy of cmdGetInx */ 3031 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 3032 lpfc_sli_resume_iocb(phba, pring); 3033 3034 if ((pring->lpfc_sli_cmd_available)) 3035 (pring->lpfc_sli_cmd_available) (phba, pring); 3036 3037 } 3038 3039 phba->fcp_ring_in_use = 0; 3040 spin_unlock_irqrestore(&phba->hbalock, iflag); 3041 return rc; 3042 } 3043 3044 /** 3045 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb 3046 * @phba: Pointer to HBA context object. 3047 * @pring: Pointer to driver SLI ring object. 3048 * @rspiocbp: Pointer to driver response IOCB object. 3049 * 3050 * This function is called from the worker thread when there is a slow-path 3051 * response IOCB to process. This function chains all the response iocbs until 3052 * seeing the iocb with the LE bit set. The function will call 3053 * lpfc_sli_process_sol_iocb function if the response iocb indicates a 3054 * completion of a command iocb. The function will call the 3055 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. 3056 * The function frees the resources or calls the completion handler if this 3057 * iocb is an abort completion. The function returns NULL when the response 3058 * iocb has the LE bit set and all the chained iocbs are processed, otherwise 3059 * this function shall chain the iocb on to the iocb_continueq and return the 3060 * response iocb passed in. 
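 *
 * The slow-ring event handlers below call it once per received response,
 * for example:
 *
 *     rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);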
3061 **/ 3062 static struct lpfc_iocbq * 3063 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3064 struct lpfc_iocbq *rspiocbp) 3065 { 3066 struct lpfc_iocbq *saveq; 3067 struct lpfc_iocbq *cmdiocbp; 3068 struct lpfc_iocbq *next_iocb; 3069 IOCB_t *irsp = NULL; 3070 uint32_t free_saveq; 3071 uint8_t iocb_cmd_type; 3072 lpfc_iocb_type type; 3073 unsigned long iflag; 3074 int rc; 3075 3076 spin_lock_irqsave(&phba->hbalock, iflag); 3077 /* First add the response iocb to the countinueq list */ 3078 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 3079 pring->iocb_continueq_cnt++; 3080 3081 /* Now, determine whether the list is completed for processing */ 3082 irsp = &rspiocbp->iocb; 3083 if (irsp->ulpLe) { 3084 /* 3085 * By default, the driver expects to free all resources 3086 * associated with this iocb completion. 3087 */ 3088 free_saveq = 1; 3089 saveq = list_get_first(&pring->iocb_continueq, 3090 struct lpfc_iocbq, list); 3091 irsp = &(saveq->iocb); 3092 list_del_init(&pring->iocb_continueq); 3093 pring->iocb_continueq_cnt = 0; 3094 3095 pring->stats.iocb_rsp++; 3096 3097 /* 3098 * If resource errors reported from HBA, reduce 3099 * queuedepths of the SCSI device. 3100 */ 3101 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3102 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 3103 spin_unlock_irqrestore(&phba->hbalock, iflag); 3104 phba->lpfc_rampdown_queue_depth(phba); 3105 spin_lock_irqsave(&phba->hbalock, iflag); 3106 } 3107 3108 if (irsp->ulpStatus) { 3109 /* Rsp ring <ringno> error: IOCB */ 3110 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3111 "0328 Rsp Ring %d error: " 3112 "IOCB Data: " 3113 "x%x x%x x%x x%x " 3114 "x%x x%x x%x x%x " 3115 "x%x x%x x%x x%x " 3116 "x%x x%x x%x x%x\n", 3117 pring->ringno, 3118 irsp->un.ulpWord[0], 3119 irsp->un.ulpWord[1], 3120 irsp->un.ulpWord[2], 3121 irsp->un.ulpWord[3], 3122 irsp->un.ulpWord[4], 3123 irsp->un.ulpWord[5], 3124 *(((uint32_t *) irsp) + 6), 3125 *(((uint32_t *) irsp) + 7), 3126 *(((uint32_t *) irsp) + 8), 3127 *(((uint32_t *) irsp) + 9), 3128 *(((uint32_t *) irsp) + 10), 3129 *(((uint32_t *) irsp) + 11), 3130 *(((uint32_t *) irsp) + 12), 3131 *(((uint32_t *) irsp) + 13), 3132 *(((uint32_t *) irsp) + 14), 3133 *(((uint32_t *) irsp) + 15)); 3134 } 3135 3136 /* 3137 * Fetch the IOCB command type and call the correct completion 3138 * routine. Solicited and Unsolicited IOCBs on the ELS ring 3139 * get freed back to the lpfc_iocb_list by the discovery 3140 * kernel thread. 
3141 */ 3142 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 3143 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 3144 switch (type) { 3145 case LPFC_SOL_IOCB: 3146 spin_unlock_irqrestore(&phba->hbalock, iflag); 3147 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); 3148 spin_lock_irqsave(&phba->hbalock, iflag); 3149 break; 3150 3151 case LPFC_UNSOL_IOCB: 3152 spin_unlock_irqrestore(&phba->hbalock, iflag); 3153 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); 3154 spin_lock_irqsave(&phba->hbalock, iflag); 3155 if (!rc) 3156 free_saveq = 0; 3157 break; 3158 3159 case LPFC_ABORT_IOCB: 3160 cmdiocbp = NULL; 3161 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) 3162 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, 3163 saveq); 3164 if (cmdiocbp) { 3165 /* Call the specified completion routine */ 3166 if (cmdiocbp->iocb_cmpl) { 3167 spin_unlock_irqrestore(&phba->hbalock, 3168 iflag); 3169 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, 3170 saveq); 3171 spin_lock_irqsave(&phba->hbalock, 3172 iflag); 3173 } else 3174 __lpfc_sli_release_iocbq(phba, 3175 cmdiocbp); 3176 } 3177 break; 3178 3179 case LPFC_UNKNOWN_IOCB: 3180 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3181 char adaptermsg[LPFC_MAX_ADPTMSG]; 3182 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3183 memcpy(&adaptermsg[0], (uint8_t *)irsp, 3184 MAX_MSG_DATA); 3185 dev_warn(&((phba->pcidev)->dev), 3186 "lpfc%d: %s\n", 3187 phba->brd_no, adaptermsg); 3188 } else { 3189 /* Unknown IOCB command */ 3190 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3191 "0335 Unknown IOCB " 3192 "command Data: x%x " 3193 "x%x x%x x%x\n", 3194 irsp->ulpCommand, 3195 irsp->ulpStatus, 3196 irsp->ulpIoTag, 3197 irsp->ulpContext); 3198 } 3199 break; 3200 } 3201 3202 if (free_saveq) { 3203 list_for_each_entry_safe(rspiocbp, next_iocb, 3204 &saveq->list, list) { 3205 list_del(&rspiocbp->list); 3206 __lpfc_sli_release_iocbq(phba, rspiocbp); 3207 } 3208 __lpfc_sli_release_iocbq(phba, saveq); 3209 } 3210 rspiocbp = NULL; 3211 } 3212 spin_unlock_irqrestore(&phba->hbalock, iflag); 3213 return rspiocbp; 3214 } 3215 3216 /** 3217 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs 3218 * @phba: Pointer to HBA context object. 3219 * @pring: Pointer to driver SLI ring object. 3220 * @mask: Host attention register mask for this ring. 3221 * 3222 * This routine wraps the actual slow_ring event process routine from the 3223 * API jump table function pointer from the lpfc_hba struct. 3224 **/ 3225 void 3226 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 3227 struct lpfc_sli_ring *pring, uint32_t mask) 3228 { 3229 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); 3230 } 3231 3232 /** 3233 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings 3234 * @phba: Pointer to HBA context object. 3235 * @pring: Pointer to driver SLI ring object. 3236 * @mask: Host attention register mask for this ring. 3237 * 3238 * This function is called from the worker thread when there is a ring event 3239 * for non-fcp rings. The caller does not hold any lock. The function will 3240 * remove each response iocb in the response ring and calls the handle 3241 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 
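 *
 * This is the SLI3 instance behind the lpfc_sli_handle_slow_ring_event()
 * wrapper above; it is presumably installed in the per-HBA API jump table
 * when the driver sets up an SLI3 port elsewhere in the code.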
3242 **/ 3243 static void 3244 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, 3245 struct lpfc_sli_ring *pring, uint32_t mask) 3246 { 3247 struct lpfc_pgp *pgp; 3248 IOCB_t *entry; 3249 IOCB_t *irsp = NULL; 3250 struct lpfc_iocbq *rspiocbp = NULL; 3251 uint32_t portRspPut, portRspMax; 3252 unsigned long iflag; 3253 uint32_t status; 3254 3255 pgp = &phba->port_gp[pring->ringno]; 3256 spin_lock_irqsave(&phba->hbalock, iflag); 3257 pring->stats.iocb_event++; 3258 3259 /* 3260 * The next available response entry should never exceed the maximum 3261 * entries. If it does, treat it as an adapter hardware error. 3262 */ 3263 portRspMax = pring->numRiocb; 3264 portRspPut = le32_to_cpu(pgp->rspPutInx); 3265 if (portRspPut >= portRspMax) { 3266 /* 3267 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3268 * rsp ring <portRspMax> 3269 */ 3270 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3271 "0303 Ring %d handler: portRspPut %d " 3272 "is bigger than rsp ring %d\n", 3273 pring->ringno, portRspPut, portRspMax); 3274 3275 phba->link_state = LPFC_HBA_ERROR; 3276 spin_unlock_irqrestore(&phba->hbalock, iflag); 3277 3278 phba->work_hs = HS_FFER3; 3279 lpfc_handle_eratt(phba); 3280 3281 return; 3282 } 3283 3284 rmb(); 3285 while (pring->rspidx != portRspPut) { 3286 /* 3287 * Build a completion list and call the appropriate handler. 3288 * The process is to get the next available response iocb, get 3289 * a free iocb from the list, copy the response data into the 3290 * free iocb, insert to the continuation list, and update the 3291 * next response index to slim. This process makes response 3292 * iocb's in the ring available to DMA as fast as possible but 3293 * pays a penalty for a copy operation. Since the iocb is 3294 * only 32 bytes, this penalty is considered small relative to 3295 * the PCI reads for register values and a slim write. When 3296 * the ulpLe field is set, the entire Command has been 3297 * received. 3298 */ 3299 entry = lpfc_resp_iocb(phba, pring); 3300 3301 phba->last_completion_time = jiffies; 3302 rspiocbp = __lpfc_sli_get_iocbq(phba); 3303 if (rspiocbp == NULL) { 3304 printk(KERN_ERR "%s: out of buffers! Failing " 3305 "completion.\n", __func__); 3306 break; 3307 } 3308 3309 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, 3310 phba->iocb_rsp_size); 3311 irsp = &rspiocbp->iocb; 3312 3313 if (++pring->rspidx >= portRspMax) 3314 pring->rspidx = 0; 3315 3316 if (pring->ringno == LPFC_ELS_RING) { 3317 lpfc_debugfs_slow_ring_trc(phba, 3318 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x", 3319 *(((uint32_t *) irsp) + 4), 3320 *(((uint32_t *) irsp) + 6), 3321 *(((uint32_t *) irsp) + 7)); 3322 } 3323 3324 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 3325 3326 spin_unlock_irqrestore(&phba->hbalock, iflag); 3327 /* Handle the response IOCB */ 3328 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); 3329 spin_lock_irqsave(&phba->hbalock, iflag); 3330 3331 /* 3332 * If the port response put pointer has not been updated, sync 3333 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port 3334 * response put pointer. 
3335 */ 3336 if (pring->rspidx == portRspPut) { 3337 portRspPut = le32_to_cpu(pgp->rspPutInx); 3338 } 3339 } /* while (pring->rspidx != portRspPut) */ 3340 3341 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { 3342 /* At least one response entry has been freed */ 3343 pring->stats.iocb_rsp_full++; 3344 /* SET RxRE_RSP in Chip Att register */ 3345 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3346 writel(status, phba->CAregaddr); 3347 readl(phba->CAregaddr); /* flush */ 3348 } 3349 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3350 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3351 pring->stats.iocb_cmd_empty++; 3352 3353 /* Force update of the local copy of cmdGetInx */ 3354 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 3355 lpfc_sli_resume_iocb(phba, pring); 3356 3357 if ((pring->lpfc_sli_cmd_available)) 3358 (pring->lpfc_sli_cmd_available) (phba, pring); 3359 3360 } 3361 3362 spin_unlock_irqrestore(&phba->hbalock, iflag); 3363 return; 3364 } 3365 3366 /** 3367 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events 3368 * @phba: Pointer to HBA context object. 3369 * @pring: Pointer to driver SLI ring object. 3370 * @mask: Host attention register mask for this ring. 3371 * 3372 * This function is called from the worker thread when there is a pending 3373 * ELS response iocb on the driver internal slow-path response iocb worker 3374 * queue. The caller does not hold any lock. The function will remove each 3375 * response iocb from the response worker queue and calls the handle 3376 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 3377 **/ 3378 static void 3379 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, 3380 struct lpfc_sli_ring *pring, uint32_t mask) 3381 { 3382 struct lpfc_iocbq *irspiocbq; 3383 struct hbq_dmabuf *dmabuf; 3384 struct lpfc_cq_event *cq_event; 3385 unsigned long iflag; 3386 3387 spin_lock_irqsave(&phba->hbalock, iflag); 3388 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 3389 spin_unlock_irqrestore(&phba->hbalock, iflag); 3390 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 3391 /* Get the response iocb from the head of work queue */ 3392 spin_lock_irqsave(&phba->hbalock, iflag); 3393 list_remove_head(&phba->sli4_hba.sp_queue_event, 3394 cq_event, struct lpfc_cq_event, list); 3395 spin_unlock_irqrestore(&phba->hbalock, iflag); 3396 3397 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 3398 case CQE_CODE_COMPL_WQE: 3399 irspiocbq = container_of(cq_event, struct lpfc_iocbq, 3400 cq_event); 3401 /* Translate ELS WCQE to response IOCBQ */ 3402 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, 3403 irspiocbq); 3404 if (irspiocbq) 3405 lpfc_sli_sp_handle_rspiocb(phba, pring, 3406 irspiocbq); 3407 break; 3408 case CQE_CODE_RECEIVE: 3409 case CQE_CODE_RECEIVE_V1: 3410 dmabuf = container_of(cq_event, struct hbq_dmabuf, 3411 cq_event); 3412 lpfc_sli4_handle_received_buffer(phba, dmabuf); 3413 break; 3414 default: 3415 break; 3416 } 3417 } 3418 } 3419 3420 /** 3421 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring 3422 * @phba: Pointer to HBA context object. 3423 * @pring: Pointer to driver SLI ring object. 3424 * 3425 * This function aborts all iocbs in the given ring and frees all the iocb 3426 * objects in txq. This function issues an abort iocb for all the iocb commands 3427 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3428 * the return of this function. The caller is not required to hold any locks. 
3429 **/ 3430 void 3431 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3432 { 3433 LIST_HEAD(completions); 3434 struct lpfc_iocbq *iocb, *next_iocb; 3435 3436 if (pring->ringno == LPFC_ELS_RING) { 3437 lpfc_fabric_abort_hba(phba); 3438 } 3439 3440 /* Error everything on txq and txcmplq 3441 * First do the txq. 3442 */ 3443 spin_lock_irq(&phba->hbalock); 3444 list_splice_init(&pring->txq, &completions); 3445 pring->txq_cnt = 0; 3446 3447 /* Next issue ABTS for everything on the txcmplq */ 3448 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3449 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3450 3451 spin_unlock_irq(&phba->hbalock); 3452 3453 /* Cancel all the IOCBs from the completions list */ 3454 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 3455 IOERR_SLI_ABORTED); 3456 } 3457 3458 /** 3459 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring 3460 * @phba: Pointer to HBA context object. 3461 * 3462 * This function flushes all iocbs in the fcp ring and frees all the iocb 3463 * objects in txq and txcmplq. This function will not issue abort iocbs 3464 * for all the iocb commands in txcmplq, they will just be returned with 3465 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI 3466 * slot has been permanently disabled. 3467 **/ 3468 void 3469 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) 3470 { 3471 LIST_HEAD(txq); 3472 LIST_HEAD(txcmplq); 3473 struct lpfc_sli *psli = &phba->sli; 3474 struct lpfc_sli_ring *pring; 3475 3476 /* Currently, only one fcp ring */ 3477 pring = &psli->ring[psli->fcp_ring]; 3478 3479 spin_lock_irq(&phba->hbalock); 3480 /* Retrieve everything on txq */ 3481 list_splice_init(&pring->txq, &txq); 3482 pring->txq_cnt = 0; 3483 3484 /* Retrieve everything on the txcmplq */ 3485 list_splice_init(&pring->txcmplq, &txcmplq); 3486 pring->txcmplq_cnt = 0; 3487 spin_unlock_irq(&phba->hbalock); 3488 3489 /* Flush the txq */ 3490 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, 3491 IOERR_SLI_DOWN); 3492 3493 /* Flush the txcmpq */ 3494 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, 3495 IOERR_SLI_DOWN); 3496 } 3497 3498 /** 3499 * lpfc_sli_brdready_s3 - Check for sli3 host ready status 3500 * @phba: Pointer to HBA context object. 3501 * @mask: Bit mask to be checked. 3502 * 3503 * This function reads the host status register and compares 3504 * with the provided bit mask to check if HBA completed 3505 * the restart. This function will wait in a loop for the 3506 * HBA to complete restart. If the HBA does not restart within 3507 * 15 iterations, the function will reset the HBA again. The 3508 * function returns 1 when HBA fail to restart otherwise returns 3509 * zero. 3510 **/ 3511 static int 3512 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) 3513 { 3514 uint32_t status; 3515 int i = 0; 3516 int retval = 0; 3517 3518 /* Read the HBA Host Status Register */ 3519 if (lpfc_readl(phba->HSregaddr, &status)) 3520 return 1; 3521 3522 /* 3523 * Check status register every 100ms for 5 retries, then every 3524 * 500ms for 5, then every 2.5 sec for 5, then reset board and 3525 * every 2.5 sec for 4. 3526 * Break our of the loop if errors occurred during init. 
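 * (Note: with the msleep() calls below, the first five waits are actually
 * 10 ms each rather than the 100 ms stated above.)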
3527 */ 3528 while (((status & mask) != mask) && 3529 !(status & HS_FFERM) && 3530 i++ < 20) { 3531 3532 if (i <= 5) 3533 msleep(10); 3534 else if (i <= 10) 3535 msleep(500); 3536 else 3537 msleep(2500); 3538 3539 if (i == 15) { 3540 /* Do post */ 3541 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3542 lpfc_sli_brdrestart(phba); 3543 } 3544 /* Read the HBA Host Status Register */ 3545 if (lpfc_readl(phba->HSregaddr, &status)) { 3546 retval = 1; 3547 break; 3548 } 3549 } 3550 3551 /* Check to see if any errors occurred during init */ 3552 if ((status & HS_FFERM) || (i >= 20)) { 3553 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3554 "2751 Adapter failed to restart, " 3555 "status reg x%x, FW Data: A8 x%x AC x%x\n", 3556 status, 3557 readl(phba->MBslimaddr + 0xa8), 3558 readl(phba->MBslimaddr + 0xac)); 3559 phba->link_state = LPFC_HBA_ERROR; 3560 retval = 1; 3561 } 3562 3563 return retval; 3564 } 3565 3566 /** 3567 * lpfc_sli_brdready_s4 - Check for sli4 host ready status 3568 * @phba: Pointer to HBA context object. 3569 * @mask: Bit mask to be checked. 3570 * 3571 * This function checks the host status register to see if the HBA is 3572 * ready. This function will wait in a loop for the HBA to become ready. 3573 * If the HBA is not ready, the function will reset the HBA PCI 3574 * function again. The function returns 1 when the HBA fails to become 3575 * ready, otherwise it returns zero. 3576 **/ 3577 static int 3578 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask) 3579 { 3580 uint32_t status; 3581 int retval = 0; 3582 3583 /* Read the HBA Host Status Register */ 3584 status = lpfc_sli4_post_status_check(phba); 3585 3586 if (status) { 3587 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3588 lpfc_sli_brdrestart(phba); 3589 status = lpfc_sli4_post_status_check(phba); 3590 } 3591 3592 /* Check to see if any errors occurred during init */ 3593 if (status) { 3594 phba->link_state = LPFC_HBA_ERROR; 3595 retval = 1; 3596 } else 3597 phba->sli4_hba.intr_enable = 0; 3598 3599 return retval; 3600 } 3601 3602 /** 3603 * lpfc_sli_brdready - Wrapper func for checking the hba readiness 3604 * @phba: Pointer to HBA context object. 3605 * @mask: Bit mask to be checked. 3606 * 3607 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine 3608 * through the API jump table function pointer in the lpfc_hba struct. 3609 **/ 3610 int 3611 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) 3612 { 3613 return phba->lpfc_sli_brdready(phba, mask); 3614 } 3615 3616 #define BARRIER_TEST_PATTERN (0xdeadbeef) 3617 3618 /** 3619 * lpfc_reset_barrier - Make HBA ready for HBA reset 3620 * @phba: Pointer to HBA context object. 3621 * 3622 * This function is called with hbalock held before resetting an HBA and 3623 * requests the HBA to quiesce DMAs before the reset. 3624 **/ 3625 void lpfc_reset_barrier(struct lpfc_hba *phba) 3626 { 3627 uint32_t __iomem *resp_buf; 3628 uint32_t __iomem *mbox_buf; 3629 volatile uint32_t mbox; 3630 uint32_t hc_copy, ha_copy, resp_data; 3631 int i; 3632 uint8_t hdrtype; 3633 3634 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); 3635 if (hdrtype != 0x80 || 3636 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID && 3637 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID)) 3638 return; 3639 3640 /* 3641 * Tell the other part of the chip to suspend temporarily all 3642 * its DMA activity.
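 * This is done by writing a MBX_KILL_BOARD mailbox word to SLIM with the
 * ownership bit set to OWN_CHIP and then polling SLIM word 1 until the
 * BARRIER_TEST_PATTERN written there is returned inverted.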
3643 */ 3644 resp_buf = phba->MBslimaddr; 3645 3646 /* Disable the error attention */ 3647 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 3648 return; 3649 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 3650 readl(phba->HCregaddr); /* flush */ 3651 phba->link_flag |= LS_IGNORE_ERATT; 3652 3653 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3654 return; 3655 if (ha_copy & HA_ERATT) { 3656 /* Clear Chip error bit */ 3657 writel(HA_ERATT, phba->HAregaddr); 3658 phba->pport->stopped = 1; 3659 } 3660 3661 mbox = 0; 3662 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 3663 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 3664 3665 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 3666 mbox_buf = phba->MBslimaddr; 3667 writel(mbox, mbox_buf); 3668 3669 for (i = 0; i < 50; i++) { 3670 if (lpfc_readl((resp_buf + 1), &resp_data)) 3671 return; 3672 if (resp_data != ~(BARRIER_TEST_PATTERN)) 3673 mdelay(1); 3674 else 3675 break; 3676 } 3677 resp_data = 0; 3678 if (lpfc_readl((resp_buf + 1), &resp_data)) 3679 return; 3680 if (resp_data != ~(BARRIER_TEST_PATTERN)) { 3681 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || 3682 phba->pport->stopped) 3683 goto restore_hc; 3684 else 3685 goto clear_errat; 3686 } 3687 3688 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 3689 resp_data = 0; 3690 for (i = 0; i < 500; i++) { 3691 if (lpfc_readl(resp_buf, &resp_data)) 3692 return; 3693 if (resp_data != mbox) 3694 mdelay(1); 3695 else 3696 break; 3697 } 3698 3699 clear_errat: 3700 3701 while (++i < 500) { 3702 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3703 return; 3704 if (!(ha_copy & HA_ERATT)) 3705 mdelay(1); 3706 else 3707 break; 3708 } 3709 3710 if (readl(phba->HAregaddr) & HA_ERATT) { 3711 writel(HA_ERATT, phba->HAregaddr); 3712 phba->pport->stopped = 1; 3713 } 3714 3715 restore_hc: 3716 phba->link_flag &= ~LS_IGNORE_ERATT; 3717 writel(hc_copy, phba->HCregaddr); 3718 readl(phba->HCregaddr); /* flush */ 3719 } 3720 3721 /** 3722 * lpfc_sli_brdkill - Issue a kill_board mailbox command 3723 * @phba: Pointer to HBA context object. 3724 * 3725 * This function issues a kill_board mailbox command and waits for 3726 * the error attention interrupt. This function is called for stopping 3727 * the firmware processing. The caller is not required to hold any 3728 * locks. This function calls lpfc_hba_down_post function to free 3729 * any pending commands after the kill. The function will return 1 when it 3730 * fails to kill the board else will return 0. 
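 *
 * Note: the wait for the error attention corresponds to the polling loop
 * below - up to 30 iterations of mdelay(100), i.e. roughly 3 seconds.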
3731 **/ 3732 int 3733 lpfc_sli_brdkill(struct lpfc_hba *phba) 3734 { 3735 struct lpfc_sli *psli; 3736 LPFC_MBOXQ_t *pmb; 3737 uint32_t status; 3738 uint32_t ha_copy; 3739 int retval; 3740 int i = 0; 3741 3742 psli = &phba->sli; 3743 3744 /* Kill HBA */ 3745 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3746 "0329 Kill HBA Data: x%x x%x\n", 3747 phba->pport->port_state, psli->sli_flag); 3748 3749 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3750 if (!pmb) 3751 return 1; 3752 3753 /* Disable the error attention */ 3754 spin_lock_irq(&phba->hbalock); 3755 if (lpfc_readl(phba->HCregaddr, &status)) { 3756 spin_unlock_irq(&phba->hbalock); 3757 mempool_free(pmb, phba->mbox_mem_pool); 3758 return 1; 3759 } 3760 status &= ~HC_ERINT_ENA; 3761 writel(status, phba->HCregaddr); 3762 readl(phba->HCregaddr); /* flush */ 3763 phba->link_flag |= LS_IGNORE_ERATT; 3764 spin_unlock_irq(&phba->hbalock); 3765 3766 lpfc_kill_board(phba, pmb); 3767 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3768 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 3769 3770 if (retval != MBX_SUCCESS) { 3771 if (retval != MBX_BUSY) 3772 mempool_free(pmb, phba->mbox_mem_pool); 3773 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3774 "2752 KILL_BOARD command failed retval %d\n", 3775 retval); 3776 spin_lock_irq(&phba->hbalock); 3777 phba->link_flag &= ~LS_IGNORE_ERATT; 3778 spin_unlock_irq(&phba->hbalock); 3779 return 1; 3780 } 3781 3782 spin_lock_irq(&phba->hbalock); 3783 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 3784 spin_unlock_irq(&phba->hbalock); 3785 3786 mempool_free(pmb, phba->mbox_mem_pool); 3787 3788 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 3789 * attention every 100ms for 3 seconds. If we don't get ERATT after 3790 * 3 seconds we still set HBA_ERROR state because the status of the 3791 * board is now undefined. 3792 */ 3793 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3794 return 1; 3795 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 3796 mdelay(100); 3797 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3798 return 1; 3799 } 3800 3801 del_timer_sync(&psli->mbox_tmo); 3802 if (ha_copy & HA_ERATT) { 3803 writel(HA_ERATT, phba->HAregaddr); 3804 phba->pport->stopped = 1; 3805 } 3806 spin_lock_irq(&phba->hbalock); 3807 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3808 psli->mbox_active = NULL; 3809 phba->link_flag &= ~LS_IGNORE_ERATT; 3810 spin_unlock_irq(&phba->hbalock); 3811 3812 lpfc_hba_down_post(phba); 3813 phba->link_state = LPFC_HBA_ERROR; 3814 3815 return ha_copy & HA_ERATT ? 0 : 1; 3816 } 3817 3818 /** 3819 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA 3820 * @phba: Pointer to HBA context object. 3821 * 3822 * This function resets the HBA by writing HC_INITFF to the control 3823 * register. After the HBA resets, this function resets all the iocb ring 3824 * indices. This function disables PCI layer parity checking during 3825 * the reset. 3826 * This function returns 0 always. 3827 * The caller is not required to hold any locks. 
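 *
 * A typical caller is lpfc_sli_brdrestart_s3() below, which first writes
 * the MBX_RESTART mailbox command to SLIM and then invokes this routine.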
3828 **/ 3829 int 3830 lpfc_sli_brdreset(struct lpfc_hba *phba) 3831 { 3832 struct lpfc_sli *psli; 3833 struct lpfc_sli_ring *pring; 3834 uint16_t cfg_value; 3835 int i; 3836 3837 psli = &phba->sli; 3838 3839 /* Reset HBA */ 3840 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3841 "0325 Reset HBA Data: x%x x%x\n", 3842 phba->pport->port_state, psli->sli_flag); 3843 3844 /* perform board reset */ 3845 phba->fc_eventTag = 0; 3846 phba->link_events = 0; 3847 phba->pport->fc_myDID = 0; 3848 phba->pport->fc_prevDID = 0; 3849 3850 /* Turn off parity checking and serr during the physical reset */ 3851 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 3852 pci_write_config_word(phba->pcidev, PCI_COMMAND, 3853 (cfg_value & 3854 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3855 3856 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA); 3857 3858 /* Now toggle INITFF bit in the Host Control Register */ 3859 writel(HC_INITFF, phba->HCregaddr); 3860 mdelay(1); 3861 readl(phba->HCregaddr); /* flush */ 3862 writel(0, phba->HCregaddr); 3863 readl(phba->HCregaddr); /* flush */ 3864 3865 /* Restore PCI cmd register */ 3866 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 3867 3868 /* Initialize relevant SLI info */ 3869 for (i = 0; i < psli->num_rings; i++) { 3870 pring = &psli->ring[i]; 3871 pring->flag = 0; 3872 pring->rspidx = 0; 3873 pring->next_cmdidx = 0; 3874 pring->local_getidx = 0; 3875 pring->cmdidx = 0; 3876 pring->missbufcnt = 0; 3877 } 3878 3879 phba->link_state = LPFC_WARM_START; 3880 return 0; 3881 } 3882 3883 /** 3884 * lpfc_sli4_brdreset - Reset a sli-4 HBA 3885 * @phba: Pointer to HBA context object. 3886 * 3887 * This function resets a SLI4 HBA. This function disables PCI layer parity 3888 * checking during resets the device. The caller is not required to hold 3889 * any locks. 3890 * 3891 * This function returns 0 always. 3892 **/ 3893 int 3894 lpfc_sli4_brdreset(struct lpfc_hba *phba) 3895 { 3896 struct lpfc_sli *psli = &phba->sli; 3897 uint16_t cfg_value; 3898 3899 /* Reset HBA */ 3900 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3901 "0295 Reset HBA Data: x%x x%x\n", 3902 phba->pport->port_state, psli->sli_flag); 3903 3904 /* perform board reset */ 3905 phba->fc_eventTag = 0; 3906 phba->link_events = 0; 3907 phba->pport->fc_myDID = 0; 3908 phba->pport->fc_prevDID = 0; 3909 3910 spin_lock_irq(&phba->hbalock); 3911 psli->sli_flag &= ~(LPFC_PROCESS_LA); 3912 phba->fcf.fcf_flag = 0; 3913 spin_unlock_irq(&phba->hbalock); 3914 3915 /* Now physically reset the device */ 3916 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3917 "0389 Performing PCI function reset!\n"); 3918 3919 /* Turn off parity checking and serr during the physical reset */ 3920 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 3921 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & 3922 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3923 3924 /* Perform FCoE PCI function reset */ 3925 lpfc_sli4_queue_destroy(phba); 3926 lpfc_pci_function_reset(phba); 3927 3928 /* Restore PCI cmd register */ 3929 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 3930 3931 return 0; 3932 } 3933 3934 /** 3935 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba 3936 * @phba: Pointer to HBA context object. 3937 * 3938 * This function is called in the SLI initialization code path to 3939 * restart the HBA. The caller is not required to hold any lock. 3940 * This function writes MBX_RESTART mailbox command to the SLIM and 3941 * resets the HBA. 
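 * (MBX_RESTART is written to SLIM word 0, and word 1 is set to indicate
 * whether POST should be skipped, based on the current port_state; see
 * the body below.)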
At the end of the function, it calls lpfc_hba_down_post 3942 * function to free any pending commands. The function enables 3943 * POST only during the first initialization. The function returns zero. 3944 * The function does not guarantee completion of MBX_RESTART mailbox 3945 * command before the return of this function. 3946 **/ 3947 static int 3948 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) 3949 { 3950 MAILBOX_t *mb; 3951 struct lpfc_sli *psli; 3952 volatile uint32_t word0; 3953 void __iomem *to_slim; 3954 uint32_t hba_aer_enabled; 3955 3956 spin_lock_irq(&phba->hbalock); 3957 3958 /* Take PCIe device Advanced Error Reporting (AER) state */ 3959 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 3960 3961 psli = &phba->sli; 3962 3963 /* Restart HBA */ 3964 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3965 "0337 Restart HBA Data: x%x x%x\n", 3966 phba->pport->port_state, psli->sli_flag); 3967 3968 word0 = 0; 3969 mb = (MAILBOX_t *) &word0; 3970 mb->mbxCommand = MBX_RESTART; 3971 mb->mbxHc = 1; 3972 3973 lpfc_reset_barrier(phba); 3974 3975 to_slim = phba->MBslimaddr; 3976 writel(*(uint32_t *) mb, to_slim); 3977 readl(to_slim); /* flush */ 3978 3979 /* Only skip post after fc_ffinit is completed */ 3980 if (phba->pport->port_state) 3981 word0 = 1; /* This is really setting up word1 */ 3982 else 3983 word0 = 0; /* This is really setting up word1 */ 3984 to_slim = phba->MBslimaddr + sizeof (uint32_t); 3985 writel(*(uint32_t *) mb, to_slim); 3986 readl(to_slim); /* flush */ 3987 3988 lpfc_sli_brdreset(phba); 3989 phba->pport->stopped = 0; 3990 phba->link_state = LPFC_INIT_START; 3991 phba->hba_flag = 0; 3992 spin_unlock_irq(&phba->hbalock); 3993 3994 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 3995 psli->stats_start = get_seconds(); 3996 3997 /* Give the INITFF and Post time to settle. */ 3998 mdelay(100); 3999 4000 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 4001 if (hba_aer_enabled) 4002 pci_disable_pcie_error_reporting(phba->pcidev); 4003 4004 lpfc_hba_down_post(phba); 4005 4006 return 0; 4007 } 4008 4009 /** 4010 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba 4011 * @phba: Pointer to HBA context object. 4012 * 4013 * This function is called in the SLI initialization code path to restart 4014 * a SLI4 HBA. The caller is not required to hold any lock. 4015 * At the end of the function, it calls lpfc_hba_down_post function to 4016 * free any pending commands. 
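 * The function always returns zero.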
4017 **/ 4018 static int 4019 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) 4020 { 4021 struct lpfc_sli *psli = &phba->sli; 4022 uint32_t hba_aer_enabled; 4023 4024 /* Restart HBA */ 4025 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4026 "0296 Restart HBA Data: x%x x%x\n", 4027 phba->pport->port_state, psli->sli_flag); 4028 4029 /* Take PCIe device Advanced Error Reporting (AER) state */ 4030 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 4031 4032 lpfc_sli4_brdreset(phba); 4033 4034 spin_lock_irq(&phba->hbalock); 4035 phba->pport->stopped = 0; 4036 phba->link_state = LPFC_INIT_START; 4037 phba->hba_flag = 0; 4038 spin_unlock_irq(&phba->hbalock); 4039 4040 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 4041 psli->stats_start = get_seconds(); 4042 4043 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 4044 if (hba_aer_enabled) 4045 pci_disable_pcie_error_reporting(phba->pcidev); 4046 4047 lpfc_hba_down_post(phba); 4048 4049 return 0; 4050 } 4051 4052 /** 4053 * lpfc_sli_brdrestart - Wrapper func for restarting hba 4054 * @phba: Pointer to HBA context object. 4055 * 4056 * This routine wraps the actual SLI3 or SLI4 hba restart routine through the 4057 * API jump table function pointer in the lpfc_hba struct. 4058 **/ 4059 int 4060 lpfc_sli_brdrestart(struct lpfc_hba *phba) 4061 { 4062 return phba->lpfc_sli_brdrestart(phba); 4063 } 4064 4065 /** 4066 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart 4067 * @phba: Pointer to HBA context object. 4068 * 4069 * This function is called after a HBA restart to wait for successful 4070 * restart of the HBA. Successful restart of the HBA is indicated by 4071 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 150 4072 * iterations, the function will restart the HBA again. The function returns 4073 * zero if the HBA successfully restarted, else returns a negative error code. 4074 **/ 4075 static int 4076 lpfc_sli_chipset_init(struct lpfc_hba *phba) 4077 { 4078 uint32_t status, i = 0; 4079 4080 /* Read the HBA Host Status Register */ 4081 if (lpfc_readl(phba->HSregaddr, &status)) 4082 return -EIO; 4083 4084 /* Check status register to see what current state is */ 4085 i = 0; 4086 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) { 4087 4088 /* Check every 10ms for 10 retries, then every 100ms for 90 4089 * retries, then every 1 sec for 50 retries, for a total of 4090 * ~60 seconds before resetting the board again, and then check 4091 * every 1 sec for 50 more retries. The up-to-60-second wait 4092 * before the board becomes ready is required for the Falcon FIPS 4093 * zeroization to complete; any board reset in between restarts 4094 * the zeroization and further delays board readiness.
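 * (As coded below, this amounts to roughly 10 * 10ms + 90 * 100ms +
 * 50 * 1s, i.e. about 59 seconds before the restart at iteration 150,
 * plus up to 50 further 1-second polls before the 200-iteration timeout.)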
4095 */ 4096 if (i++ >= 200) { 4097 /* Adapter failed to init, timeout, status reg 4098 <status> */ 4099 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4100 "0436 Adapter failed to init, " 4101 "timeout, status reg x%x, " 4102 "FW Data: A8 x%x AC x%x\n", status, 4103 readl(phba->MBslimaddr + 0xa8), 4104 readl(phba->MBslimaddr + 0xac)); 4105 phba->link_state = LPFC_HBA_ERROR; 4106 return -ETIMEDOUT; 4107 } 4108 4109 /* Check to see if any errors occurred during init */ 4110 if (status & HS_FFERM) { 4111 /* ERROR: During chipset initialization */ 4112 /* Adapter failed to init, chipset, status reg 4113 <status> */ 4114 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4115 "0437 Adapter failed to init, " 4116 "chipset, status reg x%x, " 4117 "FW Data: A8 x%x AC x%x\n", status, 4118 readl(phba->MBslimaddr + 0xa8), 4119 readl(phba->MBslimaddr + 0xac)); 4120 phba->link_state = LPFC_HBA_ERROR; 4121 return -EIO; 4122 } 4123 4124 if (i <= 10) 4125 msleep(10); 4126 else if (i <= 100) 4127 msleep(100); 4128 else 4129 msleep(1000); 4130 4131 if (i == 150) { 4132 /* Do post */ 4133 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4134 lpfc_sli_brdrestart(phba); 4135 } 4136 /* Read the HBA Host Status Register */ 4137 if (lpfc_readl(phba->HSregaddr, &status)) 4138 return -EIO; 4139 } 4140 4141 /* Check to see if any errors occurred during init */ 4142 if (status & HS_FFERM) { 4143 /* ERROR: During chipset initialization */ 4144 /* Adapter failed to init, chipset, status reg <status> */ 4145 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4146 "0438 Adapter failed to init, chipset, " 4147 "status reg x%x, " 4148 "FW Data: A8 x%x AC x%x\n", status, 4149 readl(phba->MBslimaddr + 0xa8), 4150 readl(phba->MBslimaddr + 0xac)); 4151 phba->link_state = LPFC_HBA_ERROR; 4152 return -EIO; 4153 } 4154 4155 /* Clear all interrupt enable conditions */ 4156 writel(0, phba->HCregaddr); 4157 readl(phba->HCregaddr); /* flush */ 4158 4159 /* setup host attn register */ 4160 writel(0xffffffff, phba->HAregaddr); 4161 readl(phba->HAregaddr); /* flush */ 4162 return 0; 4163 } 4164 4165 /** 4166 * lpfc_sli_hbq_count - Get the number of HBQs to be configured 4167 * 4168 * This function calculates and returns the number of HBQs required to be 4169 * configured. 4170 **/ 4171 int 4172 lpfc_sli_hbq_count(void) 4173 { 4174 return ARRAY_SIZE(lpfc_hbq_defs); 4175 } 4176 4177 /** 4178 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries 4179 * 4180 * This function adds the number of hbq entries in every HBQ to get 4181 * the total number of hbq entries required for the HBA and returns 4182 * the total count. 4183 **/ 4184 static int 4185 lpfc_sli_hbq_entry_count(void) 4186 { 4187 int hbq_count = lpfc_sli_hbq_count(); 4188 int count = 0; 4189 int i; 4190 4191 for (i = 0; i < hbq_count; ++i) 4192 count += lpfc_hbq_defs[i]->entry_count; 4193 return count; 4194 } 4195 4196 /** 4197 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries 4198 * 4199 * This function calculates amount of memory required for all hbq entries 4200 * to be configured and returns the total memory required. 4201 **/ 4202 int 4203 lpfc_sli_hbq_size(void) 4204 { 4205 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 4206 } 4207 4208 /** 4209 * lpfc_sli_hbq_setup - configure and initialize HBQs 4210 * @phba: Pointer to HBA context object. 4211 * 4212 * This function is called during the SLI initialization to configure 4213 * all the HBQs and post buffers to the HBQ. The caller is not 4214 * required to hold any locks. 
This function will return zero if successful 4215 * else it will return negative error code. 4216 **/ 4217 static int 4218 lpfc_sli_hbq_setup(struct lpfc_hba *phba) 4219 { 4220 int hbq_count = lpfc_sli_hbq_count(); 4221 LPFC_MBOXQ_t *pmb; 4222 MAILBOX_t *pmbox; 4223 uint32_t hbqno; 4224 uint32_t hbq_entry_index; 4225 4226 /* Get a Mailbox buffer to setup mailbox 4227 * commands for HBA initialization 4228 */ 4229 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4230 4231 if (!pmb) 4232 return -ENOMEM; 4233 4234 pmbox = &pmb->u.mb; 4235 4236 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 4237 phba->link_state = LPFC_INIT_MBX_CMDS; 4238 phba->hbq_in_use = 1; 4239 4240 hbq_entry_index = 0; 4241 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 4242 phba->hbqs[hbqno].next_hbqPutIdx = 0; 4243 phba->hbqs[hbqno].hbqPutIdx = 0; 4244 phba->hbqs[hbqno].local_hbqGetIdx = 0; 4245 phba->hbqs[hbqno].entry_count = 4246 lpfc_hbq_defs[hbqno]->entry_count; 4247 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], 4248 hbq_entry_index, pmb); 4249 hbq_entry_index += phba->hbqs[hbqno].entry_count; 4250 4251 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 4252 /* Adapter failed to init, mbxCmd <cmd> CFG_RING, 4253 mbxStatus <status>, ring <num> */ 4254 4255 lpfc_printf_log(phba, KERN_ERR, 4256 LOG_SLI | LOG_VPORT, 4257 "1805 Adapter failed to init. " 4258 "Data: x%x x%x x%x\n", 4259 pmbox->mbxCommand, 4260 pmbox->mbxStatus, hbqno); 4261 4262 phba->link_state = LPFC_HBA_ERROR; 4263 mempool_free(pmb, phba->mbox_mem_pool); 4264 return -ENXIO; 4265 } 4266 } 4267 phba->hbq_count = hbq_count; 4268 4269 mempool_free(pmb, phba->mbox_mem_pool); 4270 4271 /* Initially populate or replenish the HBQs */ 4272 for (hbqno = 0; hbqno < hbq_count; ++hbqno) 4273 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno); 4274 return 0; 4275 } 4276 4277 /** 4278 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA 4279 * @phba: Pointer to HBA context object. 4280 * 4281 * This function is called during the SLI initialization to configure 4282 * all the HBQs and post buffers to the HBQ. The caller is not 4283 * required to hold any locks. This function will return zero if successful 4284 * else it will return negative error code. 4285 **/ 4286 static int 4287 lpfc_sli4_rb_setup(struct lpfc_hba *phba) 4288 { 4289 phba->hbq_in_use = 1; 4290 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count; 4291 phba->hbq_count = 1; 4292 /* Initially populate or replenish the HBQs */ 4293 lpfc_sli_hbqbuf_init_hbqs(phba, 0); 4294 return 0; 4295 } 4296 4297 /** 4298 * lpfc_sli_config_port - Issue config port mailbox command 4299 * @phba: Pointer to HBA context object. 4300 * @sli_mode: sli mode - 2/3 4301 * 4302 * This function is called by the sli intialization code path 4303 * to issue config_port mailbox command. This function restarts the 4304 * HBA firmware and issues a config_port mailbox command to configure 4305 * the SLI interface in the sli mode specified by sli_mode 4306 * variable. The caller is not required to hold any locks. 4307 * The function returns 0 if successful, else returns negative error 4308 * code. 
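 *
 * For example, lpfc_sli_hba_setup() below calls this routine with
 * sli_mode 3 by default and falls back to sli_mode 2 if SLI-3 cannot be
 * configured.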
4309 **/ 4310 int 4311 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) 4312 { 4313 LPFC_MBOXQ_t *pmb; 4314 uint32_t resetcount = 0, rc = 0, done = 0; 4315 4316 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4317 if (!pmb) { 4318 phba->link_state = LPFC_HBA_ERROR; 4319 return -ENOMEM; 4320 } 4321 4322 phba->sli_rev = sli_mode; 4323 while (resetcount < 2 && !done) { 4324 spin_lock_irq(&phba->hbalock); 4325 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 4326 spin_unlock_irq(&phba->hbalock); 4327 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4328 lpfc_sli_brdrestart(phba); 4329 rc = lpfc_sli_chipset_init(phba); 4330 if (rc) 4331 break; 4332 4333 spin_lock_irq(&phba->hbalock); 4334 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4335 spin_unlock_irq(&phba->hbalock); 4336 resetcount++; 4337 4338 /* Call pre CONFIG_PORT mailbox command initialization. A 4339 * value of 0 means the call was successful. Any other 4340 * nonzero value is a failure, but if ERESTART is returned, 4341 * the driver may reset the HBA and try again. 4342 */ 4343 rc = lpfc_config_port_prep(phba); 4344 if (rc == -ERESTART) { 4345 phba->link_state = LPFC_LINK_UNKNOWN; 4346 continue; 4347 } else if (rc) 4348 break; 4349 4350 phba->link_state = LPFC_INIT_MBX_CMDS; 4351 lpfc_config_port(phba, pmb); 4352 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 4353 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 4354 LPFC_SLI3_HBQ_ENABLED | 4355 LPFC_SLI3_CRP_ENABLED | 4356 LPFC_SLI3_BG_ENABLED | 4357 LPFC_SLI3_DSS_ENABLED); 4358 if (rc != MBX_SUCCESS) { 4359 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4360 "0442 Adapter failed to init, mbxCmd x%x " 4361 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 4362 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); 4363 spin_lock_irq(&phba->hbalock); 4364 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 4365 spin_unlock_irq(&phba->hbalock); 4366 rc = -ENXIO; 4367 } else { 4368 /* Allow asynchronous mailbox command to go through */ 4369 spin_lock_irq(&phba->hbalock); 4370 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 4371 spin_unlock_irq(&phba->hbalock); 4372 done = 1; 4373 4374 if ((pmb->u.mb.un.varCfgPort.casabt == 1) && 4375 (pmb->u.mb.un.varCfgPort.gasabt == 0)) 4376 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4377 "3110 Port did not grant ASABT\n"); 4378 } 4379 } 4380 if (!done) { 4381 rc = -EINVAL; 4382 goto do_prep_failed; 4383 } 4384 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { 4385 if (!pmb->u.mb.un.varCfgPort.cMA) { 4386 rc = -ENXIO; 4387 goto do_prep_failed; 4388 } 4389 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { 4390 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 4391 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; 4392 phba->max_vports = (phba->max_vpi > phba->max_vports) ? 4393 phba->max_vpi : phba->max_vports; 4394 4395 } else 4396 phba->max_vpi = 0; 4397 phba->fips_level = 0; 4398 phba->fips_spec_rev = 0; 4399 if (pmb->u.mb.un.varCfgPort.gdss) { 4400 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; 4401 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level; 4402 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev; 4403 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4404 "2850 Security Crypto Active. 
FIPS x%d " 4405 "(Spec Rev: x%d)", 4406 phba->fips_level, phba->fips_spec_rev); 4407 } 4408 if (pmb->u.mb.un.varCfgPort.sec_err) { 4409 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4410 "2856 Config Port Security Crypto " 4411 "Error: x%x ", 4412 pmb->u.mb.un.varCfgPort.sec_err); 4413 } 4414 if (pmb->u.mb.un.varCfgPort.gerbm) 4415 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 4416 if (pmb->u.mb.un.varCfgPort.gcrp) 4417 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 4418 4419 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; 4420 phba->port_gp = phba->mbox->us.s3_pgp.port; 4421 4422 if (phba->cfg_enable_bg) { 4423 if (pmb->u.mb.un.varCfgPort.gbg) 4424 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 4425 else 4426 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4427 "0443 Adapter did not grant " 4428 "BlockGuard\n"); 4429 } 4430 } else { 4431 phba->hbq_get = NULL; 4432 phba->port_gp = phba->mbox->us.s2.port; 4433 phba->max_vpi = 0; 4434 } 4435 do_prep_failed: 4436 mempool_free(pmb, phba->mbox_mem_pool); 4437 return rc; 4438 } 4439 4440 4441 /** 4442 * lpfc_sli_hba_setup - SLI intialization function 4443 * @phba: Pointer to HBA context object. 4444 * 4445 * This function is the main SLI intialization function. This function 4446 * is called by the HBA intialization code, HBA reset code and HBA 4447 * error attention handler code. Caller is not required to hold any 4448 * locks. This function issues config_port mailbox command to configure 4449 * the SLI, setup iocb rings and HBQ rings. In the end the function 4450 * calls the config_port_post function to issue init_link mailbox 4451 * command and to start the discovery. The function will return zero 4452 * if successful, else it will return negative error code. 4453 **/ 4454 int 4455 lpfc_sli_hba_setup(struct lpfc_hba *phba) 4456 { 4457 uint32_t rc; 4458 int mode = 3, i; 4459 int longs; 4460 4461 switch (lpfc_sli_mode) { 4462 case 2: 4463 if (phba->cfg_enable_npiv) { 4464 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4465 "1824 NPIV enabled: Override lpfc_sli_mode " 4466 "parameter (%d) to auto (0).\n", 4467 lpfc_sli_mode); 4468 break; 4469 } 4470 mode = 2; 4471 break; 4472 case 0: 4473 case 3: 4474 break; 4475 default: 4476 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4477 "1819 Unrecognized lpfc_sli_mode " 4478 "parameter: %d.\n", lpfc_sli_mode); 4479 4480 break; 4481 } 4482 4483 rc = lpfc_sli_config_port(phba, mode); 4484 4485 if (rc && lpfc_sli_mode == 3) 4486 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4487 "1820 Unable to select SLI-3. 
" 4488 "Not supported by adapter.\n"); 4489 if (rc && mode != 2) 4490 rc = lpfc_sli_config_port(phba, 2); 4491 if (rc) 4492 goto lpfc_sli_hba_setup_error; 4493 4494 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 4495 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 4496 rc = pci_enable_pcie_error_reporting(phba->pcidev); 4497 if (!rc) { 4498 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4499 "2709 This device supports " 4500 "Advanced Error Reporting (AER)\n"); 4501 spin_lock_irq(&phba->hbalock); 4502 phba->hba_flag |= HBA_AER_ENABLED; 4503 spin_unlock_irq(&phba->hbalock); 4504 } else { 4505 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4506 "2708 This device does not support " 4507 "Advanced Error Reporting (AER)\n"); 4508 phba->cfg_aer_support = 0; 4509 } 4510 } 4511 4512 if (phba->sli_rev == 3) { 4513 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 4514 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 4515 } else { 4516 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 4517 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 4518 phba->sli3_options = 0; 4519 } 4520 4521 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4522 "0444 Firmware in SLI %x mode. Max_vpi %d\n", 4523 phba->sli_rev, phba->max_vpi); 4524 rc = lpfc_sli_ring_map(phba); 4525 4526 if (rc) 4527 goto lpfc_sli_hba_setup_error; 4528 4529 /* Initialize VPIs. */ 4530 if (phba->sli_rev == LPFC_SLI_REV3) { 4531 /* 4532 * The VPI bitmask and physical ID array are allocated 4533 * and initialized once only - at driver load. A port 4534 * reset doesn't need to reinitialize this memory. 4535 */ 4536 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) { 4537 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG; 4538 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), 4539 GFP_KERNEL); 4540 if (!phba->vpi_bmask) { 4541 rc = -ENOMEM; 4542 goto lpfc_sli_hba_setup_error; 4543 } 4544 4545 phba->vpi_ids = kzalloc( 4546 (phba->max_vpi+1) * sizeof(uint16_t), 4547 GFP_KERNEL); 4548 if (!phba->vpi_ids) { 4549 kfree(phba->vpi_bmask); 4550 rc = -ENOMEM; 4551 goto lpfc_sli_hba_setup_error; 4552 } 4553 for (i = 0; i < phba->max_vpi; i++) 4554 phba->vpi_ids[i] = i; 4555 } 4556 } 4557 4558 /* Init HBQs */ 4559 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 4560 rc = lpfc_sli_hbq_setup(phba); 4561 if (rc) 4562 goto lpfc_sli_hba_setup_error; 4563 } 4564 spin_lock_irq(&phba->hbalock); 4565 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4566 spin_unlock_irq(&phba->hbalock); 4567 4568 rc = lpfc_config_port_post(phba); 4569 if (rc) 4570 goto lpfc_sli_hba_setup_error; 4571 4572 return rc; 4573 4574 lpfc_sli_hba_setup_error: 4575 phba->link_state = LPFC_HBA_ERROR; 4576 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4577 "0445 Firmware initialization failed\n"); 4578 return rc; 4579 } 4580 4581 /** 4582 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region 4583 * @phba: Pointer to HBA context object. 4584 * @mboxq: mailbox pointer. 4585 * This function issue a dump mailbox command to read config region 4586 * 23 and parse the records in the region and populate driver 4587 * data structure. 
4588 **/ 4589 static int 4590 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba) 4591 { 4592 LPFC_MBOXQ_t *mboxq; 4593 struct lpfc_dmabuf *mp; 4594 struct lpfc_mqe *mqe; 4595 uint32_t data_length; 4596 int rc; 4597 4598 /* Program the default value of vlan_id and fc_map */ 4599 phba->valid_vlan = 0; 4600 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 4601 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 4602 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 4603 4604 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4605 if (!mboxq) 4606 return -ENOMEM; 4607 4608 mqe = &mboxq->u.mqe; 4609 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) { 4610 rc = -ENOMEM; 4611 goto out_free_mboxq; 4612 } 4613 4614 mp = (struct lpfc_dmabuf *) mboxq->context1; 4615 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4616 4617 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 4618 "(%d):2571 Mailbox cmd x%x Status x%x " 4619 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 4620 "x%x x%x x%x x%x x%x x%x x%x x%x x%x " 4621 "CQ: x%x x%x x%x x%x\n", 4622 mboxq->vport ? mboxq->vport->vpi : 0, 4623 bf_get(lpfc_mqe_command, mqe), 4624 bf_get(lpfc_mqe_status, mqe), 4625 mqe->un.mb_words[0], mqe->un.mb_words[1], 4626 mqe->un.mb_words[2], mqe->un.mb_words[3], 4627 mqe->un.mb_words[4], mqe->un.mb_words[5], 4628 mqe->un.mb_words[6], mqe->un.mb_words[7], 4629 mqe->un.mb_words[8], mqe->un.mb_words[9], 4630 mqe->un.mb_words[10], mqe->un.mb_words[11], 4631 mqe->un.mb_words[12], mqe->un.mb_words[13], 4632 mqe->un.mb_words[14], mqe->un.mb_words[15], 4633 mqe->un.mb_words[16], mqe->un.mb_words[50], 4634 mboxq->mcqe.word0, 4635 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 4636 mboxq->mcqe.trailer); 4637 4638 if (rc) { 4639 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4640 kfree(mp); 4641 rc = -EIO; 4642 goto out_free_mboxq; 4643 } 4644 data_length = mqe->un.mb_words[5]; 4645 if (data_length > DMP_RGN23_SIZE) { 4646 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4647 kfree(mp); 4648 rc = -EIO; 4649 goto out_free_mboxq; 4650 } 4651 4652 lpfc_parse_fcoe_conf(phba, mp->virt, data_length); 4653 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4654 kfree(mp); 4655 rc = 0; 4656 4657 out_free_mboxq: 4658 mempool_free(mboxq, phba->mbox_mem_pool); 4659 return rc; 4660 } 4661 4662 /** 4663 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data 4664 * @phba: pointer to lpfc hba data structure. 4665 * @mboxq: pointer to the LPFC_MBOXQ_t structure. 4666 * @vpd: pointer to the memory to hold resulting port vpd data. 4667 * @vpd_size: On input, the number of bytes allocated to @vpd. 4668 * On output, the number of data bytes in @vpd. 4669 * 4670 * This routine executes a READ_REV SLI4 mailbox command. In 4671 * addition, this routine gets the port vpd data. 4672 * 4673 * Return codes 4674 * 0 - successful 4675 * -ENOMEM - could not allocated memory. 4676 **/ 4677 static int 4678 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 4679 uint8_t *vpd, uint32_t *vpd_size) 4680 { 4681 int rc = 0; 4682 uint32_t dma_size; 4683 struct lpfc_dmabuf *dmabuf; 4684 struct lpfc_mqe *mqe; 4685 4686 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4687 if (!dmabuf) 4688 return -ENOMEM; 4689 4690 /* 4691 * Get a DMA buffer for the vpd data resulting from the READ_REV 4692 * mailbox command. 
4693 */ 4694 dma_size = *vpd_size; 4695 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 4696 dma_size, 4697 &dmabuf->phys, 4698 GFP_KERNEL); 4699 if (!dmabuf->virt) { 4700 kfree(dmabuf); 4701 return -ENOMEM; 4702 } 4703 memset(dmabuf->virt, 0, dma_size); 4704 4705 /* 4706 * The SLI4 implementation of READ_REV conflicts at word1, 4707 * bits 31:16 and SLI4 adds vpd functionality not present 4708 * in SLI3. This code corrects the conflicts. 4709 */ 4710 lpfc_read_rev(phba, mboxq); 4711 mqe = &mboxq->u.mqe; 4712 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); 4713 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); 4714 mqe->un.read_rev.word1 &= 0x0000FFFF; 4715 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); 4716 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); 4717 4718 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4719 if (rc) { 4720 dma_free_coherent(&phba->pcidev->dev, dma_size, 4721 dmabuf->virt, dmabuf->phys); 4722 kfree(dmabuf); 4723 return -EIO; 4724 } 4725 4726 /* 4727 * The available vpd length cannot be bigger than the 4728 * DMA buffer passed to the port. Catch the less than 4729 * case and update the caller's size. 4730 */ 4731 if (mqe->un.read_rev.avail_vpd_len < *vpd_size) 4732 *vpd_size = mqe->un.read_rev.avail_vpd_len; 4733 4734 memcpy(vpd, dmabuf->virt, *vpd_size); 4735 4736 dma_free_coherent(&phba->pcidev->dev, dma_size, 4737 dmabuf->virt, dmabuf->phys); 4738 kfree(dmabuf); 4739 return 0; 4740 } 4741 4742 /** 4743 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name 4744 * @phba: pointer to lpfc hba data structure. 4745 * 4746 * This routine retrieves SLI4 device physical port name this PCI function 4747 * is attached to. 4748 * 4749 * Return codes 4750 * 0 - sucessful 4751 * otherwise - failed to retrieve physical port name 4752 **/ 4753 static int 4754 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba) 4755 { 4756 LPFC_MBOXQ_t *mboxq; 4757 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr; 4758 struct lpfc_controller_attribute *cntl_attr; 4759 struct lpfc_mbx_get_port_name *get_port_name; 4760 void *virtaddr = NULL; 4761 uint32_t alloclen, reqlen; 4762 uint32_t shdr_status, shdr_add_status; 4763 union lpfc_sli4_cfg_shdr *shdr; 4764 char cport_name = 0; 4765 int rc; 4766 4767 /* We assume nothing at this point */ 4768 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 4769 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON; 4770 4771 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4772 if (!mboxq) 4773 return -ENOMEM; 4774 /* obtain link type and link number via READ_CONFIG */ 4775 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 4776 lpfc_sli4_read_config(phba); 4777 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) 4778 goto retrieve_ppname; 4779 4780 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */ 4781 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes); 4782 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 4783 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen, 4784 LPFC_SLI4_MBX_NEMBED); 4785 if (alloclen < reqlen) { 4786 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4787 "3084 Allocated DMA memory size (%d) is " 4788 "less than the requested DMA memory size " 4789 "(%d)\n", alloclen, reqlen); 4790 rc = -ENOMEM; 4791 goto out_free_mboxq; 4792 } 4793 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4794 virtaddr = mboxq->sge_array->addr[0]; 4795 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr; 4796 shdr = 
&mbx_cntl_attr->cfg_shdr; 4797 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 4798 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 4799 if (shdr_status || shdr_add_status || rc) { 4800 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 4801 "3085 Mailbox x%x (x%x/x%x) failed, " 4802 "rc:x%x, status:x%x, add_status:x%x\n", 4803 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4804 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 4805 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 4806 rc, shdr_status, shdr_add_status); 4807 rc = -ENXIO; 4808 goto out_free_mboxq; 4809 } 4810 cntl_attr = &mbx_cntl_attr->cntl_attr; 4811 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 4812 phba->sli4_hba.lnk_info.lnk_tp = 4813 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr); 4814 phba->sli4_hba.lnk_info.lnk_no = 4815 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr); 4816 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4817 "3086 lnk_type:%d, lnk_numb:%d\n", 4818 phba->sli4_hba.lnk_info.lnk_tp, 4819 phba->sli4_hba.lnk_info.lnk_no); 4820 4821 retrieve_ppname: 4822 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 4823 LPFC_MBOX_OPCODE_GET_PORT_NAME, 4824 sizeof(struct lpfc_mbx_get_port_name) - 4825 sizeof(struct lpfc_sli4_cfg_mhdr), 4826 LPFC_SLI4_MBX_EMBED); 4827 get_port_name = &mboxq->u.mqe.un.get_port_name; 4828 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr; 4829 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1); 4830 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request, 4831 phba->sli4_hba.lnk_info.lnk_tp); 4832 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4833 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 4834 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 4835 if (shdr_status || shdr_add_status || rc) { 4836 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 4837 "3087 Mailbox x%x (x%x/x%x) failed: " 4838 "rc:x%x, status:x%x, add_status:x%x\n", 4839 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4840 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 4841 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 4842 rc, shdr_status, shdr_add_status); 4843 rc = -ENXIO; 4844 goto out_free_mboxq; 4845 } 4846 switch (phba->sli4_hba.lnk_info.lnk_no) { 4847 case LPFC_LINK_NUMBER_0: 4848 cport_name = bf_get(lpfc_mbx_get_port_name_name0, 4849 &get_port_name->u.response); 4850 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 4851 break; 4852 case LPFC_LINK_NUMBER_1: 4853 cport_name = bf_get(lpfc_mbx_get_port_name_name1, 4854 &get_port_name->u.response); 4855 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 4856 break; 4857 case LPFC_LINK_NUMBER_2: 4858 cport_name = bf_get(lpfc_mbx_get_port_name_name2, 4859 &get_port_name->u.response); 4860 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 4861 break; 4862 case LPFC_LINK_NUMBER_3: 4863 cport_name = bf_get(lpfc_mbx_get_port_name_name3, 4864 &get_port_name->u.response); 4865 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 4866 break; 4867 default: 4868 break; 4869 } 4870 4871 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) { 4872 phba->Port[0] = cport_name; 4873 phba->Port[1] = '\0'; 4874 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4875 "3091 SLI get port name: %s\n", phba->Port); 4876 } 4877 4878 out_free_mboxq: 4879 if (rc != MBX_TIMEOUT) { 4880 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) 4881 lpfc_sli4_mbox_cmd_free(phba, mboxq); 4882 else 4883 mempool_free(mboxq, phba->mbox_mem_pool); 4884 } 4885 return rc; 4886 } 4887 4888 /** 4889 * 
lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues 4890 * @phba: pointer to lpfc hba data structure. 4891 * 4892 * This routine is called to explicitly arm the SLI4 device's completion and 4893 * event queues 4894 **/ 4895 static void 4896 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) 4897 { 4898 uint8_t fcp_eqidx; 4899 4900 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); 4901 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); 4902 fcp_eqidx = 0; 4903 if (phba->sli4_hba.fcp_cq) { 4904 do 4905 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], 4906 LPFC_QUEUE_REARM); 4907 while (++fcp_eqidx < phba->cfg_fcp_eq_count); 4908 } 4909 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM); 4910 if (phba->sli4_hba.fp_eq) { 4911 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; 4912 fcp_eqidx++) 4913 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx], 4914 LPFC_QUEUE_REARM); 4915 } 4916 } 4917 4918 /** 4919 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count. 4920 * @phba: Pointer to HBA context object. 4921 * @type: The resource extent type. 4922 * @extnt_count: buffer to hold port available extent count. 4923 * @extnt_size: buffer to hold element count per extent. 4924 * 4925 * This function calls the port and retrievs the number of available 4926 * extents and their size for a particular extent type. 4927 * 4928 * Returns: 0 if successful. Nonzero otherwise. 4929 **/ 4930 int 4931 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type, 4932 uint16_t *extnt_count, uint16_t *extnt_size) 4933 { 4934 int rc = 0; 4935 uint32_t length; 4936 uint32_t mbox_tmo; 4937 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info; 4938 LPFC_MBOXQ_t *mbox; 4939 4940 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4941 if (!mbox) 4942 return -ENOMEM; 4943 4944 /* Find out how many extents are available for this resource type */ 4945 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) - 4946 sizeof(struct lpfc_sli4_cfg_mhdr)); 4947 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 4948 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO, 4949 length, LPFC_SLI4_MBX_EMBED); 4950 4951 /* Send an extents count of 0 - the GET doesn't use it. */ 4952 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 4953 LPFC_SLI4_MBX_EMBED); 4954 if (unlikely(rc)) { 4955 rc = -EIO; 4956 goto err_exit; 4957 } 4958 4959 if (!phba->sli4_hba.intr_enable) 4960 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 4961 else { 4962 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 4963 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 4964 } 4965 if (unlikely(rc)) { 4966 rc = -EIO; 4967 goto err_exit; 4968 } 4969 4970 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info; 4971 if (bf_get(lpfc_mbox_hdr_status, 4972 &rsrc_info->header.cfg_shdr.response)) { 4973 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 4974 "2930 Failed to get resource extents " 4975 "Status 0x%x Add'l Status 0x%x\n", 4976 bf_get(lpfc_mbox_hdr_status, 4977 &rsrc_info->header.cfg_shdr.response), 4978 bf_get(lpfc_mbox_hdr_add_status, 4979 &rsrc_info->header.cfg_shdr.response)); 4980 rc = -EIO; 4981 goto err_exit; 4982 } 4983 4984 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt, 4985 &rsrc_info->u.rsp); 4986 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size, 4987 &rsrc_info->u.rsp); 4988 err_exit: 4989 mempool_free(mbox, phba->mbox_mem_pool); 4990 return rc; 4991 } 4992 4993 /** 4994 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents. 
4995 * @phba: Pointer to HBA context object. 4996 * @type: The extent type to check. 4997 * 4998 * This function reads the current available extents from the port and checks 4999 * if the extent count or extent size has changed since the last access. 5000 * Callers use this routine post port reset to understand if there is a 5001 * extent reprovisioning requirement. 5002 * 5003 * Returns: 5004 * -Error: error indicates problem. 5005 * 1: Extent count or size has changed. 5006 * 0: No changes. 5007 **/ 5008 static int 5009 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type) 5010 { 5011 uint16_t curr_ext_cnt, rsrc_ext_cnt; 5012 uint16_t size_diff, rsrc_ext_size; 5013 int rc = 0; 5014 struct lpfc_rsrc_blks *rsrc_entry; 5015 struct list_head *rsrc_blk_list = NULL; 5016 5017 size_diff = 0; 5018 curr_ext_cnt = 0; 5019 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5020 &rsrc_ext_cnt, 5021 &rsrc_ext_size); 5022 if (unlikely(rc)) 5023 return -EIO; 5024 5025 switch (type) { 5026 case LPFC_RSC_TYPE_FCOE_RPI: 5027 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5028 break; 5029 case LPFC_RSC_TYPE_FCOE_VPI: 5030 rsrc_blk_list = &phba->lpfc_vpi_blk_list; 5031 break; 5032 case LPFC_RSC_TYPE_FCOE_XRI: 5033 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5034 break; 5035 case LPFC_RSC_TYPE_FCOE_VFI: 5036 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5037 break; 5038 default: 5039 break; 5040 } 5041 5042 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) { 5043 curr_ext_cnt++; 5044 if (rsrc_entry->rsrc_size != rsrc_ext_size) 5045 size_diff++; 5046 } 5047 5048 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0) 5049 rc = 1; 5050 5051 return rc; 5052 } 5053 5054 /** 5055 * lpfc_sli4_cfg_post_extnts - 5056 * @phba: Pointer to HBA context object. 5057 * @extnt_cnt - number of available extents. 5058 * @type - the extent type (rpi, xri, vfi, vpi). 5059 * @emb - buffer to hold either MBX_EMBED or MBX_NEMBED operation. 5060 * @mbox - pointer to the caller's allocated mailbox structure. 5061 * 5062 * This function executes the extents allocation request. It also 5063 * takes care of the amount of memory needed to allocate or get the 5064 * allocated extents. It is the caller's responsibility to evaluate 5065 * the response. 5066 * 5067 * Returns: 5068 * -Error: Error value describes the condition found. 5069 * 0: if successful 5070 **/ 5071 static int 5072 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt, 5073 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox) 5074 { 5075 int rc = 0; 5076 uint32_t req_len; 5077 uint32_t emb_len; 5078 uint32_t alloc_len, mbox_tmo; 5079 5080 /* Calculate the total requested length of the dma memory */ 5081 req_len = *extnt_cnt * sizeof(uint16_t); 5082 5083 /* 5084 * Calculate the size of an embedded mailbox. The uint32_t 5085 * accounts for extents-specific word. 5086 */ 5087 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 5088 sizeof(uint32_t); 5089 5090 /* 5091 * Presume the allocation and response will fit into an embedded 5092 * mailbox. If not true, reconfigure to a non-embedded mailbox. 
5093 */ 5094 *emb = LPFC_SLI4_MBX_EMBED; 5095 if (req_len > emb_len) { 5096 req_len = *extnt_cnt * sizeof(uint16_t) + 5097 sizeof(union lpfc_sli4_cfg_shdr) + 5098 sizeof(uint32_t); 5099 *emb = LPFC_SLI4_MBX_NEMBED; 5100 } 5101 5102 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5103 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT, 5104 req_len, *emb); 5105 if (alloc_len < req_len) { 5106 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5107 "2982 Allocated DMA memory size (x%x) is " 5108 "less than the requested DMA memory " 5109 "size (x%x)\n", alloc_len, req_len); 5110 return -ENOMEM; 5111 } 5112 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, *extnt_cnt, type, *emb); 5113 if (unlikely(rc)) 5114 return -EIO; 5115 5116 if (!phba->sli4_hba.intr_enable) 5117 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5118 else { 5119 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5120 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5121 } 5122 5123 if (unlikely(rc)) 5124 rc = -EIO; 5125 return rc; 5126 } 5127 5128 /** 5129 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent. 5130 * @phba: Pointer to HBA context object. 5131 * @type: The resource extent type to allocate. 5132 * 5133 * This function allocates the number of elements for the specified 5134 * resource type. 5135 **/ 5136 static int 5137 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) 5138 { 5139 bool emb = false; 5140 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size; 5141 uint16_t rsrc_id, rsrc_start, j, k; 5142 uint16_t *ids; 5143 int i, rc; 5144 unsigned long longs; 5145 unsigned long *bmask; 5146 struct lpfc_rsrc_blks *rsrc_blks; 5147 LPFC_MBOXQ_t *mbox; 5148 uint32_t length; 5149 struct lpfc_id_range *id_array = NULL; 5150 void *virtaddr = NULL; 5151 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 5152 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 5153 struct list_head *ext_blk_list; 5154 5155 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5156 &rsrc_cnt, 5157 &rsrc_size); 5158 if (unlikely(rc)) 5159 return -EIO; 5160 5161 if ((rsrc_cnt == 0) || (rsrc_size == 0)) { 5162 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5163 "3009 No available Resource Extents " 5164 "for resource type 0x%x: Count: 0x%x, " 5165 "Size 0x%x\n", type, rsrc_cnt, 5166 rsrc_size); 5167 return -ENOMEM; 5168 } 5169 5170 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT, 5171 "2903 Available Resource Extents " 5172 "for resource type 0x%x: Count: 0x%x, " 5173 "Size 0x%x\n", type, rsrc_cnt, 5174 rsrc_size); 5175 5176 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5177 if (!mbox) 5178 return -ENOMEM; 5179 5180 rc = lpfc_sli4_cfg_post_extnts(phba, &rsrc_cnt, type, &emb, mbox); 5181 if (unlikely(rc)) { 5182 rc = -EIO; 5183 goto err_exit; 5184 } 5185 5186 /* 5187 * Figure out where the response is located. Then get local pointers 5188 * to the response data. The port does not guarantee to respond to 5189 * all extents counts request so update the local variable with the 5190 * allocated count from the port. 
5191 */ 5192 if (emb == LPFC_SLI4_MBX_EMBED) { 5193 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 5194 id_array = &rsrc_ext->u.rsp.id[0]; 5195 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 5196 } else { 5197 virtaddr = mbox->sge_array->addr[0]; 5198 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 5199 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 5200 id_array = &n_rsrc->id; 5201 } 5202 5203 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG; 5204 rsrc_id_cnt = rsrc_cnt * rsrc_size; 5205 5206 /* 5207 * Based on the resource size and count, correct the base and max 5208 * resource values. 5209 */ 5210 length = sizeof(struct lpfc_rsrc_blks); 5211 switch (type) { 5212 case LPFC_RSC_TYPE_FCOE_RPI: 5213 phba->sli4_hba.rpi_bmask = kzalloc(longs * 5214 sizeof(unsigned long), 5215 GFP_KERNEL); 5216 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 5217 rc = -ENOMEM; 5218 goto err_exit; 5219 } 5220 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt * 5221 sizeof(uint16_t), 5222 GFP_KERNEL); 5223 if (unlikely(!phba->sli4_hba.rpi_ids)) { 5224 kfree(phba->sli4_hba.rpi_bmask); 5225 rc = -ENOMEM; 5226 goto err_exit; 5227 } 5228 5229 /* 5230 * The next_rpi was initialized with the maximum available 5231 * count but the port may allocate a smaller number. Catch 5232 * that case and update the next_rpi. 5233 */ 5234 phba->sli4_hba.next_rpi = rsrc_id_cnt; 5235 5236 /* Initialize local ptrs for common extent processing later. */ 5237 bmask = phba->sli4_hba.rpi_bmask; 5238 ids = phba->sli4_hba.rpi_ids; 5239 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5240 break; 5241 case LPFC_RSC_TYPE_FCOE_VPI: 5242 phba->vpi_bmask = kzalloc(longs * 5243 sizeof(unsigned long), 5244 GFP_KERNEL); 5245 if (unlikely(!phba->vpi_bmask)) { 5246 rc = -ENOMEM; 5247 goto err_exit; 5248 } 5249 phba->vpi_ids = kzalloc(rsrc_id_cnt * 5250 sizeof(uint16_t), 5251 GFP_KERNEL); 5252 if (unlikely(!phba->vpi_ids)) { 5253 kfree(phba->vpi_bmask); 5254 rc = -ENOMEM; 5255 goto err_exit; 5256 } 5257 5258 /* Initialize local ptrs for common extent processing later. */ 5259 bmask = phba->vpi_bmask; 5260 ids = phba->vpi_ids; 5261 ext_blk_list = &phba->lpfc_vpi_blk_list; 5262 break; 5263 case LPFC_RSC_TYPE_FCOE_XRI: 5264 phba->sli4_hba.xri_bmask = kzalloc(longs * 5265 sizeof(unsigned long), 5266 GFP_KERNEL); 5267 if (unlikely(!phba->sli4_hba.xri_bmask)) { 5268 rc = -ENOMEM; 5269 goto err_exit; 5270 } 5271 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt * 5272 sizeof(uint16_t), 5273 GFP_KERNEL); 5274 if (unlikely(!phba->sli4_hba.xri_ids)) { 5275 kfree(phba->sli4_hba.xri_bmask); 5276 rc = -ENOMEM; 5277 goto err_exit; 5278 } 5279 5280 /* Initialize local ptrs for common extent processing later. */ 5281 bmask = phba->sli4_hba.xri_bmask; 5282 ids = phba->sli4_hba.xri_ids; 5283 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5284 break; 5285 case LPFC_RSC_TYPE_FCOE_VFI: 5286 phba->sli4_hba.vfi_bmask = kzalloc(longs * 5287 sizeof(unsigned long), 5288 GFP_KERNEL); 5289 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 5290 rc = -ENOMEM; 5291 goto err_exit; 5292 } 5293 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt * 5294 sizeof(uint16_t), 5295 GFP_KERNEL); 5296 if (unlikely(!phba->sli4_hba.vfi_ids)) { 5297 kfree(phba->sli4_hba.vfi_bmask); 5298 rc = -ENOMEM; 5299 goto err_exit; 5300 } 5301 5302 /* Initialize local ptrs for common extent processing later. */ 5303 bmask = phba->sli4_hba.vfi_bmask; 5304 ids = phba->sli4_hba.vfi_ids; 5305 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5306 break; 5307 default: 5308 /* Unsupported Opcode. 
Fail call. */ 5309 id_array = NULL; 5310 bmask = NULL; 5311 ids = NULL; 5312 ext_blk_list = NULL; 5313 goto err_exit; 5314 } 5315 5316 /* 5317 * Complete initializing the extent configuration with the 5318 * allocated ids assigned to this function. The bitmask serves 5319 * as an index into the array and manages the available ids. The 5320 * array just stores the ids communicated to the port via the wqes. 5321 */ 5322 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) { 5323 if ((i % 2) == 0) 5324 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0, 5325 &id_array[k]); 5326 else 5327 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1, 5328 &id_array[k]); 5329 5330 rsrc_blks = kzalloc(length, GFP_KERNEL); 5331 if (unlikely(!rsrc_blks)) { 5332 rc = -ENOMEM; 5333 kfree(bmask); 5334 kfree(ids); 5335 goto err_exit; 5336 } 5337 rsrc_blks->rsrc_start = rsrc_id; 5338 rsrc_blks->rsrc_size = rsrc_size; 5339 list_add_tail(&rsrc_blks->list, ext_blk_list); 5340 rsrc_start = rsrc_id; 5341 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) 5342 phba->sli4_hba.scsi_xri_start = rsrc_start + 5343 lpfc_sli4_get_els_iocb_cnt(phba); 5344 5345 while (rsrc_id < (rsrc_start + rsrc_size)) { 5346 ids[j] = rsrc_id; 5347 rsrc_id++; 5348 j++; 5349 } 5350 /* Entire word processed. Get next word.*/ 5351 if ((i % 2) == 1) 5352 k++; 5353 } 5354 err_exit: 5355 lpfc_sli4_mbox_cmd_free(phba, mbox); 5356 return rc; 5357 } 5358 5359 /** 5360 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent. 5361 * @phba: Pointer to HBA context object. 5362 * @type: the extent's type. 5363 * 5364 * This function deallocates all extents of a particular resource type. 5365 * SLI4 does not allow for deallocating a particular extent range. It 5366 * is the caller's responsibility to release all kernel memory resources. 5367 **/ 5368 static int 5369 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) 5370 { 5371 int rc; 5372 uint32_t length, mbox_tmo = 0; 5373 LPFC_MBOXQ_t *mbox; 5374 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc; 5375 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next; 5376 5377 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5378 if (!mbox) 5379 return -ENOMEM; 5380 5381 /* 5382 * This function sends an embedded mailbox because it only sends the 5383 * the resource type. All extents of this type are released by the 5384 * port. 5385 */ 5386 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) - 5387 sizeof(struct lpfc_sli4_cfg_mhdr)); 5388 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5389 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT, 5390 length, LPFC_SLI4_MBX_EMBED); 5391 5392 /* Send an extents count of 0 - the dealloc doesn't use it. */ 5393 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 5394 LPFC_SLI4_MBX_EMBED); 5395 if (unlikely(rc)) { 5396 rc = -EIO; 5397 goto out_free_mbox; 5398 } 5399 if (!phba->sli4_hba.intr_enable) 5400 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5401 else { 5402 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5403 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5404 } 5405 if (unlikely(rc)) { 5406 rc = -EIO; 5407 goto out_free_mbox; 5408 } 5409 5410 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents; 5411 if (bf_get(lpfc_mbox_hdr_status, 5412 &dealloc_rsrc->header.cfg_shdr.response)) { 5413 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5414 "2919 Failed to release resource extents " 5415 "for type %d - Status 0x%x Add'l Status 0x%x. 
" 5416 "Resource memory not released.\n", 5417 type, 5418 bf_get(lpfc_mbox_hdr_status, 5419 &dealloc_rsrc->header.cfg_shdr.response), 5420 bf_get(lpfc_mbox_hdr_add_status, 5421 &dealloc_rsrc->header.cfg_shdr.response)); 5422 rc = -EIO; 5423 goto out_free_mbox; 5424 } 5425 5426 /* Release kernel memory resources for the specific type. */ 5427 switch (type) { 5428 case LPFC_RSC_TYPE_FCOE_VPI: 5429 kfree(phba->vpi_bmask); 5430 kfree(phba->vpi_ids); 5431 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5432 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5433 &phba->lpfc_vpi_blk_list, list) { 5434 list_del_init(&rsrc_blk->list); 5435 kfree(rsrc_blk); 5436 } 5437 break; 5438 case LPFC_RSC_TYPE_FCOE_XRI: 5439 kfree(phba->sli4_hba.xri_bmask); 5440 kfree(phba->sli4_hba.xri_ids); 5441 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5442 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5443 &phba->sli4_hba.lpfc_xri_blk_list, list) { 5444 list_del_init(&rsrc_blk->list); 5445 kfree(rsrc_blk); 5446 } 5447 break; 5448 case LPFC_RSC_TYPE_FCOE_VFI: 5449 kfree(phba->sli4_hba.vfi_bmask); 5450 kfree(phba->sli4_hba.vfi_ids); 5451 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5452 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5453 &phba->sli4_hba.lpfc_vfi_blk_list, list) { 5454 list_del_init(&rsrc_blk->list); 5455 kfree(rsrc_blk); 5456 } 5457 break; 5458 case LPFC_RSC_TYPE_FCOE_RPI: 5459 /* RPI bitmask and physical id array are cleaned up earlier. */ 5460 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5461 &phba->sli4_hba.lpfc_rpi_blk_list, list) { 5462 list_del_init(&rsrc_blk->list); 5463 kfree(rsrc_blk); 5464 } 5465 break; 5466 default: 5467 break; 5468 } 5469 5470 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5471 5472 out_free_mbox: 5473 mempool_free(mbox, phba->mbox_mem_pool); 5474 return rc; 5475 } 5476 5477 /** 5478 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents. 5479 * @phba: Pointer to HBA context object. 5480 * 5481 * This function allocates all SLI4 resource identifiers. 5482 **/ 5483 int 5484 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) 5485 { 5486 int i, rc, error = 0; 5487 uint16_t count, base; 5488 unsigned long longs; 5489 5490 if (!phba->sli4_hba.rpi_hdrs_in_use) 5491 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 5492 if (phba->sli4_hba.extents_in_use) { 5493 /* 5494 * The port supports resource extents. The XRI, VPI, VFI, RPI 5495 * resource extent count must be read and allocated before 5496 * provisioning the resource id arrays. 5497 */ 5498 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 5499 LPFC_IDX_RSRC_RDY) { 5500 /* 5501 * Extent-based resources are set - the driver could 5502 * be in a port reset. Figure out if any corrective 5503 * actions need to be taken. 5504 */ 5505 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5506 LPFC_RSC_TYPE_FCOE_VFI); 5507 if (rc != 0) 5508 error++; 5509 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5510 LPFC_RSC_TYPE_FCOE_VPI); 5511 if (rc != 0) 5512 error++; 5513 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5514 LPFC_RSC_TYPE_FCOE_XRI); 5515 if (rc != 0) 5516 error++; 5517 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5518 LPFC_RSC_TYPE_FCOE_RPI); 5519 if (rc != 0) 5520 error++; 5521 5522 /* 5523 * It's possible that the number of resources 5524 * provided to this port instance changed between 5525 * resets. Detect this condition and reallocate 5526 * resources. Otherwise, there is no action. 
5527 */ 5528 if (error) { 5529 lpfc_printf_log(phba, KERN_INFO, 5530 LOG_MBOX | LOG_INIT, 5531 "2931 Detected extent resource " 5532 "change. Reallocating all " 5533 "extents.\n"); 5534 rc = lpfc_sli4_dealloc_extent(phba, 5535 LPFC_RSC_TYPE_FCOE_VFI); 5536 rc = lpfc_sli4_dealloc_extent(phba, 5537 LPFC_RSC_TYPE_FCOE_VPI); 5538 rc = lpfc_sli4_dealloc_extent(phba, 5539 LPFC_RSC_TYPE_FCOE_XRI); 5540 rc = lpfc_sli4_dealloc_extent(phba, 5541 LPFC_RSC_TYPE_FCOE_RPI); 5542 } else 5543 return 0; 5544 } 5545 5546 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 5547 if (unlikely(rc)) 5548 goto err_exit; 5549 5550 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 5551 if (unlikely(rc)) 5552 goto err_exit; 5553 5554 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 5555 if (unlikely(rc)) 5556 goto err_exit; 5557 5558 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 5559 if (unlikely(rc)) 5560 goto err_exit; 5561 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 5562 LPFC_IDX_RSRC_RDY); 5563 return rc; 5564 } else { 5565 /* 5566 * The port does not support resource extents. The XRI, VPI, 5567 * VFI, RPI resource ids were determined from READ_CONFIG. 5568 * Just allocate the bitmasks and provision the resource id 5569 * arrays. If a port reset is active, the resources don't 5570 * need any action - just exit. 5571 */ 5572 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 5573 LPFC_IDX_RSRC_RDY) { 5574 lpfc_sli4_dealloc_resource_identifiers(phba); 5575 lpfc_sli4_remove_rpis(phba); 5576 } 5577 /* RPIs. */ 5578 count = phba->sli4_hba.max_cfg_param.max_rpi; 5579 base = phba->sli4_hba.max_cfg_param.rpi_base; 5580 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5581 phba->sli4_hba.rpi_bmask = kzalloc(longs * 5582 sizeof(unsigned long), 5583 GFP_KERNEL); 5584 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 5585 rc = -ENOMEM; 5586 goto err_exit; 5587 } 5588 phba->sli4_hba.rpi_ids = kzalloc(count * 5589 sizeof(uint16_t), 5590 GFP_KERNEL); 5591 if (unlikely(!phba->sli4_hba.rpi_ids)) { 5592 rc = -ENOMEM; 5593 goto free_rpi_bmask; 5594 } 5595 5596 for (i = 0; i < count; i++) 5597 phba->sli4_hba.rpi_ids[i] = base + i; 5598 5599 /* VPIs. */ 5600 count = phba->sli4_hba.max_cfg_param.max_vpi; 5601 base = phba->sli4_hba.max_cfg_param.vpi_base; 5602 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5603 phba->vpi_bmask = kzalloc(longs * 5604 sizeof(unsigned long), 5605 GFP_KERNEL); 5606 if (unlikely(!phba->vpi_bmask)) { 5607 rc = -ENOMEM; 5608 goto free_rpi_ids; 5609 } 5610 phba->vpi_ids = kzalloc(count * 5611 sizeof(uint16_t), 5612 GFP_KERNEL); 5613 if (unlikely(!phba->vpi_ids)) { 5614 rc = -ENOMEM; 5615 goto free_vpi_bmask; 5616 } 5617 5618 for (i = 0; i < count; i++) 5619 phba->vpi_ids[i] = base + i; 5620 5621 /* XRIs. */ 5622 count = phba->sli4_hba.max_cfg_param.max_xri; 5623 base = phba->sli4_hba.max_cfg_param.xri_base; 5624 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5625 phba->sli4_hba.xri_bmask = kzalloc(longs * 5626 sizeof(unsigned long), 5627 GFP_KERNEL); 5628 if (unlikely(!phba->sli4_hba.xri_bmask)) { 5629 rc = -ENOMEM; 5630 goto free_vpi_ids; 5631 } 5632 phba->sli4_hba.xri_ids = kzalloc(count * 5633 sizeof(uint16_t), 5634 GFP_KERNEL); 5635 if (unlikely(!phba->sli4_hba.xri_ids)) { 5636 rc = -ENOMEM; 5637 goto free_xri_bmask; 5638 } 5639 5640 for (i = 0; i < count; i++) 5641 phba->sli4_hba.xri_ids[i] = base + i; 5642 5643 /* VFIs. 
*/ 5644 count = phba->sli4_hba.max_cfg_param.max_vfi; 5645 base = phba->sli4_hba.max_cfg_param.vfi_base; 5646 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5647 phba->sli4_hba.vfi_bmask = kzalloc(longs * 5648 sizeof(unsigned long), 5649 GFP_KERNEL); 5650 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 5651 rc = -ENOMEM; 5652 goto free_xri_ids; 5653 } 5654 phba->sli4_hba.vfi_ids = kzalloc(count * 5655 sizeof(uint16_t), 5656 GFP_KERNEL); 5657 if (unlikely(!phba->sli4_hba.vfi_ids)) { 5658 rc = -ENOMEM; 5659 goto free_vfi_bmask; 5660 } 5661 5662 for (i = 0; i < count; i++) 5663 phba->sli4_hba.vfi_ids[i] = base + i; 5664 5665 /* 5666 * Mark all resources ready. An HBA reset doesn't need 5667 * to reset the initialization. 5668 */ 5669 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 5670 LPFC_IDX_RSRC_RDY); 5671 return 0; 5672 } 5673 5674 free_vfi_bmask: 5675 kfree(phba->sli4_hba.vfi_bmask); 5676 free_xri_ids: 5677 kfree(phba->sli4_hba.xri_ids); 5678 free_xri_bmask: 5679 kfree(phba->sli4_hba.xri_bmask); 5680 free_vpi_ids: 5681 kfree(phba->vpi_ids); 5682 free_vpi_bmask: 5683 kfree(phba->vpi_bmask); 5684 free_rpi_ids: 5685 kfree(phba->sli4_hba.rpi_ids); 5686 free_rpi_bmask: 5687 kfree(phba->sli4_hba.rpi_bmask); 5688 err_exit: 5689 return rc; 5690 } 5691 5692 /** 5693 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents. 5694 * @phba: Pointer to HBA context object. 5695 * 5696 * This function releases all of the resource identifiers (and their 5697 * bitmasks) that were allocated for this port, for every resource type. 5698 **/ 5699 int 5700 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba) 5701 { 5702 if (phba->sli4_hba.extents_in_use) { 5703 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 5704 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 5705 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 5706 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 5707 } else { 5708 kfree(phba->vpi_bmask); 5709 kfree(phba->vpi_ids); 5710 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5711 kfree(phba->sli4_hba.xri_bmask); 5712 kfree(phba->sli4_hba.xri_ids); 5713 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5714 kfree(phba->sli4_hba.vfi_bmask); 5715 kfree(phba->sli4_hba.vfi_ids); 5716 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5717 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5718 } 5719 5720 return 0; 5721 } 5722 5723 /** 5724 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents. 5725 * @phba: Pointer to HBA context object. 5726 * @type: The resource extent type. 5727 * @extnt_cnt: buffer to hold port extent count response. 5728 * @extnt_size: buffer to hold port extent size response. 5729 * 5730 * This function calls the port to read the host allocated extents 5731 * for a particular type.
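* * Returns 0 on success, -ENOMEM if the mailbox resources cannot be set up, * and -EIO if the resource type is unsupported or the mailbox command fails.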
5732 **/ 5733 int 5734 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type, 5735 uint16_t *extnt_cnt, uint16_t *extnt_size) 5736 { 5737 bool emb; 5738 int rc = 0; 5739 uint16_t curr_blks = 0; 5740 uint32_t req_len, emb_len; 5741 uint32_t alloc_len, mbox_tmo; 5742 struct list_head *blk_list_head; 5743 struct lpfc_rsrc_blks *rsrc_blk; 5744 LPFC_MBOXQ_t *mbox; 5745 void *virtaddr = NULL; 5746 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 5747 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 5748 union lpfc_sli4_cfg_shdr *shdr; 5749 5750 switch (type) { 5751 case LPFC_RSC_TYPE_FCOE_VPI: 5752 blk_list_head = &phba->lpfc_vpi_blk_list; 5753 break; 5754 case LPFC_RSC_TYPE_FCOE_XRI: 5755 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list; 5756 break; 5757 case LPFC_RSC_TYPE_FCOE_VFI: 5758 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list; 5759 break; 5760 case LPFC_RSC_TYPE_FCOE_RPI: 5761 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list; 5762 break; 5763 default: 5764 return -EIO; 5765 } 5766 5767 /* Count the number of extents currently allocated for this type. */ 5768 list_for_each_entry(rsrc_blk, blk_list_head, list) { 5769 if (curr_blks == 0) { 5770 /* 5771 * The GET_ALLOCATED mailbox does not return the size, 5772 * just the count. The size should be just the size 5773 * stored in the current allocated block and all sizes 5774 * for an extent type are the same so set the return 5775 * value now. 5776 */ 5777 *extnt_size = rsrc_blk->rsrc_size; 5778 } 5779 curr_blks++; 5780 } 5781 5782 /* Calculate the total requested length of the dma memory. */ 5783 req_len = curr_blks * sizeof(uint16_t); 5784 5785 /* 5786 * Calculate the size of an embedded mailbox. The uint32_t 5787 * accounts for the extents-specific word. 5788 */ 5789 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 5790 sizeof(uint32_t); 5791 5792 /* 5793 * Presume the allocation and response will fit into an embedded 5794 * mailbox. If not true, reconfigure to a non-embedded mailbox. 5795 */ 5796 emb = LPFC_SLI4_MBX_EMBED; 5797 req_len = emb_len; 5798 if (req_len > emb_len) { 5799 req_len = curr_blks * sizeof(uint16_t) + 5800 sizeof(union lpfc_sli4_cfg_shdr) + 5801 sizeof(uint32_t); 5802 emb = LPFC_SLI4_MBX_NEMBED; 5803 } 5804 5805 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5806 if (!mbox) 5807 return -ENOMEM; 5808 memset(mbox, 0, sizeof(LPFC_MBOXQ_t)); 5809 5810 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5811 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT, 5812 req_len, emb); 5813 if (alloc_len < req_len) { 5814 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5815 "2983 Allocated DMA memory size (x%x) is " 5816 "less than the requested DMA memory " 5817 "size (x%x)\n", alloc_len, req_len); 5818 rc = -ENOMEM; 5819 goto err_exit; 5820 } 5821 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb); 5822 if (unlikely(rc)) { 5823 rc = -EIO; 5824 goto err_exit; 5825 } 5826 5827 if (!phba->sli4_hba.intr_enable) 5828 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5829 else { 5830 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5831 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5832 } 5833 5834 if (unlikely(rc)) { 5835 rc = -EIO; 5836 goto err_exit; 5837 } 5838 5839 /* 5840 * Figure out where the response is located. Then get local pointers 5841 * to the response data. The port is not guaranteed to respond to 5842 * every extent count requested, so update the local variable with the 5843 * count actually allocated by the port.
5844 */ 5845 if (emb == LPFC_SLI4_MBX_EMBED) { 5846 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 5847 shdr = &rsrc_ext->header.cfg_shdr; 5848 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 5849 } else { 5850 virtaddr = mbox->sge_array->addr[0]; 5851 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 5852 shdr = &n_rsrc->cfg_shdr; 5853 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 5854 } 5855 5856 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) { 5857 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5858 "2984 Failed to read allocated resources " 5859 "for type %d - Status 0x%x Add'l Status 0x%x.\n", 5860 type, 5861 bf_get(lpfc_mbox_hdr_status, &shdr->response), 5862 bf_get(lpfc_mbox_hdr_add_status, &shdr->response)); 5863 rc = -EIO; 5864 goto err_exit; 5865 } 5866 err_exit: 5867 lpfc_sli4_mbox_cmd_free(phba, mbox); 5868 return rc; 5869 } 5870 5871 /** 5872 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function 5873 * @phba: Pointer to HBA context object. 5874 * 5875 * This function is the main SLI4 device initialization PCI function. This 5876 * function is called by the HBA initialization code, HBA reset code and 5877 * HBA error attention handler code. Caller is not required to hold any 5878 * locks. 5879 **/ 5880 int 5881 lpfc_sli4_hba_setup(struct lpfc_hba *phba) 5882 { 5883 int rc; 5884 LPFC_MBOXQ_t *mboxq; 5885 struct lpfc_mqe *mqe; 5886 uint8_t *vpd; 5887 uint32_t vpd_size; 5888 uint32_t ftr_rsp = 0; 5889 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); 5890 struct lpfc_vport *vport = phba->pport; 5891 struct lpfc_dmabuf *mp; 5892 5893 /* Perform a PCI function reset to start from clean */ 5894 rc = lpfc_pci_function_reset(phba); 5895 if (unlikely(rc)) 5896 return -ENODEV; 5897 5898 /* Check the HBA Host Status Register for readiness */ 5899 rc = lpfc_sli4_post_status_check(phba); 5900 if (unlikely(rc)) 5901 return -ENODEV; 5902 else { 5903 spin_lock_irq(&phba->hbalock); 5904 phba->sli.sli_flag |= LPFC_SLI_ACTIVE; 5905 spin_unlock_irq(&phba->hbalock); 5906 } 5907 5908 /* 5909 * Allocate a single mailbox container for initializing the 5910 * port. 5911 */ 5912 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5913 if (!mboxq) 5914 return -ENOMEM; 5915 5916 /* Issue READ_REV to collect vpd and FW information. */ 5917 vpd_size = SLI4_PAGE_SIZE; 5918 vpd = kzalloc(vpd_size, GFP_KERNEL); 5919 if (!vpd) { 5920 rc = -ENOMEM; 5921 goto out_free_mbox; 5922 } 5923 5924 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); 5925 if (unlikely(rc)) { 5926 kfree(vpd); 5927 goto out_free_mbox; 5928 } 5929 mqe = &mboxq->u.mqe; 5930 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 5931 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) 5932 phba->hba_flag |= HBA_FCOE_MODE; 5933 else 5934 phba->hba_flag &= ~HBA_FCOE_MODE; 5935 5936 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == 5937 LPFC_DCBX_CEE_MODE) 5938 phba->hba_flag |= HBA_FIP_SUPPORT; 5939 else 5940 phba->hba_flag &= ~HBA_FIP_SUPPORT; 5941 5942 if (phba->sli_rev != LPFC_SLI_REV4) { 5943 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5944 "0376 READ_REV Error.
SLI Level %d " 5945 "FCoE enabled %d\n", 5946 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE); 5947 rc = -EIO; 5948 kfree(vpd); 5949 goto out_free_mbox; 5950 } 5951 5952 /* 5953 * Continue initialization with default values even if the driver fails 5954 * to read the FCoE param config regions; only read the parameters if 5955 * the board is FCoE. 5956 */ 5957 if (phba->hba_flag & HBA_FCOE_MODE && 5958 lpfc_sli4_read_fcoe_params(phba)) 5959 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT, 5960 "2570 Failed to read FCoE parameters\n"); 5961 5962 /* 5963 * Retrieve the SLI4 device physical port name; a failure here 5964 * is considered non-fatal. 5965 */ 5966 rc = lpfc_sli4_retrieve_pport_name(phba); 5967 if (!rc) 5968 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5969 "3080 Successful retrieving SLI4 device " 5970 "physical port name: %s.\n", phba->Port); 5971 5972 /* 5973 * Evaluate the read rev and vpd data. Populate the driver 5974 * state with the results. If this routine fails, the failure 5975 * is not fatal as the driver will use generic values. 5976 */ 5977 rc = lpfc_parse_vpd(phba, vpd, vpd_size); 5978 if (unlikely(!rc)) { 5979 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5980 "0377 Error %d parsing vpd. " 5981 "Using defaults.\n", rc); 5982 rc = 0; 5983 } 5984 kfree(vpd); 5985 5986 /* Save information as VPD data */ 5987 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; 5988 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; 5989 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; 5990 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, 5991 &mqe->un.read_rev); 5992 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, 5993 &mqe->un.read_rev); 5994 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, 5995 &mqe->un.read_rev); 5996 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, 5997 &mqe->un.read_rev); 5998 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; 5999 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); 6000 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; 6001 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); 6002 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; 6003 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); 6004 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6005 "(%d):0380 READ_REV Status x%x " 6006 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", 6007 mboxq->vport ? mboxq->vport->vpi : 0, 6008 bf_get(lpfc_mqe_status, mqe), 6009 phba->vpd.rev.opFwName, 6010 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, 6011 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); 6012 6013 /* 6014 * Discover the port's supported feature set and match it against the 6015 * host's requests. 6016 */ 6017 lpfc_request_features(phba, mboxq); 6018 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6019 if (unlikely(rc)) { 6020 rc = -EIO; 6021 goto out_free_mbox; 6022 } 6023 6024 /* 6025 * The port must support FCP initiator mode as this is the 6026 * only mode running in the host.
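* (The driver never acts as an FCP target, so a missing fcpi feature bit * below is reported as a feature mismatch.)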
6027 */ 6028 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { 6029 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 6030 "0378 No support for fcpi mode.\n"); 6031 ftr_rsp++; 6032 } 6033 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) 6034 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; 6035 else 6036 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED; 6037 /* 6038 * If the port cannot support the host's requested features 6039 * then turn off the global config parameters to disable the 6040 * feature in the driver. This is not a fatal error. 6041 */ 6042 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 6043 if (phba->cfg_enable_bg) { 6044 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)) 6045 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 6046 else 6047 ftr_rsp++; 6048 } 6049 6050 if (phba->max_vpi && phba->cfg_enable_npiv && 6051 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 6052 ftr_rsp++; 6053 6054 if (ftr_rsp) { 6055 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 6056 "0379 Feature Mismatch Data: x%08x %08x " 6057 "x%x x%x x%x\n", mqe->un.req_ftrs.word2, 6058 mqe->un.req_ftrs.word3, phba->cfg_enable_bg, 6059 phba->cfg_enable_npiv, phba->max_vpi); 6060 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 6061 phba->cfg_enable_bg = 0; 6062 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 6063 phba->cfg_enable_npiv = 0; 6064 } 6065 6066 /* These SLI3 features are assumed in SLI4 */ 6067 spin_lock_irq(&phba->hbalock); 6068 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 6069 spin_unlock_irq(&phba->hbalock); 6070 6071 /* 6072 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent 6073 * calls depends on these resources to complete port setup. 6074 */ 6075 rc = lpfc_sli4_alloc_resource_identifiers(phba); 6076 if (rc) { 6077 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6078 "2920 Failed to alloc Resource IDs " 6079 "rc = x%x\n", rc); 6080 goto out_free_mbox; 6081 } 6082 /* update physical xri mappings in the scsi buffers */ 6083 lpfc_scsi_buf_update(phba); 6084 6085 /* Read the port's service parameters. */ 6086 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 6087 if (rc) { 6088 phba->link_state = LPFC_HBA_ERROR; 6089 rc = -ENOMEM; 6090 goto out_free_mbox; 6091 } 6092 6093 mboxq->vport = vport; 6094 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6095 mp = (struct lpfc_dmabuf *) mboxq->context1; 6096 if (rc == MBX_SUCCESS) { 6097 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); 6098 rc = 0; 6099 } 6100 6101 /* 6102 * This memory was allocated by the lpfc_read_sparam routine. Release 6103 * it to the mbuf pool. 6104 */ 6105 lpfc_mbuf_free(phba, mp->virt, mp->phys); 6106 kfree(mp); 6107 mboxq->context1 = NULL; 6108 if (unlikely(rc)) { 6109 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6110 "0382 READ_SPARAM command failed " 6111 "status %d, mbxStatus x%x\n", 6112 rc, bf_get(lpfc_mqe_status, mqe)); 6113 phba->link_state = LPFC_HBA_ERROR; 6114 rc = -EIO; 6115 goto out_free_mbox; 6116 } 6117 6118 lpfc_update_vport_wwn(vport); 6119 6120 /* Update the fc_host data structures with new wwn. 
*/ 6121 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 6122 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 6123 6124 /* Register SGL pool to the device using non-embedded mailbox command */ 6125 if (!phba->sli4_hba.extents_in_use) { 6126 rc = lpfc_sli4_post_els_sgl_list(phba); 6127 if (unlikely(rc)) { 6128 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6129 "0582 Error %d during els sgl post " 6130 "operation\n", rc); 6131 rc = -ENODEV; 6132 goto out_free_mbox; 6133 } 6134 } else { 6135 rc = lpfc_sli4_post_els_sgl_list_ext(phba); 6136 if (unlikely(rc)) { 6137 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6138 "2560 Error %d during els sgl post " 6139 "operation\n", rc); 6140 rc = -ENODEV; 6141 goto out_free_mbox; 6142 } 6143 } 6144 6145 /* Register SCSI SGL pool to the device */ 6146 rc = lpfc_sli4_repost_scsi_sgl_list(phba); 6147 if (unlikely(rc)) { 6148 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6149 "0383 Error %d during scsi sgl post " 6150 "operation\n", rc); 6151 /* Some Scsi buffers were moved to the abort scsi list */ 6152 /* A pci function reset will repost them */ 6153 rc = -ENODEV; 6154 goto out_free_mbox; 6155 } 6156 6157 /* Post the rpi header region to the device. */ 6158 rc = lpfc_sli4_post_all_rpi_hdrs(phba); 6159 if (unlikely(rc)) { 6160 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6161 "0393 Error %d during rpi post operation\n", 6162 rc); 6163 rc = -ENODEV; 6164 goto out_free_mbox; 6165 } 6166 6167 /* Create all the SLI4 queues */ 6168 rc = lpfc_sli4_queue_create(phba); 6169 if (rc) { 6170 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6171 "3089 Failed to allocate queues\n"); 6172 rc = -ENODEV; 6173 goto out_stop_timers; 6174 } 6175 /* Set up all the queues to the device */ 6176 rc = lpfc_sli4_queue_setup(phba); 6177 if (unlikely(rc)) { 6178 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6179 "0381 Error %d during queue setup.\n ", rc); 6180 goto out_destroy_queue; 6181 } 6182 6183 /* Arm the CQs and then EQs on device */ 6184 lpfc_sli4_arm_cqeq_intr(phba); 6185 6186 /* Indicate device interrupt mode */ 6187 phba->sli4_hba.intr_enable = 1; 6188 6189 /* Allow asynchronous mailbox command to go through */ 6190 spin_lock_irq(&phba->hbalock); 6191 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 6192 spin_unlock_irq(&phba->hbalock); 6193 6194 /* Post receive buffers to the device */ 6195 lpfc_sli4_rb_setup(phba); 6196 6197 /* Reset HBA FCF states after HBA reset */ 6198 phba->fcf.fcf_flag = 0; 6199 phba->fcf.current_rec.flag = 0; 6200 6201 /* Start the ELS watchdog timer */ 6202 mod_timer(&vport->els_tmofunc, 6203 jiffies + HZ * (phba->fc_ratov * 2)); 6204 6205 /* Start heart beat timer */ 6206 mod_timer(&phba->hb_tmofunc, 6207 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 6208 phba->hb_outstanding = 0; 6209 phba->last_completion_time = jiffies; 6210 6211 /* Start error attention (ERATT) polling timer */ 6212 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); 6213 6214 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 6215 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 6216 rc = pci_enable_pcie_error_reporting(phba->pcidev); 6217 if (!rc) { 6218 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6219 "2829 This device supports " 6220 "Advanced Error Reporting (AER)\n"); 6221 spin_lock_irq(&phba->hbalock); 6222 phba->hba_flag |= HBA_AER_ENABLED; 6223 spin_unlock_irq(&phba->hbalock); 6224 } else { 6225 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6226 "2830 This 
device does not support " 6227 "Advanced Error Reporting (AER)\n"); 6228 phba->cfg_aer_support = 0; 6229 } 6230 rc = 0; 6231 } 6232 6233 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 6234 /* 6235 * The FC Port needs to register FCFI (index 0) 6236 */ 6237 lpfc_reg_fcfi(phba, mboxq); 6238 mboxq->vport = phba->pport; 6239 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6240 if (rc != MBX_SUCCESS) 6241 goto out_unset_queue; 6242 rc = 0; 6243 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, 6244 &mboxq->u.mqe.un.reg_fcfi); 6245 6246 /* Check if the port is configured to be disabled */ 6247 lpfc_sli_read_link_ste(phba); 6248 } 6249 6250 /* 6251 * The port is ready, set the host's link state to LINK_DOWN 6252 * in preparation for link interrupts. 6253 */ 6254 spin_lock_irq(&phba->hbalock); 6255 phba->link_state = LPFC_LINK_DOWN; 6256 spin_unlock_irq(&phba->hbalock); 6257 if (!(phba->hba_flag & HBA_FCOE_MODE) && 6258 (phba->hba_flag & LINK_DISABLED)) { 6259 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 6260 "3103 Adapter Link is disabled.\n"); 6261 lpfc_down_link(phba, mboxq); 6262 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6263 if (rc != MBX_SUCCESS) { 6264 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 6265 "3104 Adapter failed to issue " 6266 "DOWN_LINK mbox cmd, rc:x%x\n", rc); 6267 goto out_unset_queue; 6268 } 6269 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 6270 /* don't perform init_link on SLI4 FC port loopback test */ 6271 if (!(phba->link_flag & LS_LOOPBACK_MODE)) { 6272 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 6273 if (rc) 6274 goto out_unset_queue; 6275 } 6276 } 6277 mempool_free(mboxq, phba->mbox_mem_pool); 6278 return rc; 6279 out_unset_queue: 6280 /* Unset all the queues set up in this routine when error out */ 6281 lpfc_sli4_queue_unset(phba); 6282 out_destroy_queue: 6283 lpfc_sli4_queue_destroy(phba); 6284 out_stop_timers: 6285 lpfc_stop_hba_timers(phba); 6286 out_free_mbox: 6287 mempool_free(mboxq, phba->mbox_mem_pool); 6288 return rc; 6289 } 6290 6291 /** 6292 * lpfc_mbox_timeout - Timeout call back function for mbox timer 6293 * @ptr: context object - pointer to hba structure. 6294 * 6295 * This is the callback function for mailbox timer. The mailbox 6296 * timer is armed when a new mailbox command is issued and the timer 6297 * is deleted when the mailbox complete. The function is called by 6298 * the kernel timer code when a mailbox does not complete within 6299 * expected time. This function wakes up the worker thread to 6300 * process the mailbox timeout and returns. All the processing is 6301 * done by the worker thread function lpfc_mbox_timeout_handler. 6302 **/ 6303 void 6304 lpfc_mbox_timeout(unsigned long ptr) 6305 { 6306 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 6307 unsigned long iflag; 6308 uint32_t tmo_posted; 6309 6310 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 6311 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; 6312 if (!tmo_posted) 6313 phba->pport->work_port_events |= WORKER_MBOX_TMO; 6314 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 6315 6316 if (!tmo_posted) 6317 lpfc_worker_wake_up(phba); 6318 return; 6319 } 6320 6321 6322 /** 6323 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout 6324 * @phba: Pointer to HBA context object. 6325 * 6326 * This function is called from worker thread when a mailbox command times out. 6327 * The caller is not required to hold any locks. 
This function will reset the 6328 * HBA and recover all the pending commands. 6329 **/ 6330 void 6331 lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 6332 { 6333 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 6334 MAILBOX_t *mb = &pmbox->u.mb; 6335 struct lpfc_sli *psli = &phba->sli; 6336 struct lpfc_sli_ring *pring; 6337 6338 /* Check the pmbox pointer first. There is a race condition 6339 * between the mbox timeout handler getting executed in the 6340 * worklist and the mailbox actually completing. When this 6341 * race condition occurs, the mbox_active will be NULL. 6342 */ 6343 spin_lock_irq(&phba->hbalock); 6344 if (pmbox == NULL) { 6345 lpfc_printf_log(phba, KERN_WARNING, 6346 LOG_MBOX | LOG_SLI, 6347 "0353 Active Mailbox cleared - mailbox timeout " 6348 "exiting\n"); 6349 spin_unlock_irq(&phba->hbalock); 6350 return; 6351 } 6352 6353 /* Mbox cmd <mbxCommand> timeout */ 6354 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6355 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 6356 mb->mbxCommand, 6357 phba->pport->port_state, 6358 phba->sli.sli_flag, 6359 phba->sli.mbox_active); 6360 spin_unlock_irq(&phba->hbalock); 6361 6362 /* Setting state unknown so lpfc_sli_abort_iocb_ring 6363 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 6364 * it to fail all outstanding SCSI IO. 6365 */ 6366 spin_lock_irq(&phba->pport->work_port_lock); 6367 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 6368 spin_unlock_irq(&phba->pport->work_port_lock); 6369 spin_lock_irq(&phba->hbalock); 6370 phba->link_state = LPFC_LINK_UNKNOWN; 6371 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 6372 spin_unlock_irq(&phba->hbalock); 6373 6374 pring = &psli->ring[psli->fcp_ring]; 6375 lpfc_sli_abort_iocb_ring(phba, pring); 6376 6377 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6378 "0345 Resetting board due to mailbox timeout\n"); 6379 6380 /* Reset the HBA device */ 6381 lpfc_reset_hba(phba); 6382 } 6383 6384 /** 6385 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware 6386 * @phba: Pointer to HBA context object. 6387 * @pmbox: Pointer to mailbox object. 6388 * @flag: Flag indicating how the mailbox needs to be processed. 6389 * 6390 * This function is called by discovery code and HBA management code 6391 * to submit a mailbox command to firmware with SLI-3 interface spec. This 6392 * function gets the hbalock to protect the data structures. 6393 * The mailbox command can be submitted in polling mode, in which case 6394 * this function will wait in a polling loop for the completion of the 6395 * mailbox. 6396 * If the mailbox is submitted in no_wait mode (not polling) the 6397 * function will submit the command and return immediately without waiting 6398 * for the mailbox completion. The no_wait mode is supported only when the HBA 6399 * is in SLI2/SLI3 mode and interrupts are enabled. 6400 * The SLI interface allows only one mailbox pending at a time. If the 6401 * mailbox is issued in polling mode and there is already a mailbox 6402 * pending, then the function will return an error. If the mailbox is issued 6403 * in NO_WAIT mode and there is a mailbox pending already, the function 6404 * will return MBX_BUSY after queuing the mailbox into the mailbox queue. 6405 * The sli layer owns the mailbox object until the completion of mailbox 6406 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other 6407 * return codes the caller owns the mailbox command after the return of 6408 * the function.
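* * A typical polled submission from a caller looks roughly like this * (illustrative sketch only; the helper named is just one example used * elsewhere in this file): * * pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); * (build the command with a helper from lpfc_mbox.c, e.g. lpfc_down_link()) * rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); * (in polling mode the caller owns pmb again on return and eventually * releases it with mempool_free(pmb, phba->mbox_mem_pool))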
6409 **/ 6410 static int 6411 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, 6412 uint32_t flag) 6413 { 6414 MAILBOX_t *mb; 6415 struct lpfc_sli *psli = &phba->sli; 6416 uint32_t status, evtctr; 6417 uint32_t ha_copy, hc_copy; 6418 int i; 6419 unsigned long timeout; 6420 unsigned long drvr_flag = 0; 6421 uint32_t word0, ldata; 6422 void __iomem *to_slim; 6423 int processing_queue = 0; 6424 6425 spin_lock_irqsave(&phba->hbalock, drvr_flag); 6426 if (!pmbox) { 6427 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6428 /* processing mbox queue from intr_handler */ 6429 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 6430 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6431 return MBX_SUCCESS; 6432 } 6433 processing_queue = 1; 6434 pmbox = lpfc_mbox_get(phba); 6435 if (!pmbox) { 6436 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6437 return MBX_SUCCESS; 6438 } 6439 } 6440 6441 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && 6442 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { 6443 if(!pmbox->vport) { 6444 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6445 lpfc_printf_log(phba, KERN_ERR, 6446 LOG_MBOX | LOG_VPORT, 6447 "1806 Mbox x%x failed. No vport\n", 6448 pmbox->u.mb.mbxCommand); 6449 dump_stack(); 6450 goto out_not_finished; 6451 } 6452 } 6453 6454 /* If the PCI channel is in offline state, do not post mbox. */ 6455 if (unlikely(pci_channel_offline(phba->pcidev))) { 6456 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6457 goto out_not_finished; 6458 } 6459 6460 /* If HBA has a deferred error attention, fail the iocb. */ 6461 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 6462 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6463 goto out_not_finished; 6464 } 6465 6466 psli = &phba->sli; 6467 6468 mb = &pmbox->u.mb; 6469 status = MBX_SUCCESS; 6470 6471 if (phba->link_state == LPFC_HBA_ERROR) { 6472 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6473 6474 /* Mbox command <mbxCommand> cannot issue */ 6475 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6476 "(%d):0311 Mailbox command x%x cannot " 6477 "issue Data: x%x x%x\n", 6478 pmbox->vport ? pmbox->vport->vpi : 0, 6479 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 6480 goto out_not_finished; 6481 } 6482 6483 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { 6484 if (lpfc_readl(phba->HCregaddr, &hc_copy) || 6485 !(hc_copy & HC_MBINT_ENA)) { 6486 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6487 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6488 "(%d):2528 Mailbox command x%x cannot " 6489 "issue Data: x%x x%x\n", 6490 pmbox->vport ? pmbox->vport->vpi : 0, 6491 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 6492 goto out_not_finished; 6493 } 6494 } 6495 6496 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 6497 /* Polling for a mbox command when another one is already active 6498 * is not allowed in SLI. Also, the driver must have established 6499 * SLI2 mode to queue and process multiple mbox commands. 6500 */ 6501 6502 if (flag & MBX_POLL) { 6503 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6504 6505 /* Mbox command <mbxCommand> cannot issue */ 6506 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6507 "(%d):2529 Mailbox command x%x " 6508 "cannot issue Data: x%x x%x\n", 6509 pmbox->vport ? 
pmbox->vport->vpi : 0, 6510 pmbox->u.mb.mbxCommand, 6511 psli->sli_flag, flag); 6512 goto out_not_finished; 6513 } 6514 6515 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { 6516 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6517 /* Mbox command <mbxCommand> cannot issue */ 6518 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6519 "(%d):2530 Mailbox command x%x " 6520 "cannot issue Data: x%x x%x\n", 6521 pmbox->vport ? pmbox->vport->vpi : 0, 6522 pmbox->u.mb.mbxCommand, 6523 psli->sli_flag, flag); 6524 goto out_not_finished; 6525 } 6526 6527 /* Another mailbox command is still being processed, queue this 6528 * command to be processed later. 6529 */ 6530 lpfc_mbox_put(phba, pmbox); 6531 6532 /* Mbox cmd issue - BUSY */ 6533 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6534 "(%d):0308 Mbox cmd issue - BUSY Data: " 6535 "x%x x%x x%x x%x\n", 6536 pmbox->vport ? pmbox->vport->vpi : 0xffffff, 6537 mb->mbxCommand, phba->pport->port_state, 6538 psli->sli_flag, flag); 6539 6540 psli->slistat.mbox_busy++; 6541 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6542 6543 if (pmbox->vport) { 6544 lpfc_debugfs_disc_trc(pmbox->vport, 6545 LPFC_DISC_TRC_MBOX_VPORT, 6546 "MBOX Bsy vport: cmd:x%x mb:x%x x%x", 6547 (uint32_t)mb->mbxCommand, 6548 mb->un.varWords[0], mb->un.varWords[1]); 6549 } 6550 else { 6551 lpfc_debugfs_disc_trc(phba->pport, 6552 LPFC_DISC_TRC_MBOX, 6553 "MBOX Bsy: cmd:x%x mb:x%x x%x", 6554 (uint32_t)mb->mbxCommand, 6555 mb->un.varWords[0], mb->un.varWords[1]); 6556 } 6557 6558 return MBX_BUSY; 6559 } 6560 6561 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 6562 6563 /* If we are not polling, we MUST be in SLI2 mode */ 6564 if (flag != MBX_POLL) { 6565 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && 6566 (mb->mbxCommand != MBX_KILL_BOARD)) { 6567 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6568 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6569 /* Mbox command <mbxCommand> cannot issue */ 6570 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6571 "(%d):2531 Mailbox command x%x " 6572 "cannot issue Data: x%x x%x\n", 6573 pmbox->vport ? pmbox->vport->vpi : 0, 6574 pmbox->u.mb.mbxCommand, 6575 psli->sli_flag, flag); 6576 goto out_not_finished; 6577 } 6578 /* timeout active mbox command */ 6579 mod_timer(&psli->mbox_tmo, (jiffies + 6580 (HZ * lpfc_mbox_tmo_val(phba, pmbox)))); 6581 } 6582 6583 /* Mailbox cmd <cmd> issue */ 6584 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6585 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " 6586 "x%x\n", 6587 pmbox->vport ? pmbox->vport->vpi : 0, 6588 mb->mbxCommand, phba->pport->port_state, 6589 psli->sli_flag, flag); 6590 6591 if (mb->mbxCommand != MBX_HEARTBEAT) { 6592 if (pmbox->vport) { 6593 lpfc_debugfs_disc_trc(pmbox->vport, 6594 LPFC_DISC_TRC_MBOX_VPORT, 6595 "MBOX Send vport: cmd:x%x mb:x%x x%x", 6596 (uint32_t)mb->mbxCommand, 6597 mb->un.varWords[0], mb->un.varWords[1]); 6598 } 6599 else { 6600 lpfc_debugfs_disc_trc(phba->pport, 6601 LPFC_DISC_TRC_MBOX, 6602 "MBOX Send: cmd:x%x mb:x%x x%x", 6603 (uint32_t)mb->mbxCommand, 6604 mb->un.varWords[0], mb->un.varWords[1]); 6605 } 6606 } 6607 6608 psli->slistat.mbox_cmd++; 6609 evtctr = psli->slistat.mbox_event; 6610 6611 /* next set own bit for the adapter and copy over command word */ 6612 mb->mbxOwner = OWN_CHIP; 6613 6614 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 6615 /* Populate mbox extension offset word. 
*/ 6616 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { 6617 *(((uint32_t *)mb) + pmbox->mbox_offset_word) 6618 = (uint8_t *)phba->mbox_ext 6619 - (uint8_t *)phba->mbox; 6620 } 6621 6622 /* Copy the mailbox extension data */ 6623 if (pmbox->in_ext_byte_len && pmbox->context2) { 6624 lpfc_sli_pcimem_bcopy(pmbox->context2, 6625 (uint8_t *)phba->mbox_ext, 6626 pmbox->in_ext_byte_len); 6627 } 6628 /* Copy command data to host SLIM area */ 6629 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); 6630 } else { 6631 /* Populate mbox extension offset word. */ 6632 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) 6633 *(((uint32_t *)mb) + pmbox->mbox_offset_word) 6634 = MAILBOX_HBA_EXT_OFFSET; 6635 6636 /* Copy the mailbox extension data */ 6637 if (pmbox->in_ext_byte_len && pmbox->context2) { 6638 lpfc_memcpy_to_slim(phba->MBslimaddr + 6639 MAILBOX_HBA_EXT_OFFSET, 6640 pmbox->context2, pmbox->in_ext_byte_len); 6641 6642 } 6643 if (mb->mbxCommand == MBX_CONFIG_PORT) { 6644 /* copy command data into host mbox for cmpl */ 6645 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); 6646 } 6647 6648 /* First copy mbox command data to HBA SLIM, skip past first 6649 word */ 6650 to_slim = phba->MBslimaddr + sizeof (uint32_t); 6651 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0], 6652 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 6653 6654 /* Next copy over first word, with mbxOwner set */ 6655 ldata = *((uint32_t *)mb); 6656 to_slim = phba->MBslimaddr; 6657 writel(ldata, to_slim); 6658 readl(to_slim); /* flush */ 6659 6660 if (mb->mbxCommand == MBX_CONFIG_PORT) { 6661 /* switch over to host mailbox */ 6662 psli->sli_flag |= LPFC_SLI_ACTIVE; 6663 } 6664 } 6665 6666 wmb(); 6667 6668 switch (flag) { 6669 case MBX_NOWAIT: 6670 /* Set up reference to mailbox command */ 6671 psli->mbox_active = pmbox; 6672 /* Interrupt board to do it */ 6673 writel(CA_MBATT, phba->CAregaddr); 6674 readl(phba->CAregaddr); /* flush */ 6675 /* Don't wait for it to finish, just return */ 6676 break; 6677 6678 case MBX_POLL: 6679 /* Set up null reference to mailbox command */ 6680 psli->mbox_active = NULL; 6681 /* Interrupt board to do it */ 6682 writel(CA_MBATT, phba->CAregaddr); 6683 readl(phba->CAregaddr); /* flush */ 6684 6685 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 6686 /* First read mbox status word */ 6687 word0 = *((uint32_t *)phba->mbox); 6688 word0 = le32_to_cpu(word0); 6689 } else { 6690 /* First read mbox status word */ 6691 if (lpfc_readl(phba->MBslimaddr, &word0)) { 6692 spin_unlock_irqrestore(&phba->hbalock, 6693 drvr_flag); 6694 goto out_not_finished; 6695 } 6696 } 6697 6698 /* Read the HBA Host Attention Register */ 6699 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 6700 spin_unlock_irqrestore(&phba->hbalock, 6701 drvr_flag); 6702 goto out_not_finished; 6703 } 6704 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 6705 1000) + jiffies; 6706 i = 0; 6707 /* Wait for command to complete */ 6708 while (((word0 & OWN_CHIP) == OWN_CHIP) || 6709 (!(ha_copy & HA_MBATT) && 6710 (phba->link_state > LPFC_WARM_START))) { 6711 if (time_after(jiffies, timeout)) { 6712 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6713 spin_unlock_irqrestore(&phba->hbalock, 6714 drvr_flag); 6715 goto out_not_finished; 6716 } 6717 6718 /* Check if we took a mbox interrupt while we were 6719 polling */ 6720 if (((word0 & OWN_CHIP) != OWN_CHIP) 6721 && (evtctr != psli->slistat.mbox_event)) 6722 break; 6723 6724 if (i++ > 10) { 6725 spin_unlock_irqrestore(&phba->hbalock, 6726 drvr_flag); 6727 msleep(1); 6728 
spin_lock_irqsave(&phba->hbalock, drvr_flag); 6729 } 6730 6731 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 6732 /* First copy command data */ 6733 word0 = *((uint32_t *)phba->mbox); 6734 word0 = le32_to_cpu(word0); 6735 if (mb->mbxCommand == MBX_CONFIG_PORT) { 6736 MAILBOX_t *slimmb; 6737 uint32_t slimword0; 6738 /* Check real SLIM for any errors */ 6739 slimword0 = readl(phba->MBslimaddr); 6740 slimmb = (MAILBOX_t *) & slimword0; 6741 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 6742 && slimmb->mbxStatus) { 6743 psli->sli_flag &= 6744 ~LPFC_SLI_ACTIVE; 6745 word0 = slimword0; 6746 } 6747 } 6748 } else { 6749 /* First copy command data */ 6750 word0 = readl(phba->MBslimaddr); 6751 } 6752 /* Read the HBA Host Attention Register */ 6753 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 6754 spin_unlock_irqrestore(&phba->hbalock, 6755 drvr_flag); 6756 goto out_not_finished; 6757 } 6758 } 6759 6760 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 6761 /* copy results back to user */ 6762 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE); 6763 /* Copy the mailbox extension data */ 6764 if (pmbox->out_ext_byte_len && pmbox->context2) { 6765 lpfc_sli_pcimem_bcopy(phba->mbox_ext, 6766 pmbox->context2, 6767 pmbox->out_ext_byte_len); 6768 } 6769 } else { 6770 /* First copy command data */ 6771 lpfc_memcpy_from_slim(mb, phba->MBslimaddr, 6772 MAILBOX_CMD_SIZE); 6773 /* Copy the mailbox extension data */ 6774 if (pmbox->out_ext_byte_len && pmbox->context2) { 6775 lpfc_memcpy_from_slim(pmbox->context2, 6776 phba->MBslimaddr + 6777 MAILBOX_HBA_EXT_OFFSET, 6778 pmbox->out_ext_byte_len); 6779 } 6780 } 6781 6782 writel(HA_MBATT, phba->HAregaddr); 6783 readl(phba->HAregaddr); /* flush */ 6784 6785 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6786 status = mb->mbxStatus; 6787 } 6788 6789 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6790 return status; 6791 6792 out_not_finished: 6793 if (processing_queue) { 6794 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; 6795 lpfc_mbox_cmpl_put(phba, pmbox); 6796 } 6797 return MBX_NOT_FINISHED; 6798 } 6799 6800 /** 6801 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command 6802 * @phba: Pointer to HBA context object. 6803 * 6804 * The function blocks the posting of SLI4 asynchronous mailbox commands from 6805 * the driver internal pending mailbox queue. It will then try to wait out the 6806 * possible outstanding mailbox command before return. 6807 * 6808 * Returns: 6809 * 0 - the outstanding mailbox command completed; otherwise, the wait for 6810 * the outstanding mailbox command timed out. 6811 **/ 6812 static int 6813 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) 6814 { 6815 struct lpfc_sli *psli = &phba->sli; 6816 int rc = 0; 6817 unsigned long timeout = 0; 6818 6819 /* Mark the asynchronous mailbox command posting as blocked */ 6820 spin_lock_irq(&phba->hbalock); 6821 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 6822 /* Determine how long we might wait for the active mailbox 6823 * command to be gracefully completed by firmware. 
6824 */ 6825 if (phba->sli.mbox_active) 6826 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 6827 phba->sli.mbox_active) * 6828 1000) + jiffies; 6829 spin_unlock_irq(&phba->hbalock); 6830 6831 /* Wait for the outstanding mailbox command to complete */ 6832 while (phba->sli.mbox_active) { 6833 /* Check active mailbox complete status every 2ms */ 6834 msleep(2); 6835 if (time_after(jiffies, timeout)) { 6836 /* Timeout, mark the outstanding cmd as not complete */ 6837 rc = 1; 6838 break; 6839 } 6840 } 6841 6842 /* Cannot cleanly block the async mailbox command, fail it */ 6843 if (rc) { 6844 spin_lock_irq(&phba->hbalock); 6845 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 6846 spin_unlock_irq(&phba->hbalock); 6847 } 6848 return rc; 6849 } 6850 6851 /** 6852 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command 6853 * @phba: Pointer to HBA context object. 6854 * 6855 * The function unblocks and resumes posting of SLI4 asynchronous mailbox 6856 * commands from the driver internal pending mailbox queue. It makes sure 6857 * that there is no outstanding mailbox command before resuming posting 6858 * asynchronous mailbox commands. If, for any reason, there is an outstanding 6859 * mailbox command, it will try to wait it out before resuming asynchronous 6860 * mailbox command posting. 6861 **/ 6862 static void 6863 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba) 6864 { 6865 struct lpfc_sli *psli = &phba->sli; 6866 6867 spin_lock_irq(&phba->hbalock); 6868 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 6869 /* Asynchronous mailbox posting is not blocked, do nothing */ 6870 spin_unlock_irq(&phba->hbalock); 6871 return; 6872 } 6873 6874 /* The outstanding synchronous mailbox command is guaranteed to be done, 6875 * whether it succeeded or timed out. After a timeout the outstanding 6876 * mailbox command is always removed, so just unblock async mailbox 6877 * command posting and resume. 6878 */ 6879 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 6880 spin_unlock_irq(&phba->hbalock); 6881 6882 /* wake up worker thread to post asynchronous mailbox command */ 6883 lpfc_worker_wake_up(phba); 6884 } 6885 6886 /** 6887 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox 6888 * @phba: Pointer to HBA context object. 6889 * @mboxq: Pointer to mailbox object. 6890 * 6891 * The function posts a mailbox to the port. The mailbox is expected 6892 * to be completely filled in and ready for the port to operate on it. 6893 * This routine executes a synchronous completion operation on the 6894 * mailbox by polling for its completion. 6895 * 6896 * The caller must not be holding any locks when calling this routine. 6897 * 6898 * Returns: 6899 * MBX_SUCCESS - mailbox posted successfully 6900 * Any of the MBX error values. 6901 **/ 6902 static int 6903 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 6904 { 6905 int rc = MBX_SUCCESS; 6906 unsigned long iflag; 6907 uint32_t db_ready; 6908 uint32_t mcqe_status; 6909 uint32_t mbx_cmnd; 6910 unsigned long timeout; 6911 struct lpfc_sli *psli = &phba->sli; 6912 struct lpfc_mqe *mb = &mboxq->u.mqe; 6913 struct lpfc_bmbx_create *mbox_rgn; 6914 struct dma_address *dma_address; 6915 struct lpfc_register bmbx_reg; 6916 6917 /* 6918 * Only one mailbox can be active to the bootstrap mailbox region 6919 * at a time and there is no queueing provided.
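* The bootstrap (BMBX) region is used for polled, synchronous mailbox * commands (for example before the device interrupt mode has been enabled), * so completion is detected by polling the BMBX ready bit rather than by an * interrupt.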
6920 */ 6921 spin_lock_irqsave(&phba->hbalock, iflag); 6922 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 6923 spin_unlock_irqrestore(&phba->hbalock, iflag); 6924 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6925 "(%d):2532 Mailbox command x%x (x%x/x%x) " 6926 "cannot issue Data: x%x x%x\n", 6927 mboxq->vport ? mboxq->vport->vpi : 0, 6928 mboxq->u.mb.mbxCommand, 6929 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 6930 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 6931 psli->sli_flag, MBX_POLL); 6932 return MBXERR_ERROR; 6933 } 6934 /* The server grabs the token and owns it until release */ 6935 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 6936 phba->sli.mbox_active = mboxq; 6937 spin_unlock_irqrestore(&phba->hbalock, iflag); 6938 6939 /* 6940 * Initialize the bootstrap memory region to avoid stale data areas 6941 * in the mailbox post. Then copy the caller's mailbox contents to 6942 * the bmbx mailbox region. 6943 */ 6944 mbx_cmnd = bf_get(lpfc_mqe_command, mb); 6945 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create)); 6946 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt, 6947 sizeof(struct lpfc_mqe)); 6948 6949 /* Post the high mailbox dma address to the port and wait for ready. */ 6950 dma_address = &phba->sli4_hba.bmbx.dma_address; 6951 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); 6952 6953 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) 6954 * 1000) + jiffies; 6955 do { 6956 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 6957 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); 6958 if (!db_ready) 6959 msleep(2); 6960 6961 if (time_after(jiffies, timeout)) { 6962 rc = MBXERR_ERROR; 6963 goto exit; 6964 } 6965 } while (!db_ready); 6966 6967 /* Post the low mailbox dma address to the port. */ 6968 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); 6969 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) 6970 * 1000) + jiffies; 6971 do { 6972 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 6973 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); 6974 if (!db_ready) 6975 msleep(2); 6976 6977 if (time_after(jiffies, timeout)) { 6978 rc = MBXERR_ERROR; 6979 goto exit; 6980 } 6981 } while (!db_ready); 6982 6983 /* 6984 * Read the CQ to ensure the mailbox has completed. 6985 * If so, update the mailbox status so that the upper layers 6986 * can complete the request normally. 6987 */ 6988 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, 6989 sizeof(struct lpfc_mqe)); 6990 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; 6991 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, 6992 sizeof(struct lpfc_mcqe)); 6993 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); 6994 /* 6995 * When the CQE status indicates a failure and the mailbox status 6996 * indicates success then copy the CQE status into the mailbox status 6997 * (and prefix it with x4000). 6998 */ 6999 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 7000 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS) 7001 bf_set(lpfc_mqe_status, mb, 7002 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 7003 rc = MBXERR_ERROR; 7004 } else 7005 lpfc_sli4_swap_str(phba, mboxq); 7006 7007 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7008 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x " 7009 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" 7010 " x%x x%x CQ: x%x x%x x%x x%x\n", 7011 mboxq->vport ? 
mboxq->vport->vpi : 0, mbx_cmnd, 7012 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7013 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7014 bf_get(lpfc_mqe_status, mb), 7015 mb->un.mb_words[0], mb->un.mb_words[1], 7016 mb->un.mb_words[2], mb->un.mb_words[3], 7017 mb->un.mb_words[4], mb->un.mb_words[5], 7018 mb->un.mb_words[6], mb->un.mb_words[7], 7019 mb->un.mb_words[8], mb->un.mb_words[9], 7020 mb->un.mb_words[10], mb->un.mb_words[11], 7021 mb->un.mb_words[12], mboxq->mcqe.word0, 7022 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 7023 mboxq->mcqe.trailer); 7024 exit: 7025 /* We are holding the token, no lock needed when releasing it */ 7026 spin_lock_irqsave(&phba->hbalock, iflag); 7027 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7028 phba->sli.mbox_active = NULL; 7029 spin_unlock_irqrestore(&phba->hbalock, iflag); 7030 return rc; 7031 } 7032 7033 /** 7034 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware 7035 * @phba: Pointer to HBA context object. 7036 * @pmbox: Pointer to mailbox object. 7037 * @flag: Flag indicating how the mailbox needs to be processed. 7038 * 7039 * This function is called by discovery code and HBA management code to submit 7040 * a mailbox command to firmware with SLI-4 interface spec. 7041 * 7042 * For all return codes other than MBX_BUSY, the caller owns the mailbox 7043 * command after the function returns. 7044 **/ 7045 static int 7046 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 7047 uint32_t flag) 7048 { 7049 struct lpfc_sli *psli = &phba->sli; 7050 unsigned long iflags; 7051 int rc; 7052 7053 /* dump from issue mailbox command if setup */ 7054 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb); 7055 7056 rc = lpfc_mbox_dev_check(phba); 7057 if (unlikely(rc)) { 7058 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7059 "(%d):2544 Mailbox command x%x (x%x/x%x) " 7060 "cannot issue Data: x%x x%x\n", 7061 mboxq->vport ? mboxq->vport->vpi : 0, 7062 mboxq->u.mb.mbxCommand, 7063 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7064 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7065 psli->sli_flag, flag); 7066 goto out_not_finished; 7067 } 7068 7069 /* Detect polling mode and jump to a handler */ 7070 if (!phba->sli4_hba.intr_enable) { 7071 if (flag == MBX_POLL) 7072 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 7073 else 7074 rc = -EIO; 7075 if (rc != MBX_SUCCESS) 7076 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7077 "(%d):2541 Mailbox command x%x " 7078 "(x%x/x%x) cannot issue Data: " 7079 "x%x x%x\n", 7080 mboxq->vport ? mboxq->vport->vpi : 0, 7081 mboxq->u.mb.mbxCommand, 7082 lpfc_sli_config_mbox_subsys_get(phba, 7083 mboxq), 7084 lpfc_sli_config_mbox_opcode_get(phba, 7085 mboxq), 7086 psli->sli_flag, flag); 7087 return rc; 7088 } else if (flag == MBX_POLL) { 7089 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7090 "(%d):2542 Try to issue mailbox command " 7091 "x%x (x%x/x%x) synchronously ahead of async " 7092 "mailbox command queue: x%x x%x\n", 7093 mboxq->vport ?
mboxq->vport->vpi : 0, 7094 mboxq->u.mb.mbxCommand, 7095 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7096 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7097 psli->sli_flag, flag); 7098 /* Try to block the asynchronous mailbox posting */ 7099 rc = lpfc_sli4_async_mbox_block(phba); 7100 if (!rc) { 7101 /* Successfully blocked, now issue sync mbox cmd */ 7102 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 7103 if (rc != MBX_SUCCESS) 7104 lpfc_printf_log(phba, KERN_ERR, 7105 LOG_MBOX | LOG_SLI, 7106 "(%d):2597 Mailbox command " 7107 "x%x (x%x/x%x) cannot issue " 7108 "Data: x%x x%x\n", 7109 mboxq->vport ? 7110 mboxq->vport->vpi : 0, 7111 mboxq->u.mb.mbxCommand, 7112 lpfc_sli_config_mbox_subsys_get(phba, 7113 mboxq), 7114 lpfc_sli_config_mbox_opcode_get(phba, 7115 mboxq), 7116 psli->sli_flag, flag); 7117 /* Unblock the async mailbox posting afterward */ 7118 lpfc_sli4_async_mbox_unblock(phba); 7119 } 7120 return rc; 7121 } 7122 7123 /* Now, interrupt mode asynchronous mailbox command */ 7124 rc = lpfc_mbox_cmd_check(phba, mboxq); 7125 if (rc) { 7126 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7127 "(%d):2543 Mailbox command x%x (x%x/x%x) " 7128 "cannot issue Data: x%x x%x\n", 7129 mboxq->vport ? mboxq->vport->vpi : 0, 7130 mboxq->u.mb.mbxCommand, 7131 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7132 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7133 psli->sli_flag, flag); 7134 goto out_not_finished; 7135 } 7136 7137 /* Put the mailbox command into the driver's internal FIFO */ 7138 psli->slistat.mbox_busy++; 7139 spin_lock_irqsave(&phba->hbalock, iflags); 7140 lpfc_mbox_put(phba, mboxq); 7141 spin_unlock_irqrestore(&phba->hbalock, iflags); 7142 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7143 "(%d):0354 Mbox cmd issue - Enqueue Data: " 7144 "x%x (x%x/x%x) x%x x%x x%x\n", 7145 mboxq->vport ? mboxq->vport->vpi : 0xffffff, 7146 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 7147 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7148 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7149 phba->pport->port_state, 7150 psli->sli_flag, MBX_NOWAIT); 7151 /* Wake up worker thread to transport mailbox command from head */ 7152 lpfc_worker_wake_up(phba); 7153 7154 return MBX_BUSY; 7155 7156 out_not_finished: 7157 return MBX_NOT_FINISHED; 7158 } 7159 7160 /** 7161 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device 7162 * @phba: Pointer to HBA context object. 7163 * 7164 * This function is called by the worker thread to send a mailbox command to 7165 * SLI4 HBA firmware.
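* The next command is taken from the head of the driver's internal mailbox * queue and posted to the port's mailbox work queue. Returns MBX_SUCCESS if * a command was posted (or none was pending) and MBX_NOT_FINISHED otherwise.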
7166 * 7167 **/ 7168 int 7169 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) 7170 { 7171 struct lpfc_sli *psli = &phba->sli; 7172 LPFC_MBOXQ_t *mboxq; 7173 int rc = MBX_SUCCESS; 7174 unsigned long iflags; 7175 struct lpfc_mqe *mqe; 7176 uint32_t mbx_cmnd; 7177 7178 /* Check interrupt mode before post async mailbox command */ 7179 if (unlikely(!phba->sli4_hba.intr_enable)) 7180 return MBX_NOT_FINISHED; 7181 7182 /* Check for mailbox command service token */ 7183 spin_lock_irqsave(&phba->hbalock, iflags); 7184 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 7185 spin_unlock_irqrestore(&phba->hbalock, iflags); 7186 return MBX_NOT_FINISHED; 7187 } 7188 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 7189 spin_unlock_irqrestore(&phba->hbalock, iflags); 7190 return MBX_NOT_FINISHED; 7191 } 7192 if (unlikely(phba->sli.mbox_active)) { 7193 spin_unlock_irqrestore(&phba->hbalock, iflags); 7194 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7195 "0384 There is pending active mailbox cmd\n"); 7196 return MBX_NOT_FINISHED; 7197 } 7198 /* Take the mailbox command service token */ 7199 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 7200 7201 /* Get the next mailbox command from head of queue */ 7202 mboxq = lpfc_mbox_get(phba); 7203 7204 /* If no more mailbox command waiting for post, we're done */ 7205 if (!mboxq) { 7206 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7207 spin_unlock_irqrestore(&phba->hbalock, iflags); 7208 return MBX_SUCCESS; 7209 } 7210 phba->sli.mbox_active = mboxq; 7211 spin_unlock_irqrestore(&phba->hbalock, iflags); 7212 7213 /* Check device readiness for posting mailbox command */ 7214 rc = lpfc_mbox_dev_check(phba); 7215 if (unlikely(rc)) 7216 /* Driver clean routine will clean up pending mailbox */ 7217 goto out_not_finished; 7218 7219 /* Prepare the mbox command to be posted */ 7220 mqe = &mboxq->u.mqe; 7221 mbx_cmnd = bf_get(lpfc_mqe_command, mqe); 7222 7223 /* Start timer for the mbox_tmo and log some mailbox post messages */ 7224 mod_timer(&psli->mbox_tmo, (jiffies + 7225 (HZ * lpfc_mbox_tmo_val(phba, mboxq)))); 7226 7227 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7228 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " 7229 "x%x x%x\n", 7230 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 7231 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7232 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7233 phba->pport->port_state, psli->sli_flag); 7234 7235 if (mbx_cmnd != MBX_HEARTBEAT) { 7236 if (mboxq->vport) { 7237 lpfc_debugfs_disc_trc(mboxq->vport, 7238 LPFC_DISC_TRC_MBOX_VPORT, 7239 "MBOX Send vport: cmd:x%x mb:x%x x%x", 7240 mbx_cmnd, mqe->un.mb_words[0], 7241 mqe->un.mb_words[1]); 7242 } else { 7243 lpfc_debugfs_disc_trc(phba->pport, 7244 LPFC_DISC_TRC_MBOX, 7245 "MBOX Send: cmd:x%x mb:x%x x%x", 7246 mbx_cmnd, mqe->un.mb_words[0], 7247 mqe->un.mb_words[1]); 7248 } 7249 } 7250 psli->slistat.mbox_cmd++; 7251 7252 /* Post the mailbox command to the port */ 7253 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); 7254 if (rc != MBX_SUCCESS) { 7255 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7256 "(%d):2533 Mailbox command x%x (x%x/x%x) " 7257 "cannot issue Data: x%x x%x\n", 7258 mboxq->vport ? 
mboxq->vport->vpi : 0, 7259 mboxq->u.mb.mbxCommand, 7260 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7261 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7262 psli->sli_flag, MBX_NOWAIT); 7263 goto out_not_finished; 7264 } 7265 7266 return rc; 7267 7268 out_not_finished: 7269 spin_lock_irqsave(&phba->hbalock, iflags); 7270 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 7271 __lpfc_mbox_cmpl_put(phba, mboxq); 7272 /* Release the token */ 7273 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7274 phba->sli.mbox_active = NULL; 7275 spin_unlock_irqrestore(&phba->hbalock, iflags); 7276 7277 return MBX_NOT_FINISHED; 7278 } 7279 7280 /** 7281 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command 7282 * @phba: Pointer to HBA context object. 7283 * @pmbox: Pointer to mailbox object. 7284 * @flag: Flag indicating how the mailbox need to be processed. 7285 * 7286 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from 7287 * the API jump table function pointer from the lpfc_hba struct. 7288 * 7289 * Return codes the caller owns the mailbox command after the return of the 7290 * function. 7291 **/ 7292 int 7293 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 7294 { 7295 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); 7296 } 7297 7298 /** 7299 * lpfc_mbox_api_table_setup - Set up mbox api function jump table 7300 * @phba: The hba struct for which this call is being executed. 7301 * @dev_grp: The HBA PCI-Device group number. 7302 * 7303 * This routine sets up the mbox interface API function jump table in @phba 7304 * struct. 7305 * Returns: 0 - success, -ENODEV - failure. 7306 **/ 7307 int 7308 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 7309 { 7310 7311 switch (dev_grp) { 7312 case LPFC_PCI_DEV_LP: 7313 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; 7314 phba->lpfc_sli_handle_slow_ring_event = 7315 lpfc_sli_handle_slow_ring_event_s3; 7316 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; 7317 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; 7318 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; 7319 break; 7320 case LPFC_PCI_DEV_OC: 7321 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; 7322 phba->lpfc_sli_handle_slow_ring_event = 7323 lpfc_sli_handle_slow_ring_event_s4; 7324 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; 7325 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; 7326 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; 7327 break; 7328 default: 7329 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7330 "1420 Invalid HBA PCI-device group: 0x%x\n", 7331 dev_grp); 7332 return -ENODEV; 7333 break; 7334 } 7335 return 0; 7336 } 7337 7338 /** 7339 * __lpfc_sli_ringtx_put - Add an iocb to the txq 7340 * @phba: Pointer to HBA context object. 7341 * @pring: Pointer to driver SLI ring object. 7342 * @piocb: Pointer to address of newly added command iocb. 7343 * 7344 * This function is called with hbalock held to add a command 7345 * iocb to the txq when SLI layer cannot submit the command iocb 7346 * to the ring. 7347 **/ 7348 void 7349 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7350 struct lpfc_iocbq *piocb) 7351 { 7352 /* Insert the caller's iocb in the txq tail for later processing. */ 7353 list_add_tail(&piocb->list, &pring->txq); 7354 pring->txq_cnt++; 7355 } 7356 7357 /** 7358 * lpfc_sli_next_iocb - Get the next iocb in the txq 7359 * @phba: Pointer to HBA context object. 7360 * @pring: Pointer to driver SLI ring object. 
@piocb: Pointer to address of newly added command iocb.
7362 *
7363 * This function is called with hbalock held before a new
7364 * iocb is submitted to the firmware. It checks the txq and
7365 * flushes any iocbs waiting there to the firmware before
7366 * submitting new iocbs to the firmware.
7367 * If there are iocbs in the txq which need to be submitted
7368 * to firmware, lpfc_sli_next_iocb returns the first element
7369 * of the txq after dequeuing it from txq.
7370 * If there is no iocb in the txq then the function will return
7371 * *piocb and *piocb is set to NULL. Caller needs to check
7372 * *piocb to find if there are more commands in the txq.
7373 **/
7374 static struct lpfc_iocbq *
7375 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7376 struct lpfc_iocbq **piocb)
7377 {
7378 struct lpfc_iocbq * nextiocb;
7379
7380 nextiocb = lpfc_sli_ringtx_get(phba, pring);
7381 if (!nextiocb) {
7382 nextiocb = *piocb;
7383 *piocb = NULL;
7384 }
7385
7386 return nextiocb;
7387 }
7388
7389 /**
7390 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
7391 * @phba: Pointer to HBA context object.
7392 * @ring_number: SLI ring number to issue iocb on.
7393 * @piocb: Pointer to command iocb.
7394 * @flag: Flag indicating if this command can be put into txq.
7395 *
7396 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
7397 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
7398 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
7399 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
7400 * this function allows only iocbs for posting buffers. This function finds
7401 * the next available slot in the command ring, posts the command to that
7402 * slot and writes the port attention register to request that the HBA start
7403 * processing the new iocb. If there is no slot available in the ring and
7404 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
7405 * the function returns IOCB_BUSY.
7406 *
7407 * This function is called with hbalock held. The function will return success
7408 * after it successfully submits the iocb to firmware or after adding it to the
7409 * txq.
7410 **/
7411 static int
7412 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
7413 struct lpfc_iocbq *piocb, uint32_t flag)
7414 {
7415 struct lpfc_iocbq *nextiocb;
7416 IOCB_t *iocb;
7417 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
7418
7419 if (piocb->iocb_cmpl && (!piocb->vport) &&
7420 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
7421 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
7422 lpfc_printf_log(phba, KERN_ERR,
7423 LOG_SLI | LOG_VPORT,
7424 "1807 IOCB x%x failed. No vport\n",
7425 piocb->iocb.ulpCommand);
7426 dump_stack();
7427 return IOCB_ERROR;
7428 }
7429
7430
7431 /* If the PCI channel is in offline state, do not post iocbs. */
7432 if (unlikely(pci_channel_offline(phba->pcidev)))
7433 return IOCB_ERROR;
7434
7435 /* If HBA has a deferred error attention, fail the iocb. */
7436 if (unlikely(phba->hba_flag & DEFER_ERATT))
7437 return IOCB_ERROR;
7438
7439 /*
7440 * We should never get an IOCB if we are in a < LINK_DOWN state
7441 */
7442 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
7443 return IOCB_ERROR;
7444
7445 /*
7446 * Check to see if we are blocking IOCB processing because of an
7447 * outstanding event.
7448 */ 7449 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) 7450 goto iocb_busy; 7451 7452 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { 7453 /* 7454 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF 7455 * can be issued if the link is not up. 7456 */ 7457 switch (piocb->iocb.ulpCommand) { 7458 case CMD_GEN_REQUEST64_CR: 7459 case CMD_GEN_REQUEST64_CX: 7460 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || 7461 (piocb->iocb.un.genreq64.w5.hcsw.Rctl != 7462 FC_RCTL_DD_UNSOL_CMD) || 7463 (piocb->iocb.un.genreq64.w5.hcsw.Type != 7464 MENLO_TRANSPORT_TYPE)) 7465 7466 goto iocb_busy; 7467 break; 7468 case CMD_QUE_RING_BUF_CN: 7469 case CMD_QUE_RING_BUF64_CN: 7470 /* 7471 * For IOCBs, like QUE_RING_BUF, that have no rsp ring 7472 * completion, iocb_cmpl MUST be 0. 7473 */ 7474 if (piocb->iocb_cmpl) 7475 piocb->iocb_cmpl = NULL; 7476 /*FALLTHROUGH*/ 7477 case CMD_CREATE_XRI_CR: 7478 case CMD_CLOSE_XRI_CN: 7479 case CMD_CLOSE_XRI_CX: 7480 break; 7481 default: 7482 goto iocb_busy; 7483 } 7484 7485 /* 7486 * For FCP commands, we must be in a state where we can process link 7487 * attention events. 7488 */ 7489 } else if (unlikely(pring->ringno == phba->sli.fcp_ring && 7490 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { 7491 goto iocb_busy; 7492 } 7493 7494 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 7495 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 7496 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 7497 7498 if (iocb) 7499 lpfc_sli_update_ring(phba, pring); 7500 else 7501 lpfc_sli_update_full_ring(phba, pring); 7502 7503 if (!piocb) 7504 return IOCB_SUCCESS; 7505 7506 goto out_busy; 7507 7508 iocb_busy: 7509 pring->stats.iocb_cmd_delay++; 7510 7511 out_busy: 7512 7513 if (!(flag & SLI_IOCB_RET_IOCB)) { 7514 __lpfc_sli_ringtx_put(phba, pring, piocb); 7515 return IOCB_SUCCESS; 7516 } 7517 7518 return IOCB_BUSY; 7519 } 7520 7521 /** 7522 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl. 7523 * @phba: Pointer to HBA context object. 7524 * @piocb: Pointer to command iocb. 7525 * @sglq: Pointer to the scatter gather queue object. 7526 * 7527 * This routine converts the bpl or bde that is in the IOCB 7528 * to a sgl list for the sli4 hardware. The physical address 7529 * of the bpl/bde is converted back to a virtual address. 7530 * If the IOCB contains a BPL then the list of BDE's is 7531 * converted to sli4_sge's. If the IOCB contains a single 7532 * BDE then it is converted to a single sli_sge. 7533 * The IOCB is still in cpu endianess so the contents of 7534 * the bpl can be used without byte swapping. 7535 * 7536 * Returns valid XRI = Success, NO_XRI = Failure. 7537 **/ 7538 static uint16_t 7539 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, 7540 struct lpfc_sglq *sglq) 7541 { 7542 uint16_t xritag = NO_XRI; 7543 struct ulp_bde64 *bpl = NULL; 7544 struct ulp_bde64 bde; 7545 struct sli4_sge *sgl = NULL; 7546 struct lpfc_dmabuf *dmabuf; 7547 IOCB_t *icmd; 7548 int numBdes = 0; 7549 int i = 0; 7550 uint32_t offset = 0; /* accumulated offset in the sg request list */ 7551 int inbound = 0; /* number of sg reply entries inbound from firmware */ 7552 7553 if (!piocbq || !sglq) 7554 return xritag; 7555 7556 sgl = (struct sli4_sge *)sglq->sgl; 7557 icmd = &piocbq->iocb; 7558 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 7559 numBdes = icmd->un.genreq64.bdl.bdeSize / 7560 sizeof(struct ulp_bde64); 7561 /* The addrHigh and addrLow fields within the IOCB 7562 * have not been byteswapped yet so there is no 7563 * need to swap them back. 
7564 */ 7565 if (piocbq->context3) 7566 dmabuf = (struct lpfc_dmabuf *)piocbq->context3; 7567 else 7568 return xritag; 7569 7570 bpl = (struct ulp_bde64 *)dmabuf->virt; 7571 if (!bpl) 7572 return xritag; 7573 7574 for (i = 0; i < numBdes; i++) { 7575 /* Should already be byte swapped. */ 7576 sgl->addr_hi = bpl->addrHigh; 7577 sgl->addr_lo = bpl->addrLow; 7578 7579 sgl->word2 = le32_to_cpu(sgl->word2); 7580 if ((i+1) == numBdes) 7581 bf_set(lpfc_sli4_sge_last, sgl, 1); 7582 else 7583 bf_set(lpfc_sli4_sge_last, sgl, 0); 7584 /* swap the size field back to the cpu so we 7585 * can assign it to the sgl. 7586 */ 7587 bde.tus.w = le32_to_cpu(bpl->tus.w); 7588 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 7589 /* The offsets in the sgl need to be accumulated 7590 * separately for the request and reply lists. 7591 * The request is always first, the reply follows. 7592 */ 7593 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) { 7594 /* add up the reply sg entries */ 7595 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 7596 inbound++; 7597 /* first inbound? reset the offset */ 7598 if (inbound == 1) 7599 offset = 0; 7600 bf_set(lpfc_sli4_sge_offset, sgl, offset); 7601 bf_set(lpfc_sli4_sge_type, sgl, 7602 LPFC_SGE_TYPE_DATA); 7603 offset += bde.tus.f.bdeSize; 7604 } 7605 sgl->word2 = cpu_to_le32(sgl->word2); 7606 bpl++; 7607 sgl++; 7608 } 7609 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) { 7610 /* The addrHigh and addrLow fields of the BDE have not 7611 * been byteswapped yet so they need to be swapped 7612 * before putting them in the sgl. 7613 */ 7614 sgl->addr_hi = 7615 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); 7616 sgl->addr_lo = 7617 cpu_to_le32(icmd->un.genreq64.bdl.addrLow); 7618 sgl->word2 = le32_to_cpu(sgl->word2); 7619 bf_set(lpfc_sli4_sge_last, sgl, 1); 7620 sgl->word2 = cpu_to_le32(sgl->word2); 7621 sgl->sge_len = 7622 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize); 7623 } 7624 return sglq->sli4_xritag; 7625 } 7626 7627 /** 7628 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution 7629 * @phba: Pointer to HBA context object. 7630 * 7631 * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index 7632 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock 7633 * held. 7634 * 7635 * Return: index into SLI4 fast-path FCP queue index. 7636 **/ 7637 static uint32_t 7638 lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba) 7639 { 7640 ++phba->fcp_qidx; 7641 if (phba->fcp_qidx >= phba->cfg_fcp_wq_count) 7642 phba->fcp_qidx = 0; 7643 7644 return phba->fcp_qidx; 7645 } 7646 7647 /** 7648 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry. 7649 * @phba: Pointer to HBA context object. 7650 * @piocb: Pointer to command iocb. 7651 * @wqe: Pointer to the work queue entry. 7652 * 7653 * This routine converts the iocb command to its Work Queue Entry 7654 * equivalent. The wqe pointer should not have any fields set when 7655 * this routine is called because it will memcpy over them. 7656 * This routine does not set the CQ_ID or the WQEC bits in the 7657 * wqe. 7658 * 7659 * Returns: 0 = Success, IOCB_ERROR = Failure. 
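 *
 * A minimal calling sketch, mirroring its use in __lpfc_sli_issue_iocb_s4()
 * further below (sglq setup, locking and error handling elided):
 *
 *	union lpfc_wqe wqe;
 *
 *	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe) == 0)
 *		lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe);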
7660 **/ 7661 static int 7662 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, 7663 union lpfc_wqe *wqe) 7664 { 7665 uint32_t xmit_len = 0, total_len = 0; 7666 uint8_t ct = 0; 7667 uint32_t fip; 7668 uint32_t abort_tag; 7669 uint8_t command_type = ELS_COMMAND_NON_FIP; 7670 uint8_t cmnd; 7671 uint16_t xritag; 7672 uint16_t abrt_iotag; 7673 struct lpfc_iocbq *abrtiocbq; 7674 struct ulp_bde64 *bpl = NULL; 7675 uint32_t els_id = LPFC_ELS_ID_DEFAULT; 7676 int numBdes, i; 7677 struct ulp_bde64 bde; 7678 struct lpfc_nodelist *ndlp; 7679 uint32_t *pcmd; 7680 uint32_t if_type; 7681 7682 fip = phba->hba_flag & HBA_FIP_SUPPORT; 7683 /* The fcp commands will set command type */ 7684 if (iocbq->iocb_flag & LPFC_IO_FCP) 7685 command_type = FCP_COMMAND; 7686 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)) 7687 command_type = ELS_COMMAND_FIP; 7688 else 7689 command_type = ELS_COMMAND_NON_FIP; 7690 7691 /* Some of the fields are in the right position already */ 7692 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 7693 abort_tag = (uint32_t) iocbq->iotag; 7694 xritag = iocbq->sli4_xritag; 7695 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ 7696 /* words0-2 bpl convert bde */ 7697 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 7698 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 7699 sizeof(struct ulp_bde64); 7700 bpl = (struct ulp_bde64 *) 7701 ((struct lpfc_dmabuf *)iocbq->context3)->virt; 7702 if (!bpl) 7703 return IOCB_ERROR; 7704 7705 /* Should already be byte swapped. */ 7706 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh); 7707 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow); 7708 /* swap the size field back to the cpu so we 7709 * can assign it to the sgl. 7710 */ 7711 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); 7712 xmit_len = wqe->generic.bde.tus.f.bdeSize; 7713 total_len = 0; 7714 for (i = 0; i < numBdes; i++) { 7715 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 7716 total_len += bde.tus.f.bdeSize; 7717 } 7718 } else 7719 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; 7720 7721 iocbq->iocb.ulpIoTag = iocbq->iotag; 7722 cmnd = iocbq->iocb.ulpCommand; 7723 7724 switch (iocbq->iocb.ulpCommand) { 7725 case CMD_ELS_REQUEST64_CR: 7726 ndlp = (struct lpfc_nodelist *)iocbq->context1; 7727 if (!iocbq->iocb.ulpLe) { 7728 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7729 "2007 Only Limited Edition cmd Format" 7730 " supported 0x%x\n", 7731 iocbq->iocb.ulpCommand); 7732 return IOCB_ERROR; 7733 } 7734 7735 wqe->els_req.payload_len = xmit_len; 7736 /* Els_reguest64 has a TMO */ 7737 bf_set(wqe_tmo, &wqe->els_req.wqe_com, 7738 iocbq->iocb.ulpTimeout); 7739 /* Need a VF for word 4 set the vf bit*/ 7740 bf_set(els_req64_vf, &wqe->els_req, 0); 7741 /* And a VFID for word 12 */ 7742 bf_set(els_req64_vfid, &wqe->els_req, 0); 7743 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 7744 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 7745 iocbq->iocb.ulpContext); 7746 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 7747 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0); 7748 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 7749 if (command_type == ELS_COMMAND_FIP) 7750 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 7751 >> LPFC_FIP_ELS_ID_SHIFT); 7752 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 7753 iocbq->context2)->virt); 7754 if_type = bf_get(lpfc_sli_intf_if_type, 7755 &phba->sli4_hba.sli_intf); 7756 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 7757 if (pcmd && (*pcmd == ELS_CMD_FLOGI || 7758 *pcmd == ELS_CMD_SCR || 7759 *pcmd == 
ELS_CMD_PLOGI)) { 7760 bf_set(els_req64_sp, &wqe->els_req, 1); 7761 bf_set(els_req64_sid, &wqe->els_req, 7762 iocbq->vport->fc_myDID); 7763 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); 7764 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 7765 phba->vpi_ids[phba->pport->vpi]); 7766 } else if (iocbq->context1) { 7767 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0); 7768 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 7769 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 7770 } 7771 } 7772 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, 7773 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 7774 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 7775 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 7776 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 7777 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); 7778 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); 7779 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); 7780 break; 7781 case CMD_XMIT_SEQUENCE64_CX: 7782 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, 7783 iocbq->iocb.un.ulpWord[3]); 7784 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, 7785 iocbq->iocb.unsli3.rcvsli3.ox_id); 7786 /* The entire sequence is transmitted for this IOCB */ 7787 xmit_len = total_len; 7788 cmnd = CMD_XMIT_SEQUENCE64_CR; 7789 if (phba->link_flag & LS_LOOPBACK_MODE) 7790 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); 7791 case CMD_XMIT_SEQUENCE64_CR: 7792 /* word3 iocb=io_tag32 wqe=reserved */ 7793 wqe->xmit_sequence.rsvd3 = 0; 7794 /* word4 relative_offset memcpy */ 7795 /* word5 r_ctl/df_ctl memcpy */ 7796 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); 7797 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); 7798 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, 7799 LPFC_WQE_IOD_WRITE); 7800 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, 7801 LPFC_WQE_LENLOC_WORD12); 7802 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 7803 wqe->xmit_sequence.xmit_len = xmit_len; 7804 command_type = OTHER_COMMAND; 7805 break; 7806 case CMD_XMIT_BCAST64_CN: 7807 /* word3 iocb=iotag32 wqe=seq_payload_len */ 7808 wqe->xmit_bcast64.seq_payload_len = xmit_len; 7809 /* word4 iocb=rsvd wqe=rsvd */ 7810 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ 7811 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ 7812 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com, 7813 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 7814 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1); 7815 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE); 7816 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, 7817 LPFC_WQE_LENLOC_WORD3); 7818 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); 7819 break; 7820 case CMD_FCP_IWRITE64_CR: 7821 command_type = FCP_COMMAND_DATA_OUT; 7822 /* word3 iocb=iotag wqe=payload_offset_len */ 7823 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 7824 wqe->fcp_iwrite.payload_offset_len = 7825 xmit_len + sizeof(struct fcp_rsp); 7826 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 7827 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 7828 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, 7829 iocbq->iocb.ulpFCP2Rcvy); 7830 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); 7831 /* Always open the exchange */ 7832 bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0); 7833 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); 7834 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); 7835 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, 7836 LPFC_WQE_LENLOC_WORD4); 7837 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0); 7838 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, 
iocbq->iocb.ulpPU); 7839 break; 7840 case CMD_FCP_IREAD64_CR: 7841 /* word3 iocb=iotag wqe=payload_offset_len */ 7842 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 7843 wqe->fcp_iread.payload_offset_len = 7844 xmit_len + sizeof(struct fcp_rsp); 7845 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 7846 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 7847 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, 7848 iocbq->iocb.ulpFCP2Rcvy); 7849 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); 7850 /* Always open the exchange */ 7851 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); 7852 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); 7853 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); 7854 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, 7855 LPFC_WQE_LENLOC_WORD4); 7856 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0); 7857 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 7858 break; 7859 case CMD_FCP_ICMND64_CR: 7860 /* word3 iocb=IO_TAG wqe=reserved */ 7861 wqe->fcp_icmd.rsrvd3 = 0; 7862 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); 7863 /* Always open the exchange */ 7864 bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0); 7865 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); 7866 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); 7867 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); 7868 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 7869 LPFC_WQE_LENLOC_NONE); 7870 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); 7871 break; 7872 case CMD_GEN_REQUEST64_CR: 7873 /* For this command calculate the xmit length of the 7874 * request bde. 7875 */ 7876 xmit_len = 0; 7877 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 7878 sizeof(struct ulp_bde64); 7879 for (i = 0; i < numBdes; i++) { 7880 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 7881 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 7882 break; 7883 xmit_len += bde.tus.f.bdeSize; 7884 } 7885 /* word3 iocb=IO_TAG wqe=request_payload_len */ 7886 wqe->gen_req.request_payload_len = xmit_len; 7887 /* word4 iocb=parameter wqe=relative_offset memcpy */ 7888 /* word5 [rctl, type, df_ctl, la] copied in memcpy */ 7889 /* word6 context tag copied in memcpy */ 7890 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { 7891 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 7892 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7893 "2015 Invalid CT %x command 0x%x\n", 7894 ct, iocbq->iocb.ulpCommand); 7895 return IOCB_ERROR; 7896 } 7897 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0); 7898 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout); 7899 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU); 7900 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); 7901 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); 7902 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); 7903 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 7904 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 7905 command_type = OTHER_COMMAND; 7906 break; 7907 case CMD_XMIT_ELS_RSP64_CX: 7908 ndlp = (struct lpfc_nodelist *)iocbq->context1; 7909 /* words0-2 BDE memcpy */ 7910 /* word3 iocb=iotag32 wqe=response_payload_len */ 7911 wqe->xmit_els_rsp.response_payload_len = xmit_len; 7912 /* word4 iocb=did wge=rsvd. 
*/ 7913 wqe->xmit_els_rsp.rsvd4 = 0; 7914 /* word5 iocb=rsvd wge=did */ 7915 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, 7916 iocbq->iocb.un.elsreq64.remoteID); 7917 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 7918 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 7919 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); 7920 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7921 iocbq->iocb.unsli3.rcvsli3.ox_id); 7922 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 7923 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 7924 phba->vpi_ids[iocbq->vport->vpi]); 7925 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1); 7926 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); 7927 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); 7928 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, 7929 LPFC_WQE_LENLOC_WORD3); 7930 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 7931 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, 7932 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 7933 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 7934 iocbq->context2)->virt); 7935 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 7936 bf_set(els_req64_sp, &wqe->els_req, 1); 7937 bf_set(els_req64_sid, &wqe->els_req, 7938 iocbq->vport->fc_myDID); 7939 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); 7940 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 7941 phba->vpi_ids[phba->pport->vpi]); 7942 } 7943 command_type = OTHER_COMMAND; 7944 break; 7945 case CMD_CLOSE_XRI_CN: 7946 case CMD_ABORT_XRI_CN: 7947 case CMD_ABORT_XRI_CX: 7948 /* words 0-2 memcpy should be 0 rserved */ 7949 /* port will send abts */ 7950 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag; 7951 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) { 7952 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag]; 7953 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK; 7954 } else 7955 fip = 0; 7956 7957 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip) 7958 /* 7959 * The link is down, or the command was ELS_FIP 7960 * so the fw does not need to send abts 7961 * on the wire. 7962 */ 7963 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); 7964 else 7965 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 7966 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 7967 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */ 7968 wqe->abort_cmd.rsrvd5 = 0; 7969 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com, 7970 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 7971 abort_tag = iocbq->iocb.un.acxri.abortIoTag; 7972 /* 7973 * The abort handler will send us CMD_ABORT_XRI_CN or 7974 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX 7975 */ 7976 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 7977 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); 7978 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, 7979 LPFC_WQE_LENLOC_NONE); 7980 cmnd = CMD_ABORT_XRI_CX; 7981 command_type = OTHER_COMMAND; 7982 xritag = 0; 7983 break; 7984 case CMD_XMIT_BLS_RSP64_CX: 7985 /* As BLS ABTS RSP WQE is very different from other WQEs, 7986 * we re-construct this WQE here based on information in 7987 * iocbq from scratch. 7988 */ 7989 memset(wqe, 0, sizeof(union lpfc_wqe)); 7990 /* OX_ID is invariable to who sent ABTS to CT exchange */ 7991 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, 7992 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); 7993 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) == 7994 LPFC_ABTS_UNSOL_INT) { 7995 /* ABTS sent by initiator to CT exchange, the 7996 * RX_ID field will be filled with the newly 7997 * allocated responder XRI. 
7998 */ 7999 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 8000 iocbq->sli4_xritag); 8001 } else { 8002 /* ABTS sent by responder to CT exchange, the 8003 * RX_ID field will be filled with the responder 8004 * RX_ID from ABTS. 8005 */ 8006 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 8007 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp)); 8008 } 8009 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); 8010 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); 8011 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, 8012 iocbq->iocb.ulpContext); 8013 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1); 8014 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com, 8015 LPFC_WQE_LENLOC_NONE); 8016 /* Overwrite the pre-set comnd type with OTHER_COMMAND */ 8017 command_type = OTHER_COMMAND; 8018 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) { 8019 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp, 8020 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp)); 8021 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp, 8022 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp)); 8023 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp, 8024 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); 8025 } 8026 8027 break; 8028 case CMD_XRI_ABORTED_CX: 8029 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 8030 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ 8031 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */ 8032 case CMD_FCP_TRSP64_CX: /* Target mode rcv */ 8033 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */ 8034 default: 8035 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8036 "2014 Invalid command 0x%x\n", 8037 iocbq->iocb.ulpCommand); 8038 return IOCB_ERROR; 8039 break; 8040 } 8041 8042 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 8043 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 8044 wqe->generic.wqe_com.abort_tag = abort_tag; 8045 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type); 8046 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd); 8047 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass); 8048 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 8049 return 0; 8050 } 8051 8052 /** 8053 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb 8054 * @phba: Pointer to HBA context object. 8055 * @ring_number: SLI ring number to issue iocb on. 8056 * @piocb: Pointer to command iocb. 8057 * @flag: Flag indicating if this command can be put into txq. 8058 * 8059 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue 8060 * an iocb command to an HBA with SLI-4 interface spec. 8061 * 8062 * This function is called with hbalock held. The function will return success 8063 * after it successfully submit the iocb to firmware or after adding to the 8064 * txq. 
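 *
 * Callers that do not already hold the hbalock normally go through the
 * lpfc_sli_issue_iocb() wrapper instead; the direct, lock-held call pattern
 * (as used by that wrapper) looks like:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);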
8065 **/
8066 static int
8067 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8068 struct lpfc_iocbq *piocb, uint32_t flag)
8069 {
8070 struct lpfc_sglq *sglq;
8071 union lpfc_wqe wqe;
8072 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
8073
8074 if (piocb->sli4_xritag == NO_XRI) {
8075 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
8076 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
8077 piocb->iocb.ulpCommand == CMD_XMIT_BLS_RSP64_CX)
8078 sglq = NULL;
8079 else {
8080 if (pring->txq_cnt) {
8081 if (!(flag & SLI_IOCB_RET_IOCB)) {
8082 __lpfc_sli_ringtx_put(phba,
8083 pring, piocb);
8084 return IOCB_SUCCESS;
8085 } else {
8086 return IOCB_BUSY;
8087 }
8088 } else {
8089 sglq = __lpfc_sli_get_sglq(phba, piocb);
8090 if (!sglq) {
8091 if (!(flag & SLI_IOCB_RET_IOCB)) {
8092 __lpfc_sli_ringtx_put(phba,
8093 pring,
8094 piocb);
8095 return IOCB_SUCCESS;
8096 } else
8097 return IOCB_BUSY;
8098 }
8099 }
8100 }
8101 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
8102 /* These IO's already have an XRI and a mapped sgl. */
8103 sglq = NULL;
8104 } else {
8105 /*
8106 * This is a continuation of a command (CX), so this
8107 * sglq is on the active list
8108 */
8109 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
8110 if (!sglq)
8111 return IOCB_ERROR;
8112 }
8113
8114 if (sglq) {
8115 piocb->sli4_lxritag = sglq->sli4_lxritag;
8116 piocb->sli4_xritag = sglq->sli4_xritag;
8117 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
8118 return IOCB_ERROR;
8119 }
8120
8121 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
8122 return IOCB_ERROR;
8123
8124 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
8125 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
8126 /*
8127 * For FCP command IOCB, get a new WQ index to distribute
8128 * WQE across the WQs. On the other hand, an abort IOCB
8129 * carries the same WQ index as the original command
8130 * IOCB.
8131 */
8132 if (piocb->iocb_flag & LPFC_IO_FCP)
8133 piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
8134 if (unlikely(!phba->sli4_hba.fcp_wq))
8135 return IOCB_ERROR;
8136 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
8137 &wqe))
8138 return IOCB_ERROR;
8139 } else {
8140 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
8141 return IOCB_ERROR;
8142 }
8143 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
8144
8145 return 0;
8146 }
8147
8148 /**
8149 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
8150 *
8151 * This routine wraps the actual lockless IOCB issuing routine through the
8152 * function pointer in the lpfc_hba struct.
8153 *
8154 * Return codes:
8155 * IOCB_ERROR - Error
8156 * IOCB_SUCCESS - Success
8157 * IOCB_BUSY - Busy
8158 **/
8159 int
8160 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8161 struct lpfc_iocbq *piocb, uint32_t flag)
8162 {
8163 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
8164 }
8165
8166 /**
8167 * lpfc_sli_api_table_setup - Set up sli api function jump table
8168 * @phba: The hba struct for which this call is being executed.
8169 * @dev_grp: The HBA PCI-Device group number.
8170 *
8171 * This routine sets up the SLI interface API function jump table in @phba
8172 * struct.
8173 * Returns: 0 - success, -ENODEV - failure.
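 *
 * After setup, callers dispatch through the jump table rather than the
 * SLI-revision specific routines directly; a sketch of the dispatch path
 * (see __lpfc_sli_issue_iocb() above):
 *
 *	lpfc_sli_api_table_setup(phba, LPFC_PCI_DEV_OC);
 *	...
 *	rc = phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);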
8174 **/ 8175 int 8176 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 8177 { 8178 8179 switch (dev_grp) { 8180 case LPFC_PCI_DEV_LP: 8181 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3; 8182 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3; 8183 break; 8184 case LPFC_PCI_DEV_OC: 8185 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4; 8186 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4; 8187 break; 8188 default: 8189 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8190 "1419 Invalid HBA PCI-device group: 0x%x\n", 8191 dev_grp); 8192 return -ENODEV; 8193 break; 8194 } 8195 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq; 8196 return 0; 8197 } 8198 8199 /** 8200 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb 8201 * @phba: Pointer to HBA context object. 8202 * @pring: Pointer to driver SLI ring object. 8203 * @piocb: Pointer to command iocb. 8204 * @flag: Flag indicating if this command can be put into txq. 8205 * 8206 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb 8207 * function. This function gets the hbalock and calls 8208 * __lpfc_sli_issue_iocb function and will return the error returned 8209 * by __lpfc_sli_issue_iocb function. This wrapper is used by 8210 * functions which do not hold hbalock. 8211 **/ 8212 int 8213 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 8214 struct lpfc_iocbq *piocb, uint32_t flag) 8215 { 8216 unsigned long iflags; 8217 int rc; 8218 8219 spin_lock_irqsave(&phba->hbalock, iflags); 8220 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 8221 spin_unlock_irqrestore(&phba->hbalock, iflags); 8222 8223 return rc; 8224 } 8225 8226 /** 8227 * lpfc_extra_ring_setup - Extra ring setup function 8228 * @phba: Pointer to HBA context object. 8229 * 8230 * This function is called while driver attaches with the 8231 * HBA to setup the extra ring. The extra ring is used 8232 * only when driver needs to support target mode functionality 8233 * or IP over FC functionalities. 8234 * 8235 * This function is called with no lock held. 8236 **/ 8237 static int 8238 lpfc_extra_ring_setup( struct lpfc_hba *phba) 8239 { 8240 struct lpfc_sli *psli; 8241 struct lpfc_sli_ring *pring; 8242 8243 psli = &phba->sli; 8244 8245 /* Adjust cmd/rsp ring iocb entries more evenly */ 8246 8247 /* Take some away from the FCP ring */ 8248 pring = &psli->ring[psli->fcp_ring]; 8249 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 8250 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 8251 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 8252 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 8253 8254 /* and give them to the extra ring */ 8255 pring = &psli->ring[psli->extra_ring]; 8256 8257 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 8258 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 8259 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 8260 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 8261 8262 /* Setup default profile for this ring */ 8263 pring->iotag_max = 4096; 8264 pring->num_mask = 1; 8265 pring->prt[0].profile = 0; /* Mask 0 */ 8266 pring->prt[0].rctl = phba->cfg_multi_ring_rctl; 8267 pring->prt[0].type = phba->cfg_multi_ring_type; 8268 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 8269 return 0; 8270 } 8271 8272 /* lpfc_sli_abts_recover_port - Recover a port that failed an ABTS. 8273 * @vport: pointer to virtual port object. 8274 * @ndlp: nodelist pointer for the impacted rport. 
8275 * 8276 * The driver calls this routine in response to a XRI ABORT CQE 8277 * event from the port. In this event, the driver is required to 8278 * recover its login to the rport even though its login may be valid 8279 * from the driver's perspective. The failed ABTS notice from the 8280 * port indicates the rport is not responding. 8281 */ 8282 static void 8283 lpfc_sli_abts_recover_port(struct lpfc_vport *vport, 8284 struct lpfc_nodelist *ndlp) 8285 { 8286 struct Scsi_Host *shost; 8287 struct lpfc_hba *phba; 8288 unsigned long flags = 0; 8289 8290 shost = lpfc_shost_from_vport(vport); 8291 phba = vport->phba; 8292 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) { 8293 lpfc_printf_log(phba, KERN_INFO, 8294 LOG_SLI, "3093 No rport recovery needed. " 8295 "rport in state 0x%x\n", 8296 ndlp->nlp_state); 8297 return; 8298 } 8299 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8300 "3094 Start rport recovery on shost id 0x%x " 8301 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " 8302 "flags 0x%x\n", 8303 shost->host_no, ndlp->nlp_DID, 8304 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, 8305 ndlp->nlp_flag); 8306 /* 8307 * The rport is not responding. Don't attempt ADISC recovery. 8308 * Remove the FCP-2 flag to force a PLOGI. 8309 */ 8310 spin_lock_irqsave(shost->host_lock, flags); 8311 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 8312 spin_unlock_irqrestore(shost->host_lock, flags); 8313 lpfc_disc_state_machine(vport, ndlp, NULL, 8314 NLP_EVT_DEVICE_RECOVERY); 8315 lpfc_cancel_retry_delay_tmo(vport, ndlp); 8316 spin_lock_irqsave(shost->host_lock, flags); 8317 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 8318 spin_unlock_irqrestore(shost->host_lock, flags); 8319 lpfc_disc_start(vport); 8320 } 8321 8322 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port. 8323 * @phba: Pointer to HBA context object. 8324 * @iocbq: Pointer to iocb object. 8325 * 8326 * The async_event handler calls this routine when it receives 8327 * an ASYNC_STATUS_CN event from the port. The port generates 8328 * this event when an Abort Sequence request to an rport fails 8329 * twice in succession. The abort could be originated by the 8330 * driver or by the port. The ABTS could have been for an ELS 8331 * or FCP IO. The port only generates this event when an ABTS 8332 * fails to complete after one retry. 8333 */ 8334 static void 8335 lpfc_sli_abts_err_handler(struct lpfc_hba *phba, 8336 struct lpfc_iocbq *iocbq) 8337 { 8338 struct lpfc_nodelist *ndlp = NULL; 8339 uint16_t rpi = 0, vpi = 0; 8340 struct lpfc_vport *vport = NULL; 8341 8342 /* The rpi in the ulpContext is vport-sensitive. */ 8343 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag; 8344 rpi = iocbq->iocb.ulpContext; 8345 8346 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8347 "3092 Port generated ABTS async event " 8348 "on vpi %d rpi %d status 0x%x\n", 8349 vpi, rpi, iocbq->iocb.ulpStatus); 8350 8351 vport = lpfc_find_vport_by_vpid(phba, vpi); 8352 if (!vport) 8353 goto err_exit; 8354 ndlp = lpfc_findnode_rpi(vport, rpi); 8355 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 8356 goto err_exit; 8357 8358 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) 8359 lpfc_sli_abts_recover_port(vport, ndlp); 8360 return; 8361 8362 err_exit: 8363 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8364 "3095 Event Context not found, no " 8365 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n", 8366 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus, 8367 vpi, rpi); 8368 } 8369 8370 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port. 8371 * @phba: pointer to HBA context object. 
8372 * @ndlp: nodelist pointer for the impacted rport.
8373 * @axri: pointer to the wcqe containing the failed exchange.
8374 *
8375 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
8376 * port. The port generates this event when an abort exchange request to an
8377 * rport fails twice in succession with no reply. The abort could be originated
8378 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
8379 */
8380 void
8381 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
8382 struct lpfc_nodelist *ndlp,
8383 struct sli4_wcqe_xri_aborted *axri)
8384 {
8385 struct lpfc_vport *vport;
8386
8387 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
8388 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8389 "3115 Node Context not found, driver "
8390 "ignoring abts err event\n"); /* nothing to recover without a valid node */ return; }
8391 vport = ndlp->vport;
8392 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8393 "3116 Port generated FCP XRI ABORT event on "
8394 "vpi %d rpi %d xri x%x status 0x%x\n",
8395 ndlp->vport->vpi, ndlp->nlp_rpi,
8396 bf_get(lpfc_wcqe_xa_xri, axri),
8397 bf_get(lpfc_wcqe_xa_status, axri));
8398
8399 if (bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT)
8400 lpfc_sli_abts_recover_port(vport, ndlp);
8401 }
8402
8403 /**
8404 * lpfc_sli_async_event_handler - ASYNC iocb handler function
8405 * @phba: Pointer to HBA context object.
8406 * @pring: Pointer to driver SLI ring object.
8407 * @iocbq: Pointer to iocb object.
8408 *
8409 * This function is called by the slow ring event handler
8410 * function when there is an ASYNC event iocb in the ring.
8411 * This function is called with no lock held.
8412 * Currently this function handles only temperature related
8413 * ASYNC events. The function decodes the temperature sensor
8414 * event message and posts events for the management applications.
8415 **/
8416 static void
8417 lpfc_sli_async_event_handler(struct lpfc_hba * phba,
8418 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
8419 {
8420 IOCB_t *icmd;
8421 uint16_t evt_code;
8422 struct temp_event temp_event_data;
8423 struct Scsi_Host *shost;
8424 uint32_t *iocb_w;
8425
8426 icmd = &iocbq->iocb;
8427 evt_code = icmd->un.asyncstat.evt_code;
8428
8429 switch (evt_code) {
8430 case ASYNC_TEMP_WARN:
8431 case ASYNC_TEMP_SAFE:
8432 temp_event_data.data = (uint32_t) icmd->ulpContext;
8433 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
8434 if (evt_code == ASYNC_TEMP_WARN) {
8435 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
8436 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
8437 "0347 Adapter is very hot, please take "
8438 "corrective action. temperature : %d Celsius\n",
8439 (uint32_t) icmd->ulpContext);
8440 } else {
8441 temp_event_data.event_code = LPFC_NORMAL_TEMP;
8442 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
8443 "0340 Adapter temperature is OK now.
" 8444 "temperature : %d Celsius\n", 8445 (uint32_t) icmd->ulpContext); 8446 } 8447 8448 /* Send temperature change event to applications */ 8449 shost = lpfc_shost_from_vport(phba->pport); 8450 fc_host_post_vendor_event(shost, fc_get_event_number(), 8451 sizeof(temp_event_data), (char *) &temp_event_data, 8452 LPFC_NL_VENDOR_ID); 8453 break; 8454 case ASYNC_STATUS_CN: 8455 lpfc_sli_abts_err_handler(phba, iocbq); 8456 break; 8457 default: 8458 iocb_w = (uint32_t *) icmd; 8459 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8460 "0346 Ring %d handler: unexpected ASYNC_STATUS" 8461 " evt_code 0x%x\n" 8462 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" 8463 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" 8464 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" 8465 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n", 8466 pring->ringno, icmd->un.asyncstat.evt_code, 8467 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3], 8468 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7], 8469 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11], 8470 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]); 8471 8472 break; 8473 } 8474 } 8475 8476 8477 /** 8478 * lpfc_sli_setup - SLI ring setup function 8479 * @phba: Pointer to HBA context object. 8480 * 8481 * lpfc_sli_setup sets up rings of the SLI interface with 8482 * number of iocbs per ring and iotags. This function is 8483 * called while driver attach to the HBA and before the 8484 * interrupts are enabled. So there is no need for locking. 8485 * 8486 * This function always returns 0. 8487 **/ 8488 int 8489 lpfc_sli_setup(struct lpfc_hba *phba) 8490 { 8491 int i, totiocbsize = 0; 8492 struct lpfc_sli *psli = &phba->sli; 8493 struct lpfc_sli_ring *pring; 8494 8495 psli->num_rings = MAX_CONFIGURED_RINGS; 8496 psli->sli_flag = 0; 8497 psli->fcp_ring = LPFC_FCP_RING; 8498 psli->next_ring = LPFC_FCP_NEXT_RING; 8499 psli->extra_ring = LPFC_EXTRA_RING; 8500 8501 psli->iocbq_lookup = NULL; 8502 psli->iocbq_lookup_len = 0; 8503 psli->last_iotag = 0; 8504 8505 for (i = 0; i < psli->num_rings; i++) { 8506 pring = &psli->ring[i]; 8507 switch (i) { 8508 case LPFC_FCP_RING: /* ring 0 - FCP */ 8509 /* numCiocb and numRiocb are used in config_port */ 8510 pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 8511 pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 8512 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 8513 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 8514 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 8515 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 8516 pring->sizeCiocb = (phba->sli_rev == 3) ? 8517 SLI3_IOCB_CMD_SIZE : 8518 SLI2_IOCB_CMD_SIZE; 8519 pring->sizeRiocb = (phba->sli_rev == 3) ? 8520 SLI3_IOCB_RSP_SIZE : 8521 SLI2_IOCB_RSP_SIZE; 8522 pring->iotag_ctr = 0; 8523 pring->iotag_max = 8524 (phba->cfg_hba_queue_depth * 2); 8525 pring->fast_iotag = pring->iotag_max; 8526 pring->num_mask = 0; 8527 break; 8528 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ 8529 /* numCiocb and numRiocb are used in config_port */ 8530 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 8531 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 8532 pring->sizeCiocb = (phba->sli_rev == 3) ? 8533 SLI3_IOCB_CMD_SIZE : 8534 SLI2_IOCB_CMD_SIZE; 8535 pring->sizeRiocb = (phba->sli_rev == 3) ? 
8536 SLI3_IOCB_RSP_SIZE : 8537 SLI2_IOCB_RSP_SIZE; 8538 pring->iotag_max = phba->cfg_hba_queue_depth; 8539 pring->num_mask = 0; 8540 break; 8541 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 8542 /* numCiocb and numRiocb are used in config_port */ 8543 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 8544 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 8545 pring->sizeCiocb = (phba->sli_rev == 3) ? 8546 SLI3_IOCB_CMD_SIZE : 8547 SLI2_IOCB_CMD_SIZE; 8548 pring->sizeRiocb = (phba->sli_rev == 3) ? 8549 SLI3_IOCB_RSP_SIZE : 8550 SLI2_IOCB_RSP_SIZE; 8551 pring->fast_iotag = 0; 8552 pring->iotag_ctr = 0; 8553 pring->iotag_max = 4096; 8554 pring->lpfc_sli_rcv_async_status = 8555 lpfc_sli_async_event_handler; 8556 pring->num_mask = LPFC_MAX_RING_MASK; 8557 pring->prt[0].profile = 0; /* Mask 0 */ 8558 pring->prt[0].rctl = FC_RCTL_ELS_REQ; 8559 pring->prt[0].type = FC_TYPE_ELS; 8560 pring->prt[0].lpfc_sli_rcv_unsol_event = 8561 lpfc_els_unsol_event; 8562 pring->prt[1].profile = 0; /* Mask 1 */ 8563 pring->prt[1].rctl = FC_RCTL_ELS_REP; 8564 pring->prt[1].type = FC_TYPE_ELS; 8565 pring->prt[1].lpfc_sli_rcv_unsol_event = 8566 lpfc_els_unsol_event; 8567 pring->prt[2].profile = 0; /* Mask 2 */ 8568 /* NameServer Inquiry */ 8569 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; 8570 /* NameServer */ 8571 pring->prt[2].type = FC_TYPE_CT; 8572 pring->prt[2].lpfc_sli_rcv_unsol_event = 8573 lpfc_ct_unsol_event; 8574 pring->prt[3].profile = 0; /* Mask 3 */ 8575 /* NameServer response */ 8576 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; 8577 /* NameServer */ 8578 pring->prt[3].type = FC_TYPE_CT; 8579 pring->prt[3].lpfc_sli_rcv_unsol_event = 8580 lpfc_ct_unsol_event; 8581 /* abort unsolicited sequence */ 8582 pring->prt[4].profile = 0; /* Mask 4 */ 8583 pring->prt[4].rctl = FC_RCTL_BA_ABTS; 8584 pring->prt[4].type = FC_TYPE_BLS; 8585 pring->prt[4].lpfc_sli_rcv_unsol_event = 8586 lpfc_sli4_ct_abort_unsol_event; 8587 break; 8588 } 8589 totiocbsize += (pring->numCiocb * pring->sizeCiocb) + 8590 (pring->numRiocb * pring->sizeRiocb); 8591 } 8592 if (totiocbsize > MAX_SLIM_IOCB_SIZE) { 8593 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 8594 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in " 8595 "SLI2 SLIM Data: x%x x%lx\n", 8596 phba->brd_no, totiocbsize, 8597 (unsigned long) MAX_SLIM_IOCB_SIZE); 8598 } 8599 if (phba->cfg_multi_ring_support == 2) 8600 lpfc_extra_ring_setup(phba); 8601 8602 return 0; 8603 } 8604 8605 /** 8606 * lpfc_sli_queue_setup - Queue initialization function 8607 * @phba: Pointer to HBA context object. 8608 * 8609 * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each 8610 * ring. This function also initializes ring indices of each ring. 8611 * This function is called during the initialization of the SLI 8612 * interface of an HBA. 8613 * This function is called with no lock held and always returns 8614 * 1. 
8615 **/ 8616 int 8617 lpfc_sli_queue_setup(struct lpfc_hba *phba) 8618 { 8619 struct lpfc_sli *psli; 8620 struct lpfc_sli_ring *pring; 8621 int i; 8622 8623 psli = &phba->sli; 8624 spin_lock_irq(&phba->hbalock); 8625 INIT_LIST_HEAD(&psli->mboxq); 8626 INIT_LIST_HEAD(&psli->mboxq_cmpl); 8627 /* Initialize list headers for txq and txcmplq as double linked lists */ 8628 for (i = 0; i < psli->num_rings; i++) { 8629 pring = &psli->ring[i]; 8630 pring->ringno = i; 8631 pring->next_cmdidx = 0; 8632 pring->local_getidx = 0; 8633 pring->cmdidx = 0; 8634 INIT_LIST_HEAD(&pring->txq); 8635 INIT_LIST_HEAD(&pring->txcmplq); 8636 INIT_LIST_HEAD(&pring->iocb_continueq); 8637 INIT_LIST_HEAD(&pring->iocb_continue_saveq); 8638 INIT_LIST_HEAD(&pring->postbufq); 8639 } 8640 spin_unlock_irq(&phba->hbalock); 8641 return 1; 8642 } 8643 8644 /** 8645 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system 8646 * @phba: Pointer to HBA context object. 8647 * 8648 * This routine flushes the mailbox command subsystem. It will unconditionally 8649 * flush all the mailbox commands in the three possible stages in the mailbox 8650 * command sub-system: pending mailbox command queue; the outstanding mailbox 8651 * command; and completed mailbox command queue. It is caller's responsibility 8652 * to make sure that the driver is in the proper state to flush the mailbox 8653 * command sub-system. Namely, the posting of mailbox commands into the 8654 * pending mailbox command queue from the various clients must be stopped; 8655 * either the HBA is in a state that it will never works on the outstanding 8656 * mailbox command (such as in EEH or ERATT conditions) or the outstanding 8657 * mailbox command has been completed. 8658 **/ 8659 static void 8660 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) 8661 { 8662 LIST_HEAD(completions); 8663 struct lpfc_sli *psli = &phba->sli; 8664 LPFC_MBOXQ_t *pmb; 8665 unsigned long iflag; 8666 8667 /* Flush all the mailbox commands in the mbox system */ 8668 spin_lock_irqsave(&phba->hbalock, iflag); 8669 /* The pending mailbox command queue */ 8670 list_splice_init(&phba->sli.mboxq, &completions); 8671 /* The outstanding active mailbox command */ 8672 if (psli->mbox_active) { 8673 list_add_tail(&psli->mbox_active->list, &completions); 8674 psli->mbox_active = NULL; 8675 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8676 } 8677 /* The completed mailbox command queue */ 8678 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 8679 spin_unlock_irqrestore(&phba->hbalock, iflag); 8680 8681 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ 8682 while (!list_empty(&completions)) { 8683 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); 8684 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; 8685 if (pmb->mbox_cmpl) 8686 pmb->mbox_cmpl(phba, pmb); 8687 } 8688 } 8689 8690 /** 8691 * lpfc_sli_host_down - Vport cleanup function 8692 * @vport: Pointer to virtual port object. 8693 * 8694 * lpfc_sli_host_down is called to clean up the resources 8695 * associated with a vport before destroying virtual 8696 * port data structures. 8697 * This function does following operations: 8698 * - Free discovery resources associated with this virtual 8699 * port. 8700 * - Free iocbs associated with this virtual port in 8701 * the txq. 8702 * - Send abort for all iocb commands associated with this 8703 * vport in txcmplq. 8704 * 8705 * This function is called with no lock held and always returns 1. 
8706 **/ 8707 int 8708 lpfc_sli_host_down(struct lpfc_vport *vport) 8709 { 8710 LIST_HEAD(completions); 8711 struct lpfc_hba *phba = vport->phba; 8712 struct lpfc_sli *psli = &phba->sli; 8713 struct lpfc_sli_ring *pring; 8714 struct lpfc_iocbq *iocb, *next_iocb; 8715 int i; 8716 unsigned long flags = 0; 8717 uint16_t prev_pring_flag; 8718 8719 lpfc_cleanup_discovery_resources(vport); 8720 8721 spin_lock_irqsave(&phba->hbalock, flags); 8722 for (i = 0; i < psli->num_rings; i++) { 8723 pring = &psli->ring[i]; 8724 prev_pring_flag = pring->flag; 8725 /* Only slow rings */ 8726 if (pring->ringno == LPFC_ELS_RING) { 8727 pring->flag |= LPFC_DEFERRED_RING_EVENT; 8728 /* Set the lpfc data pending flag */ 8729 set_bit(LPFC_DATA_READY, &phba->data_flags); 8730 } 8731 /* 8732 * Error everything on the txq since these iocbs have not been 8733 * given to the FW yet. 8734 */ 8735 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 8736 if (iocb->vport != vport) 8737 continue; 8738 list_move_tail(&iocb->list, &completions); 8739 pring->txq_cnt--; 8740 } 8741 8742 /* Next issue ABTS for everything on the txcmplq */ 8743 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, 8744 list) { 8745 if (iocb->vport != vport) 8746 continue; 8747 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 8748 } 8749 8750 pring->flag = prev_pring_flag; 8751 } 8752 8753 spin_unlock_irqrestore(&phba->hbalock, flags); 8754 8755 /* Cancel all the IOCBs from the completions list */ 8756 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 8757 IOERR_SLI_DOWN); 8758 return 1; 8759 } 8760 8761 /** 8762 * lpfc_sli_hba_down - Resource cleanup function for the HBA 8763 * @phba: Pointer to HBA context object. 8764 * 8765 * This function cleans up all iocb, buffers, mailbox commands 8766 * while shutting down the HBA. This function is called with no 8767 * lock held and always returns 1. 8768 * This function does the following to cleanup driver resources: 8769 * - Free discovery resources for each virtual port 8770 * - Cleanup any pending fabric iocbs 8771 * - Iterate through the iocb txq and free each entry 8772 * in the list. 8773 * - Free up any buffer posted to the HBA 8774 * - Free mailbox commands in the mailbox queue. 8775 **/ 8776 int 8777 lpfc_sli_hba_down(struct lpfc_hba *phba) 8778 { 8779 LIST_HEAD(completions); 8780 struct lpfc_sli *psli = &phba->sli; 8781 struct lpfc_sli_ring *pring; 8782 struct lpfc_dmabuf *buf_ptr; 8783 unsigned long flags = 0; 8784 int i; 8785 8786 /* Shutdown the mailbox command sub-system */ 8787 lpfc_sli_mbox_sys_shutdown(phba); 8788 8789 lpfc_hba_down_prep(phba); 8790 8791 lpfc_fabric_abort_hba(phba); 8792 8793 spin_lock_irqsave(&phba->hbalock, flags); 8794 for (i = 0; i < psli->num_rings; i++) { 8795 pring = &psli->ring[i]; 8796 /* Only slow rings */ 8797 if (pring->ringno == LPFC_ELS_RING) { 8798 pring->flag |= LPFC_DEFERRED_RING_EVENT; 8799 /* Set the lpfc data pending flag */ 8800 set_bit(LPFC_DATA_READY, &phba->data_flags); 8801 } 8802 8803 /* 8804 * Error everything on the txq since these iocbs have not been 8805 * given to the FW yet. 
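 * They are completed a few lines below through lpfc_sli_cancel_iocbs()
 * with IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN status.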
8806 */ 8807 list_splice_init(&pring->txq, &completions); 8808 pring->txq_cnt = 0; 8809 8810 } 8811 spin_unlock_irqrestore(&phba->hbalock, flags); 8812 8813 /* Cancel all the IOCBs from the completions list */ 8814 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 8815 IOERR_SLI_DOWN); 8816 8817 spin_lock_irqsave(&phba->hbalock, flags); 8818 list_splice_init(&phba->elsbuf, &completions); 8819 phba->elsbuf_cnt = 0; 8820 phba->elsbuf_prev_cnt = 0; 8821 spin_unlock_irqrestore(&phba->hbalock, flags); 8822 8823 while (!list_empty(&completions)) { 8824 list_remove_head(&completions, buf_ptr, 8825 struct lpfc_dmabuf, list); 8826 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 8827 kfree(buf_ptr); 8828 } 8829 8830 /* Return any active mbox cmds */ 8831 del_timer_sync(&psli->mbox_tmo); 8832 8833 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 8834 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 8835 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 8836 8837 return 1; 8838 } 8839 8840 /** 8841 * lpfc_sli_pcimem_bcopy - SLI memory copy function 8842 * @srcp: Source memory pointer. 8843 * @destp: Destination memory pointer. 8844 * @cnt: Number of words required to be copied. 8845 * 8846 * This function is used for copying data between driver memory 8847 * and the SLI memory. This function also changes the endianness 8848 * of each word if native endianness is different from SLI 8849 * endianness. This function can be called with or without 8850 * lock. 8851 **/ 8852 void 8853 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) 8854 { 8855 uint32_t *src = srcp; 8856 uint32_t *dest = destp; 8857 uint32_t ldata; 8858 int i; 8859 8860 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { 8861 ldata = *src; 8862 ldata = le32_to_cpu(ldata); 8863 *dest = ldata; 8864 src++; 8865 dest++; 8866 } 8867 } 8868 8869 8870 /** 8871 * lpfc_sli_bemem_bcopy - SLI memory copy function 8872 * @srcp: Source memory pointer. 8873 * @destp: Destination memory pointer. 8874 * @cnt: Number of words required to be copied. 8875 * 8876 * This function is used for copying data between a data structure 8877 * with big endian representation to local endianness. 8878 * This function can be called with or without lock. 8879 **/ 8880 void 8881 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt) 8882 { 8883 uint32_t *src = srcp; 8884 uint32_t *dest = destp; 8885 uint32_t ldata; 8886 int i; 8887 8888 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) { 8889 ldata = *src; 8890 ldata = be32_to_cpu(ldata); 8891 *dest = ldata; 8892 src++; 8893 dest++; 8894 } 8895 } 8896 8897 /** 8898 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq 8899 * @phba: Pointer to HBA context object. 8900 * @pring: Pointer to driver SLI ring object. 8901 * @mp: Pointer to driver buffer object. 8902 * 8903 * This function is called with no lock held. 8904 * It always return zero after adding the buffer to the postbufq 8905 * buffer list. 8906 **/ 8907 int 8908 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8909 struct lpfc_dmabuf *mp) 8910 { 8911 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up 8912 later */ 8913 spin_lock_irq(&phba->hbalock); 8914 list_add_tail(&mp->list, &pring->postbufq); 8915 pring->postbufq_cnt++; 8916 spin_unlock_irq(&phba->hbalock); 8917 return 0; 8918 } 8919 8920 /** 8921 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer 8922 * @phba: Pointer to HBA context object. 
 *
 * When HBQ is enabled, buffers are searched based on tags. This function
 * allocates a tag for a buffer posted using the CMD_QUE_XRI64_CX iocb. The
 * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
 * does not conflict with tags of buffers posted for unsolicited events.
 * The function returns the allocated tag. The function is called with
 * no locks held.
 **/
uint32_t
lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->buffer_tag_count++;
	/*
	 * Always set QUE_BUFTAG_BIT to distinguish this tag from a tag
	 * assigned by the HBQ.
	 */
	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
	spin_unlock_irq(&phba->hbalock);
	return phba->buffer_tag_count;
}

/**
 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: Buffer tag.
 *
 * Buffers posted using the CMD_QUE_XRI64_CX iocb are in the pring->postbufq
 * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
 * iocb is posted to the response ring with the tag of the buffer.
 * This function searches the pring->postbufq list using the tag
 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
 * iocb. If the buffer is found, the lpfc_dmabuf object of the
 * buffer is returned to the caller; else NULL is returned.
 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    uint32_t tag)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on tag */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->buffer_tag == tag) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0402 Cannot find virtual addr for buffer tag on "
			"ring %d Data x%lx x%p x%p x%x\n",
			pring->ringno, (unsigned long) tag,
			slp->next, slp->prev, pring->postbufq_cnt);

	return NULL;
}

/**
 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @phys: DMA address of the buffer.
 *
 * This function searches the buffer list using the dma_address
 * of an unsolicited event to find the driver's lpfc_dmabuf object
 * corresponding to the dma_address. The function returns the
 * lpfc_dmabuf object if a buffer is found, else it returns NULL.
 * This function is called by the CT and ELS unsolicited event
 * handlers to get the buffer associated with the unsolicited
 * event.
 *
 * This function is called with no lock held.
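 *
 * Usage sketch (illustrative only; the real callers are the CT and ELS
 * unsolicited event handlers elsewhere in the driver, and "phys" here stands
 * for the DMA address reported in the received iocb):
 *
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);         - buffer posted
 *	...                                                  HBA DMAs data in
 *	mp = lpfc_sli_ringpostbuf_get(phba, pring, phys);
 *	if (!mp)
 *		return;    - nothing was posted with that DMA address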
9003 **/ 9004 struct lpfc_dmabuf * 9005 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9006 dma_addr_t phys) 9007 { 9008 struct lpfc_dmabuf *mp, *next_mp; 9009 struct list_head *slp = &pring->postbufq; 9010 9011 /* Search postbufq, from the beginning, looking for a match on phys */ 9012 spin_lock_irq(&phba->hbalock); 9013 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 9014 if (mp->phys == phys) { 9015 list_del_init(&mp->list); 9016 pring->postbufq_cnt--; 9017 spin_unlock_irq(&phba->hbalock); 9018 return mp; 9019 } 9020 } 9021 9022 spin_unlock_irq(&phba->hbalock); 9023 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9024 "0410 Cannot find virtual addr for mapped buf on " 9025 "ring %d Data x%llx x%p x%p x%x\n", 9026 pring->ringno, (unsigned long long)phys, 9027 slp->next, slp->prev, pring->postbufq_cnt); 9028 return NULL; 9029 } 9030 9031 /** 9032 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs 9033 * @phba: Pointer to HBA context object. 9034 * @cmdiocb: Pointer to driver command iocb object. 9035 * @rspiocb: Pointer to driver response iocb object. 9036 * 9037 * This function is the completion handler for the abort iocbs for 9038 * ELS commands. This function is called from the ELS ring event 9039 * handler with no lock held. This function frees memory resources 9040 * associated with the abort iocb. 9041 **/ 9042 static void 9043 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 9044 struct lpfc_iocbq *rspiocb) 9045 { 9046 IOCB_t *irsp = &rspiocb->iocb; 9047 uint16_t abort_iotag, abort_context; 9048 struct lpfc_iocbq *abort_iocb = NULL; 9049 9050 if (irsp->ulpStatus) { 9051 9052 /* 9053 * Assume that the port already completed and returned, or 9054 * will return the iocb. Just Log the message. 9055 */ 9056 abort_context = cmdiocb->iocb.un.acxri.abortContextTag; 9057 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 9058 9059 spin_lock_irq(&phba->hbalock); 9060 if (phba->sli_rev < LPFC_SLI_REV4) { 9061 if (abort_iotag != 0 && 9062 abort_iotag <= phba->sli.last_iotag) 9063 abort_iocb = 9064 phba->sli.iocbq_lookup[abort_iotag]; 9065 } else 9066 /* For sli4 the abort_tag is the XRI, 9067 * so the abort routine puts the iotag of the iocb 9068 * being aborted in the context field of the abort 9069 * IOCB. 9070 */ 9071 abort_iocb = phba->sli.iocbq_lookup[abort_context]; 9072 9073 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, 9074 "0327 Cannot abort els iocb %p " 9075 "with tag %x context %x, abort status %x, " 9076 "abort code %x\n", 9077 abort_iocb, abort_iotag, abort_context, 9078 irsp->ulpStatus, irsp->un.ulpWord[4]); 9079 9080 spin_unlock_irq(&phba->hbalock); 9081 } 9082 lpfc_sli_release_iocbq(phba, cmdiocb); 9083 return; 9084 } 9085 9086 /** 9087 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command 9088 * @phba: Pointer to HBA context object. 9089 * @cmdiocb: Pointer to driver command iocb object. 9090 * @rspiocb: Pointer to driver response iocb object. 9091 * 9092 * The function is called from SLI ring event handler with no 9093 * lock held. This function is the completion handler for ELS commands 9094 * which are aborted. The function frees memory resources used for 9095 * the aborted ELS commands. 
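 *
 * This handler is never issued on its own; it is swapped in for a command's
 * normal completion routine while the driver is unloading, as done in
 * lpfc_sli_issue_abort_iotag() below:
 *
 *	if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
 *		cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
 *	else
 *		cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;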
9096 **/ 9097 static void 9098 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 9099 struct lpfc_iocbq *rspiocb) 9100 { 9101 IOCB_t *irsp = &rspiocb->iocb; 9102 9103 /* ELS cmd tag <ulpIoTag> completes */ 9104 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9105 "0139 Ignoring ELS cmd tag x%x completion Data: " 9106 "x%x x%x x%x\n", 9107 irsp->ulpIoTag, irsp->ulpStatus, 9108 irsp->un.ulpWord[4], irsp->ulpTimeout); 9109 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) 9110 lpfc_ct_free_iocb(phba, cmdiocb); 9111 else 9112 lpfc_els_free_iocb(phba, cmdiocb); 9113 return; 9114 } 9115 9116 /** 9117 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb 9118 * @phba: Pointer to HBA context object. 9119 * @pring: Pointer to driver SLI ring object. 9120 * @cmdiocb: Pointer to driver command iocb object. 9121 * 9122 * This function issues an abort iocb for the provided command iocb down to 9123 * the port. Other than the case the outstanding command iocb is an abort 9124 * request, this function issues abort out unconditionally. This function is 9125 * called with hbalock held. The function returns 0 when it fails due to 9126 * memory allocation failure or when the command iocb is an abort request. 9127 **/ 9128 static int 9129 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9130 struct lpfc_iocbq *cmdiocb) 9131 { 9132 struct lpfc_vport *vport = cmdiocb->vport; 9133 struct lpfc_iocbq *abtsiocbp; 9134 IOCB_t *icmd = NULL; 9135 IOCB_t *iabt = NULL; 9136 int retval; 9137 9138 /* 9139 * There are certain command types we don't want to abort. And we 9140 * don't want to abort commands that are already in the process of 9141 * being aborted. 9142 */ 9143 icmd = &cmdiocb->iocb; 9144 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 9145 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 9146 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 9147 return 0; 9148 9149 /* issue ABTS for this IOCB based on iotag */ 9150 abtsiocbp = __lpfc_sli_get_iocbq(phba); 9151 if (abtsiocbp == NULL) 9152 return 0; 9153 9154 /* This signals the response to set the correct status 9155 * before calling the completion handler 9156 */ 9157 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 9158 9159 iabt = &abtsiocbp->iocb; 9160 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 9161 iabt->un.acxri.abortContextTag = icmd->ulpContext; 9162 if (phba->sli_rev == LPFC_SLI_REV4) { 9163 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; 9164 iabt->un.acxri.abortContextTag = cmdiocb->iotag; 9165 } 9166 else 9167 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 9168 iabt->ulpLe = 1; 9169 iabt->ulpClass = icmd->ulpClass; 9170 9171 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 9172 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx; 9173 if (cmdiocb->iocb_flag & LPFC_IO_FCP) 9174 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX; 9175 9176 if (phba->link_state >= LPFC_LINK_UP) 9177 iabt->ulpCommand = CMD_ABORT_XRI_CN; 9178 else 9179 iabt->ulpCommand = CMD_CLOSE_XRI_CN; 9180 9181 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; 9182 9183 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 9184 "0339 Abort xri x%x, original iotag x%x, " 9185 "abort cmd iotag x%x\n", 9186 iabt->un.acxri.abortIoTag, 9187 iabt->un.acxri.abortContextTag, 9188 abtsiocbp->iotag); 9189 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0); 9190 9191 if (retval) 9192 __lpfc_sli_release_iocbq(phba, abtsiocbp); 9193 9194 /* 9195 * Caller to this routine should check for IOCB_ERROR 9196 * and handle it properly. 
This routine no longer removes the
 * iocb from the txcmplq nor calls the completion handler on IOCB_ERROR.
 */
	return retval;
}

/**
 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function issues an abort iocb for the provided command iocb. In case
 * of unloading, the abort iocb will not be issued to commands on the ELS
 * ring. Instead, the callback function is changed for those commands
 * so that nothing happens when they finish. This function is called with
 * the hbalock held. The function returns 0 when the command iocb is an abort
 * request.
 **/
int
lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	int retval = IOCB_ERROR;
	IOCB_t *icmd = NULL;

	/*
	 * There are certain command types we don't want to abort. And we
	 * don't want to abort commands that are already in the process of
	 * being aborted.
	 */
	icmd = &cmdiocb->iocb;
	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
		return 0;

	/*
	 * If we're unloading, don't abort the iocb on the ELS ring, but change
	 * the callback so that nothing happens when it finishes.
	 */
	if ((vport->load_flag & FC_UNLOADING) &&
	    (pring->ringno == LPFC_ELS_RING)) {
		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
		else
			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
		goto abort_iotag_exit;
	}

	/* Now, try to issue the abort for the cmdiocb */
	retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);

abort_iotag_exit:
	/*
	 * The caller of this routine should check for IOCB_ERROR
	 * and handle it properly. This routine no longer removes the
	 * iocb from the txcmplq nor calls the completion handler on
	 * IOCB_ERROR.
	 */
	return retval;
}

/**
 * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function aborts all iocbs in the given ring and frees all the iocb
 * objects in txq. This function issues abort iocbs unconditionally for all
 * the iocb commands in txcmplq. The iocbs in the txcmplq are not guaranteed
 * to complete before the return of this function. The caller is not required
 * to hold any locks.
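 *
 * For example, lpfc_sli_hba_iocb_abort() below simply applies this routine
 * to every ring on the HBA (condensed sketch):
 *
 *	for (i = 0; i < psli->num_rings; i++)
 *		lpfc_sli_iocb_ring_abort(phba, &psli->ring[i]);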
9269 **/ 9270 static void 9271 lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 9272 { 9273 LIST_HEAD(completions); 9274 struct lpfc_iocbq *iocb, *next_iocb; 9275 9276 if (pring->ringno == LPFC_ELS_RING) 9277 lpfc_fabric_abort_hba(phba); 9278 9279 spin_lock_irq(&phba->hbalock); 9280 9281 /* Take off all the iocbs on txq for cancelling */ 9282 list_splice_init(&pring->txq, &completions); 9283 pring->txq_cnt = 0; 9284 9285 /* Next issue ABTS for everything on the txcmplq */ 9286 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 9287 lpfc_sli_abort_iotag_issue(phba, pring, iocb); 9288 9289 spin_unlock_irq(&phba->hbalock); 9290 9291 /* Cancel all the IOCBs from the completions list */ 9292 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 9293 IOERR_SLI_ABORTED); 9294 } 9295 9296 /** 9297 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 9298 * @phba: pointer to lpfc HBA data structure. 9299 * 9300 * This routine will abort all pending and outstanding iocbs to an HBA. 9301 **/ 9302 void 9303 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) 9304 { 9305 struct lpfc_sli *psli = &phba->sli; 9306 struct lpfc_sli_ring *pring; 9307 int i; 9308 9309 for (i = 0; i < psli->num_rings; i++) { 9310 pring = &psli->ring[i]; 9311 lpfc_sli_iocb_ring_abort(phba, pring); 9312 } 9313 } 9314 9315 /** 9316 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN 9317 * @iocbq: Pointer to driver iocb object. 9318 * @vport: Pointer to driver virtual port object. 9319 * @tgt_id: SCSI ID of the target. 9320 * @lun_id: LUN ID of the scsi device. 9321 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST 9322 * 9323 * This function acts as an iocb filter for functions which abort or count 9324 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return 9325 * 0 if the filtering criteria is met for the given iocb and will return 9326 * 1 if the filtering criteria is not met. 9327 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the 9328 * given iocb is for the SCSI device specified by vport, tgt_id and 9329 * lun_id parameter. 9330 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the 9331 * given iocb is for the SCSI target specified by vport and tgt_id 9332 * parameters. 9333 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the 9334 * given iocb is for the SCSI host associated with the given vport. 9335 * This function is called with no locks held. 
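 *
 * For example (illustrative only), counting the FCP commands still pending
 * on one LUN goes through this filter with LPFC_CTX_LUN:
 *
 *	pending = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
 *
 * whereas a host-level sweep passes LPFC_CTX_HOST, in which case tgt_id and
 * lun_id are ignored.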
9336 **/ 9337 static int 9338 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, 9339 uint16_t tgt_id, uint64_t lun_id, 9340 lpfc_ctx_cmd ctx_cmd) 9341 { 9342 struct lpfc_scsi_buf *lpfc_cmd; 9343 int rc = 1; 9344 9345 if (!(iocbq->iocb_flag & LPFC_IO_FCP)) 9346 return rc; 9347 9348 if (iocbq->vport != vport) 9349 return rc; 9350 9351 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 9352 9353 if (lpfc_cmd->pCmd == NULL) 9354 return rc; 9355 9356 switch (ctx_cmd) { 9357 case LPFC_CTX_LUN: 9358 if ((lpfc_cmd->rdata->pnode) && 9359 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && 9360 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) 9361 rc = 0; 9362 break; 9363 case LPFC_CTX_TGT: 9364 if ((lpfc_cmd->rdata->pnode) && 9365 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) 9366 rc = 0; 9367 break; 9368 case LPFC_CTX_HOST: 9369 rc = 0; 9370 break; 9371 default: 9372 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 9373 __func__, ctx_cmd); 9374 break; 9375 } 9376 9377 return rc; 9378 } 9379 9380 /** 9381 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending 9382 * @vport: Pointer to virtual port. 9383 * @tgt_id: SCSI ID of the target. 9384 * @lun_id: LUN ID of the scsi device. 9385 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 9386 * 9387 * This function returns number of FCP commands pending for the vport. 9388 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP 9389 * commands pending on the vport associated with SCSI device specified 9390 * by tgt_id and lun_id parameters. 9391 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP 9392 * commands pending on the vport associated with SCSI target specified 9393 * by tgt_id parameter. 9394 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP 9395 * commands pending on the vport. 9396 * This function returns the number of iocbs which satisfy the filter. 9397 * This function is called without any lock held. 9398 **/ 9399 int 9400 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 9401 lpfc_ctx_cmd ctx_cmd) 9402 { 9403 struct lpfc_hba *phba = vport->phba; 9404 struct lpfc_iocbq *iocbq; 9405 int sum, i; 9406 9407 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 9408 iocbq = phba->sli.iocbq_lookup[i]; 9409 9410 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id, 9411 ctx_cmd) == 0) 9412 sum++; 9413 } 9414 9415 return sum; 9416 } 9417 9418 /** 9419 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs 9420 * @phba: Pointer to HBA context object 9421 * @cmdiocb: Pointer to command iocb object. 9422 * @rspiocb: Pointer to response iocb object. 9423 * 9424 * This function is called when an aborted FCP iocb completes. This 9425 * function is called by the ring event handler with no lock held. 9426 * This function frees the iocb. 
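 *
 * This handler is installed on the ABTS iocb built by lpfc_sli_abort_iocb()
 * below before the abort is issued:
 *
 *	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
 *	ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocb, 0);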
9427 **/ 9428 void 9429 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 9430 struct lpfc_iocbq *rspiocb) 9431 { 9432 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9433 "3096 ABORT_XRI_CN completing on xri x%x " 9434 "original iotag x%x, abort cmd iotag x%x " 9435 "status 0x%x, reason 0x%x\n", 9436 cmdiocb->iocb.un.acxri.abortContextTag, 9437 cmdiocb->iocb.un.acxri.abortIoTag, 9438 cmdiocb->iotag, rspiocb->iocb.ulpStatus, 9439 rspiocb->iocb.un.ulpWord[4]); 9440 lpfc_sli_release_iocbq(phba, cmdiocb); 9441 return; 9442 } 9443 9444 /** 9445 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN 9446 * @vport: Pointer to virtual port. 9447 * @pring: Pointer to driver SLI ring object. 9448 * @tgt_id: SCSI ID of the target. 9449 * @lun_id: LUN ID of the scsi device. 9450 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 9451 * 9452 * This function sends an abort command for every SCSI command 9453 * associated with the given virtual port pending on the ring 9454 * filtered by lpfc_sli_validate_fcp_iocb function. 9455 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the 9456 * FCP iocbs associated with lun specified by tgt_id and lun_id 9457 * parameters 9458 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the 9459 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 9460 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all 9461 * FCP iocbs associated with virtual port. 9462 * This function returns number of iocbs it failed to abort. 9463 * This function is called with no locks held. 9464 **/ 9465 int 9466 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 9467 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) 9468 { 9469 struct lpfc_hba *phba = vport->phba; 9470 struct lpfc_iocbq *iocbq; 9471 struct lpfc_iocbq *abtsiocb; 9472 IOCB_t *cmd = NULL; 9473 int errcnt = 0, ret_val = 0; 9474 int i; 9475 9476 for (i = 1; i <= phba->sli.last_iotag; i++) { 9477 iocbq = phba->sli.iocbq_lookup[i]; 9478 9479 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 9480 abort_cmd) != 0) 9481 continue; 9482 9483 /* issue ABTS for this IOCB based on iotag */ 9484 abtsiocb = lpfc_sli_get_iocbq(phba); 9485 if (abtsiocb == NULL) { 9486 errcnt++; 9487 continue; 9488 } 9489 9490 cmd = &iocbq->iocb; 9491 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 9492 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 9493 if (phba->sli_rev == LPFC_SLI_REV4) 9494 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag; 9495 else 9496 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 9497 abtsiocb->iocb.ulpLe = 1; 9498 abtsiocb->iocb.ulpClass = cmd->ulpClass; 9499 abtsiocb->vport = phba->pport; 9500 9501 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 9502 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; 9503 if (iocbq->iocb_flag & LPFC_IO_FCP) 9504 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; 9505 9506 if (lpfc_is_link_up(phba)) 9507 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 9508 else 9509 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 9510 9511 /* Setup callback routine and issue the command. 
*/ 9512 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 9513 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, 9514 abtsiocb, 0); 9515 if (ret_val == IOCB_ERROR) { 9516 lpfc_sli_release_iocbq(phba, abtsiocb); 9517 errcnt++; 9518 continue; 9519 } 9520 } 9521 9522 return errcnt; 9523 } 9524 9525 /** 9526 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler 9527 * @phba: Pointer to HBA context object. 9528 * @cmdiocbq: Pointer to command iocb. 9529 * @rspiocbq: Pointer to response iocb. 9530 * 9531 * This function is the completion handler for iocbs issued using 9532 * lpfc_sli_issue_iocb_wait function. This function is called by the 9533 * ring event handler function without any lock held. This function 9534 * can be called from both worker thread context and interrupt 9535 * context. This function also can be called from other thread which 9536 * cleans up the SLI layer objects. 9537 * This function copy the contents of the response iocb to the 9538 * response iocb memory object provided by the caller of 9539 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 9540 * sleeps for the iocb completion. 9541 **/ 9542 static void 9543 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, 9544 struct lpfc_iocbq *cmdiocbq, 9545 struct lpfc_iocbq *rspiocbq) 9546 { 9547 wait_queue_head_t *pdone_q; 9548 unsigned long iflags; 9549 struct lpfc_scsi_buf *lpfc_cmd; 9550 9551 spin_lock_irqsave(&phba->hbalock, iflags); 9552 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 9553 if (cmdiocbq->context2 && rspiocbq) 9554 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 9555 &rspiocbq->iocb, sizeof(IOCB_t)); 9556 9557 /* Set the exchange busy flag for task management commands */ 9558 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && 9559 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { 9560 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf, 9561 cur_iocbq); 9562 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; 9563 } 9564 9565 pdone_q = cmdiocbq->context_un.wait_queue; 9566 if (pdone_q) 9567 wake_up(pdone_q); 9568 spin_unlock_irqrestore(&phba->hbalock, iflags); 9569 return; 9570 } 9571 9572 /** 9573 * lpfc_chk_iocb_flg - Test IOCB flag with lock held. 9574 * @phba: Pointer to HBA context object.. 9575 * @piocbq: Pointer to command iocb. 9576 * @flag: Flag to test. 9577 * 9578 * This routine grabs the hbalock and then test the iocb_flag to 9579 * see if the passed in flag is set. 9580 * Returns: 9581 * 1 if flag is set. 9582 * 0 if flag is not set. 9583 **/ 9584 static int 9585 lpfc_chk_iocb_flg(struct lpfc_hba *phba, 9586 struct lpfc_iocbq *piocbq, uint32_t flag) 9587 { 9588 unsigned long iflags; 9589 int ret; 9590 9591 spin_lock_irqsave(&phba->hbalock, iflags); 9592 ret = piocbq->iocb_flag & flag; 9593 spin_unlock_irqrestore(&phba->hbalock, iflags); 9594 return ret; 9595 9596 } 9597 9598 /** 9599 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands 9600 * @phba: Pointer to HBA context object.. 9601 * @pring: Pointer to sli ring. 9602 * @piocb: Pointer to command iocb. 9603 * @prspiocbq: Pointer to response iocb. 9604 * @timeout: Timeout in number of seconds. 9605 * 9606 * This function issues the iocb to firmware and waits for the 9607 * iocb to complete. If the iocb command is not 9608 * completed within timeout seconds, it returns IOCB_TIMEDOUT. 9609 * Caller should not free the iocb resources if this function 9610 * returns IOCB_TIMEDOUT. 9611 * The function waits for the iocb completion using an 9612 * non-interruptible wait. 
 * This function will sleep while waiting for iocb completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. For the same reason, this function
 * cannot be called with interrupts disabled.
 * This function assumes that the iocb completions occur while
 * this function sleeps. So, this function cannot be called from
 * the thread which processes iocb completion for this ring.
 * This function clears the iocb_flag of the iocb object before
 * issuing the iocb, and the iocb completion handler sets this
 * flag and wakes this thread when the iocb completes.
 * The contents of the response iocb will be copied to prspiocbq
 * by the completion handler when the command completes.
 * This function returns IOCB_SUCCESS on success.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
			 uint32_t ring_number,
			 struct lpfc_iocbq *piocb,
			 struct lpfc_iocbq *prspiocbq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	long timeleft, timeout_req = 0;
	int retval = IOCB_SUCCESS;
	uint32_t creg_val;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	/*
	 * If the caller has provided a response iocbq buffer, then context2
	 * must be NULL or it is an error.
	 */
	if (prspiocbq) {
		if (piocb->context2)
			return IOCB_ERROR;
		piocb->context2 = prspiocbq;
	}

	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
	piocb->context_un.wait_queue = &done_q;
	piocb->iocb_flag &= ~LPFC_IO_WAKE;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
				     SLI_IOCB_RET_IOCB);
	if (retval == IOCB_SUCCESS) {
		timeout_req = timeout * HZ;
		timeleft = wait_event_timeout(done_q,
				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
				timeout_req);

		if (piocb->iocb_flag & LPFC_IO_WAKE) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"0331 IOCB wake signaled\n");
		} else if (timeleft == 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0338 IOCB wait timeout error - no "
					"wake response Data x%x\n", timeout);
			retval = IOCB_TIMEDOUT;
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0330 IOCB wake NOT set, "
					"Data x%x x%lx\n",
					timeout, (timeleft / jiffies));
			retval = IOCB_TIMEDOUT;
		}
	} else if (retval == IOCB_BUSY) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
			phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt);
		return retval;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0332 IOCB wait issue failed, Data x%x\n",
				retval);
		retval = IOCB_ERROR;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (prspiocbq)
		piocb->context2 = NULL;

	piocb->context_un.wait_queue = NULL;
	piocb->iocb_cmpl = NULL;
	return retval;
}

/**
 * lpfc_sli_issue_mbox_wait - 
Synchronous function to issue mailbox 9715 * @phba: Pointer to HBA context object. 9716 * @pmboxq: Pointer to driver mailbox object. 9717 * @timeout: Timeout in number of seconds. 9718 * 9719 * This function issues the mailbox to firmware and waits for the 9720 * mailbox command to complete. If the mailbox command is not 9721 * completed within timeout seconds, it returns MBX_TIMEOUT. 9722 * The function waits for the mailbox completion using an 9723 * interruptible wait. If the thread is woken up due to a 9724 * signal, MBX_TIMEOUT error is returned to the caller. Caller 9725 * should not free the mailbox resources, if this function returns 9726 * MBX_TIMEOUT. 9727 * This function will sleep while waiting for mailbox completion. 9728 * So, this function should not be called from any context which 9729 * does not allow sleeping. Due to the same reason, this function 9730 * cannot be called with interrupt disabled. 9731 * This function assumes that the mailbox completion occurs while 9732 * this function sleep. So, this function cannot be called from 9733 * the worker thread which processes mailbox completion. 9734 * This function is called in the context of HBA management 9735 * applications. 9736 * This function returns MBX_SUCCESS when successful. 9737 * This function is called with no lock held. 9738 **/ 9739 int 9740 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, 9741 uint32_t timeout) 9742 { 9743 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 9744 int retval; 9745 unsigned long flag; 9746 9747 /* The caller must leave context1 empty. */ 9748 if (pmboxq->context1) 9749 return MBX_NOT_FINISHED; 9750 9751 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE; 9752 /* setup wake call as IOCB callback */ 9753 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; 9754 /* setup context field to pass wait_queue pointer to wake function */ 9755 pmboxq->context1 = &done_q; 9756 9757 /* now issue the command */ 9758 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 9759 if (retval == MBX_BUSY || retval == MBX_SUCCESS) { 9760 wait_event_interruptible_timeout(done_q, 9761 pmboxq->mbox_flag & LPFC_MBX_WAKE, 9762 timeout * HZ); 9763 9764 spin_lock_irqsave(&phba->hbalock, flag); 9765 pmboxq->context1 = NULL; 9766 /* 9767 * if LPFC_MBX_WAKE flag is set the mailbox is completed 9768 * else do not free the resources. 9769 */ 9770 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) { 9771 retval = MBX_SUCCESS; 9772 lpfc_sli4_swap_str(phba, pmboxq); 9773 } else { 9774 retval = MBX_TIMEOUT; 9775 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 9776 } 9777 spin_unlock_irqrestore(&phba->hbalock, flag); 9778 } 9779 9780 return retval; 9781 } 9782 9783 /** 9784 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system 9785 * @phba: Pointer to HBA context. 9786 * 9787 * This function is called to shutdown the driver's mailbox sub-system. 9788 * It first marks the mailbox sub-system is in a block state to prevent 9789 * the asynchronous mailbox command from issued off the pending mailbox 9790 * command queue. If the mailbox command sub-system shutdown is due to 9791 * HBA error conditions such as EEH or ERATT, this routine shall invoke 9792 * the mailbox sub-system flush routine to forcefully bring down the 9793 * mailbox sub-system. Otherwise, if it is due to normal condition (such 9794 * as with offline or HBA function reset), this routine will wait for the 9795 * outstanding mailbox command to complete before invoking the mailbox 9796 * sub-system flush routine to gracefully bring down mailbox sub-system. 
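 *
 * Calling sketch, as used by lpfc_sli_hba_down() earlier in this file: the
 * mailbox sub-system is brought down before the rings and iocbs are cleaned
 * up:
 *
 *	lpfc_sli_mbox_sys_shutdown(phba);
 *	lpfc_hba_down_prep(phba);
 *	lpfc_fabric_abort_hba(phba);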
9797 **/ 9798 void 9799 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba) 9800 { 9801 struct lpfc_sli *psli = &phba->sli; 9802 unsigned long timeout; 9803 9804 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 9805 spin_lock_irq(&phba->hbalock); 9806 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 9807 spin_unlock_irq(&phba->hbalock); 9808 9809 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 9810 spin_lock_irq(&phba->hbalock); 9811 /* Determine how long we might wait for the active mailbox 9812 * command to be gracefully completed by firmware. 9813 */ 9814 if (phba->sli.mbox_active) 9815 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 9816 phba->sli.mbox_active) * 9817 1000) + jiffies; 9818 spin_unlock_irq(&phba->hbalock); 9819 9820 while (phba->sli.mbox_active) { 9821 /* Check active mailbox complete status every 2ms */ 9822 msleep(2); 9823 if (time_after(jiffies, timeout)) 9824 /* Timeout, let the mailbox flush routine to 9825 * forcefully release active mailbox command 9826 */ 9827 break; 9828 } 9829 } 9830 lpfc_sli_mbox_sys_flush(phba); 9831 } 9832 9833 /** 9834 * lpfc_sli_eratt_read - read sli-3 error attention events 9835 * @phba: Pointer to HBA context. 9836 * 9837 * This function is called to read the SLI3 device error attention registers 9838 * for possible error attention events. The caller must hold the hostlock 9839 * with spin_lock_irq(). 9840 * 9841 * This function returns 1 when there is Error Attention in the Host Attention 9842 * Register and returns 0 otherwise. 9843 **/ 9844 static int 9845 lpfc_sli_eratt_read(struct lpfc_hba *phba) 9846 { 9847 uint32_t ha_copy; 9848 9849 /* Read chip Host Attention (HA) register */ 9850 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 9851 goto unplug_err; 9852 9853 if (ha_copy & HA_ERATT) { 9854 /* Read host status register to retrieve error event */ 9855 if (lpfc_sli_read_hs(phba)) 9856 goto unplug_err; 9857 9858 /* Check if there is a deferred error condition is active */ 9859 if ((HS_FFER1 & phba->work_hs) && 9860 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 9861 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { 9862 phba->hba_flag |= DEFER_ERATT; 9863 /* Clear all interrupt enable conditions */ 9864 writel(0, phba->HCregaddr); 9865 readl(phba->HCregaddr); 9866 } 9867 9868 /* Set the driver HA work bitmap */ 9869 phba->work_ha |= HA_ERATT; 9870 /* Indicate polling handles this ERATT */ 9871 phba->hba_flag |= HBA_ERATT_HANDLED; 9872 return 1; 9873 } 9874 return 0; 9875 9876 unplug_err: 9877 /* Set the driver HS work bitmap */ 9878 phba->work_hs |= UNPLUG_ERR; 9879 /* Set the driver HA work bitmap */ 9880 phba->work_ha |= HA_ERATT; 9881 /* Indicate polling handles this ERATT */ 9882 phba->hba_flag |= HBA_ERATT_HANDLED; 9883 return 1; 9884 } 9885 9886 /** 9887 * lpfc_sli4_eratt_read - read sli-4 error attention events 9888 * @phba: Pointer to HBA context. 9889 * 9890 * This function is called to read the SLI4 device error attention registers 9891 * for possible error attention events. The caller must hold the hostlock 9892 * with spin_lock_irq(). 9893 * 9894 * This function returns 1 when there is Error Attention in the Host Attention 9895 * Register and returns 0 otherwise. 9896 **/ 9897 static int 9898 lpfc_sli4_eratt_read(struct lpfc_hba *phba) 9899 { 9900 uint32_t uerr_sta_hi, uerr_sta_lo; 9901 uint32_t if_type, portsmphr; 9902 struct lpfc_register portstat_reg; 9903 9904 /* 9905 * For now, use the SLI4 device internal unrecoverable error 9906 * registers for error attention. This can be changed later. 
9907 */ 9908 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 9909 switch (if_type) { 9910 case LPFC_SLI_INTF_IF_TYPE_0: 9911 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, 9912 &uerr_sta_lo) || 9913 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, 9914 &uerr_sta_hi)) { 9915 phba->work_hs |= UNPLUG_ERR; 9916 phba->work_ha |= HA_ERATT; 9917 phba->hba_flag |= HBA_ERATT_HANDLED; 9918 return 1; 9919 } 9920 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 9921 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 9922 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9923 "1423 HBA Unrecoverable error: " 9924 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 9925 "ue_mask_lo_reg=0x%x, " 9926 "ue_mask_hi_reg=0x%x\n", 9927 uerr_sta_lo, uerr_sta_hi, 9928 phba->sli4_hba.ue_mask_lo, 9929 phba->sli4_hba.ue_mask_hi); 9930 phba->work_status[0] = uerr_sta_lo; 9931 phba->work_status[1] = uerr_sta_hi; 9932 phba->work_ha |= HA_ERATT; 9933 phba->hba_flag |= HBA_ERATT_HANDLED; 9934 return 1; 9935 } 9936 break; 9937 case LPFC_SLI_INTF_IF_TYPE_2: 9938 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 9939 &portstat_reg.word0) || 9940 lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 9941 &portsmphr)){ 9942 phba->work_hs |= UNPLUG_ERR; 9943 phba->work_ha |= HA_ERATT; 9944 phba->hba_flag |= HBA_ERATT_HANDLED; 9945 return 1; 9946 } 9947 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { 9948 phba->work_status[0] = 9949 readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 9950 phba->work_status[1] = 9951 readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 9952 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9953 "2885 Port Status Event: " 9954 "port status reg 0x%x, " 9955 "port smphr reg 0x%x, " 9956 "error 1=0x%x, error 2=0x%x\n", 9957 portstat_reg.word0, 9958 portsmphr, 9959 phba->work_status[0], 9960 phba->work_status[1]); 9961 phba->work_ha |= HA_ERATT; 9962 phba->hba_flag |= HBA_ERATT_HANDLED; 9963 return 1; 9964 } 9965 break; 9966 case LPFC_SLI_INTF_IF_TYPE_1: 9967 default: 9968 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9969 "2886 HBA Error Attention on unsupported " 9970 "if type %d.", if_type); 9971 return 1; 9972 } 9973 9974 return 0; 9975 } 9976 9977 /** 9978 * lpfc_sli_check_eratt - check error attention events 9979 * @phba: Pointer to HBA context. 9980 * 9981 * This function is called from timer soft interrupt context to check HBA's 9982 * error attention register bit for error attention events. 9983 * 9984 * This function returns 1 when there is Error Attention in the Host Attention 9985 * Register and returns 0 otherwise. 9986 **/ 9987 int 9988 lpfc_sli_check_eratt(struct lpfc_hba *phba) 9989 { 9990 uint32_t ha_copy; 9991 9992 /* If somebody is waiting to handle an eratt, don't process it 9993 * here. The brdkill function will do this. 
9994 */ 9995 if (phba->link_flag & LS_IGNORE_ERATT) 9996 return 0; 9997 9998 /* Check if interrupt handler handles this ERATT */ 9999 spin_lock_irq(&phba->hbalock); 10000 if (phba->hba_flag & HBA_ERATT_HANDLED) { 10001 /* Interrupt handler has handled ERATT */ 10002 spin_unlock_irq(&phba->hbalock); 10003 return 0; 10004 } 10005 10006 /* 10007 * If there is deferred error attention, do not check for error 10008 * attention 10009 */ 10010 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 10011 spin_unlock_irq(&phba->hbalock); 10012 return 0; 10013 } 10014 10015 /* If PCI channel is offline, don't process it */ 10016 if (unlikely(pci_channel_offline(phba->pcidev))) { 10017 spin_unlock_irq(&phba->hbalock); 10018 return 0; 10019 } 10020 10021 switch (phba->sli_rev) { 10022 case LPFC_SLI_REV2: 10023 case LPFC_SLI_REV3: 10024 /* Read chip Host Attention (HA) register */ 10025 ha_copy = lpfc_sli_eratt_read(phba); 10026 break; 10027 case LPFC_SLI_REV4: 10028 /* Read device Uncoverable Error (UERR) registers */ 10029 ha_copy = lpfc_sli4_eratt_read(phba); 10030 break; 10031 default: 10032 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10033 "0299 Invalid SLI revision (%d)\n", 10034 phba->sli_rev); 10035 ha_copy = 0; 10036 break; 10037 } 10038 spin_unlock_irq(&phba->hbalock); 10039 10040 return ha_copy; 10041 } 10042 10043 /** 10044 * lpfc_intr_state_check - Check device state for interrupt handling 10045 * @phba: Pointer to HBA context. 10046 * 10047 * This inline routine checks whether a device or its PCI slot is in a state 10048 * that the interrupt should be handled. 10049 * 10050 * This function returns 0 if the device or the PCI slot is in a state that 10051 * interrupt should be handled, otherwise -EIO. 10052 */ 10053 static inline int 10054 lpfc_intr_state_check(struct lpfc_hba *phba) 10055 { 10056 /* If the pci channel is offline, ignore all the interrupts */ 10057 if (unlikely(pci_channel_offline(phba->pcidev))) 10058 return -EIO; 10059 10060 /* Update device level interrupt statistics */ 10061 phba->sli.slistat.sli_intr++; 10062 10063 /* Ignore all interrupts during initialization. */ 10064 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 10065 return -EIO; 10066 10067 return 0; 10068 } 10069 10070 /** 10071 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device 10072 * @irq: Interrupt number. 10073 * @dev_id: The device context pointer. 10074 * 10075 * This function is directly called from the PCI layer as an interrupt 10076 * service routine when device with SLI-3 interface spec is enabled with 10077 * MSI-X multi-message interrupt mode and there are slow-path events in 10078 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 10079 * interrupt mode, this function is called as part of the device-level 10080 * interrupt handler. When the PCI slot is in error recovery or the HBA 10081 * is undergoing initialization, the interrupt handler will not process 10082 * the interrupt. The link attention and ELS ring attention events are 10083 * handled by the worker thread. The interrupt handler signals the worker 10084 * thread and returns for these events. This function is called without 10085 * any lock held. It gets the hbalock to access and update SLI data 10086 * structures. 10087 * 10088 * This function returns IRQ_HANDLED when interrupt is handled else it 10089 * returns IRQ_NONE. 
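 *
 * When MSI or Pin-IRQ is used instead of MSI-X, this routine is reached
 * through the device-level handler, roughly as in lpfc_sli_intr_handler()
 * below:
 *
 *	if (status1 || (status2 & HA_RXMASK))
 *		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);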
10090 **/ 10091 irqreturn_t 10092 lpfc_sli_sp_intr_handler(int irq, void *dev_id) 10093 { 10094 struct lpfc_hba *phba; 10095 uint32_t ha_copy, hc_copy; 10096 uint32_t work_ha_copy; 10097 unsigned long status; 10098 unsigned long iflag; 10099 uint32_t control; 10100 10101 MAILBOX_t *mbox, *pmbox; 10102 struct lpfc_vport *vport; 10103 struct lpfc_nodelist *ndlp; 10104 struct lpfc_dmabuf *mp; 10105 LPFC_MBOXQ_t *pmb; 10106 int rc; 10107 10108 /* 10109 * Get the driver's phba structure from the dev_id and 10110 * assume the HBA is not interrupting. 10111 */ 10112 phba = (struct lpfc_hba *)dev_id; 10113 10114 if (unlikely(!phba)) 10115 return IRQ_NONE; 10116 10117 /* 10118 * Stuff needs to be attented to when this function is invoked as an 10119 * individual interrupt handler in MSI-X multi-message interrupt mode 10120 */ 10121 if (phba->intr_type == MSIX) { 10122 /* Check device state for handling interrupt */ 10123 if (lpfc_intr_state_check(phba)) 10124 return IRQ_NONE; 10125 /* Need to read HA REG for slow-path events */ 10126 spin_lock_irqsave(&phba->hbalock, iflag); 10127 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 10128 goto unplug_error; 10129 /* If somebody is waiting to handle an eratt don't process it 10130 * here. The brdkill function will do this. 10131 */ 10132 if (phba->link_flag & LS_IGNORE_ERATT) 10133 ha_copy &= ~HA_ERATT; 10134 /* Check the need for handling ERATT in interrupt handler */ 10135 if (ha_copy & HA_ERATT) { 10136 if (phba->hba_flag & HBA_ERATT_HANDLED) 10137 /* ERATT polling has handled ERATT */ 10138 ha_copy &= ~HA_ERATT; 10139 else 10140 /* Indicate interrupt handler handles ERATT */ 10141 phba->hba_flag |= HBA_ERATT_HANDLED; 10142 } 10143 10144 /* 10145 * If there is deferred error attention, do not check for any 10146 * interrupt. 10147 */ 10148 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 10149 spin_unlock_irqrestore(&phba->hbalock, iflag); 10150 return IRQ_NONE; 10151 } 10152 10153 /* Clear up only attention source related to slow-path */ 10154 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 10155 goto unplug_error; 10156 10157 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | 10158 HC_LAINT_ENA | HC_ERINT_ENA), 10159 phba->HCregaddr); 10160 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 10161 phba->HAregaddr); 10162 writel(hc_copy, phba->HCregaddr); 10163 readl(phba->HAregaddr); /* flush */ 10164 spin_unlock_irqrestore(&phba->hbalock, iflag); 10165 } else 10166 ha_copy = phba->ha_copy; 10167 10168 work_ha_copy = ha_copy & phba->work_ha_mask; 10169 10170 if (work_ha_copy) { 10171 if (work_ha_copy & HA_LATT) { 10172 if (phba->sli.sli_flag & LPFC_PROCESS_LA) { 10173 /* 10174 * Turn off Link Attention interrupts 10175 * until CLEAR_LA done 10176 */ 10177 spin_lock_irqsave(&phba->hbalock, iflag); 10178 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 10179 if (lpfc_readl(phba->HCregaddr, &control)) 10180 goto unplug_error; 10181 control &= ~HC_LAINT_ENA; 10182 writel(control, phba->HCregaddr); 10183 readl(phba->HCregaddr); /* flush */ 10184 spin_unlock_irqrestore(&phba->hbalock, iflag); 10185 } 10186 else 10187 work_ha_copy &= ~HA_LATT; 10188 } 10189 10190 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) { 10191 /* 10192 * Turn off Slow Rings interrupts, LPFC_ELS_RING is 10193 * the only slow ring. 
10194 */ 10195 status = (work_ha_copy & 10196 (HA_RXMASK << (4*LPFC_ELS_RING))); 10197 status >>= (4*LPFC_ELS_RING); 10198 if (status & HA_RXMASK) { 10199 spin_lock_irqsave(&phba->hbalock, iflag); 10200 if (lpfc_readl(phba->HCregaddr, &control)) 10201 goto unplug_error; 10202 10203 lpfc_debugfs_slow_ring_trc(phba, 10204 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", 10205 control, status, 10206 (uint32_t)phba->sli.slistat.sli_intr); 10207 10208 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { 10209 lpfc_debugfs_slow_ring_trc(phba, 10210 "ISR Disable ring:" 10211 "pwork:x%x hawork:x%x wait:x%x", 10212 phba->work_ha, work_ha_copy, 10213 (uint32_t)((unsigned long) 10214 &phba->work_waitq)); 10215 10216 control &= 10217 ~(HC_R0INT_ENA << LPFC_ELS_RING); 10218 writel(control, phba->HCregaddr); 10219 readl(phba->HCregaddr); /* flush */ 10220 } 10221 else { 10222 lpfc_debugfs_slow_ring_trc(phba, 10223 "ISR slow ring: pwork:" 10224 "x%x hawork:x%x wait:x%x", 10225 phba->work_ha, work_ha_copy, 10226 (uint32_t)((unsigned long) 10227 &phba->work_waitq)); 10228 } 10229 spin_unlock_irqrestore(&phba->hbalock, iflag); 10230 } 10231 } 10232 spin_lock_irqsave(&phba->hbalock, iflag); 10233 if (work_ha_copy & HA_ERATT) { 10234 if (lpfc_sli_read_hs(phba)) 10235 goto unplug_error; 10236 /* 10237 * Check if there is a deferred error condition 10238 * is active 10239 */ 10240 if ((HS_FFER1 & phba->work_hs) && 10241 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 10242 HS_FFER6 | HS_FFER7 | HS_FFER8) & 10243 phba->work_hs)) { 10244 phba->hba_flag |= DEFER_ERATT; 10245 /* Clear all interrupt enable conditions */ 10246 writel(0, phba->HCregaddr); 10247 readl(phba->HCregaddr); 10248 } 10249 } 10250 10251 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 10252 pmb = phba->sli.mbox_active; 10253 pmbox = &pmb->u.mb; 10254 mbox = phba->mbox; 10255 vport = pmb->vport; 10256 10257 /* First check out the status word */ 10258 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); 10259 if (pmbox->mbxOwner != OWN_HOST) { 10260 spin_unlock_irqrestore(&phba->hbalock, iflag); 10261 /* 10262 * Stray Mailbox Interrupt, mbxCommand <cmd> 10263 * mbxStatus <status> 10264 */ 10265 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 10266 LOG_SLI, 10267 "(%d):0304 Stray Mailbox " 10268 "Interrupt mbxCommand x%x " 10269 "mbxStatus x%x\n", 10270 (vport ? vport->vpi : 0), 10271 pmbox->mbxCommand, 10272 pmbox->mbxStatus); 10273 /* clear mailbox attention bit */ 10274 work_ha_copy &= ~HA_MBATT; 10275 } else { 10276 phba->sli.mbox_active = NULL; 10277 spin_unlock_irqrestore(&phba->hbalock, iflag); 10278 phba->last_completion_time = jiffies; 10279 del_timer(&phba->sli.mbox_tmo); 10280 if (pmb->mbox_cmpl) { 10281 lpfc_sli_pcimem_bcopy(mbox, pmbox, 10282 MAILBOX_CMD_SIZE); 10283 if (pmb->out_ext_byte_len && 10284 pmb->context2) 10285 lpfc_sli_pcimem_bcopy( 10286 phba->mbox_ext, 10287 pmb->context2, 10288 pmb->out_ext_byte_len); 10289 } 10290 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 10291 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 10292 10293 lpfc_debugfs_disc_trc(vport, 10294 LPFC_DISC_TRC_MBOX_VPORT, 10295 "MBOX dflt rpi: : " 10296 "status:x%x rpi:x%x", 10297 (uint32_t)pmbox->mbxStatus, 10298 pmbox->un.varWords[0], 0); 10299 10300 if (!pmbox->mbxStatus) { 10301 mp = (struct lpfc_dmabuf *) 10302 (pmb->context1); 10303 ndlp = (struct lpfc_nodelist *) 10304 pmb->context2; 10305 10306 /* Reg_LOGIN of dflt RPI was 10307 * successful. new lets get 10308 * rid of the RPI using the 10309 * same mbox buffer. 
10310 */ 10311 lpfc_unreg_login(phba, 10312 vport->vpi, 10313 pmbox->un.varWords[0], 10314 pmb); 10315 pmb->mbox_cmpl = 10316 lpfc_mbx_cmpl_dflt_rpi; 10317 pmb->context1 = mp; 10318 pmb->context2 = ndlp; 10319 pmb->vport = vport; 10320 rc = lpfc_sli_issue_mbox(phba, 10321 pmb, 10322 MBX_NOWAIT); 10323 if (rc != MBX_BUSY) 10324 lpfc_printf_log(phba, 10325 KERN_ERR, 10326 LOG_MBOX | LOG_SLI, 10327 "0350 rc should have" 10328 "been MBX_BUSY\n"); 10329 if (rc != MBX_NOT_FINISHED) 10330 goto send_current_mbox; 10331 } 10332 } 10333 spin_lock_irqsave( 10334 &phba->pport->work_port_lock, 10335 iflag); 10336 phba->pport->work_port_events &= 10337 ~WORKER_MBOX_TMO; 10338 spin_unlock_irqrestore( 10339 &phba->pport->work_port_lock, 10340 iflag); 10341 lpfc_mbox_cmpl_put(phba, pmb); 10342 } 10343 } else 10344 spin_unlock_irqrestore(&phba->hbalock, iflag); 10345 10346 if ((work_ha_copy & HA_MBATT) && 10347 (phba->sli.mbox_active == NULL)) { 10348 send_current_mbox: 10349 /* Process next mailbox command if there is one */ 10350 do { 10351 rc = lpfc_sli_issue_mbox(phba, NULL, 10352 MBX_NOWAIT); 10353 } while (rc == MBX_NOT_FINISHED); 10354 if (rc != MBX_SUCCESS) 10355 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 10356 LOG_SLI, "0349 rc should be " 10357 "MBX_SUCCESS\n"); 10358 } 10359 10360 spin_lock_irqsave(&phba->hbalock, iflag); 10361 phba->work_ha |= work_ha_copy; 10362 spin_unlock_irqrestore(&phba->hbalock, iflag); 10363 lpfc_worker_wake_up(phba); 10364 } 10365 return IRQ_HANDLED; 10366 unplug_error: 10367 spin_unlock_irqrestore(&phba->hbalock, iflag); 10368 return IRQ_HANDLED; 10369 10370 } /* lpfc_sli_sp_intr_handler */ 10371 10372 /** 10373 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device. 10374 * @irq: Interrupt number. 10375 * @dev_id: The device context pointer. 10376 * 10377 * This function is directly called from the PCI layer as an interrupt 10378 * service routine when device with SLI-3 interface spec is enabled with 10379 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 10380 * ring event in the HBA. However, when the device is enabled with either 10381 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 10382 * device-level interrupt handler. When the PCI slot is in error recovery 10383 * or the HBA is undergoing initialization, the interrupt handler will not 10384 * process the interrupt. The SCSI FCP fast-path ring event are handled in 10385 * the intrrupt context. This function is called without any lock held. 10386 * It gets the hbalock to access and update SLI data structures. 10387 * 10388 * This function returns IRQ_HANDLED when interrupt is handled else it 10389 * returns IRQ_NONE. 10390 **/ 10391 irqreturn_t 10392 lpfc_sli_fp_intr_handler(int irq, void *dev_id) 10393 { 10394 struct lpfc_hba *phba; 10395 uint32_t ha_copy; 10396 unsigned long status; 10397 unsigned long iflag; 10398 10399 /* Get the driver's phba structure from the dev_id and 10400 * assume the HBA is not interrupting. 
10401 */ 10402 phba = (struct lpfc_hba *) dev_id; 10403 10404 if (unlikely(!phba)) 10405 return IRQ_NONE; 10406 10407 /* 10408 * Stuff needs to be attented to when this function is invoked as an 10409 * individual interrupt handler in MSI-X multi-message interrupt mode 10410 */ 10411 if (phba->intr_type == MSIX) { 10412 /* Check device state for handling interrupt */ 10413 if (lpfc_intr_state_check(phba)) 10414 return IRQ_NONE; 10415 /* Need to read HA REG for FCP ring and other ring events */ 10416 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 10417 return IRQ_HANDLED; 10418 /* Clear up only attention source related to fast-path */ 10419 spin_lock_irqsave(&phba->hbalock, iflag); 10420 /* 10421 * If there is deferred error attention, do not check for 10422 * any interrupt. 10423 */ 10424 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 10425 spin_unlock_irqrestore(&phba->hbalock, iflag); 10426 return IRQ_NONE; 10427 } 10428 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 10429 phba->HAregaddr); 10430 readl(phba->HAregaddr); /* flush */ 10431 spin_unlock_irqrestore(&phba->hbalock, iflag); 10432 } else 10433 ha_copy = phba->ha_copy; 10434 10435 /* 10436 * Process all events on FCP ring. Take the optimized path for FCP IO. 10437 */ 10438 ha_copy &= ~(phba->work_ha_mask); 10439 10440 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 10441 status >>= (4*LPFC_FCP_RING); 10442 if (status & HA_RXMASK) 10443 lpfc_sli_handle_fast_ring_event(phba, 10444 &phba->sli.ring[LPFC_FCP_RING], 10445 status); 10446 10447 if (phba->cfg_multi_ring_support == 2) { 10448 /* 10449 * Process all events on extra ring. Take the optimized path 10450 * for extra ring IO. 10451 */ 10452 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 10453 status >>= (4*LPFC_EXTRA_RING); 10454 if (status & HA_RXMASK) { 10455 lpfc_sli_handle_fast_ring_event(phba, 10456 &phba->sli.ring[LPFC_EXTRA_RING], 10457 status); 10458 } 10459 } 10460 return IRQ_HANDLED; 10461 } /* lpfc_sli_fp_intr_handler */ 10462 10463 /** 10464 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device 10465 * @irq: Interrupt number. 10466 * @dev_id: The device context pointer. 10467 * 10468 * This function is the HBA device-level interrupt handler to device with 10469 * SLI-3 interface spec, called from the PCI layer when either MSI or 10470 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which 10471 * requires driver attention. This function invokes the slow-path interrupt 10472 * attention handling function and fast-path interrupt attention handling 10473 * function in turn to process the relevant HBA attention events. This 10474 * function is called without any lock held. It gets the hbalock to access 10475 * and update SLI data structures. 10476 * 10477 * This function returns IRQ_HANDLED when interrupt is handled, else it 10478 * returns IRQ_NONE. 10479 **/ 10480 irqreturn_t 10481 lpfc_sli_intr_handler(int irq, void *dev_id) 10482 { 10483 struct lpfc_hba *phba; 10484 irqreturn_t sp_irq_rc, fp_irq_rc; 10485 unsigned long status1, status2; 10486 uint32_t hc_copy; 10487 10488 /* 10489 * Get the driver's phba structure from the dev_id and 10490 * assume the HBA is not interrupting. 
10491 */ 10492 phba = (struct lpfc_hba *) dev_id; 10493 10494 if (unlikely(!phba)) 10495 return IRQ_NONE; 10496 10497 /* Check device state for handling interrupt */ 10498 if (lpfc_intr_state_check(phba)) 10499 return IRQ_NONE; 10500 10501 spin_lock(&phba->hbalock); 10502 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { 10503 spin_unlock(&phba->hbalock); 10504 return IRQ_HANDLED; 10505 } 10506 10507 if (unlikely(!phba->ha_copy)) { 10508 spin_unlock(&phba->hbalock); 10509 return IRQ_NONE; 10510 } else if (phba->ha_copy & HA_ERATT) { 10511 if (phba->hba_flag & HBA_ERATT_HANDLED) 10512 /* ERATT polling has handled ERATT */ 10513 phba->ha_copy &= ~HA_ERATT; 10514 else 10515 /* Indicate interrupt handler handles ERATT */ 10516 phba->hba_flag |= HBA_ERATT_HANDLED; 10517 } 10518 10519 /* 10520 * If there is deferred error attention, do not check for any interrupt. 10521 */ 10522 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 10523 spin_unlock(&phba->hbalock); 10524 return IRQ_NONE; 10525 } 10526 10527 /* Clear attention sources except link and error attentions */ 10528 if (lpfc_readl(phba->HCregaddr, &hc_copy)) { 10529 spin_unlock(&phba->hbalock); 10530 return IRQ_HANDLED; 10531 } 10532 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA 10533 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), 10534 phba->HCregaddr); 10535 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 10536 writel(hc_copy, phba->HCregaddr); 10537 readl(phba->HAregaddr); /* flush */ 10538 spin_unlock(&phba->hbalock); 10539 10540 /* 10541 * Invokes slow-path host attention interrupt handling as appropriate. 10542 */ 10543 10544 /* status of events with mailbox and link attention */ 10545 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); 10546 10547 /* status of events with ELS ring */ 10548 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 10549 status2 >>= (4*LPFC_ELS_RING); 10550 10551 if (status1 || (status2 & HA_RXMASK)) 10552 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); 10553 else 10554 sp_irq_rc = IRQ_NONE; 10555 10556 /* 10557 * Invoke fast-path host attention interrupt handling as appropriate. 10558 */ 10559 10560 /* status of events with FCP ring */ 10561 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 10562 status1 >>= (4*LPFC_FCP_RING); 10563 10564 /* status of events with extra ring */ 10565 if (phba->cfg_multi_ring_support == 2) { 10566 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 10567 status2 >>= (4*LPFC_EXTRA_RING); 10568 } else 10569 status2 = 0; 10570 10571 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 10572 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); 10573 else 10574 fp_irq_rc = IRQ_NONE; 10575 10576 /* Return device-level interrupt handling status */ 10577 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 10578 } /* lpfc_sli_intr_handler */ 10579 10580 /** 10581 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event 10582 * @phba: pointer to lpfc hba data structure. 10583 * 10584 * This routine is invoked by the worker thread to process all the pending 10585 * SLI4 FCP abort XRI events. 
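 *
 * Example (illustrative sketch only, not driver code): the worker-thread
 * dispatcher that calls this routine is outside this excerpt, but a caller
 * would typically test the flags set by lpfc_sli4_sp_handle_abort_xri_wcqe()
 * before draining the corresponding lists:
 *
 *      if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
 *              lpfc_sli4_fcp_xri_abort_event_proc(phba);
 *      if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
 *              lpfc_sli4_els_xri_abort_event_proc(phba);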
10586 **/ 10587 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba) 10588 { 10589 struct lpfc_cq_event *cq_event; 10590 10591 /* First, declare the fcp xri abort event has been handled */ 10592 spin_lock_irq(&phba->hbalock); 10593 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT; 10594 spin_unlock_irq(&phba->hbalock); 10595 /* Now, handle all the fcp xri abort events */ 10596 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) { 10597 /* Get the first event from the head of the event queue */ 10598 spin_lock_irq(&phba->hbalock); 10599 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 10600 cq_event, struct lpfc_cq_event, list); 10601 spin_unlock_irq(&phba->hbalock); 10602 /* Notify aborted XRI for FCP work queue */ 10603 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 10604 /* Free the event processed back to the free pool */ 10605 lpfc_sli4_cq_event_release(phba, cq_event); 10606 } 10607 } 10608 10609 /** 10610 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event 10611 * @phba: pointer to lpfc hba data structure. 10612 * 10613 * This routine is invoked by the worker thread to process all the pending 10614 * SLI4 els abort xri events. 10615 **/ 10616 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) 10617 { 10618 struct lpfc_cq_event *cq_event; 10619 10620 /* First, declare the els xri abort event has been handled */ 10621 spin_lock_irq(&phba->hbalock); 10622 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; 10623 spin_unlock_irq(&phba->hbalock); 10624 /* Now, handle all the els xri abort events */ 10625 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { 10626 /* Get the first event from the head of the event queue */ 10627 spin_lock_irq(&phba->hbalock); 10628 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 10629 cq_event, struct lpfc_cq_event, list); 10630 spin_unlock_irq(&phba->hbalock); 10631 /* Notify aborted XRI for ELS work queue */ 10632 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 10633 /* Free the event processed back to the free pool */ 10634 lpfc_sli4_cq_event_release(phba, cq_event); 10635 } 10636 } 10637 10638 /** 10639 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn 10640 * @phba: pointer to lpfc hba data structure 10641 * @pIocbIn: pointer to the rspiocbq 10642 * @pIocbOut: pointer to the cmdiocbq 10643 * @wcqe: pointer to the complete wcqe 10644 * 10645 * This routine transfers the fields of a command iocbq to a response iocbq 10646 * by copying all the IOCB fields from command iocbq and transferring the 10647 * completion status information from the complete wcqe. 
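 *
 * The copy deliberately starts at the iocb member so that the list and
 * cq_event bookkeeping fields at the front of the response iocbq are left
 * untouched. A minimal sketch of the idiom (condensed from the body below):
 *
 *      size_t offset = offsetof(struct lpfc_iocbq, iocb);
 *
 *      memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
 *             sizeof(struct lpfc_iocbq) - offset);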
10648 **/
10649 static void
10650 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
10651 struct lpfc_iocbq *pIocbIn,
10652 struct lpfc_iocbq *pIocbOut,
10653 struct lpfc_wcqe_complete *wcqe)
10654 {
10655 unsigned long iflags;
10656 size_t offset = offsetof(struct lpfc_iocbq, iocb);
10657
10658 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
10659 sizeof(struct lpfc_iocbq) - offset);
10660 /* Map WCQE parameters into irspiocb parameters */
10661 pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
10662 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
10663 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
10664 pIocbIn->iocb.un.fcpi.fcpi_parm =
10665 pIocbOut->iocb.un.fcpi.fcpi_parm -
10666 wcqe->total_data_placed;
10667 else
10668 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
10669 else {
10670 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
10671 pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
10672 }
10673
10674 /* Pick up HBA exchange busy condition */
10675 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
10676 spin_lock_irqsave(&phba->hbalock, iflags);
10677 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
10678 spin_unlock_irqrestore(&phba->hbalock, iflags);
10679 }
10680 }
10681
10682 /**
10683 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
10684 * @phba: Pointer to HBA context object.
10685 * @wcqe: Pointer to work-queue completion queue entry.
10686 *
10687 * This routine handles an ELS work-queue completion event and constructs
10688 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
10689 * discovery engine to handle.
10690 *
10691 * Return: Pointer to the receive IOCBQ, NULL otherwise.
10692 **/
10693 static struct lpfc_iocbq *
10694 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
10695 struct lpfc_iocbq *irspiocbq)
10696 {
10697 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
10698 struct lpfc_iocbq *cmdiocbq;
10699 struct lpfc_wcqe_complete *wcqe;
10700 unsigned long iflags;
10701
10702 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
10703 spin_lock_irqsave(&phba->hbalock, iflags);
10704 pring->stats.iocb_event++;
10705 /* Look up the ELS command IOCB and create pseudo response IOCB */
10706 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
10707 bf_get(lpfc_wcqe_c_request_tag, wcqe));
10708 spin_unlock_irqrestore(&phba->hbalock, iflags);
10709
10710 if (unlikely(!cmdiocbq)) {
10711 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10712 "0386 ELS complete with no corresponding "
10713 "cmdiocb: iotag (%d)\n",
10714 bf_get(lpfc_wcqe_c_request_tag, wcqe));
10715 lpfc_sli_release_iocbq(phba, irspiocbq);
10716 return NULL;
10717 }
10718
10719 /* Fake the irspiocbq and copy necessary response information */
10720 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
10721
10722 return irspiocbq;
10723 }
10724
10725 /**
10726 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
10727 * @phba: Pointer to HBA context object.
10728 * @cqe: Pointer to mailbox completion queue entry.
10729 *
10730 * This routine processes a mailbox completion queue entry with an
10731 * asynchronous event.
10732 *
10733 * Return: true if work posted to worker thread, otherwise false.
10734 **/ 10735 static bool 10736 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 10737 { 10738 struct lpfc_cq_event *cq_event; 10739 unsigned long iflags; 10740 10741 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10742 "0392 Async Event: word0:x%x, word1:x%x, " 10743 "word2:x%x, word3:x%x\n", mcqe->word0, 10744 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); 10745 10746 /* Allocate a new internal CQ_EVENT entry */ 10747 cq_event = lpfc_sli4_cq_event_alloc(phba); 10748 if (!cq_event) { 10749 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10750 "0394 Failed to allocate CQ_EVENT entry\n"); 10751 return false; 10752 } 10753 10754 /* Move the CQE into an asynchronous event entry */ 10755 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe)); 10756 spin_lock_irqsave(&phba->hbalock, iflags); 10757 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); 10758 /* Set the async event flag */ 10759 phba->hba_flag |= ASYNC_EVENT; 10760 spin_unlock_irqrestore(&phba->hbalock, iflags); 10761 10762 return true; 10763 } 10764 10765 /** 10766 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event 10767 * @phba: Pointer to HBA context object. 10768 * @cqe: Pointer to mailbox completion queue entry. 10769 * 10770 * This routine process a mailbox completion queue entry with mailbox 10771 * completion event. 10772 * 10773 * Return: true if work posted to worker thread, otherwise false. 10774 **/ 10775 static bool 10776 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 10777 { 10778 uint32_t mcqe_status; 10779 MAILBOX_t *mbox, *pmbox; 10780 struct lpfc_mqe *mqe; 10781 struct lpfc_vport *vport; 10782 struct lpfc_nodelist *ndlp; 10783 struct lpfc_dmabuf *mp; 10784 unsigned long iflags; 10785 LPFC_MBOXQ_t *pmb; 10786 bool workposted = false; 10787 int rc; 10788 10789 /* If not a mailbox complete MCQE, out by checking mailbox consume */ 10790 if (!bf_get(lpfc_trailer_completed, mcqe)) 10791 goto out_no_mqe_complete; 10792 10793 /* Get the reference to the active mbox command */ 10794 spin_lock_irqsave(&phba->hbalock, iflags); 10795 pmb = phba->sli.mbox_active; 10796 if (unlikely(!pmb)) { 10797 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 10798 "1832 No pending MBOX command to handle\n"); 10799 spin_unlock_irqrestore(&phba->hbalock, iflags); 10800 goto out_no_mqe_complete; 10801 } 10802 spin_unlock_irqrestore(&phba->hbalock, iflags); 10803 mqe = &pmb->u.mqe; 10804 pmbox = (MAILBOX_t *)&pmb->u.mqe; 10805 mbox = phba->mbox; 10806 vport = pmb->vport; 10807 10808 /* Reset heartbeat timer */ 10809 phba->last_completion_time = jiffies; 10810 del_timer(&phba->sli.mbox_tmo); 10811 10812 /* Move mbox data to caller's mailbox region, do endian swapping */ 10813 if (pmb->mbox_cmpl && mbox) 10814 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe)); 10815 10816 /* 10817 * For mcqe errors, conditionally move a modified error code to 10818 * the mbox so that the error will not be missed. 
10819 */ 10820 mcqe_status = bf_get(lpfc_mcqe_status, mcqe); 10821 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 10822 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS) 10823 bf_set(lpfc_mqe_status, mqe, 10824 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 10825 } 10826 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 10827 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 10828 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT, 10829 "MBOX dflt rpi: status:x%x rpi:x%x", 10830 mcqe_status, 10831 pmbox->un.varWords[0], 0); 10832 if (mcqe_status == MB_CQE_STATUS_SUCCESS) { 10833 mp = (struct lpfc_dmabuf *)(pmb->context1); 10834 ndlp = (struct lpfc_nodelist *)pmb->context2; 10835 /* Reg_LOGIN of dflt RPI was successful. Now lets get 10836 * RID of the PPI using the same mbox buffer. 10837 */ 10838 lpfc_unreg_login(phba, vport->vpi, 10839 pmbox->un.varWords[0], pmb); 10840 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 10841 pmb->context1 = mp; 10842 pmb->context2 = ndlp; 10843 pmb->vport = vport; 10844 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 10845 if (rc != MBX_BUSY) 10846 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 10847 LOG_SLI, "0385 rc should " 10848 "have been MBX_BUSY\n"); 10849 if (rc != MBX_NOT_FINISHED) 10850 goto send_current_mbox; 10851 } 10852 } 10853 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 10854 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 10855 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 10856 10857 /* There is mailbox completion work to do */ 10858 spin_lock_irqsave(&phba->hbalock, iflags); 10859 __lpfc_mbox_cmpl_put(phba, pmb); 10860 phba->work_ha |= HA_MBATT; 10861 spin_unlock_irqrestore(&phba->hbalock, iflags); 10862 workposted = true; 10863 10864 send_current_mbox: 10865 spin_lock_irqsave(&phba->hbalock, iflags); 10866 /* Release the mailbox command posting token */ 10867 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 10868 /* Setting active mailbox pointer need to be in sync to flag clear */ 10869 phba->sli.mbox_active = NULL; 10870 spin_unlock_irqrestore(&phba->hbalock, iflags); 10871 /* Wake up worker thread to post the next pending mailbox command */ 10872 lpfc_worker_wake_up(phba); 10873 out_no_mqe_complete: 10874 if (bf_get(lpfc_trailer_consumed, mcqe)) 10875 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); 10876 return workposted; 10877 } 10878 10879 /** 10880 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry 10881 * @phba: Pointer to HBA context object. 10882 * @cqe: Pointer to mailbox completion queue entry. 10883 * 10884 * This routine process a mailbox completion queue entry, it invokes the 10885 * proper mailbox complete handling or asynchrous event handling routine 10886 * according to the MCQE's async bit. 10887 * 10888 * Return: true if work posted to worker thread, otherwise false. 10889 **/ 10890 static bool 10891 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) 10892 { 10893 struct lpfc_mcqe mcqe; 10894 bool workposted; 10895 10896 /* Copy the mailbox MCQE and convert endian order as needed */ 10897 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); 10898 10899 /* Invoke the proper event handling routine */ 10900 if (!bf_get(lpfc_trailer_async, &mcqe)) 10901 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe); 10902 else 10903 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe); 10904 return workposted; 10905 } 10906 10907 /** 10908 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event 10909 * @phba: Pointer to HBA context object. 
10910 * @wcqe: Pointer to work-queue completion queue entry.
10911 *
10912 * This routine handles an ELS work-queue completion event.
10913 *
10914 * Return: true if work posted to worker thread, otherwise false.
10915 **/
10916 static bool
10917 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
10918 struct lpfc_wcqe_complete *wcqe)
10919 {
10920 struct lpfc_iocbq *irspiocbq;
10921 unsigned long iflags;
10922 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
10923
10924 /* Get an irspiocbq for later ELS response processing use */
10925 irspiocbq = lpfc_sli_get_iocbq(phba);
10926 if (!irspiocbq) {
10927 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10928 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
10929 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
10930 pring->txq_cnt, phba->iocb_cnt,
10931 phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt,
10932 phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt);
10933 return false;
10934 }
10935
10936 /* Save off the slow-path queue event for the worker thread to process */
10937 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
10938 spin_lock_irqsave(&phba->hbalock, iflags);
10939 list_add_tail(&irspiocbq->cq_event.list,
10940 &phba->sli4_hba.sp_queue_event);
10941 phba->hba_flag |= HBA_SP_QUEUE_EVT;
10942 spin_unlock_irqrestore(&phba->hbalock, iflags);
10943
10944 return true;
10945 }
10946
10947 /**
10948 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
10949 * @phba: Pointer to HBA context object.
10950 * @wcqe: Pointer to work-queue completion queue entry.
10951 *
10952 * This routine handles a slow-path WQ entry consumed event by invoking the
10953 * proper WQ release routine to the slow-path WQ.
10954 **/
10955 static void
10956 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
10957 struct lpfc_wcqe_release *wcqe)
10958 {
10959 /* sanity check on queue memory */
10960 if (unlikely(!phba->sli4_hba.els_wq))
10961 return;
10962 /* Check for the slow-path ELS work queue */
10963 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
10964 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
10965 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
10966 else
10967 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10968 "2579 Slow-path wqe consume event carries "
10969 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
10970 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
10971 phba->sli4_hba.els_wq->queue_id);
10972 }
10973
10974 /**
10975 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an XRI abort event
10976 * @phba: Pointer to HBA context object.
10977 * @cq: Pointer to a WQ completion queue.
10978 * @wcqe: Pointer to work-queue completion queue entry.
10979 *
10980 * This routine handles an XRI abort event.
10981 *
10982 * Return: true if work posted to worker thread, otherwise false.
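 *
 * Note: only the copy of the CQE and the list insertion happen here in
 * interrupt context. The FCP_XRI_ABORT_EVENT/ELS_XRI_ABORT_EVENT flags and
 * the sp_fcp_xri_aborted_work_queue/sp_els_xri_aborted_work_queue lists that
 * this routine posts to are drained later, in process context, by
 * lpfc_sli4_fcp_xri_abort_event_proc() and
 * lpfc_sli4_els_xri_abort_event_proc().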
10983 **/ 10984 static bool 10985 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, 10986 struct lpfc_queue *cq, 10987 struct sli4_wcqe_xri_aborted *wcqe) 10988 { 10989 bool workposted = false; 10990 struct lpfc_cq_event *cq_event; 10991 unsigned long iflags; 10992 10993 /* Allocate a new internal CQ_EVENT entry */ 10994 cq_event = lpfc_sli4_cq_event_alloc(phba); 10995 if (!cq_event) { 10996 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10997 "0602 Failed to allocate CQ_EVENT entry\n"); 10998 return false; 10999 } 11000 11001 /* Move the CQE into the proper xri abort event list */ 11002 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); 11003 switch (cq->subtype) { 11004 case LPFC_FCP: 11005 spin_lock_irqsave(&phba->hbalock, iflags); 11006 list_add_tail(&cq_event->list, 11007 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 11008 /* Set the fcp xri abort event flag */ 11009 phba->hba_flag |= FCP_XRI_ABORT_EVENT; 11010 spin_unlock_irqrestore(&phba->hbalock, iflags); 11011 workposted = true; 11012 break; 11013 case LPFC_ELS: 11014 spin_lock_irqsave(&phba->hbalock, iflags); 11015 list_add_tail(&cq_event->list, 11016 &phba->sli4_hba.sp_els_xri_aborted_work_queue); 11017 /* Set the els xri abort event flag */ 11018 phba->hba_flag |= ELS_XRI_ABORT_EVENT; 11019 spin_unlock_irqrestore(&phba->hbalock, iflags); 11020 workposted = true; 11021 break; 11022 default: 11023 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11024 "0603 Invalid work queue CQE subtype (x%x)\n", 11025 cq->subtype); 11026 workposted = false; 11027 break; 11028 } 11029 return workposted; 11030 } 11031 11032 /** 11033 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry 11034 * @phba: Pointer to HBA context object. 11035 * @rcqe: Pointer to receive-queue completion queue entry. 11036 * 11037 * This routine process a receive-queue completion queue entry. 11038 * 11039 * Return: true if work posted to worker thread, otherwise false. 
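 *
 * Two details worth noting from the body below: the RQ id field moved in the
 * V1 receive CQE, so the id is read with a code-dependent bf_get() before it
 * is compared against hrq->queue_id; and on FC_STATUS_RQ_BUF_LEN_EXCEEDED the
 * routine logs the truncation and then falls through to the
 * FC_STATUS_RQ_SUCCESS handling, so the posted buffer is still reclaimed and
 * the (truncated) frame is still queued to the worker thread.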
11040 **/
11041 static bool
11042 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
11043 {
11044 bool workposted = false;
11045 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
11046 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
11047 struct hbq_dmabuf *dma_buf;
11048 uint32_t status, rq_id;
11049 unsigned long iflags;
11050
11051 /* sanity check on queue memory */
11052 if (unlikely(!hrq) || unlikely(!drq))
11053 return workposted;
11054
11055 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
11056 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
11057 else
11058 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
11059 if (rq_id != hrq->queue_id)
11060 goto out;
11061
11062 status = bf_get(lpfc_rcqe_status, rcqe);
11063 switch (status) {
11064 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
11065 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11066 "2537 Receive Frame Truncated!!\n");
11067 case FC_STATUS_RQ_SUCCESS:
11068 lpfc_sli4_rq_release(hrq, drq);
11069 spin_lock_irqsave(&phba->hbalock, iflags);
11070 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
11071 if (!dma_buf) {
11072 spin_unlock_irqrestore(&phba->hbalock, iflags);
11073 goto out;
11074 }
11075 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
11076 /* save off the frame for the worker thread to process */
11077 list_add_tail(&dma_buf->cq_event.list,
11078 &phba->sli4_hba.sp_queue_event);
11079 /* Frame received */
11080 phba->hba_flag |= HBA_SP_QUEUE_EVT;
11081 spin_unlock_irqrestore(&phba->hbalock, iflags);
11082 workposted = true;
11083 break;
11084 case FC_STATUS_INSUFF_BUF_NEED_BUF:
11085 case FC_STATUS_INSUFF_BUF_FRM_DISC:
11086 /* Post more buffers if possible */
11087 spin_lock_irqsave(&phba->hbalock, iflags);
11088 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
11089 spin_unlock_irqrestore(&phba->hbalock, iflags);
11090 workposted = true;
11091 break;
11092 }
11093 out:
11094 return workposted;
11095 }
11096
11097 /**
11098 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
11099 * @phba: Pointer to HBA context object.
11100 * @cq: Pointer to the completion queue.
11101 * @cqe: Pointer to a completion queue entry.
11102 *
11103 * This routine processes a slow-path work-queue or receive-queue completion
11104 * queue entry.
11105 *
11106 * Return: true if work posted to worker thread, otherwise false.
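 *
 * Note: the caller passes the raw queue memory; this routine first copies the
 * entry into a local struct lpfc_cqe with lpfc_sli_pcimem_bcopy() (converting
 * endian order as needed) and only then inspects the CQE code, so the
 * dispatch below never reads the DMA-visible entry directly.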
11107 **/ 11108 static bool 11109 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 11110 struct lpfc_cqe *cqe) 11111 { 11112 struct lpfc_cqe cqevt; 11113 bool workposted = false; 11114 11115 /* Copy the work queue CQE and convert endian order if needed */ 11116 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe)); 11117 11118 /* Check and process for different type of WCQE and dispatch */ 11119 switch (bf_get(lpfc_cqe_code, &cqevt)) { 11120 case CQE_CODE_COMPL_WQE: 11121 /* Process the WQ/RQ complete event */ 11122 phba->last_completion_time = jiffies; 11123 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, 11124 (struct lpfc_wcqe_complete *)&cqevt); 11125 break; 11126 case CQE_CODE_RELEASE_WQE: 11127 /* Process the WQ release event */ 11128 lpfc_sli4_sp_handle_rel_wcqe(phba, 11129 (struct lpfc_wcqe_release *)&cqevt); 11130 break; 11131 case CQE_CODE_XRI_ABORTED: 11132 /* Process the WQ XRI abort event */ 11133 phba->last_completion_time = jiffies; 11134 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 11135 (struct sli4_wcqe_xri_aborted *)&cqevt); 11136 break; 11137 case CQE_CODE_RECEIVE: 11138 case CQE_CODE_RECEIVE_V1: 11139 /* Process the RQ event */ 11140 phba->last_completion_time = jiffies; 11141 workposted = lpfc_sli4_sp_handle_rcqe(phba, 11142 (struct lpfc_rcqe *)&cqevt); 11143 break; 11144 default: 11145 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11146 "0388 Not a valid WCQE code: x%x\n", 11147 bf_get(lpfc_cqe_code, &cqevt)); 11148 break; 11149 } 11150 return workposted; 11151 } 11152 11153 /** 11154 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry 11155 * @phba: Pointer to HBA context object. 11156 * @eqe: Pointer to fast-path event queue entry. 11157 * 11158 * This routine process a event queue entry from the slow-path event queue. 11159 * It will check the MajorCode and MinorCode to determine this is for a 11160 * completion event on a completion queue, if not, an error shall be logged 11161 * and just return. Otherwise, it will get to the corresponding completion 11162 * queue and process all the entries on that completion queue, rearm the 11163 * completion queue, and then return. 
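 *
 * A condensed sketch of the per-CQ polling pattern used below (illustrative
 * only; the real loop also dispatches on cq->type and cq->subtype):
 *
 *      while ((cqe = lpfc_sli4_cq_get(cq))) {
 *              workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe);
 *              if (!(++ecount % cq->entry_repost))
 *                      lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
 *      }
 *      lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
 *      if (workposted)
 *              lpfc_worker_wake_up(phba);
 *
 * Releasing with LPFC_QUEUE_NOARM every entry_repost entries hands the
 * consumed entries back to the HBA without raising a new interrupt; only the
 * final release re-arms the CQ, and the worker thread is woken at most once
 * per pass.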
11164 * 11165 **/ 11166 static void 11167 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) 11168 { 11169 struct lpfc_queue *cq = NULL, *childq, *speq; 11170 struct lpfc_cqe *cqe; 11171 bool workposted = false; 11172 int ecount = 0; 11173 uint16_t cqid; 11174 11175 if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) { 11176 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11177 "0359 Not a valid slow-path completion " 11178 "event: majorcode=x%x, minorcode=x%x\n", 11179 bf_get_le32(lpfc_eqe_major_code, eqe), 11180 bf_get_le32(lpfc_eqe_minor_code, eqe)); 11181 return; 11182 } 11183 11184 /* Get the reference to the corresponding CQ */ 11185 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 11186 11187 /* Search for completion queue pointer matching this cqid */ 11188 speq = phba->sli4_hba.sp_eq; 11189 /* sanity check on queue memory */ 11190 if (unlikely(!speq)) 11191 return; 11192 list_for_each_entry(childq, &speq->child_list, list) { 11193 if (childq->queue_id == cqid) { 11194 cq = childq; 11195 break; 11196 } 11197 } 11198 if (unlikely(!cq)) { 11199 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 11200 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11201 "0365 Slow-path CQ identifier " 11202 "(%d) does not exist\n", cqid); 11203 return; 11204 } 11205 11206 /* Process all the entries to the CQ */ 11207 switch (cq->type) { 11208 case LPFC_MCQ: 11209 while ((cqe = lpfc_sli4_cq_get(cq))) { 11210 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); 11211 if (!(++ecount % cq->entry_repost)) 11212 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 11213 } 11214 break; 11215 case LPFC_WCQ: 11216 while ((cqe = lpfc_sli4_cq_get(cq))) { 11217 if (cq->subtype == LPFC_FCP) 11218 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, 11219 cqe); 11220 else 11221 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, 11222 cqe); 11223 if (!(++ecount % cq->entry_repost)) 11224 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 11225 } 11226 break; 11227 default: 11228 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11229 "0370 Invalid completion queue type (%d)\n", 11230 cq->type); 11231 return; 11232 } 11233 11234 /* Catch the no cq entry condition, log an error */ 11235 if (unlikely(ecount == 0)) 11236 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11237 "0371 No entry from the CQ: identifier " 11238 "(x%x), type (%d)\n", cq->queue_id, cq->type); 11239 11240 /* In any case, flash and re-arm the RCQ */ 11241 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 11242 11243 /* wake up worker thread if there are works to be done */ 11244 if (workposted) 11245 lpfc_worker_wake_up(phba); 11246 } 11247 11248 /** 11249 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry 11250 * @eqe: Pointer to fast-path completion queue entry. 11251 * 11252 * This routine process a fast-path work queue completion entry from fast-path 11253 * event queue for FCP command response completion. 11254 **/ 11255 static void 11256 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, 11257 struct lpfc_wcqe_complete *wcqe) 11258 { 11259 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING]; 11260 struct lpfc_iocbq *cmdiocbq; 11261 struct lpfc_iocbq irspiocbq; 11262 unsigned long iflags; 11263 11264 spin_lock_irqsave(&phba->hbalock, iflags); 11265 pring->stats.iocb_event++; 11266 spin_unlock_irqrestore(&phba->hbalock, iflags); 11267 11268 /* Check for response status */ 11269 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 11270 /* If resource errors reported from HBA, reduce queue 11271 * depth of the SCSI device. 
11272 */ 11273 if ((bf_get(lpfc_wcqe_c_status, wcqe) == 11274 IOSTAT_LOCAL_REJECT) && 11275 (wcqe->parameter == IOERR_NO_RESOURCES)) { 11276 phba->lpfc_rampdown_queue_depth(phba); 11277 } 11278 /* Log the error status */ 11279 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11280 "0373 FCP complete error: status=x%x, " 11281 "hw_status=x%x, total_data_specified=%d, " 11282 "parameter=x%x, word3=x%x\n", 11283 bf_get(lpfc_wcqe_c_status, wcqe), 11284 bf_get(lpfc_wcqe_c_hw_status, wcqe), 11285 wcqe->total_data_placed, wcqe->parameter, 11286 wcqe->word3); 11287 } 11288 11289 /* Look up the FCP command IOCB and create pseudo response IOCB */ 11290 spin_lock_irqsave(&phba->hbalock, iflags); 11291 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 11292 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 11293 spin_unlock_irqrestore(&phba->hbalock, iflags); 11294 if (unlikely(!cmdiocbq)) { 11295 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11296 "0374 FCP complete with no corresponding " 11297 "cmdiocb: iotag (%d)\n", 11298 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 11299 return; 11300 } 11301 if (unlikely(!cmdiocbq->iocb_cmpl)) { 11302 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11303 "0375 FCP cmdiocb not callback function " 11304 "iotag: (%d)\n", 11305 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 11306 return; 11307 } 11308 11309 /* Fake the irspiocb and copy necessary response information */ 11310 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); 11311 11312 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 11313 spin_lock_irqsave(&phba->hbalock, iflags); 11314 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 11315 spin_unlock_irqrestore(&phba->hbalock, iflags); 11316 } 11317 11318 /* Pass the cmd_iocb and the rsp state to the upper layer */ 11319 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); 11320 } 11321 11322 /** 11323 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event 11324 * @phba: Pointer to HBA context object. 11325 * @cq: Pointer to completion queue. 11326 * @wcqe: Pointer to work-queue completion queue entry. 11327 * 11328 * This routine handles an fast-path WQ entry comsumed event by invoking the 11329 * proper WQ release routine to the slow-path WQ. 11330 **/ 11331 static void 11332 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 11333 struct lpfc_wcqe_release *wcqe) 11334 { 11335 struct lpfc_queue *childwq; 11336 bool wqid_matched = false; 11337 uint16_t fcp_wqid; 11338 11339 /* Check for fast-path FCP work queue release */ 11340 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); 11341 list_for_each_entry(childwq, &cq->child_list, list) { 11342 if (childwq->queue_id == fcp_wqid) { 11343 lpfc_sli4_wq_release(childwq, 11344 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 11345 wqid_matched = true; 11346 break; 11347 } 11348 } 11349 /* Report warning log message if no match found */ 11350 if (wqid_matched != true) 11351 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11352 "2580 Fast-path wqe consume event carries " 11353 "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid); 11354 } 11355 11356 /** 11357 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry 11358 * @cq: Pointer to the completion queue. 11359 * @eqe: Pointer to fast-path completion queue entry. 11360 * 11361 * This routine process a fast-path work queue completion entry from fast-path 11362 * event queue for FCP command response completion. 
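 *
 * Unlike the slow path, which queues completions for the worker thread, the
 * FCP completion is finished directly in interrupt context: the command iocbq
 * is looked up by its request tag, a response iocbq is built on the stack,
 * and the command's completion callback is invoked immediately (condensed
 * from lpfc_sli4_fp_handle_fcp_wcqe() above):
 *
 *      struct lpfc_iocbq irspiocbq;
 *
 *      lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
 *      (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);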
11363 **/ 11364 static int 11365 lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 11366 struct lpfc_cqe *cqe) 11367 { 11368 struct lpfc_wcqe_release wcqe; 11369 bool workposted = false; 11370 11371 /* Copy the work queue CQE and convert endian order if needed */ 11372 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); 11373 11374 /* Check and process for different type of WCQE and dispatch */ 11375 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { 11376 case CQE_CODE_COMPL_WQE: 11377 /* Process the WQ complete event */ 11378 phba->last_completion_time = jiffies; 11379 lpfc_sli4_fp_handle_fcp_wcqe(phba, 11380 (struct lpfc_wcqe_complete *)&wcqe); 11381 break; 11382 case CQE_CODE_RELEASE_WQE: 11383 /* Process the WQ release event */ 11384 lpfc_sli4_fp_handle_rel_wcqe(phba, cq, 11385 (struct lpfc_wcqe_release *)&wcqe); 11386 break; 11387 case CQE_CODE_XRI_ABORTED: 11388 /* Process the WQ XRI abort event */ 11389 phba->last_completion_time = jiffies; 11390 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 11391 (struct sli4_wcqe_xri_aborted *)&wcqe); 11392 break; 11393 default: 11394 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11395 "0144 Not a valid WCQE code: x%x\n", 11396 bf_get(lpfc_wcqe_c_code, &wcqe)); 11397 break; 11398 } 11399 return workposted; 11400 } 11401 11402 /** 11403 * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry 11404 * @phba: Pointer to HBA context object. 11405 * @eqe: Pointer to fast-path event queue entry. 11406 * 11407 * This routine process a event queue entry from the fast-path event queue. 11408 * It will check the MajorCode and MinorCode to determine this is for a 11409 * completion event on a completion queue, if not, an error shall be logged 11410 * and just return. Otherwise, it will get to the corresponding completion 11411 * queue and process all the entries on the completion queue, rearm the 11412 * completion queue, and then return. 
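 *
 * The fast-path EQs and CQs are mapped one-to-one, so the EQ index passed in
 * as fcp_cqidx selects the CQ directly; the CQ id carried in the EQE is then
 * only used as a sanity check (condensed from the body below):
 *
 *      cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
 *      cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
 *      if (unlikely(cqid != cq->queue_id))
 *              return;
 *
 * A mismatch is logged as error 0368 and the EQE is dropped.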
11413 **/ 11414 static void 11415 lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 11416 uint32_t fcp_cqidx) 11417 { 11418 struct lpfc_queue *cq; 11419 struct lpfc_cqe *cqe; 11420 bool workposted = false; 11421 uint16_t cqid; 11422 int ecount = 0; 11423 11424 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 11425 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11426 "0366 Not a valid fast-path completion " 11427 "event: majorcode=x%x, minorcode=x%x\n", 11428 bf_get_le32(lpfc_eqe_major_code, eqe), 11429 bf_get_le32(lpfc_eqe_minor_code, eqe)); 11430 return; 11431 } 11432 11433 if (unlikely(!phba->sli4_hba.fcp_cq)) { 11434 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11435 "3146 Fast-path completion queues " 11436 "does not exist\n"); 11437 return; 11438 } 11439 cq = phba->sli4_hba.fcp_cq[fcp_cqidx]; 11440 if (unlikely(!cq)) { 11441 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 11442 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11443 "0367 Fast-path completion queue " 11444 "(%d) does not exist\n", fcp_cqidx); 11445 return; 11446 } 11447 11448 /* Get the reference to the corresponding CQ */ 11449 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 11450 if (unlikely(cqid != cq->queue_id)) { 11451 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11452 "0368 Miss-matched fast-path completion " 11453 "queue identifier: eqcqid=%d, fcpcqid=%d\n", 11454 cqid, cq->queue_id); 11455 return; 11456 } 11457 11458 /* Process all the entries to the CQ */ 11459 while ((cqe = lpfc_sli4_cq_get(cq))) { 11460 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe); 11461 if (!(++ecount % cq->entry_repost)) 11462 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 11463 } 11464 11465 /* Catch the no cq entry condition */ 11466 if (unlikely(ecount == 0)) 11467 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11468 "0369 No entry from fast-path completion " 11469 "queue fcpcqid=%d\n", cq->queue_id); 11470 11471 /* In any case, flash and re-arm the CQ */ 11472 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 11473 11474 /* wake up worker thread if there are works to be done */ 11475 if (workposted) 11476 lpfc_worker_wake_up(phba); 11477 } 11478 11479 static void 11480 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) 11481 { 11482 struct lpfc_eqe *eqe; 11483 11484 /* walk all the EQ entries and drop on the floor */ 11485 while ((eqe = lpfc_sli4_eq_get(eq))) 11486 ; 11487 11488 /* Clear and re-arm the EQ */ 11489 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM); 11490 } 11491 11492 /** 11493 * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device 11494 * @irq: Interrupt number. 11495 * @dev_id: The device context pointer. 11496 * 11497 * This function is directly called from the PCI layer as an interrupt 11498 * service routine when device with SLI-4 interface spec is enabled with 11499 * MSI-X multi-message interrupt mode and there are slow-path events in 11500 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 11501 * interrupt mode, this function is called as part of the device-level 11502 * interrupt handler. When the PCI slot is in error recovery or the HBA is 11503 * undergoing initialization, the interrupt handler will not process the 11504 * interrupt. The link attention and ELS ring attention events are handled 11505 * by the worker thread. The interrupt handler signals the worker thread 11506 * and returns for these events. This function is called without any lock 11507 * held. It gets the hbalock to access and update SLI data structures. 
11508 *
11509 * This function returns IRQ_HANDLED when interrupt is handled else it
11510 * returns IRQ_NONE.
11511 **/
11512 irqreturn_t
11513 lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
11514 {
11515 struct lpfc_hba *phba;
11516 struct lpfc_queue *speq;
11517 struct lpfc_eqe *eqe;
11518 unsigned long iflag;
11519 int ecount = 0;
11520
11521 /*
11522 * Get the driver's phba structure from the dev_id
11523 */
11524 phba = (struct lpfc_hba *)dev_id;
11525
11526 if (unlikely(!phba))
11527 return IRQ_NONE;
11528
11529 /* Get to the EQ struct associated with this vector */
11530 speq = phba->sli4_hba.sp_eq;
11531 if (unlikely(!speq))
11532 return IRQ_NONE;
11533
11534 /* Check device state for handling interrupt */
11535 if (unlikely(lpfc_intr_state_check(phba))) {
11536 /* Check again for link_state with lock held */
11537 spin_lock_irqsave(&phba->hbalock, iflag);
11538 if (phba->link_state < LPFC_LINK_DOWN)
11539 /* Flush, clear interrupt, and rearm the EQ */
11540 lpfc_sli4_eq_flush(phba, speq);
11541 spin_unlock_irqrestore(&phba->hbalock, iflag);
11542 return IRQ_NONE;
11543 }
11544
11545 /*
11546 * Process all the events on the slow-path EQ
11547 */
11548 while ((eqe = lpfc_sli4_eq_get(speq))) {
11549 lpfc_sli4_sp_handle_eqe(phba, eqe);
11550 if (!(++ecount % speq->entry_repost))
11551 lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
11552 }
11553
11554 /* Always clear and re-arm the slow-path EQ */
11555 lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
11556
11557 /* Catch the no cq entry condition */
11558 if (unlikely(ecount == 0)) {
11559 if (phba->intr_type == MSIX)
11560 /* MSI-X treated interrupt served as no EQ share INT */
11561 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11562 "0357 MSI-X interrupt with no EQE\n");
11563 else
11564 /* Non MSI-X treated on interrupt as EQ share INT */
11565 return IRQ_NONE;
11566 }
11567
11568 return IRQ_HANDLED;
11569 } /* lpfc_sli4_sp_intr_handler */
11570
11571 /**
11572 * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
11573 * @irq: Interrupt number.
11574 * @dev_id: The device context pointer.
11575 *
11576 * This function is directly called from the PCI layer as an interrupt
11577 * service routine when device with SLI-4 interface spec is enabled with
11578 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
11579 * ring event in the HBA. However, when the device is enabled with either
11580 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
11581 * device-level interrupt handler. When the PCI slot is in error recovery
11582 * or the HBA is undergoing initialization, the interrupt handler will not
11583 * process the interrupt. The SCSI FCP fast-path ring events are handled in
11584 * the interrupt context. This function is called without any lock held.
11585 * It gets the hbalock to access and update SLI data structures. Note that
11586 * the FCP EQs and FCP CQs are mapped one-to-one, such that the FCP EQ index
11587 * is equal to the FCP CQ index.
11588 *
11589 * This function returns IRQ_HANDLED when interrupt is handled else it
11590 * returns IRQ_NONE.
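 *
 * For MSI-X, each vector is handed its own struct lpfc_fcp_eq_hdl as the
 * dev_id (as seen in lpfc_sli4_intr_handler() below, which passes
 * &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]); that is how the handler recovers
 * both the adapter and the EQ index it owns:
 *
 *      fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
 *      phba = fcp_eq_hdl->phba;
 *      fcp_eqidx = fcp_eq_hdl->idx;
 *      fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];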
11591 **/ 11592 irqreturn_t 11593 lpfc_sli4_fp_intr_handler(int irq, void *dev_id) 11594 { 11595 struct lpfc_hba *phba; 11596 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; 11597 struct lpfc_queue *fpeq; 11598 struct lpfc_eqe *eqe; 11599 unsigned long iflag; 11600 int ecount = 0; 11601 uint32_t fcp_eqidx; 11602 11603 /* Get the driver's phba structure from the dev_id */ 11604 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id; 11605 phba = fcp_eq_hdl->phba; 11606 fcp_eqidx = fcp_eq_hdl->idx; 11607 11608 if (unlikely(!phba)) 11609 return IRQ_NONE; 11610 if (unlikely(!phba->sli4_hba.fp_eq)) 11611 return IRQ_NONE; 11612 11613 /* Get to the EQ struct associated with this vector */ 11614 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx]; 11615 if (unlikely(!fpeq)) 11616 return IRQ_NONE; 11617 11618 /* Check device state for handling interrupt */ 11619 if (unlikely(lpfc_intr_state_check(phba))) { 11620 /* Check again for link_state with lock held */ 11621 spin_lock_irqsave(&phba->hbalock, iflag); 11622 if (phba->link_state < LPFC_LINK_DOWN) 11623 /* Flush, clear interrupt, and rearm the EQ */ 11624 lpfc_sli4_eq_flush(phba, fpeq); 11625 spin_unlock_irqrestore(&phba->hbalock, iflag); 11626 return IRQ_NONE; 11627 } 11628 11629 /* 11630 * Process all the event on FCP fast-path EQ 11631 */ 11632 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 11633 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx); 11634 if (!(++ecount % fpeq->entry_repost)) 11635 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); 11636 } 11637 11638 /* Always clear and re-arm the fast-path EQ */ 11639 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); 11640 11641 if (unlikely(ecount == 0)) { 11642 if (phba->intr_type == MSIX) 11643 /* MSI-X treated interrupt served as no EQ share INT */ 11644 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11645 "0358 MSI-X interrupt with no EQE\n"); 11646 else 11647 /* Non MSI-X treated on interrupt as EQ share INT */ 11648 return IRQ_NONE; 11649 } 11650 11651 return IRQ_HANDLED; 11652 } /* lpfc_sli4_fp_intr_handler */ 11653 11654 /** 11655 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device 11656 * @irq: Interrupt number. 11657 * @dev_id: The device context pointer. 11658 * 11659 * This function is the device-level interrupt handler to device with SLI-4 11660 * interface spec, called from the PCI layer when either MSI or Pin-IRQ 11661 * interrupt mode is enabled and there is an event in the HBA which requires 11662 * driver attention. This function invokes the slow-path interrupt attention 11663 * handling function and fast-path interrupt attention handling function in 11664 * turn to process the relevant HBA attention events. This function is called 11665 * without any lock held. It gets the hbalock to access and update SLI data 11666 * structures. 11667 * 11668 * This function returns IRQ_HANDLED when interrupt is handled, else it 11669 * returns IRQ_NONE. 11670 **/ 11671 irqreturn_t 11672 lpfc_sli4_intr_handler(int irq, void *dev_id) 11673 { 11674 struct lpfc_hba *phba; 11675 irqreturn_t sp_irq_rc, fp_irq_rc; 11676 bool fp_handled = false; 11677 uint32_t fcp_eqidx; 11678 11679 /* Get the driver's phba structure from the dev_id */ 11680 phba = (struct lpfc_hba *)dev_id; 11681 11682 if (unlikely(!phba)) 11683 return IRQ_NONE; 11684 11685 /* 11686 * Invokes slow-path host attention interrupt handling as appropriate. 11687 */ 11688 sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id); 11689 11690 /* 11691 * Invoke fast-path host attention interrupt handling as appropriate. 
11692 */ 11693 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 11694 fp_irq_rc = lpfc_sli4_fp_intr_handler(irq, 11695 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]); 11696 if (fp_irq_rc == IRQ_HANDLED) 11697 fp_handled |= true; 11698 } 11699 11700 return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc; 11701 } /* lpfc_sli4_intr_handler */ 11702 11703 /** 11704 * lpfc_sli4_queue_free - free a queue structure and associated memory 11705 * @queue: The queue structure to free. 11706 * 11707 * This function frees a queue structure and the DMAable memory used for 11708 * the host resident queue. This function must be called after destroying the 11709 * queue on the HBA. 11710 **/ 11711 void 11712 lpfc_sli4_queue_free(struct lpfc_queue *queue) 11713 { 11714 struct lpfc_dmabuf *dmabuf; 11715 11716 if (!queue) 11717 return; 11718 11719 while (!list_empty(&queue->page_list)) { 11720 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, 11721 list); 11722 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE, 11723 dmabuf->virt, dmabuf->phys); 11724 kfree(dmabuf); 11725 } 11726 kfree(queue); 11727 return; 11728 } 11729 11730 /** 11731 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure 11732 * @phba: The HBA that this queue is being created on. 11733 * @entry_size: The size of each queue entry for this queue. 11734 * @entry count: The number of entries that this queue will handle. 11735 * 11736 * This function allocates a queue structure and the DMAable memory used for 11737 * the host resident queue. This function must be called before creating the 11738 * queue on the HBA. 11739 **/ 11740 struct lpfc_queue * 11741 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, 11742 uint32_t entry_count) 11743 { 11744 struct lpfc_queue *queue; 11745 struct lpfc_dmabuf *dmabuf; 11746 int x, total_qe_count; 11747 void *dma_pointer; 11748 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 11749 11750 if (!phba->sli4_hba.pc_sli4_params.supported) 11751 hw_page_size = SLI4_PAGE_SIZE; 11752 11753 queue = kzalloc(sizeof(struct lpfc_queue) + 11754 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL); 11755 if (!queue) 11756 return NULL; 11757 queue->page_count = (ALIGN(entry_size * entry_count, 11758 hw_page_size))/hw_page_size; 11759 INIT_LIST_HEAD(&queue->list); 11760 INIT_LIST_HEAD(&queue->page_list); 11761 INIT_LIST_HEAD(&queue->child_list); 11762 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) { 11763 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 11764 if (!dmabuf) 11765 goto out_fail; 11766 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 11767 hw_page_size, &dmabuf->phys, 11768 GFP_KERNEL); 11769 if (!dmabuf->virt) { 11770 kfree(dmabuf); 11771 goto out_fail; 11772 } 11773 memset(dmabuf->virt, 0, hw_page_size); 11774 dmabuf->buffer_tag = x; 11775 list_add_tail(&dmabuf->list, &queue->page_list); 11776 /* initialize queue's entry array */ 11777 dma_pointer = dmabuf->virt; 11778 for (; total_qe_count < entry_count && 11779 dma_pointer < (hw_page_size + dmabuf->virt); 11780 total_qe_count++, dma_pointer += entry_size) { 11781 queue->qe[total_qe_count].address = dma_pointer; 11782 } 11783 } 11784 queue->entry_size = entry_size; 11785 queue->entry_count = entry_count; 11786 11787 /* 11788 * entry_repost is calculated based on the number of entries in the 11789 * queue. This works out except for RQs. If buffers are NOT initially 11790 * posted for every RQE, entry_repost should be adjusted accordingly. 
11791 */ 11792 queue->entry_repost = (entry_count >> 3); 11793 if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST) 11794 queue->entry_repost = LPFC_QUEUE_MIN_REPOST; 11795 queue->phba = phba; 11796 11797 return queue; 11798 out_fail: 11799 lpfc_sli4_queue_free(queue); 11800 return NULL; 11801 } 11802 11803 /** 11804 * lpfc_eq_create - Create an Event Queue on the HBA 11805 * @phba: HBA structure that indicates port to create a queue on. 11806 * @eq: The queue structure to use to create the event queue. 11807 * @imax: The maximum interrupt per second limit. 11808 * 11809 * This function creates an event queue, as detailed in @eq, on a port, 11810 * described by @phba by sending an EQ_CREATE mailbox command to the HBA. 11811 * 11812 * The @phba struct is used to send mailbox command to HBA. The @eq struct 11813 * is used to get the entry count and entry size that are necessary to 11814 * determine the number of pages to allocate and use for this queue. This 11815 * function will send the EQ_CREATE mailbox command to the HBA to setup the 11816 * event queue. This function is asynchronous and will wait for the mailbox 11817 * command to finish before continuing. 11818 * 11819 * On success this function will return a zero. If unable to allocate enough 11820 * memory this function will return -ENOMEM. If the queue create mailbox command 11821 * fails this function will return -ENXIO. 11822 **/ 11823 uint32_t 11824 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax) 11825 { 11826 struct lpfc_mbx_eq_create *eq_create; 11827 LPFC_MBOXQ_t *mbox; 11828 int rc, length, status = 0; 11829 struct lpfc_dmabuf *dmabuf; 11830 uint32_t shdr_status, shdr_add_status; 11831 union lpfc_sli4_cfg_shdr *shdr; 11832 uint16_t dmult; 11833 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 11834 11835 /* sanity check on queue memory */ 11836 if (!eq) 11837 return -ENODEV; 11838 if (!phba->sli4_hba.pc_sli4_params.supported) 11839 hw_page_size = SLI4_PAGE_SIZE; 11840 11841 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11842 if (!mbox) 11843 return -ENOMEM; 11844 length = (sizeof(struct lpfc_mbx_eq_create) - 11845 sizeof(struct lpfc_sli4_cfg_mhdr)); 11846 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 11847 LPFC_MBOX_OPCODE_EQ_CREATE, 11848 length, LPFC_SLI4_MBX_EMBED); 11849 eq_create = &mbox->u.mqe.un.eq_create; 11850 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, 11851 eq->page_count); 11852 bf_set(lpfc_eq_context_size, &eq_create->u.request.context, 11853 LPFC_EQE_SIZE); 11854 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); 11855 /* Calculate delay multiper from maximum interrupt per second */ 11856 dmult = LPFC_DMULT_CONST/imax - 1; 11857 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, 11858 dmult); 11859 switch (eq->entry_count) { 11860 default: 11861 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11862 "0360 Unsupported EQ count. 
(%d)\n", 11863 eq->entry_count); 11864 if (eq->entry_count < 256) 11865 return -EINVAL; 11866 /* otherwise default to smallest count (drop through) */ 11867 case 256: 11868 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 11869 LPFC_EQ_CNT_256); 11870 break; 11871 case 512: 11872 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 11873 LPFC_EQ_CNT_512); 11874 break; 11875 case 1024: 11876 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 11877 LPFC_EQ_CNT_1024); 11878 break; 11879 case 2048: 11880 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 11881 LPFC_EQ_CNT_2048); 11882 break; 11883 case 4096: 11884 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 11885 LPFC_EQ_CNT_4096); 11886 break; 11887 } 11888 list_for_each_entry(dmabuf, &eq->page_list, list) { 11889 memset(dmabuf->virt, 0, hw_page_size); 11890 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 11891 putPaddrLow(dmabuf->phys); 11892 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 11893 putPaddrHigh(dmabuf->phys); 11894 } 11895 mbox->vport = phba->pport; 11896 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 11897 mbox->context1 = NULL; 11898 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 11899 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; 11900 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11901 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 11902 if (shdr_status || shdr_add_status || rc) { 11903 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11904 "2500 EQ_CREATE mailbox failed with " 11905 "status x%x add_status x%x, mbx status x%x\n", 11906 shdr_status, shdr_add_status, rc); 11907 status = -ENXIO; 11908 } 11909 eq->type = LPFC_EQ; 11910 eq->subtype = LPFC_NONE; 11911 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); 11912 if (eq->queue_id == 0xFFFF) 11913 status = -ENXIO; 11914 eq->host_index = 0; 11915 eq->hba_index = 0; 11916 11917 mempool_free(mbox, phba->mbox_mem_pool); 11918 return status; 11919 } 11920 11921 /** 11922 * lpfc_cq_create - Create a Completion Queue on the HBA 11923 * @phba: HBA structure that indicates port to create a queue on. 11924 * @cq: The queue structure to use to create the completion queue. 11925 * @eq: The event queue to bind this completion queue to. 11926 * 11927 * This function creates a completion queue, as detailed in @wq, on a port, 11928 * described by @phba by sending a CQ_CREATE mailbox command to the HBA. 11929 * 11930 * The @phba struct is used to send mailbox command to HBA. The @cq struct 11931 * is used to get the entry count and entry size that are necessary to 11932 * determine the number of pages to allocate and use for this queue. The @eq 11933 * is used to indicate which event queue to bind this completion queue to. This 11934 * function will send the CQ_CREATE mailbox command to the HBA to setup the 11935 * completion queue. This function is asynchronous and will wait for the mailbox 11936 * command to finish before continuing. 11937 * 11938 * On success this function will return a zero. If unable to allocate enough 11939 * memory this function will return -ENOMEM. If the queue create mailbox command 11940 * fails this function will return -ENXIO. 
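 *
 * Typical usage (illustrative sketch only; the real queue setup lives
 * elsewhere in the driver, and the entry sizes/counts below are placeholders
 * rather than the values the driver actually uses):
 *
 *      eq = lpfc_sli4_queue_alloc(phba, eq_esize, eq_ecount);
 *      cq = lpfc_sli4_queue_alloc(phba, cq_esize, cq_ecount);
 *      if (!eq || !cq)
 *              goto fail;
 *      rc = lpfc_eq_create(phba, eq, imax);
 *      if (!rc)
 *              rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP);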
11941 **/ 11942 uint32_t 11943 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, 11944 struct lpfc_queue *eq, uint32_t type, uint32_t subtype) 11945 { 11946 struct lpfc_mbx_cq_create *cq_create; 11947 struct lpfc_dmabuf *dmabuf; 11948 LPFC_MBOXQ_t *mbox; 11949 int rc, length, status = 0; 11950 uint32_t shdr_status, shdr_add_status; 11951 union lpfc_sli4_cfg_shdr *shdr; 11952 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 11953 11954 /* sanity check on queue memory */ 11955 if (!cq || !eq) 11956 return -ENODEV; 11957 if (!phba->sli4_hba.pc_sli4_params.supported) 11958 hw_page_size = SLI4_PAGE_SIZE; 11959 11960 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11961 if (!mbox) 11962 return -ENOMEM; 11963 length = (sizeof(struct lpfc_mbx_cq_create) - 11964 sizeof(struct lpfc_sli4_cfg_mhdr)); 11965 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 11966 LPFC_MBOX_OPCODE_CQ_CREATE, 11967 length, LPFC_SLI4_MBX_EMBED); 11968 cq_create = &mbox->u.mqe.un.cq_create; 11969 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; 11970 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, 11971 cq->page_count); 11972 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); 11973 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); 11974 bf_set(lpfc_mbox_hdr_version, &shdr->request, 11975 phba->sli4_hba.pc_sli4_params.cqv); 11976 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { 11977 /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */ 11978 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1); 11979 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, 11980 eq->queue_id); 11981 } else { 11982 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, 11983 eq->queue_id); 11984 } 11985 switch (cq->entry_count) { 11986 default: 11987 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11988 "0361 Unsupported CQ count. (%d)\n", 11989 cq->entry_count); 11990 if (cq->entry_count < 256) 11991 return -EINVAL; 11992 /* otherwise default to smallest count (drop through) */ 11993 case 256: 11994 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 11995 LPFC_CQ_CNT_256); 11996 break; 11997 case 512: 11998 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 11999 LPFC_CQ_CNT_512); 12000 break; 12001 case 1024: 12002 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 12003 LPFC_CQ_CNT_1024); 12004 break; 12005 } 12006 list_for_each_entry(dmabuf, &cq->page_list, list) { 12007 memset(dmabuf->virt, 0, hw_page_size); 12008 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 12009 putPaddrLow(dmabuf->phys); 12010 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 12011 putPaddrHigh(dmabuf->phys); 12012 } 12013 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12014 12015 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 12016 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12017 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12018 if (shdr_status || shdr_add_status || rc) { 12019 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12020 "2501 CQ_CREATE mailbox failed with " 12021 "status x%x add_status x%x, mbx status x%x\n", 12022 shdr_status, shdr_add_status, rc); 12023 status = -ENXIO; 12024 goto out; 12025 } 12026 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 12027 if (cq->queue_id == 0xFFFF) { 12028 status = -ENXIO; 12029 goto out; 12030 } 12031 /* link the cq onto the parent eq child list */ 12032 list_add_tail(&cq->list, &eq->child_list); 12033 /* Set up completion queue's type and subtype */ 12034 cq->type = type; 12035 cq->subtype = subtype; 12036 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 12037 cq->assoc_qid = eq->queue_id; 12038 cq->host_index = 0; 12039 cq->hba_index = 0; 12040 12041 out: 12042 mempool_free(mbox, phba->mbox_mem_pool); 12043 return status; 12044 } 12045 12046 /** 12047 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration 12048 * @phba: HBA structure that indicates port to create a queue on. 12049 * @mq: The queue structure to use to create the mailbox queue. 12050 * @mbox: An allocated pointer to type LPFC_MBOXQ_t 12051 * @cq: The completion queue to associate with this cq. 12052 * 12053 * This function provides failback (fb) functionality when the 12054 * mq_create_ext fails on older FW generations. It's purpose is identical 12055 * to mq_create_ext otherwise. 12056 * 12057 * This routine cannot fail as all attributes were previously accessed and 12058 * initialized in mq_create_ext. 12059 **/ 12060 static void 12061 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, 12062 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq) 12063 { 12064 struct lpfc_mbx_mq_create *mq_create; 12065 struct lpfc_dmabuf *dmabuf; 12066 int length; 12067 12068 length = (sizeof(struct lpfc_mbx_mq_create) - 12069 sizeof(struct lpfc_sli4_cfg_mhdr)); 12070 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12071 LPFC_MBOX_OPCODE_MQ_CREATE, 12072 length, LPFC_SLI4_MBX_EMBED); 12073 mq_create = &mbox->u.mqe.un.mq_create; 12074 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, 12075 mq->page_count); 12076 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, 12077 cq->queue_id); 12078 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); 12079 switch (mq->entry_count) { 12080 case 16: 12081 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 12082 LPFC_MQ_RING_SIZE_16); 12083 break; 12084 case 32: 12085 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 12086 LPFC_MQ_RING_SIZE_32); 12087 break; 12088 case 64: 12089 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 12090 LPFC_MQ_RING_SIZE_64); 12091 break; 12092 case 128: 12093 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 12094 LPFC_MQ_RING_SIZE_128); 12095 break; 12096 } 12097 list_for_each_entry(dmabuf, &mq->page_list, list) { 12098 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 12099 putPaddrLow(dmabuf->phys); 12100 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 12101 putPaddrHigh(dmabuf->phys); 12102 } 12103 } 12104 12105 /** 12106 * lpfc_mq_create - Create a mailbox Queue on the HBA 12107 * @phba: HBA structure that indicates port to create a queue on. 12108 * @mq: The queue structure to use to create the mailbox queue. 
12109 * @cq: The completion queue to associate with this cq. 12110 * @subtype: The queue's subtype. 12111 * 12112 * This function creates a mailbox queue, as detailed in @mq, on a port, 12113 * described by @phba by sending a MQ_CREATE mailbox command to the HBA. 12114 * 12115 * The @phba struct is used to send mailbox command to HBA. The @cq struct 12116 * is used to get the entry count and entry size that are necessary to 12117 * determine the number of pages to allocate and use for this queue. This 12118 * function will send the MQ_CREATE mailbox command to the HBA to setup the 12119 * mailbox queue. This function is asynchronous and will wait for the mailbox 12120 * command to finish before continuing. 12121 * 12122 * On success this function will return a zero. If unable to allocate enough 12123 * memory this function will return -ENOMEM. If the queue create mailbox command 12124 * fails this function will return -ENXIO. 12125 **/ 12126 int32_t 12127 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, 12128 struct lpfc_queue *cq, uint32_t subtype) 12129 { 12130 struct lpfc_mbx_mq_create *mq_create; 12131 struct lpfc_mbx_mq_create_ext *mq_create_ext; 12132 struct lpfc_dmabuf *dmabuf; 12133 LPFC_MBOXQ_t *mbox; 12134 int rc, length, status = 0; 12135 uint32_t shdr_status, shdr_add_status; 12136 union lpfc_sli4_cfg_shdr *shdr; 12137 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12138 12139 /* sanity check on queue memory */ 12140 if (!mq || !cq) 12141 return -ENODEV; 12142 if (!phba->sli4_hba.pc_sli4_params.supported) 12143 hw_page_size = SLI4_PAGE_SIZE; 12144 12145 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12146 if (!mbox) 12147 return -ENOMEM; 12148 length = (sizeof(struct lpfc_mbx_mq_create_ext) - 12149 sizeof(struct lpfc_sli4_cfg_mhdr)); 12150 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12151 LPFC_MBOX_OPCODE_MQ_CREATE_EXT, 12152 length, LPFC_SLI4_MBX_EMBED); 12153 12154 mq_create_ext = &mbox->u.mqe.un.mq_create_ext; 12155 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; 12156 bf_set(lpfc_mbx_mq_create_ext_num_pages, 12157 &mq_create_ext->u.request, mq->page_count); 12158 bf_set(lpfc_mbx_mq_create_ext_async_evt_link, 12159 &mq_create_ext->u.request, 1); 12160 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip, 12161 &mq_create_ext->u.request, 1); 12162 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, 12163 &mq_create_ext->u.request, 1); 12164 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc, 12165 &mq_create_ext->u.request, 1); 12166 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, 12167 &mq_create_ext->u.request, 1); 12168 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); 12169 bf_set(lpfc_mbox_hdr_version, &shdr->request, 12170 phba->sli4_hba.pc_sli4_params.mqv); 12171 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) 12172 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, 12173 cq->queue_id); 12174 else 12175 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, 12176 cq->queue_id); 12177 switch (mq->entry_count) { 12178 default: 12179 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12180 "0362 Unsupported MQ count. 
(%d)\n", 12181 mq->entry_count); 12182 if (mq->entry_count < 16) 12183 return -EINVAL; 12184 /* otherwise default to smallest count (drop through) */ 12185 case 16: 12186 bf_set(lpfc_mq_context_ring_size, 12187 &mq_create_ext->u.request.context, 12188 LPFC_MQ_RING_SIZE_16); 12189 break; 12190 case 32: 12191 bf_set(lpfc_mq_context_ring_size, 12192 &mq_create_ext->u.request.context, 12193 LPFC_MQ_RING_SIZE_32); 12194 break; 12195 case 64: 12196 bf_set(lpfc_mq_context_ring_size, 12197 &mq_create_ext->u.request.context, 12198 LPFC_MQ_RING_SIZE_64); 12199 break; 12200 case 128: 12201 bf_set(lpfc_mq_context_ring_size, 12202 &mq_create_ext->u.request.context, 12203 LPFC_MQ_RING_SIZE_128); 12204 break; 12205 } 12206 list_for_each_entry(dmabuf, &mq->page_list, list) { 12207 memset(dmabuf->virt, 0, hw_page_size); 12208 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = 12209 putPaddrLow(dmabuf->phys); 12210 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = 12211 putPaddrHigh(dmabuf->phys); 12212 } 12213 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12214 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 12215 &mq_create_ext->u.response); 12216 if (rc != MBX_SUCCESS) { 12217 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12218 "2795 MQ_CREATE_EXT failed with " 12219 "status x%x. Failback to MQ_CREATE.\n", 12220 rc); 12221 lpfc_mq_create_fb_init(phba, mq, mbox, cq); 12222 mq_create = &mbox->u.mqe.un.mq_create; 12223 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12224 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; 12225 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 12226 &mq_create->u.response); 12227 } 12228 12229 /* The IOCTL status is embedded in the mailbox subheader. */ 12230 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12231 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12232 if (shdr_status || shdr_add_status || rc) { 12233 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12234 "2502 MQ_CREATE mailbox failed with " 12235 "status x%x add_status x%x, mbx status x%x\n", 12236 shdr_status, shdr_add_status, rc); 12237 status = -ENXIO; 12238 goto out; 12239 } 12240 if (mq->queue_id == 0xFFFF) { 12241 status = -ENXIO; 12242 goto out; 12243 } 12244 mq->type = LPFC_MQ; 12245 mq->assoc_qid = cq->queue_id; 12246 mq->subtype = subtype; 12247 mq->host_index = 0; 12248 mq->hba_index = 0; 12249 12250 /* link the mq onto the parent cq child list */ 12251 list_add_tail(&mq->list, &cq->child_list); 12252 out: 12253 mempool_free(mbox, phba->mbox_mem_pool); 12254 return status; 12255 } 12256 12257 /** 12258 * lpfc_wq_create - Create a Work Queue on the HBA 12259 * @phba: HBA structure that indicates port to create a queue on. 12260 * @wq: The queue structure to use to create the work queue. 12261 * @cq: The completion queue to bind this work queue to. 12262 * @subtype: The subtype of the work queue indicating its functionality. 12263 * 12264 * This function creates a work queue, as detailed in @wq, on a port, described 12265 * by @phba by sending a WQ_CREATE mailbox command to the HBA. 12266 * 12267 * The @phba struct is used to send mailbox command to HBA. The @wq struct 12268 * is used to get the entry count and entry size that are necessary to 12269 * determine the number of pages to allocate and use for this queue. The @cq 12270 * is used to indicate which completion queue to bind this work queue to. This 12271 * function will send the WQ_CREATE mailbox command to the HBA to setup the 12272 * work queue. 
This function is asynchronous and will wait for the mailbox 12273 * command to finish before continuing. 12274 * 12275 * On success this function will return a zero. If unable to allocate enough 12276 * memory this function will return -ENOMEM. If the queue create mailbox command 12277 * fails this function will return -ENXIO. 12278 **/ 12279 uint32_t 12280 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, 12281 struct lpfc_queue *cq, uint32_t subtype) 12282 { 12283 struct lpfc_mbx_wq_create *wq_create; 12284 struct lpfc_dmabuf *dmabuf; 12285 LPFC_MBOXQ_t *mbox; 12286 int rc, length, status = 0; 12287 uint32_t shdr_status, shdr_add_status; 12288 union lpfc_sli4_cfg_shdr *shdr; 12289 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12290 struct dma_address *page; 12291 12292 /* sanity check on queue memory */ 12293 if (!wq || !cq) 12294 return -ENODEV; 12295 if (!phba->sli4_hba.pc_sli4_params.supported) 12296 hw_page_size = SLI4_PAGE_SIZE; 12297 12298 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12299 if (!mbox) 12300 return -ENOMEM; 12301 length = (sizeof(struct lpfc_mbx_wq_create) - 12302 sizeof(struct lpfc_sli4_cfg_mhdr)); 12303 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 12304 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, 12305 length, LPFC_SLI4_MBX_EMBED); 12306 wq_create = &mbox->u.mqe.un.wq_create; 12307 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; 12308 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, 12309 wq->page_count); 12310 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, 12311 cq->queue_id); 12312 bf_set(lpfc_mbox_hdr_version, &shdr->request, 12313 phba->sli4_hba.pc_sli4_params.wqv); 12314 if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) { 12315 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, 12316 wq->entry_count); 12317 switch (wq->entry_size) { 12318 default: 12319 case 64: 12320 bf_set(lpfc_mbx_wq_create_wqe_size, 12321 &wq_create->u.request_1, 12322 LPFC_WQ_WQE_SIZE_64); 12323 break; 12324 case 128: 12325 bf_set(lpfc_mbx_wq_create_wqe_size, 12326 &wq_create->u.request_1, 12327 LPFC_WQ_WQE_SIZE_128); 12328 break; 12329 } 12330 bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, 12331 (PAGE_SIZE/SLI4_PAGE_SIZE)); 12332 page = wq_create->u.request_1.page; 12333 } else { 12334 page = wq_create->u.request.page; 12335 } 12336 list_for_each_entry(dmabuf, &wq->page_list, list) { 12337 memset(dmabuf->virt, 0, hw_page_size); 12338 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); 12339 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); 12340 } 12341 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12342 /* The IOCTL status is embedded in the mailbox subheader. 
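	 * The mbx return code only reflects delivery of the mailbox command
	 * itself; the WQ_CREATE completion status and any additional status
	 * are carried in the common cfg_shdr, so the shdr fields are checked
	 * together with rc below.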
*/ 12343 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12344 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12345 if (shdr_status || shdr_add_status || rc) { 12346 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12347 "2503 WQ_CREATE mailbox failed with " 12348 "status x%x add_status x%x, mbx status x%x\n", 12349 shdr_status, shdr_add_status, rc); 12350 status = -ENXIO; 12351 goto out; 12352 } 12353 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response); 12354 if (wq->queue_id == 0xFFFF) { 12355 status = -ENXIO; 12356 goto out; 12357 } 12358 wq->type = LPFC_WQ; 12359 wq->assoc_qid = cq->queue_id; 12360 wq->subtype = subtype; 12361 wq->host_index = 0; 12362 wq->hba_index = 0; 12363 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL; 12364 12365 /* link the wq onto the parent cq child list */ 12366 list_add_tail(&wq->list, &cq->child_list); 12367 out: 12368 mempool_free(mbox, phba->mbox_mem_pool); 12369 return status; 12370 } 12371 12372 /** 12373 * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ 12374 * @phba: HBA structure that indicates port to create a queue on. 12375 * @rq: The queue structure to use for the receive queue. 12376 * @qno: The associated HBQ number 12377 * 12378 * 12379 * For SLI4 we need to adjust the RQ repost value based on 12380 * the number of buffers that are initially posted to the RQ. 12381 */ 12382 void 12383 lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno) 12384 { 12385 uint32_t cnt; 12386 12387 /* sanity check on queue memory */ 12388 if (!rq) 12389 return; 12390 cnt = lpfc_hbq_defs[qno]->entry_count; 12391 12392 /* Recalc repost for RQs based on buffers initially posted */ 12393 cnt = (cnt >> 3); 12394 if (cnt < LPFC_QUEUE_MIN_REPOST) 12395 cnt = LPFC_QUEUE_MIN_REPOST; 12396 12397 rq->entry_repost = cnt; 12398 } 12399 12400 /** 12401 * lpfc_rq_create - Create a Receive Queue on the HBA 12402 * @phba: HBA structure that indicates port to create a queue on. 12403 * @hrq: The queue structure to use to create the header receive queue. 12404 * @drq: The queue structure to use to create the data receive queue. 12405 * @cq: The completion queue to bind this work queue to. 12406 * 12407 * This function creates a receive buffer queue pair , as detailed in @hrq and 12408 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 12409 * to the HBA. 12410 * 12411 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 12412 * struct is used to get the entry count that is necessary to determine the 12413 * number of pages to use for this queue. The @cq is used to indicate which 12414 * completion queue to bind received buffers that are posted to these queues to. 12415 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 12416 * receive queue pair. This function is asynchronous and will wait for the 12417 * mailbox command to finish before continuing. 12418 * 12419 * On success this function will return a zero. If unable to allocate enough 12420 * memory this function will return -ENOMEM. If the queue create mailbox command 12421 * fails this function will return -ENXIO. 
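 *
 * A minimal usage sketch (the queue pointers here are hypothetical,
 * LPFC_USOL is the unsolicited-frame subtype used elsewhere in this
 * driver, and error handling is abbreviated):
 *
 *	rc = lpfc_rq_create(phba, hdr_rq, dat_rq, els_cq, LPFC_USOL);
 *	if (rc)
 *		goto out_destroy;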
12422 **/ 12423 uint32_t 12424 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, 12425 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) 12426 { 12427 struct lpfc_mbx_rq_create *rq_create; 12428 struct lpfc_dmabuf *dmabuf; 12429 LPFC_MBOXQ_t *mbox; 12430 int rc, length, status = 0; 12431 uint32_t shdr_status, shdr_add_status; 12432 union lpfc_sli4_cfg_shdr *shdr; 12433 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12434 12435 /* sanity check on queue memory */ 12436 if (!hrq || !drq || !cq) 12437 return -ENODEV; 12438 if (!phba->sli4_hba.pc_sli4_params.supported) 12439 hw_page_size = SLI4_PAGE_SIZE; 12440 12441 if (hrq->entry_count != drq->entry_count) 12442 return -EINVAL; 12443 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12444 if (!mbox) 12445 return -ENOMEM; 12446 length = (sizeof(struct lpfc_mbx_rq_create) - 12447 sizeof(struct lpfc_sli4_cfg_mhdr)); 12448 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 12449 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 12450 length, LPFC_SLI4_MBX_EMBED); 12451 rq_create = &mbox->u.mqe.un.rq_create; 12452 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 12453 bf_set(lpfc_mbox_hdr_version, &shdr->request, 12454 phba->sli4_hba.pc_sli4_params.rqv); 12455 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 12456 bf_set(lpfc_rq_context_rqe_count_1, 12457 &rq_create->u.request.context, 12458 hrq->entry_count); 12459 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; 12460 bf_set(lpfc_rq_context_rqe_size, 12461 &rq_create->u.request.context, 12462 LPFC_RQE_SIZE_8); 12463 bf_set(lpfc_rq_context_page_size, 12464 &rq_create->u.request.context, 12465 (PAGE_SIZE/SLI4_PAGE_SIZE)); 12466 } else { 12467 switch (hrq->entry_count) { 12468 default: 12469 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12470 "2535 Unsupported RQ count. (%d)\n", 12471 hrq->entry_count); 12472 if (hrq->entry_count < 512) 12473 return -EINVAL; 12474 /* otherwise default to smallest count (drop through) */ 12475 case 512: 12476 bf_set(lpfc_rq_context_rqe_count, 12477 &rq_create->u.request.context, 12478 LPFC_RQ_RING_SIZE_512); 12479 break; 12480 case 1024: 12481 bf_set(lpfc_rq_context_rqe_count, 12482 &rq_create->u.request.context, 12483 LPFC_RQ_RING_SIZE_1024); 12484 break; 12485 case 2048: 12486 bf_set(lpfc_rq_context_rqe_count, 12487 &rq_create->u.request.context, 12488 LPFC_RQ_RING_SIZE_2048); 12489 break; 12490 case 4096: 12491 bf_set(lpfc_rq_context_rqe_count, 12492 &rq_create->u.request.context, 12493 LPFC_RQ_RING_SIZE_4096); 12494 break; 12495 } 12496 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 12497 LPFC_HDR_BUF_SIZE); 12498 } 12499 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 12500 cq->queue_id); 12501 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 12502 hrq->page_count); 12503 list_for_each_entry(dmabuf, &hrq->page_list, list) { 12504 memset(dmabuf->virt, 0, hw_page_size); 12505 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 12506 putPaddrLow(dmabuf->phys); 12507 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 12508 putPaddrHigh(dmabuf->phys); 12509 } 12510 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12511 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 12512 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12513 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12514 if (shdr_status || shdr_add_status || rc) { 12515 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12516 "2504 RQ_CREATE mailbox failed with " 12517 "status x%x add_status x%x, mbx status x%x\n", 12518 shdr_status, shdr_add_status, rc); 12519 status = -ENXIO; 12520 goto out; 12521 } 12522 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 12523 if (hrq->queue_id == 0xFFFF) { 12524 status = -ENXIO; 12525 goto out; 12526 } 12527 hrq->type = LPFC_HRQ; 12528 hrq->assoc_qid = cq->queue_id; 12529 hrq->subtype = subtype; 12530 hrq->host_index = 0; 12531 hrq->hba_index = 0; 12532 12533 /* now create the data queue */ 12534 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 12535 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 12536 length, LPFC_SLI4_MBX_EMBED); 12537 bf_set(lpfc_mbox_hdr_version, &shdr->request, 12538 phba->sli4_hba.pc_sli4_params.rqv); 12539 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 12540 bf_set(lpfc_rq_context_rqe_count_1, 12541 &rq_create->u.request.context, hrq->entry_count); 12542 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE; 12543 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, 12544 LPFC_RQE_SIZE_8); 12545 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, 12546 (PAGE_SIZE/SLI4_PAGE_SIZE)); 12547 } else { 12548 switch (drq->entry_count) { 12549 default: 12550 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12551 "2536 Unsupported RQ count. (%d)\n", 12552 drq->entry_count); 12553 if (drq->entry_count < 512) 12554 return -EINVAL; 12555 /* otherwise default to smallest count (drop through) */ 12556 case 512: 12557 bf_set(lpfc_rq_context_rqe_count, 12558 &rq_create->u.request.context, 12559 LPFC_RQ_RING_SIZE_512); 12560 break; 12561 case 1024: 12562 bf_set(lpfc_rq_context_rqe_count, 12563 &rq_create->u.request.context, 12564 LPFC_RQ_RING_SIZE_1024); 12565 break; 12566 case 2048: 12567 bf_set(lpfc_rq_context_rqe_count, 12568 &rq_create->u.request.context, 12569 LPFC_RQ_RING_SIZE_2048); 12570 break; 12571 case 4096: 12572 bf_set(lpfc_rq_context_rqe_count, 12573 &rq_create->u.request.context, 12574 LPFC_RQ_RING_SIZE_4096); 12575 break; 12576 } 12577 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 12578 LPFC_DATA_BUF_SIZE); 12579 } 12580 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 12581 cq->queue_id); 12582 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 12583 drq->page_count); 12584 list_for_each_entry(dmabuf, &drq->page_list, list) { 12585 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 12586 putPaddrLow(dmabuf->phys); 12587 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 12588 putPaddrHigh(dmabuf->phys); 12589 } 12590 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12591 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 12592 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 12593 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12594 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12595 if (shdr_status || shdr_add_status || rc) { 12596 status = -ENXIO; 12597 goto out; 12598 } 12599 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 12600 if (drq->queue_id == 0xFFFF) { 12601 status = -ENXIO; 12602 goto out; 12603 } 12604 drq->type = LPFC_DRQ; 12605 drq->assoc_qid = cq->queue_id; 12606 drq->subtype = subtype; 12607 drq->host_index = 0; 12608 drq->hba_index = 0; 12609 12610 /* link the header and data RQs onto the parent cq child list */ 12611 list_add_tail(&hrq->list, &cq->child_list); 12612 list_add_tail(&drq->list, &cq->child_list); 12613 12614 out: 12615 mempool_free(mbox, phba->mbox_mem_pool); 12616 return status; 12617 } 12618 12619 /** 12620 * lpfc_eq_destroy - Destroy an event Queue on the HBA 12621 * @eq: The queue structure associated with the queue to destroy. 12622 * 12623 * This function destroys a queue, as detailed in @eq by sending an mailbox 12624 * command, specific to the type of queue, to the HBA. 12625 * 12626 * The @eq struct is used to get the queue ID of the queue to destroy. 12627 * 12628 * On success this function will return a zero. If the queue destroy mailbox 12629 * command fails this function will return -ENXIO. 12630 **/ 12631 uint32_t 12632 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) 12633 { 12634 LPFC_MBOXQ_t *mbox; 12635 int rc, length, status = 0; 12636 uint32_t shdr_status, shdr_add_status; 12637 union lpfc_sli4_cfg_shdr *shdr; 12638 12639 /* sanity check on queue memory */ 12640 if (!eq) 12641 return -ENODEV; 12642 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); 12643 if (!mbox) 12644 return -ENOMEM; 12645 length = (sizeof(struct lpfc_mbx_eq_destroy) - 12646 sizeof(struct lpfc_sli4_cfg_mhdr)); 12647 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12648 LPFC_MBOX_OPCODE_EQ_DESTROY, 12649 length, LPFC_SLI4_MBX_EMBED); 12650 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, 12651 eq->queue_id); 12652 mbox->vport = eq->phba->pport; 12653 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12654 12655 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); 12656 /* The IOCTL status is embedded in the mailbox subheader. */ 12657 shdr = (union lpfc_sli4_cfg_shdr *) 12658 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; 12659 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12660 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12661 if (shdr_status || shdr_add_status || rc) { 12662 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12663 "2505 EQ_DESTROY mailbox failed with " 12664 "status x%x add_status x%x, mbx status x%x\n", 12665 shdr_status, shdr_add_status, rc); 12666 status = -ENXIO; 12667 } 12668 12669 /* Remove eq from any list */ 12670 list_del_init(&eq->list); 12671 mempool_free(mbox, eq->phba->mbox_mem_pool); 12672 return status; 12673 } 12674 12675 /** 12676 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA 12677 * @cq: The queue structure associated with the queue to destroy. 12678 * 12679 * This function destroys a queue, as detailed in @cq by sending an mailbox 12680 * command, specific to the type of queue, to the HBA. 12681 * 12682 * The @cq struct is used to get the queue ID of the queue to destroy. 12683 * 12684 * On success this function will return a zero. 
If the queue destroy mailbox 12685 * command fails this function will return -ENXIO. 12686 **/ 12687 uint32_t 12688 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) 12689 { 12690 LPFC_MBOXQ_t *mbox; 12691 int rc, length, status = 0; 12692 uint32_t shdr_status, shdr_add_status; 12693 union lpfc_sli4_cfg_shdr *shdr; 12694 12695 /* sanity check on queue memory */ 12696 if (!cq) 12697 return -ENODEV; 12698 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); 12699 if (!mbox) 12700 return -ENOMEM; 12701 length = (sizeof(struct lpfc_mbx_cq_destroy) - 12702 sizeof(struct lpfc_sli4_cfg_mhdr)); 12703 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12704 LPFC_MBOX_OPCODE_CQ_DESTROY, 12705 length, LPFC_SLI4_MBX_EMBED); 12706 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, 12707 cq->queue_id); 12708 mbox->vport = cq->phba->pport; 12709 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12710 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); 12711 /* The IOCTL status is embedded in the mailbox subheader. */ 12712 shdr = (union lpfc_sli4_cfg_shdr *) 12713 &mbox->u.mqe.un.wq_create.header.cfg_shdr; 12714 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12715 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12716 if (shdr_status || shdr_add_status || rc) { 12717 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12718 "2506 CQ_DESTROY mailbox failed with " 12719 "status x%x add_status x%x, mbx status x%x\n", 12720 shdr_status, shdr_add_status, rc); 12721 status = -ENXIO; 12722 } 12723 /* Remove cq from any list */ 12724 list_del_init(&cq->list); 12725 mempool_free(mbox, cq->phba->mbox_mem_pool); 12726 return status; 12727 } 12728 12729 /** 12730 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA 12731 * @qm: The queue structure associated with the queue to destroy. 12732 * 12733 * This function destroys a queue, as detailed in @mq by sending an mailbox 12734 * command, specific to the type of queue, to the HBA. 12735 * 12736 * The @mq struct is used to get the queue ID of the queue to destroy. 12737 * 12738 * On success this function will return a zero. If the queue destroy mailbox 12739 * command fails this function will return -ENXIO. 12740 **/ 12741 uint32_t 12742 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) 12743 { 12744 LPFC_MBOXQ_t *mbox; 12745 int rc, length, status = 0; 12746 uint32_t shdr_status, shdr_add_status; 12747 union lpfc_sli4_cfg_shdr *shdr; 12748 12749 /* sanity check on queue memory */ 12750 if (!mq) 12751 return -ENODEV; 12752 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); 12753 if (!mbox) 12754 return -ENOMEM; 12755 length = (sizeof(struct lpfc_mbx_mq_destroy) - 12756 sizeof(struct lpfc_sli4_cfg_mhdr)); 12757 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12758 LPFC_MBOX_OPCODE_MQ_DESTROY, 12759 length, LPFC_SLI4_MBX_EMBED); 12760 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, 12761 mq->queue_id); 12762 mbox->vport = mq->phba->pport; 12763 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12764 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL); 12765 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 12766 shdr = (union lpfc_sli4_cfg_shdr *) 12767 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; 12768 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12769 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12770 if (shdr_status || shdr_add_status || rc) { 12771 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12772 "2507 MQ_DESTROY mailbox failed with " 12773 "status x%x add_status x%x, mbx status x%x\n", 12774 shdr_status, shdr_add_status, rc); 12775 status = -ENXIO; 12776 } 12777 /* Remove mq from any list */ 12778 list_del_init(&mq->list); 12779 mempool_free(mbox, mq->phba->mbox_mem_pool); 12780 return status; 12781 } 12782 12783 /** 12784 * lpfc_wq_destroy - Destroy a Work Queue on the HBA 12785 * @wq: The queue structure associated with the queue to destroy. 12786 * 12787 * This function destroys a queue, as detailed in @wq by sending an mailbox 12788 * command, specific to the type of queue, to the HBA. 12789 * 12790 * The @wq struct is used to get the queue ID of the queue to destroy. 12791 * 12792 * On success this function will return a zero. If the queue destroy mailbox 12793 * command fails this function will return -ENXIO. 12794 **/ 12795 uint32_t 12796 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) 12797 { 12798 LPFC_MBOXQ_t *mbox; 12799 int rc, length, status = 0; 12800 uint32_t shdr_status, shdr_add_status; 12801 union lpfc_sli4_cfg_shdr *shdr; 12802 12803 /* sanity check on queue memory */ 12804 if (!wq) 12805 return -ENODEV; 12806 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); 12807 if (!mbox) 12808 return -ENOMEM; 12809 length = (sizeof(struct lpfc_mbx_wq_destroy) - 12810 sizeof(struct lpfc_sli4_cfg_mhdr)); 12811 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 12812 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, 12813 length, LPFC_SLI4_MBX_EMBED); 12814 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, 12815 wq->queue_id); 12816 mbox->vport = wq->phba->pport; 12817 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12818 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); 12819 shdr = (union lpfc_sli4_cfg_shdr *) 12820 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; 12821 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12822 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12823 if (shdr_status || shdr_add_status || rc) { 12824 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12825 "2508 WQ_DESTROY mailbox failed with " 12826 "status x%x add_status x%x, mbx status x%x\n", 12827 shdr_status, shdr_add_status, rc); 12828 status = -ENXIO; 12829 } 12830 /* Remove wq from any list */ 12831 list_del_init(&wq->list); 12832 mempool_free(mbox, wq->phba->mbox_mem_pool); 12833 return status; 12834 } 12835 12836 /** 12837 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA 12838 * @rq: The queue structure associated with the queue to destroy. 12839 * 12840 * This function destroys a queue, as detailed in @rq by sending an mailbox 12841 * command, specific to the type of queue, to the HBA. 12842 * 12843 * The @rq struct is used to get the queue ID of the queue to destroy. 12844 * 12845 * On success this function will return a zero. If the queue destroy mailbox 12846 * command fails this function will return -ENXIO. 
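 *
 * A minimal teardown sketch (hypothetical queue pointers; the header and
 * data queues that were created as a pair are destroyed with one call):
 *
 *	lpfc_rq_destroy(phba, hdr_rq, dat_rq);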
12847 **/ 12848 uint32_t 12849 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, 12850 struct lpfc_queue *drq) 12851 { 12852 LPFC_MBOXQ_t *mbox; 12853 int rc, length, status = 0; 12854 uint32_t shdr_status, shdr_add_status; 12855 union lpfc_sli4_cfg_shdr *shdr; 12856 12857 /* sanity check on queue memory */ 12858 if (!hrq || !drq) 12859 return -ENODEV; 12860 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); 12861 if (!mbox) 12862 return -ENOMEM; 12863 length = (sizeof(struct lpfc_mbx_rq_destroy) - 12864 sizeof(struct lpfc_sli4_cfg_mhdr)); 12865 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 12866 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, 12867 length, LPFC_SLI4_MBX_EMBED); 12868 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 12869 hrq->queue_id); 12870 mbox->vport = hrq->phba->pport; 12871 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12872 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); 12873 /* The IOCTL status is embedded in the mailbox subheader. */ 12874 shdr = (union lpfc_sli4_cfg_shdr *) 12875 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 12876 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12877 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12878 if (shdr_status || shdr_add_status || rc) { 12879 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12880 "2509 RQ_DESTROY mailbox failed with " 12881 "status x%x add_status x%x, mbx status x%x\n", 12882 shdr_status, shdr_add_status, rc); 12883 if (rc != MBX_TIMEOUT) 12884 mempool_free(mbox, hrq->phba->mbox_mem_pool); 12885 return -ENXIO; 12886 } 12887 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 12888 drq->queue_id); 12889 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL); 12890 shdr = (union lpfc_sli4_cfg_shdr *) 12891 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 12892 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12893 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12894 if (shdr_status || shdr_add_status || rc) { 12895 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12896 "2510 RQ_DESTROY mailbox failed with " 12897 "status x%x add_status x%x, mbx status x%x\n", 12898 shdr_status, shdr_add_status, rc); 12899 status = -ENXIO; 12900 } 12901 list_del_init(&hrq->list); 12902 list_del_init(&drq->list); 12903 mempool_free(mbox, hrq->phba->mbox_mem_pool); 12904 return status; 12905 } 12906 12907 /** 12908 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA 12909 * @phba: The virtual port for which this call being executed. 12910 * @pdma_phys_addr0: Physical address of the 1st SGL page. 12911 * @pdma_phys_addr1: Physical address of the 2nd SGL page. 12912 * @xritag: the xritag that ties this io to the SGL pages. 12913 * 12914 * This routine will post the sgl pages for the IO that has the xritag 12915 * that is in the iocbq structure. The xritag is assigned during iocbq 12916 * creation and persists for as long as the driver is loaded. 12917 * if the caller has fewer than 256 scatter gather segments to map then 12918 * pdma_phys_addr1 should be 0. 12919 * If the caller needs to map more than 256 scatter gather segment then 12920 * pdma_phys_addr1 should be a valid physical address. 12921 * physical address for SGLs must be 64 byte aligned. 12922 * If you are going to map 2 SGL's then the first one must have 256 entries 12923 * the second sgl can have between 1 and 256 entries. 
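 *
 * A single-page sketch (hypothetical sglq object): when the whole SGL
 * fits in one page, the second physical address is simply passed as
 * zero, for example:
 *
 *	rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);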
 *
 * Return codes:
 * 	0 - Success
 * 	-ENXIO, -ENOMEM - Failure
 **/
int
lpfc_sli4_post_sgl(struct lpfc_hba *phba,
		dma_addr_t pdma_phys_addr0,
		dma_addr_t pdma_phys_addr1,
		uint16_t xritag)
{
	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
	LPFC_MBOXQ_t *mbox;
	int rc;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;

	if (xritag == NO_XRI) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0364 Invalid param:\n");
		return -EINVAL;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
			sizeof(struct lpfc_mbx_post_sgl_pages) -
			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
				&mbox->u.mqe.un.post_sgl_pages;
	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2511 POST_SGL mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}

/**
 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine allocates the next unused logical xri from the xri bitmask
 * maintained by the driver, consistent with the SLI-4 interface spec, and
 * updates the xri usage counters. The bitmask is searched and updated
 * under the hbalock.
 *
 * Returns
 * 	the allocated logical xri if successful
 * 	NO_XRI if no xris are available.
 **/
uint16_t
lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
{
	unsigned long xri;

	/*
	 * Fetch the next logical xri.  Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irq(&phba->hbalock);
	xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
				 phba->sli4_hba.max_cfg_param.max_xri, 0);
	if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
		spin_unlock_irq(&phba->hbalock);
		return NO_XRI;
	} else {
		set_bit(xri, phba->sli4_hba.xri_bmask);
		phba->sli4_hba.max_cfg_param.xri_used++;
		phba->sli4_hba.xri_count++;
	}

	spin_unlock_irq(&phba->hbalock);
	return xri;
}

/**
 * __lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: the logical xri to release.
 *
 * This routine releases an xri back to the pool of available xris
 * maintained by the driver. The caller is expected to hold the hbalock.
 **/
void
__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
		phba->sli4_hba.xri_count--;
		phba->sli4_hba.max_cfg_param.xri_used--;
	}
}

/**
 * lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: the logical xri to release.
 *
 * This routine releases an xri back to the pool of available xris
 * maintained by the driver. The hbalock is taken around the release.
 **/
void
lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_xri(phba, xri);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_next_xritag - Get an xritag for the io
 * @phba: Pointer to HBA context object.
 *
 * This function allocates an xritag for the iocb. If no unused xritag is
 * available it returns NO_XRI (0xffff), which is not a valid xritag;
 * otherwise it returns the allocated xritag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
	uint16_t xri_index;

	xri_index = lpfc_sli4_alloc_xri(phba);
	if (xri_index != NO_XRI)
		return xri_index;

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"2004 Failed to allocate XRI. Last XRITAG is %d"
			" Max XRI is %d, Used XRI is %d\n",
			xri_index,
			phba->sli4_hba.max_cfg_param.max_xri,
			phba->sli4_hba.max_cfg_param.xri_used);
	return NO_XRI;
}

/**
 * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post a block of driver's sgl pages to the
 * HBA using non-embedded mailbox command. No Lock is held. This routine
 * is only called when the driver is loading and after all IO has been
 * stopped.
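 *
 * Returns 0 on success, -ENOMEM if the mailbox, its DMA memory, or an
 * xri cannot be allocated, and -ENXIO if the POST_SGL_BLOCK mailbox
 * command is rejected by the port.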
13098 **/ 13099 int 13100 lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba) 13101 { 13102 struct lpfc_sglq *sglq_entry; 13103 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 13104 struct sgl_page_pairs *sgl_pg_pairs; 13105 void *viraddr; 13106 LPFC_MBOXQ_t *mbox; 13107 uint32_t reqlen, alloclen, pg_pairs; 13108 uint32_t mbox_tmo; 13109 uint16_t xritag_start = 0, lxri = 0; 13110 int els_xri_cnt, rc = 0; 13111 uint32_t shdr_status, shdr_add_status; 13112 union lpfc_sli4_cfg_shdr *shdr; 13113 13114 /* The number of sgls to be posted */ 13115 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 13116 13117 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) + 13118 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 13119 if (reqlen > SLI4_PAGE_SIZE) { 13120 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13121 "2559 Block sgl registration required DMA " 13122 "size (%d) great than a page\n", reqlen); 13123 return -ENOMEM; 13124 } 13125 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13126 if (!mbox) 13127 return -ENOMEM; 13128 13129 /* Allocate DMA memory and set up the non-embedded mailbox command */ 13130 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 13131 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 13132 LPFC_SLI4_MBX_NEMBED); 13133 13134 if (alloclen < reqlen) { 13135 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13136 "0285 Allocated DMA memory size (%d) is " 13137 "less than the requested DMA memory " 13138 "size (%d)\n", alloclen, reqlen); 13139 lpfc_sli4_mbox_cmd_free(phba, mbox); 13140 return -ENOMEM; 13141 } 13142 /* Set up the SGL pages in the non-embedded DMA pages */ 13143 viraddr = mbox->sge_array->addr[0]; 13144 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 13145 sgl_pg_pairs = &sgl->sgl_pg_pairs; 13146 13147 for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) { 13148 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs]; 13149 13150 /* 13151 * Assign the sglq a physical xri only if the driver has not 13152 * initialized those resources. A port reset only needs 13153 * the sglq's posted. 13154 */ 13155 if (bf_get(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags) != 13156 LPFC_XRI_RSRC_RDY) { 13157 lxri = lpfc_sli4_next_xritag(phba); 13158 if (lxri == NO_XRI) { 13159 lpfc_sli4_mbox_cmd_free(phba, mbox); 13160 return -ENOMEM; 13161 } 13162 sglq_entry->sli4_lxritag = lxri; 13163 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 13164 } 13165 13166 /* Set up the sge entry */ 13167 sgl_pg_pairs->sgl_pg0_addr_lo = 13168 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 13169 sgl_pg_pairs->sgl_pg0_addr_hi = 13170 cpu_to_le32(putPaddrHigh(sglq_entry->phys)); 13171 sgl_pg_pairs->sgl_pg1_addr_lo = 13172 cpu_to_le32(putPaddrLow(0)); 13173 sgl_pg_pairs->sgl_pg1_addr_hi = 13174 cpu_to_le32(putPaddrHigh(0)); 13175 13176 /* Keep the first xritag on the list */ 13177 if (pg_pairs == 0) 13178 xritag_start = sglq_entry->sli4_xritag; 13179 sgl_pg_pairs++; 13180 } 13181 13182 /* Complete initialization and perform endian conversion. 
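	 * Only word0 is byte-swapped here; the SGE addresses were already
	 * converted with cpu_to_le32() as each page pair was filled in above.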
*/ 13183 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 13184 bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt); 13185 sgl->word0 = cpu_to_le32(sgl->word0); 13186 if (!phba->sli4_hba.intr_enable) 13187 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13188 else { 13189 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 13190 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 13191 } 13192 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 13193 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13194 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13195 if (rc != MBX_TIMEOUT) 13196 lpfc_sli4_mbox_cmd_free(phba, mbox); 13197 if (shdr_status || shdr_add_status || rc) { 13198 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13199 "2513 POST_SGL_BLOCK mailbox command failed " 13200 "status x%x add_status x%x mbx status x%x\n", 13201 shdr_status, shdr_add_status, rc); 13202 rc = -ENXIO; 13203 } 13204 13205 if (rc == 0) 13206 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 13207 LPFC_XRI_RSRC_RDY); 13208 return rc; 13209 } 13210 13211 /** 13212 * lpfc_sli4_post_els_sgl_list_ext - post a block of ELS sgls to the port. 13213 * @phba: pointer to lpfc hba data structure. 13214 * 13215 * This routine is invoked to post a block of driver's sgl pages to the 13216 * HBA using non-embedded mailbox command. No Lock is held. This routine 13217 * is only called when the driver is loading and after all IO has been 13218 * stopped. 13219 **/ 13220 int 13221 lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba) 13222 { 13223 struct lpfc_sglq *sglq_entry; 13224 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 13225 struct sgl_page_pairs *sgl_pg_pairs; 13226 void *viraddr; 13227 LPFC_MBOXQ_t *mbox; 13228 uint32_t reqlen, alloclen, index; 13229 uint32_t mbox_tmo; 13230 uint16_t rsrc_start, rsrc_size, els_xri_cnt; 13231 uint16_t xritag_start = 0, lxri = 0; 13232 struct lpfc_rsrc_blks *rsrc_blk; 13233 int cnt, ttl_cnt, rc = 0; 13234 int loop_cnt; 13235 uint32_t shdr_status, shdr_add_status; 13236 union lpfc_sli4_cfg_shdr *shdr; 13237 13238 /* The number of sgls to be posted */ 13239 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 13240 13241 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) + 13242 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 13243 if (reqlen > SLI4_PAGE_SIZE) { 13244 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13245 "2989 Block sgl registration required DMA " 13246 "size (%d) great than a page\n", reqlen); 13247 return -ENOMEM; 13248 } 13249 13250 cnt = 0; 13251 ttl_cnt = 0; 13252 list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list, 13253 list) { 13254 rsrc_start = rsrc_blk->rsrc_start; 13255 rsrc_size = rsrc_blk->rsrc_size; 13256 13257 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13258 "3014 Working ELS Extent start %d, cnt %d\n", 13259 rsrc_start, rsrc_size); 13260 13261 loop_cnt = min(els_xri_cnt, rsrc_size); 13262 if (ttl_cnt + loop_cnt >= els_xri_cnt) { 13263 loop_cnt = els_xri_cnt - ttl_cnt; 13264 ttl_cnt = els_xri_cnt; 13265 } 13266 13267 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13268 if (!mbox) 13269 return -ENOMEM; 13270 /* 13271 * Allocate DMA memory and set up the non-embedded mailbox 13272 * command. 
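		 * The sgls are posted one extent at a time, so a fresh
		 * non-embedded mailbox (and its DMA payload) is allocated on
		 * each pass of the extent loop and freed when the command
		 * completes.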
13273 */ 13274 alloclen = lpfc_sli4_config(phba, mbox, 13275 LPFC_MBOX_SUBSYSTEM_FCOE, 13276 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 13277 reqlen, LPFC_SLI4_MBX_NEMBED); 13278 if (alloclen < reqlen) { 13279 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13280 "2987 Allocated DMA memory size (%d) " 13281 "is less than the requested DMA memory " 13282 "size (%d)\n", alloclen, reqlen); 13283 lpfc_sli4_mbox_cmd_free(phba, mbox); 13284 return -ENOMEM; 13285 } 13286 13287 /* Set up the SGL pages in the non-embedded DMA pages */ 13288 viraddr = mbox->sge_array->addr[0]; 13289 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 13290 sgl_pg_pairs = &sgl->sgl_pg_pairs; 13291 13292 /* 13293 * The starting resource may not begin at zero. Control 13294 * the loop variants via the block resource parameters, 13295 * but handle the sge pointers with a zero-based index 13296 * that doesn't get reset per loop pass. 13297 */ 13298 for (index = rsrc_start; 13299 index < rsrc_start + loop_cnt; 13300 index++) { 13301 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[cnt]; 13302 13303 /* 13304 * Assign the sglq a physical xri only if the driver 13305 * has not initialized those resources. A port reset 13306 * only needs the sglq's posted. 13307 */ 13308 if (bf_get(lpfc_xri_rsrc_rdy, 13309 &phba->sli4_hba.sli4_flags) != 13310 LPFC_XRI_RSRC_RDY) { 13311 lxri = lpfc_sli4_next_xritag(phba); 13312 if (lxri == NO_XRI) { 13313 lpfc_sli4_mbox_cmd_free(phba, mbox); 13314 rc = -ENOMEM; 13315 goto err_exit; 13316 } 13317 sglq_entry->sli4_lxritag = lxri; 13318 sglq_entry->sli4_xritag = 13319 phba->sli4_hba.xri_ids[lxri]; 13320 } 13321 13322 /* Set up the sge entry */ 13323 sgl_pg_pairs->sgl_pg0_addr_lo = 13324 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 13325 sgl_pg_pairs->sgl_pg0_addr_hi = 13326 cpu_to_le32(putPaddrHigh(sglq_entry->phys)); 13327 sgl_pg_pairs->sgl_pg1_addr_lo = 13328 cpu_to_le32(putPaddrLow(0)); 13329 sgl_pg_pairs->sgl_pg1_addr_hi = 13330 cpu_to_le32(putPaddrHigh(0)); 13331 13332 /* Track the starting physical XRI for the mailbox. */ 13333 if (index == rsrc_start) 13334 xritag_start = sglq_entry->sli4_xritag; 13335 sgl_pg_pairs++; 13336 cnt++; 13337 } 13338 13339 /* Complete initialization and perform endian conversion. 
*/ 13340 rsrc_blk->rsrc_used += loop_cnt; 13341 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 13342 bf_set(lpfc_post_sgl_pages_xricnt, sgl, loop_cnt); 13343 sgl->word0 = cpu_to_le32(sgl->word0); 13344 13345 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13346 "3015 Post ELS Extent SGL, start %d, " 13347 "cnt %d, used %d\n", 13348 xritag_start, loop_cnt, rsrc_blk->rsrc_used); 13349 if (!phba->sli4_hba.intr_enable) 13350 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13351 else { 13352 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 13353 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 13354 } 13355 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 13356 shdr_status = bf_get(lpfc_mbox_hdr_status, 13357 &shdr->response); 13358 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 13359 &shdr->response); 13360 if (rc != MBX_TIMEOUT) 13361 lpfc_sli4_mbox_cmd_free(phba, mbox); 13362 if (shdr_status || shdr_add_status || rc) { 13363 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13364 "2988 POST_SGL_BLOCK mailbox " 13365 "command failed status x%x " 13366 "add_status x%x mbx status x%x\n", 13367 shdr_status, shdr_add_status, rc); 13368 rc = -ENXIO; 13369 goto err_exit; 13370 } 13371 if (ttl_cnt >= els_xri_cnt) 13372 break; 13373 } 13374 13375 err_exit: 13376 if (rc == 0) 13377 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 13378 LPFC_XRI_RSRC_RDY); 13379 return rc; 13380 } 13381 13382 /** 13383 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware 13384 * @phba: pointer to lpfc hba data structure. 13385 * @sblist: pointer to scsi buffer list. 13386 * @count: number of scsi buffers on the list. 13387 * 13388 * This routine is invoked to post a block of @count scsi sgl pages from a 13389 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command. 13390 * No Lock is held. 
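 *
 * Returns 0 on success, -ENOMEM if the mailbox or its DMA memory cannot
 * be allocated, and -ENXIO if the POST_SGL_BLOCK mailbox command fails.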
13391 * 13392 **/ 13393 int 13394 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist, 13395 int cnt) 13396 { 13397 struct lpfc_scsi_buf *psb; 13398 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 13399 struct sgl_page_pairs *sgl_pg_pairs; 13400 void *viraddr; 13401 LPFC_MBOXQ_t *mbox; 13402 uint32_t reqlen, alloclen, pg_pairs; 13403 uint32_t mbox_tmo; 13404 uint16_t xritag_start = 0; 13405 int rc = 0; 13406 uint32_t shdr_status, shdr_add_status; 13407 dma_addr_t pdma_phys_bpl1; 13408 union lpfc_sli4_cfg_shdr *shdr; 13409 13410 /* Calculate the requested length of the dma memory */ 13411 reqlen = cnt * sizeof(struct sgl_page_pairs) + 13412 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 13413 if (reqlen > SLI4_PAGE_SIZE) { 13414 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13415 "0217 Block sgl registration required DMA " 13416 "size (%d) great than a page\n", reqlen); 13417 return -ENOMEM; 13418 } 13419 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13420 if (!mbox) { 13421 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13422 "0283 Failed to allocate mbox cmd memory\n"); 13423 return -ENOMEM; 13424 } 13425 13426 /* Allocate DMA memory and set up the non-embedded mailbox command */ 13427 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 13428 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 13429 LPFC_SLI4_MBX_NEMBED); 13430 13431 if (alloclen < reqlen) { 13432 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13433 "2561 Allocated DMA memory size (%d) is " 13434 "less than the requested DMA memory " 13435 "size (%d)\n", alloclen, reqlen); 13436 lpfc_sli4_mbox_cmd_free(phba, mbox); 13437 return -ENOMEM; 13438 } 13439 13440 /* Get the first SGE entry from the non-embedded DMA memory */ 13441 viraddr = mbox->sge_array->addr[0]; 13442 13443 /* Set up the SGL pages in the non-embedded DMA pages */ 13444 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 13445 sgl_pg_pairs = &sgl->sgl_pg_pairs; 13446 13447 pg_pairs = 0; 13448 list_for_each_entry(psb, sblist, list) { 13449 /* Set up the sge entry */ 13450 sgl_pg_pairs->sgl_pg0_addr_lo = 13451 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl)); 13452 sgl_pg_pairs->sgl_pg0_addr_hi = 13453 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl)); 13454 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 13455 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE; 13456 else 13457 pdma_phys_bpl1 = 0; 13458 sgl_pg_pairs->sgl_pg1_addr_lo = 13459 cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 13460 sgl_pg_pairs->sgl_pg1_addr_hi = 13461 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 13462 /* Keep the first xritag on the list */ 13463 if (pg_pairs == 0) 13464 xritag_start = psb->cur_iocbq.sli4_xritag; 13465 sgl_pg_pairs++; 13466 pg_pairs++; 13467 } 13468 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 13469 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 13470 /* Perform endian conversion if necessary */ 13471 sgl->word0 = cpu_to_le32(sgl->word0); 13472 13473 if (!phba->sli4_hba.intr_enable) 13474 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13475 else { 13476 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 13477 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 13478 } 13479 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 13480 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13481 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13482 if (rc != MBX_TIMEOUT) 13483 lpfc_sli4_mbox_cmd_free(phba, mbox); 13484 if (shdr_status || shdr_add_status || rc) { 13485 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13486 
"2564 POST_SGL_BLOCK mailbox command failed " 13487 "status x%x add_status x%x mbx status x%x\n", 13488 shdr_status, shdr_add_status, rc); 13489 rc = -ENXIO; 13490 } 13491 return rc; 13492 } 13493 13494 /** 13495 * lpfc_sli4_post_scsi_sgl_blk_ext - post a block of scsi sgls to the port. 13496 * @phba: pointer to lpfc hba data structure. 13497 * @sblist: pointer to scsi buffer list. 13498 * @count: number of scsi buffers on the list. 13499 * 13500 * This routine is invoked to post a block of @count scsi sgl pages from a 13501 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command. 13502 * No Lock is held. 13503 * 13504 **/ 13505 int 13506 lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *phba, struct list_head *sblist, 13507 int cnt) 13508 { 13509 struct lpfc_scsi_buf *psb = NULL; 13510 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 13511 struct sgl_page_pairs *sgl_pg_pairs; 13512 void *viraddr; 13513 LPFC_MBOXQ_t *mbox; 13514 uint32_t reqlen, alloclen, pg_pairs; 13515 uint32_t mbox_tmo; 13516 uint16_t xri_start = 0, scsi_xri_start; 13517 uint16_t rsrc_range; 13518 int rc = 0, avail_cnt; 13519 uint32_t shdr_status, shdr_add_status; 13520 dma_addr_t pdma_phys_bpl1; 13521 union lpfc_sli4_cfg_shdr *shdr; 13522 struct lpfc_rsrc_blks *rsrc_blk; 13523 uint32_t xri_cnt = 0; 13524 13525 /* Calculate the total requested length of the dma memory */ 13526 reqlen = cnt * sizeof(struct sgl_page_pairs) + 13527 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 13528 if (reqlen > SLI4_PAGE_SIZE) { 13529 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13530 "2932 Block sgl registration required DMA " 13531 "size (%d) great than a page\n", reqlen); 13532 return -ENOMEM; 13533 } 13534 13535 /* 13536 * The use of extents requires the driver to post the sgl headers 13537 * in multiple postings to meet the contiguous resource assignment. 13538 */ 13539 psb = list_prepare_entry(psb, sblist, list); 13540 scsi_xri_start = phba->sli4_hba.scsi_xri_start; 13541 list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list, 13542 list) { 13543 rsrc_range = rsrc_blk->rsrc_start + rsrc_blk->rsrc_size; 13544 if (rsrc_range < scsi_xri_start) 13545 continue; 13546 else if (rsrc_blk->rsrc_used >= rsrc_blk->rsrc_size) 13547 continue; 13548 else 13549 avail_cnt = rsrc_blk->rsrc_size - rsrc_blk->rsrc_used; 13550 13551 reqlen = (avail_cnt * sizeof(struct sgl_page_pairs)) + 13552 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 13553 /* 13554 * Allocate DMA memory and set up the non-embedded mailbox 13555 * command. The mbox is used to post an SGL page per loop 13556 * but the DMA memory has a use-once semantic so the mailbox 13557 * is used and freed per loop pass. 
13558 */ 13559 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13560 if (!mbox) { 13561 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13562 "2933 Failed to allocate mbox cmd " 13563 "memory\n"); 13564 return -ENOMEM; 13565 } 13566 alloclen = lpfc_sli4_config(phba, mbox, 13567 LPFC_MBOX_SUBSYSTEM_FCOE, 13568 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 13569 reqlen, 13570 LPFC_SLI4_MBX_NEMBED); 13571 if (alloclen < reqlen) { 13572 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13573 "2934 Allocated DMA memory size (%d) " 13574 "is less than the requested DMA memory " 13575 "size (%d)\n", alloclen, reqlen); 13576 lpfc_sli4_mbox_cmd_free(phba, mbox); 13577 return -ENOMEM; 13578 } 13579 13580 /* Get the first SGE entry from the non-embedded DMA memory */ 13581 viraddr = mbox->sge_array->addr[0]; 13582 13583 /* Set up the SGL pages in the non-embedded DMA pages */ 13584 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 13585 sgl_pg_pairs = &sgl->sgl_pg_pairs; 13586 13587 /* pg_pairs tracks posted SGEs per loop iteration. */ 13588 pg_pairs = 0; 13589 list_for_each_entry_continue(psb, sblist, list) { 13590 /* Set up the sge entry */ 13591 sgl_pg_pairs->sgl_pg0_addr_lo = 13592 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl)); 13593 sgl_pg_pairs->sgl_pg0_addr_hi = 13594 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl)); 13595 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 13596 pdma_phys_bpl1 = psb->dma_phys_bpl + 13597 SGL_PAGE_SIZE; 13598 else 13599 pdma_phys_bpl1 = 0; 13600 sgl_pg_pairs->sgl_pg1_addr_lo = 13601 cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 13602 sgl_pg_pairs->sgl_pg1_addr_hi = 13603 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 13604 /* Keep the first xri for this extent. */ 13605 if (pg_pairs == 0) 13606 xri_start = psb->cur_iocbq.sli4_xritag; 13607 sgl_pg_pairs++; 13608 pg_pairs++; 13609 xri_cnt++; 13610 13611 /* 13612 * Track two exit conditions - the loop has constructed 13613 * all of the caller's SGE pairs or all available 13614 * resource IDs in this extent are consumed. 13615 */ 13616 if ((xri_cnt == cnt) || (pg_pairs >= avail_cnt)) 13617 break; 13618 } 13619 rsrc_blk->rsrc_used += pg_pairs; 13620 bf_set(lpfc_post_sgl_pages_xri, sgl, xri_start); 13621 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 13622 13623 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13624 "3016 Post SCSI Extent SGL, start %d, cnt %d " 13625 "blk use %d\n", 13626 xri_start, pg_pairs, rsrc_blk->rsrc_used); 13627 /* Perform endian conversion if necessary */ 13628 sgl->word0 = cpu_to_le32(sgl->word0); 13629 if (!phba->sli4_hba.intr_enable) 13630 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13631 else { 13632 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 13633 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 13634 } 13635 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 13636 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13637 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 13638 &shdr->response); 13639 if (rc != MBX_TIMEOUT) 13640 lpfc_sli4_mbox_cmd_free(phba, mbox); 13641 if (shdr_status || shdr_add_status || rc) { 13642 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13643 "2935 POST_SGL_BLOCK mailbox command " 13644 "failed status x%x add_status x%x " 13645 "mbx status x%x\n", 13646 shdr_status, shdr_add_status, rc); 13647 return -ENXIO; 13648 } 13649 13650 /* Post only what is requested. 
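		 * The scsi buffer list can span more than one extent, so the
		 * outer extent loop ends as soon as the number of posted xris
		 * reaches the caller's count.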
*/ 13651 if (xri_cnt >= cnt) 13652 break; 13653 } 13654 return rc; 13655 } 13656 13657 /** 13658 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 13659 * @phba: pointer to lpfc_hba struct that the frame was received on 13660 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 13661 * 13662 * This function checks the fields in the @fc_hdr to see if the FC frame is a 13663 * valid type of frame that the LPFC driver will handle. This function will 13664 * return a zero if the frame is a valid frame or a non zero value when the 13665 * frame does not pass the check. 13666 **/ 13667 static int 13668 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) 13669 { 13670 /* make rctl_names static to save stack space */ 13671 static char *rctl_names[] = FC_RCTL_NAMES_INIT; 13672 char *type_names[] = FC_TYPE_NAMES_INIT; 13673 struct fc_vft_header *fc_vft_hdr; 13674 uint32_t *header = (uint32_t *) fc_hdr; 13675 13676 switch (fc_hdr->fh_r_ctl) { 13677 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 13678 case FC_RCTL_DD_SOL_DATA: /* solicited data */ 13679 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ 13680 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ 13681 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ 13682 case FC_RCTL_DD_DATA_DESC: /* data descriptor */ 13683 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ 13684 case FC_RCTL_DD_CMD_STATUS: /* command status */ 13685 case FC_RCTL_ELS_REQ: /* extended link services request */ 13686 case FC_RCTL_ELS_REP: /* extended link services reply */ 13687 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ 13688 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ 13689 case FC_RCTL_BA_NOP: /* basic link service NOP */ 13690 case FC_RCTL_BA_ABTS: /* basic link service abort */ 13691 case FC_RCTL_BA_RMC: /* remove connection */ 13692 case FC_RCTL_BA_ACC: /* basic accept */ 13693 case FC_RCTL_BA_RJT: /* basic reject */ 13694 case FC_RCTL_BA_PRMT: 13695 case FC_RCTL_ACK_1: /* acknowledge_1 */ 13696 case FC_RCTL_ACK_0: /* acknowledge_0 */ 13697 case FC_RCTL_P_RJT: /* port reject */ 13698 case FC_RCTL_F_RJT: /* fabric reject */ 13699 case FC_RCTL_P_BSY: /* port busy */ 13700 case FC_RCTL_F_BSY: /* fabric busy to data frame */ 13701 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ 13702 case FC_RCTL_LCR: /* link credit reset */ 13703 case FC_RCTL_END: /* end */ 13704 break; 13705 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ 13706 fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 13707 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; 13708 return lpfc_fc_frame_check(phba, fc_hdr); 13709 default: 13710 goto drop; 13711 } 13712 switch (fc_hdr->fh_type) { 13713 case FC_TYPE_BLS: 13714 case FC_TYPE_ELS: 13715 case FC_TYPE_FCP: 13716 case FC_TYPE_CT: 13717 break; 13718 case FC_TYPE_IP: 13719 case FC_TYPE_ILS: 13720 default: 13721 goto drop; 13722 } 13723 13724 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 13725 "2538 Received frame rctl:%s type:%s " 13726 "Frame Data:%08x %08x %08x %08x %08x %08x\n", 13727 rctl_names[fc_hdr->fh_r_ctl], 13728 type_names[fc_hdr->fh_type], 13729 be32_to_cpu(header[0]), be32_to_cpu(header[1]), 13730 be32_to_cpu(header[2]), be32_to_cpu(header[3]), 13731 be32_to_cpu(header[4]), be32_to_cpu(header[5])); 13732 return 0; 13733 drop: 13734 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 13735 "2539 Dropped frame rctl:%s type:%s\n", 13736 rctl_names[fc_hdr->fh_r_ctl], 13737 type_names[fc_hdr->fh_type]); 13738 return 1; 13739 } 13740 13741 /** 13742 * 
lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame 13743 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 13744 * 13745 * This function processes the FC header to retrieve the VFI from the VF 13746 * header, if one exists. This function will return the VFI if one exists 13747 * or 0 if no VSAN Header exists. 13748 **/ 13749 static uint32_t 13750 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) 13751 { 13752 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 13753 13754 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) 13755 return 0; 13756 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); 13757 } 13758 13759 /** 13760 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to 13761 * @phba: Pointer to the HBA structure to search for the vport on 13762 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 13763 * @fcfi: The FC Fabric ID that the frame came from 13764 * 13765 * This function searches the @phba for a vport that matches the content of the 13766 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the 13767 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function 13768 * returns the matching vport pointer or NULL if unable to match frame to a 13769 * vport. 13770 **/ 13771 static struct lpfc_vport * 13772 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, 13773 uint16_t fcfi) 13774 { 13775 struct lpfc_vport **vports; 13776 struct lpfc_vport *vport = NULL; 13777 int i; 13778 uint32_t did = (fc_hdr->fh_d_id[0] << 16 | 13779 fc_hdr->fh_d_id[1] << 8 | 13780 fc_hdr->fh_d_id[2]); 13781 if (did == Fabric_DID) 13782 return phba->pport; 13783 vports = lpfc_create_vport_work_array(phba); 13784 if (vports != NULL) 13785 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 13786 if (phba->fcf.fcfi == fcfi && 13787 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && 13788 vports[i]->fc_myDID == did) { 13789 vport = vports[i]; 13790 break; 13791 } 13792 } 13793 lpfc_destroy_vport_work_array(phba, vports); 13794 return vport; 13795 } 13796 13797 /** 13798 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp 13799 * @vport: The vport to work on. 13800 * 13801 * This function updates the receive sequence time stamp for this vport. The 13802 * receive sequence time stamp indicates the time that the last frame of the 13803 * the sequence that has been idle for the longest amount of time was received. 13804 * the driver uses this time stamp to indicate if any received sequences have 13805 * timed out. 13806 **/ 13807 void 13808 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) 13809 { 13810 struct lpfc_dmabuf *h_buf; 13811 struct hbq_dmabuf *dmabuf = NULL; 13812 13813 /* get the oldest sequence on the rcv list */ 13814 h_buf = list_get_first(&vport->rcv_buffer_list, 13815 struct lpfc_dmabuf, list); 13816 if (!h_buf) 13817 return; 13818 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 13819 vport->rcv_buffer_time_stamp = dmabuf->time_stamp; 13820 } 13821 13822 /** 13823 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. 13824 * @vport: The vport that the received sequences were sent to. 13825 * 13826 * This function cleans up all outstanding received sequences. This is called 13827 * by the driver when a link event or user action invalidates all the received 13828 * sequences. 
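 * Each entry on the vport's rcv_buffer_list is the header buffer (hbuf) of a
 * partially assembled sequence; the data buffers received so far for that
 * sequence are chained on the entry's dbuf list, and both are freed here.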
13829 **/ 13830 void 13831 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) 13832 { 13833 struct lpfc_dmabuf *h_buf, *hnext; 13834 struct lpfc_dmabuf *d_buf, *dnext; 13835 struct hbq_dmabuf *dmabuf = NULL; 13836 13837 /* start with the oldest sequence on the rcv list */ 13838 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 13839 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 13840 list_del_init(&dmabuf->hbuf.list); 13841 list_for_each_entry_safe(d_buf, dnext, 13842 &dmabuf->dbuf.list, list) { 13843 list_del_init(&d_buf->list); 13844 lpfc_in_buf_free(vport->phba, d_buf); 13845 } 13846 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 13847 } 13848 } 13849 13850 /** 13851 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. 13852 * @vport: The vport that the received sequences were sent to. 13853 * 13854 * This function determines whether any received sequences have timed out by 13855 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp 13856 * indicates that there is at least one timed out sequence this routine will 13857 * go through the received sequences one at a time from most inactive to most 13858 * active to determine which ones need to be cleaned up. Once it has determined 13859 * that a sequence needs to be cleaned up it will simply free up the resources 13860 * without sending an abort. 13861 **/ 13862 void 13863 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) 13864 { 13865 struct lpfc_dmabuf *h_buf, *hnext; 13866 struct lpfc_dmabuf *d_buf, *dnext; 13867 struct hbq_dmabuf *dmabuf = NULL; 13868 unsigned long timeout; 13869 int abort_count = 0; 13870 13871 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 13872 vport->rcv_buffer_time_stamp); 13873 if (list_empty(&vport->rcv_buffer_list) || 13874 time_before(jiffies, timeout)) 13875 return; 13876 /* start with the oldest sequence on the rcv list */ 13877 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 13878 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 13879 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 13880 dmabuf->time_stamp); 13881 if (time_before(jiffies, timeout)) 13882 break; 13883 abort_count++; 13884 list_del_init(&dmabuf->hbuf.list); 13885 list_for_each_entry_safe(d_buf, dnext, 13886 &dmabuf->dbuf.list, list) { 13887 list_del_init(&d_buf->list); 13888 lpfc_in_buf_free(vport->phba, d_buf); 13889 } 13890 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 13891 } 13892 if (abort_count) 13893 lpfc_update_rcv_time_stamp(vport); 13894 } 13895 13896 /** 13897 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences 13898 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame 13899 * 13900 * This function searches through the existing incomplete sequences that have 13901 * been sent to this @vport. If the frame matches one of the incomplete 13902 * sequences then the dbuf in the @dmabuf is added to the list of frames that 13903 * make up that sequence. If no sequence is found that matches this frame then 13904 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list 13905 * This function returns a pointer to the first dmabuf in the sequence list that 13906 * the frame was linked to. 
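 * A frame is matched to a pending sequence by comparing its SEQ_ID, OX_ID and
 * S_ID against the header buffers already queued; within a matching sequence
 * the data buffers are kept ordered by SEQ_CNT.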
13907 **/ 13908 static struct hbq_dmabuf * 13909 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 13910 { 13911 struct fc_frame_header *new_hdr; 13912 struct fc_frame_header *temp_hdr; 13913 struct lpfc_dmabuf *d_buf; 13914 struct lpfc_dmabuf *h_buf; 13915 struct hbq_dmabuf *seq_dmabuf = NULL; 13916 struct hbq_dmabuf *temp_dmabuf = NULL; 13917 13918 INIT_LIST_HEAD(&dmabuf->dbuf.list); 13919 dmabuf->time_stamp = jiffies; 13920 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 13921 /* Use the hdr_buf to find the sequence that this frame belongs to */ 13922 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 13923 temp_hdr = (struct fc_frame_header *)h_buf->virt; 13924 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 13925 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 13926 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 13927 continue; 13928 /* found a pending sequence that matches this frame */ 13929 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 13930 break; 13931 } 13932 if (!seq_dmabuf) { 13933 /* 13934 * This indicates first frame received for this sequence. 13935 * Queue the buffer on the vport's rcv_buffer_list. 13936 */ 13937 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 13938 lpfc_update_rcv_time_stamp(vport); 13939 return dmabuf; 13940 } 13941 temp_hdr = seq_dmabuf->hbuf.virt; 13942 if (be16_to_cpu(new_hdr->fh_seq_cnt) < 13943 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 13944 list_del_init(&seq_dmabuf->hbuf.list); 13945 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 13946 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 13947 lpfc_update_rcv_time_stamp(vport); 13948 return dmabuf; 13949 } 13950 /* move this sequence to the tail to indicate a young sequence */ 13951 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list); 13952 seq_dmabuf->time_stamp = jiffies; 13953 lpfc_update_rcv_time_stamp(vport); 13954 if (list_empty(&seq_dmabuf->dbuf.list)) { 13955 temp_hdr = dmabuf->hbuf.virt; 13956 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 13957 return seq_dmabuf; 13958 } 13959 /* find the correct place in the sequence to insert this frame */ 13960 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { 13961 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 13962 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt; 13963 /* 13964 * If the frame's sequence count is greater than the frame on 13965 * the list then insert the frame right after this frame 13966 */ 13967 if (be16_to_cpu(new_hdr->fh_seq_cnt) > 13968 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 13969 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); 13970 return seq_dmabuf; 13971 } 13972 } 13973 return NULL; 13974 } 13975 13976 /** 13977 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence 13978 * @vport: pointer to a virtual port 13979 * @dmabuf: pointer to a dmabuf that describes the FC sequence 13980 * 13981 * This function tries to abort from the partially assembled sequence, described 13982 * by the information from basic abort @dmabuf. It checks to see whether such a 13983 * partially assembled sequence is held by the driver. If so, it shall free up all 13984 * the frames from the partially assembled sequence.
13985 * 13986 * Return 13987 * true -- if there is matching partially assembled sequence present and all 13988 * the frames freed with the sequence; 13989 * false -- if there is no matching partially assembled sequence present so 13990 * nothing got aborted in the lower layer driver 13991 **/ 13992 static bool 13993 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, 13994 struct hbq_dmabuf *dmabuf) 13995 { 13996 struct fc_frame_header *new_hdr; 13997 struct fc_frame_header *temp_hdr; 13998 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf; 13999 struct hbq_dmabuf *seq_dmabuf = NULL; 14000 14001 /* Use the hdr_buf to find the sequence that matches this frame */ 14002 INIT_LIST_HEAD(&dmabuf->dbuf.list); 14003 INIT_LIST_HEAD(&dmabuf->hbuf.list); 14004 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 14005 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 14006 temp_hdr = (struct fc_frame_header *)h_buf->virt; 14007 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 14008 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 14009 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 14010 continue; 14011 /* found a pending sequence that matches this frame */ 14012 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 14013 break; 14014 } 14015 14016 /* Free up all the frames from the partially assembled sequence */ 14017 if (seq_dmabuf) { 14018 list_for_each_entry_safe(d_buf, n_buf, 14019 &seq_dmabuf->dbuf.list, list) { 14020 list_del_init(&d_buf->list); 14021 lpfc_in_buf_free(vport->phba, d_buf); 14022 } 14023 return true; 14024 } 14025 return false; 14026 } 14027 14028 /** 14029 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler 14030 * @phba: Pointer to HBA context object. 14031 * @cmd_iocbq: pointer to the command iocbq structure. 14032 * @rsp_iocbq: pointer to the response iocbq structure. 14033 * 14034 * This function handles the sequence abort response iocb command complete 14035 * event. It properly releases the memory allocated to the sequence abort 14036 * accept iocb. 14037 **/ 14038 static void 14039 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, 14040 struct lpfc_iocbq *cmd_iocbq, 14041 struct lpfc_iocbq *rsp_iocbq) 14042 { 14043 if (cmd_iocbq) 14044 lpfc_sli_release_iocbq(phba, cmd_iocbq); 14045 } 14046 14047 /** 14048 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver. 14049 * @phba: Pointer to HBA context object. 14050 * @xri: xri id in transaction. 14051 * 14052 * This function validates the xri maps to the known range of XRIs allocated an 14053 * used by the driver. 14054 **/ 14055 uint16_t 14056 lpfc_sli4_xri_inrange(struct lpfc_hba *phba, 14057 uint16_t xri) 14058 { 14059 int i; 14060 14061 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) { 14062 if (xri == phba->sli4_hba.xri_ids[i]) 14063 return i; 14064 } 14065 return NO_XRI; 14066 } 14067 14068 14069 /** 14070 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort 14071 * @phba: Pointer to HBA context object. 14072 * @fc_hdr: pointer to a FC frame header. 14073 * 14074 * This function sends a basic response to a previous unsol sequence abort 14075 * event after aborting the sequence handling. 
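 * The response is normally a BA_ACC; if the OX_ID does not map to an exchange
 * the driver still owns, the IOCB is overridden to carry a BA_RJT instead.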
14076 **/ 14077 static void 14078 lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba, 14079 struct fc_frame_header *fc_hdr) 14080 { 14081 struct lpfc_iocbq *ctiocb = NULL; 14082 struct lpfc_nodelist *ndlp; 14083 uint16_t oxid, rxid; 14084 uint32_t sid, fctl; 14085 IOCB_t *icmd; 14086 int rc; 14087 14088 if (!lpfc_is_link_up(phba)) 14089 return; 14090 14091 sid = sli4_sid_from_fc_hdr(fc_hdr); 14092 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 14093 rxid = be16_to_cpu(fc_hdr->fh_rx_id); 14094 14095 ndlp = lpfc_findnode_did(phba->pport, sid); 14096 if (!ndlp) { 14097 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 14098 "1268 Find ndlp returned NULL for oxid:x%x " 14099 "SID:x%x\n", oxid, sid); 14100 return; 14101 } 14102 if (lpfc_sli4_xri_inrange(phba, rxid)) 14103 lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0); 14104 14105 /* Allocate buffer for rsp iocb */ 14106 ctiocb = lpfc_sli_get_iocbq(phba); 14107 if (!ctiocb) 14108 return; 14109 14110 /* Extract the F_CTL field from FC_HDR */ 14111 fctl = sli4_fctl_from_fc_hdr(fc_hdr); 14112 14113 icmd = &ctiocb->iocb; 14114 icmd->un.xseq64.bdl.bdeSize = 0; 14115 icmd->un.xseq64.bdl.ulpIoTag32 = 0; 14116 icmd->un.xseq64.w5.hcsw.Dfctl = 0; 14117 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC; 14118 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS; 14119 14120 /* Fill in the rest of iocb fields */ 14121 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX; 14122 icmd->ulpBdeCount = 0; 14123 icmd->ulpLe = 1; 14124 icmd->ulpClass = CLASS3; 14125 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 14126 ctiocb->context1 = ndlp; 14127 14128 ctiocb->iocb_cmpl = NULL; 14129 ctiocb->vport = phba->pport; 14130 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; 14131 ctiocb->sli4_lxritag = NO_XRI; 14132 ctiocb->sli4_xritag = NO_XRI; 14133 14134 /* If the oxid maps to the FCP XRI range or if it is out of range, 14135 * send a BLS_RJT. The driver no longer has that exchange. 14136 * Override the IOCB for a BA_RJT. 14137 */ 14138 if (oxid > (phba->sli4_hba.max_cfg_param.max_xri + 14139 phba->sli4_hba.max_cfg_param.xri_base) || 14140 oxid > (lpfc_sli4_get_els_iocb_cnt(phba) + 14141 phba->sli4_hba.max_cfg_param.xri_base)) { 14142 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 14143 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 14144 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 14145 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 14146 } 14147 14148 if (fctl & FC_FC_EX_CTX) { 14149 /* ABTS sent by responder to CT exchange, construction 14150 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG 14151 * field and RX_ID from ABTS for RX_ID field. 14152 */ 14153 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); 14154 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); 14155 } else { 14156 /* ABTS sent by initiator to CT exchange, construction 14157 * of BA_ACC will need to allocate a new XRI as for the 14158 * XRI_TAG and RX_ID fields. 
14159 */ 14160 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); 14161 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, NO_XRI); 14162 } 14163 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); 14164 14165 /* Xmit CT abts response on exchange <xid> */ 14166 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 14167 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", 14168 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); 14169 14170 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 14171 if (rc == IOCB_ERROR) { 14172 lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 14173 "2925 Failed to issue CT ABTS RSP x%x on " 14174 "xri x%x, Data x%x\n", 14175 icmd->un.xseq64.w5.hcsw.Rctl, oxid, 14176 phba->link_state); 14177 lpfc_sli_release_iocbq(phba, ctiocb); 14178 } 14179 } 14180 14181 /** 14182 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event 14183 * @vport: Pointer to the vport on which this sequence was received 14184 * @dmabuf: pointer to a dmabuf that describes the FC sequence 14185 * 14186 * This function handles an SLI-4 unsolicited abort event. If the unsolicited 14187 * receive sequence is only partially assembled by the driver, it shall abort 14188 * the partially assembled frames for the sequence. Otherwise, if the 14189 * unsolicited receive sequence has been completely assembled and passed to 14190 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to note 14191 * that the unsolicited sequence has been aborted. After that, it will issue a basic 14192 * accept to accept the abort. 14193 **/ 14194 void 14195 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, 14196 struct hbq_dmabuf *dmabuf) 14197 { 14198 struct lpfc_hba *phba = vport->phba; 14199 struct fc_frame_header fc_hdr; 14200 uint32_t fctl; 14201 bool abts_par; 14202 14203 /* Make a copy of fc_hdr before the dmabuf being released */ 14204 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); 14205 fctl = sli4_fctl_from_fc_hdr(&fc_hdr); 14206 14207 if (fctl & FC_FC_EX_CTX) { 14208 /* 14209 * ABTS sent by responder to exchange, just free the buffer 14210 */ 14211 lpfc_in_buf_free(phba, &dmabuf->dbuf); 14212 } else { 14213 /* 14214 * ABTS sent by initiator to exchange, need to do cleanup 14215 */ 14216 /* Try to abort partially assembled seq */ 14217 abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf); 14218 14219 /* Send abort to ULP if partially seq abort failed */ 14220 if (abts_par == false) 14221 lpfc_sli4_send_seq_to_ulp(vport, dmabuf); 14222 else 14223 lpfc_in_buf_free(phba, &dmabuf->dbuf); 14224 } 14225 /* Send basic accept (BA_ACC) to the abort requester */ 14226 lpfc_sli4_seq_abort_rsp(phba, &fc_hdr); 14227 } 14228 14229 /** 14230 * lpfc_seq_complete - Indicates if a sequence is complete 14231 * @dmabuf: pointer to a dmabuf that describes the FC sequence 14232 * 14233 * This function checks the sequence, starting with the frame described by 14234 * @dmabuf, to see if all the frames associated with this sequence are present. 14235 * The frames associated with this sequence are linked to the @dmabuf using the 14236 * dbuf list. This function looks for three major things. 1) That the first frame 14237 * has a sequence count of zero. 2) That there is a frame with the last-frame-of-sequence bit 14238 * set. 3) That there are no holes in the sequence count. The function will 14239 * return 1 when the sequence is complete, otherwise it will return 0.
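 * For example (illustrative values only): frames carrying SEQ_CNT 0, 1 and 2,
 * with the End_Sequence bit set in the last frame's F_CTL, form a complete
 * sequence, while SEQ_CNT 0 followed by 2 leaves a hole and the sequence is
 * reported incomplete.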
14240 **/ 14241 static int 14242 lpfc_seq_complete(struct hbq_dmabuf *dmabuf) 14243 { 14244 struct fc_frame_header *hdr; 14245 struct lpfc_dmabuf *d_buf; 14246 struct hbq_dmabuf *seq_dmabuf; 14247 uint32_t fctl; 14248 int seq_count = 0; 14249 14250 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 14251 /* make sure first fame of sequence has a sequence count of zero */ 14252 if (hdr->fh_seq_cnt != seq_count) 14253 return 0; 14254 fctl = (hdr->fh_f_ctl[0] << 16 | 14255 hdr->fh_f_ctl[1] << 8 | 14256 hdr->fh_f_ctl[2]); 14257 /* If last frame of sequence we can return success. */ 14258 if (fctl & FC_FC_END_SEQ) 14259 return 1; 14260 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) { 14261 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 14262 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 14263 /* If there is a hole in the sequence count then fail. */ 14264 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt)) 14265 return 0; 14266 fctl = (hdr->fh_f_ctl[0] << 16 | 14267 hdr->fh_f_ctl[1] << 8 | 14268 hdr->fh_f_ctl[2]); 14269 /* If last frame of sequence we can return success. */ 14270 if (fctl & FC_FC_END_SEQ) 14271 return 1; 14272 } 14273 return 0; 14274 } 14275 14276 /** 14277 * lpfc_prep_seq - Prep sequence for ULP processing 14278 * @vport: Pointer to the vport on which this sequence was received 14279 * @dmabuf: pointer to a dmabuf that describes the FC sequence 14280 * 14281 * This function takes a sequence, described by a list of frames, and creates 14282 * a list of iocbq structures to describe the sequence. This iocbq list will be 14283 * used to issue to the generic unsolicited sequence handler. This routine 14284 * returns a pointer to the first iocbq in the list. If the function is unable 14285 * to allocate an iocbq then it throw out the received frames that were not 14286 * able to be described and return a pointer to the first iocbq. If unable to 14287 * allocate any iocbqs (including the first) this function will return NULL. 14288 **/ 14289 static struct lpfc_iocbq * 14290 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) 14291 { 14292 struct hbq_dmabuf *hbq_buf; 14293 struct lpfc_dmabuf *d_buf, *n_buf; 14294 struct lpfc_iocbq *first_iocbq, *iocbq; 14295 struct fc_frame_header *fc_hdr; 14296 uint32_t sid; 14297 uint32_t len, tot_len; 14298 struct ulp_bde64 *pbde; 14299 14300 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 14301 /* remove from receive buffer list */ 14302 list_del_init(&seq_dmabuf->hbuf.list); 14303 lpfc_update_rcv_time_stamp(vport); 14304 /* get the Remote Port's SID */ 14305 sid = sli4_sid_from_fc_hdr(fc_hdr); 14306 tot_len = 0; 14307 /* Get an iocbq struct to fill in. */ 14308 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 14309 if (first_iocbq) { 14310 /* Initialize the first IOCB. */ 14311 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; 14312 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 14313 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 14314 first_iocbq->iocb.ulpContext = NO_XRI; 14315 first_iocbq->iocb.unsli3.rcvsli3.ox_id = 14316 be16_to_cpu(fc_hdr->fh_ox_id); 14317 /* iocbq is prepped for internal consumption. Physical vpi. 
*/ 14318 first_iocbq->iocb.unsli3.rcvsli3.vpi = 14319 vport->phba->vpi_ids[vport->vpi]; 14320 /* put the first buffer into the first IOCBq */ 14321 first_iocbq->context2 = &seq_dmabuf->dbuf; 14322 first_iocbq->context3 = NULL; 14323 first_iocbq->iocb.ulpBdeCount = 1; 14324 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 14325 LPFC_DATA_BUF_SIZE; 14326 first_iocbq->iocb.un.rcvels.remoteID = sid; 14327 tot_len = bf_get(lpfc_rcqe_length, 14328 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 14329 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 14330 } 14331 iocbq = first_iocbq; 14332 /* 14333 * Each IOCBq can have two Buffers assigned, so go through the list 14334 * of buffers for this sequence and save two buffers in each IOCBq 14335 */ 14336 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { 14337 if (!iocbq) { 14338 lpfc_in_buf_free(vport->phba, d_buf); 14339 continue; 14340 } 14341 if (!iocbq->context3) { 14342 iocbq->context3 = d_buf; 14343 iocbq->iocb.ulpBdeCount++; 14344 pbde = (struct ulp_bde64 *) 14345 &iocbq->iocb.unsli3.sli3Words[4]; 14346 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; 14347 14348 /* We need to get the size out of the right CQE */ 14349 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 14350 len = bf_get(lpfc_rcqe_length, 14351 &hbq_buf->cq_event.cqe.rcqe_cmpl); 14352 iocbq->iocb.unsli3.rcvsli3.acc_len += len; 14353 tot_len += len; 14354 } else { 14355 iocbq = lpfc_sli_get_iocbq(vport->phba); 14356 if (!iocbq) { 14357 if (first_iocbq) { 14358 first_iocbq->iocb.ulpStatus = 14359 IOSTAT_FCP_RSP_ERROR; 14360 first_iocbq->iocb.un.ulpWord[4] = 14361 IOERR_NO_RESOURCES; 14362 } 14363 lpfc_in_buf_free(vport->phba, d_buf); 14364 continue; 14365 } 14366 iocbq->context2 = d_buf; 14367 iocbq->context3 = NULL; 14368 iocbq->iocb.ulpBdeCount = 1; 14369 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 14370 LPFC_DATA_BUF_SIZE; 14371 14372 /* We need to get the size out of the right CQE */ 14373 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 14374 len = bf_get(lpfc_rcqe_length, 14375 &hbq_buf->cq_event.cqe.rcqe_cmpl); 14376 tot_len += len; 14377 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 14378 14379 iocbq->iocb.un.rcvels.remoteID = sid; 14380 list_add_tail(&iocbq->list, &first_iocbq->list); 14381 } 14382 } 14383 return first_iocbq; 14384 } 14385 14386 static void 14387 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, 14388 struct hbq_dmabuf *seq_dmabuf) 14389 { 14390 struct fc_frame_header *fc_hdr; 14391 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb; 14392 struct lpfc_hba *phba = vport->phba; 14393 14394 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 14395 iocbq = lpfc_prep_seq(vport, seq_dmabuf); 14396 if (!iocbq) { 14397 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14398 "2707 Ring %d handler: Failed to allocate " 14399 "iocb Rctl x%x Type x%x received\n", 14400 LPFC_ELS_RING, 14401 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 14402 return; 14403 } 14404 if (!lpfc_complete_unsol_iocb(phba, 14405 &phba->sli.ring[LPFC_ELS_RING], 14406 iocbq, fc_hdr->fh_r_ctl, 14407 fc_hdr->fh_type)) 14408 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14409 "2540 Ring %d handler: unexpected Rctl " 14410 "x%x Type x%x received\n", 14411 LPFC_ELS_RING, 14412 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 14413 14414 /* Free iocb created in lpfc_prep_seq */ 14415 list_for_each_entry_safe(curr_iocb, next_iocb, 14416 &iocbq->list, list) { 14417 list_del_init(&curr_iocb->list); 14418 lpfc_sli_release_iocbq(phba, curr_iocb); 14419 } 14420 lpfc_sli_release_iocbq(phba, iocbq); 14421 } 14422 14423 /** 
14424 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware 14425 * @phba: Pointer to HBA context object. 14426 * 14427 * This function is called with no lock held. This function processes all 14428 * the received buffers and gives it to upper layers when a received buffer 14429 * indicates that it is the final frame in the sequence. The interrupt 14430 * service routine processes received buffers at interrupt contexts and adds 14431 * received dma buffers to the rb_pend_list queue and signals the worker thread. 14432 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the 14433 * appropriate receive function when the final frame in a sequence is received. 14434 **/ 14435 void 14436 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, 14437 struct hbq_dmabuf *dmabuf) 14438 { 14439 struct hbq_dmabuf *seq_dmabuf; 14440 struct fc_frame_header *fc_hdr; 14441 struct lpfc_vport *vport; 14442 uint32_t fcfi; 14443 14444 /* Process each received buffer */ 14445 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 14446 /* check to see if this a valid type of frame */ 14447 if (lpfc_fc_frame_check(phba, fc_hdr)) { 14448 lpfc_in_buf_free(phba, &dmabuf->dbuf); 14449 return; 14450 } 14451 if ((bf_get(lpfc_cqe_code, 14452 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)) 14453 fcfi = bf_get(lpfc_rcqe_fcf_id_v1, 14454 &dmabuf->cq_event.cqe.rcqe_cmpl); 14455 else 14456 fcfi = bf_get(lpfc_rcqe_fcf_id, 14457 &dmabuf->cq_event.cqe.rcqe_cmpl); 14458 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); 14459 if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) { 14460 /* throw out the frame */ 14461 lpfc_in_buf_free(phba, &dmabuf->dbuf); 14462 return; 14463 } 14464 /* Handle the basic abort sequence (BA_ABTS) event */ 14465 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { 14466 lpfc_sli4_handle_unsol_abort(vport, dmabuf); 14467 return; 14468 } 14469 14470 /* Link this frame */ 14471 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); 14472 if (!seq_dmabuf) { 14473 /* unable to add frame to vport - throw it out */ 14474 lpfc_in_buf_free(phba, &dmabuf->dbuf); 14475 return; 14476 } 14477 /* If not last frame in sequence continue processing frames. */ 14478 if (!lpfc_seq_complete(seq_dmabuf)) 14479 return; 14480 14481 /* Send the complete sequence to the upper layer protocol */ 14482 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); 14483 } 14484 14485 /** 14486 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port 14487 * @phba: pointer to lpfc hba data structure. 14488 * 14489 * This routine is invoked to post rpi header templates to the 14490 * HBA consistent with the SLI-4 interface spec. This routine 14491 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 14492 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 14493 * 14494 * This routine does not require any locks. It's usage is expected 14495 * to be driver load or reset recovery when the driver is 14496 * sequential. 14497 * 14498 * Return codes 14499 * 0 - successful 14500 * -EIO - The mailbox failed to complete successfully. 14501 * When this error occurs, the driver is not guaranteed 14502 * to have any rpi regions posted to the device and 14503 * must either attempt to repost the regions or take a 14504 * fatal error. 14505 **/ 14506 int 14507 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) 14508 { 14509 struct lpfc_rpi_hdr *rpi_page; 14510 uint32_t rc = 0; 14511 uint16_t lrpi = 0; 14512 14513 /* SLI4 ports that support extents do not require RPI headers. 
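 * (If extents are in use this routine returns -EIO below; such ports do not
 * post rpi header pages.)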
*/ 14514 if (!phba->sli4_hba.rpi_hdrs_in_use) 14515 goto exit; 14516 if (phba->sli4_hba.extents_in_use) 14517 return -EIO; 14518 14519 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 14520 /* 14521 * Assign the rpi headers a physical rpi only if the driver 14522 * has not initialized those resources. A port reset only 14523 * needs the headers posted. 14524 */ 14525 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) != 14526 LPFC_RPI_RSRC_RDY) 14527 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 14528 14529 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); 14530 if (rc != MBX_SUCCESS) { 14531 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14532 "2008 Error %d posting all rpi " 14533 "headers\n", rc); 14534 rc = -EIO; 14535 break; 14536 } 14537 } 14538 14539 exit: 14540 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 14541 LPFC_RPI_RSRC_RDY); 14542 return rc; 14543 } 14544 14545 /** 14546 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port 14547 * @phba: pointer to lpfc hba data structure. 14548 * @rpi_page: pointer to the rpi memory region. 14549 * 14550 * This routine is invoked to post a single rpi header to the 14551 * HBA consistent with the SLI-4 interface spec. This memory region 14552 * maps up to 64 rpi context regions. 14553 * 14554 * Return codes 14555 * 0 - successful 14556 * -ENOMEM - No available memory 14557 * -EIO - The mailbox failed to complete successfully. 14558 **/ 14559 int 14560 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) 14561 { 14562 LPFC_MBOXQ_t *mboxq; 14563 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; 14564 uint32_t rc = 0; 14565 uint32_t shdr_status, shdr_add_status; 14566 union lpfc_sli4_cfg_shdr *shdr; 14567 14568 /* SLI4 ports that support extents do not require RPI headers. */ 14569 if (!phba->sli4_hba.rpi_hdrs_in_use) 14570 return rc; 14571 if (phba->sli4_hba.extents_in_use) 14572 return -EIO; 14573 14574 /* The port is notified of the header region via a mailbox command. */ 14575 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14576 if (!mboxq) { 14577 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14578 "2001 Unable to allocate memory for issuing " 14579 "SLI_CONFIG_SPECIAL mailbox command\n"); 14580 return -ENOMEM; 14581 } 14582 14583 /* Post all rpi memory regions to the port. */ 14584 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl; 14585 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 14586 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, 14587 sizeof(struct lpfc_mbx_post_hdr_tmpl) - 14588 sizeof(struct lpfc_sli4_cfg_mhdr), 14589 LPFC_SLI4_MBX_EMBED); 14590 14591 14592 /* Post the physical rpi to the port for this rpi header. 
*/ 14593 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, 14594 rpi_page->start_rpi); 14595 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, 14596 hdr_tmpl, rpi_page->page_count); 14597 14598 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); 14599 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); 14600 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 14601 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; 14602 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14603 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14604 if (rc != MBX_TIMEOUT) 14605 mempool_free(mboxq, phba->mbox_mem_pool); 14606 if (shdr_status || shdr_add_status || rc) { 14607 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14608 "2514 POST_RPI_HDR mailbox failed with " 14609 "status x%x add_status x%x, mbx status x%x\n", 14610 shdr_status, shdr_add_status, rc); 14611 rc = -ENXIO; 14612 } 14613 return rc; 14614 } 14615 14616 /** 14617 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range 14618 * @phba: pointer to lpfc hba data structure. 14619 * 14620 * This routine is invoked to post rpi header templates to the 14621 * HBA consistent with the SLI-4 interface spec. This routine 14622 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 14623 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 14624 * 14625 * Returns 14626 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 14627 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 14628 **/ 14629 int 14630 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) 14631 { 14632 unsigned long rpi; 14633 uint16_t max_rpi, rpi_limit; 14634 uint16_t rpi_remaining, lrpi = 0; 14635 struct lpfc_rpi_hdr *rpi_hdr; 14636 14637 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 14638 rpi_limit = phba->sli4_hba.next_rpi; 14639 14640 /* 14641 * Fetch the next logical rpi. Because this index is logical, 14642 * the driver starts at 0 each time. 14643 */ 14644 spin_lock_irq(&phba->hbalock); 14645 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0); 14646 if (rpi >= rpi_limit) 14647 rpi = LPFC_RPI_ALLOC_ERROR; 14648 else { 14649 set_bit(rpi, phba->sli4_hba.rpi_bmask); 14650 phba->sli4_hba.max_cfg_param.rpi_used++; 14651 phba->sli4_hba.rpi_count++; 14652 } 14653 14654 /* 14655 * Don't try to allocate more rpi header regions if the device limit 14656 * has been exhausted. 14657 */ 14658 if ((rpi == LPFC_RPI_ALLOC_ERROR) && 14659 (phba->sli4_hba.rpi_count >= max_rpi)) { 14660 spin_unlock_irq(&phba->hbalock); 14661 return rpi; 14662 } 14663 14664 /* 14665 * RPI header postings are not required for SLI4 ports capable of 14666 * extents. 14667 */ 14668 if (!phba->sli4_hba.rpi_hdrs_in_use) { 14669 spin_unlock_irq(&phba->hbalock); 14670 return rpi; 14671 } 14672 14673 /* 14674 * If the driver is running low on rpi resources, allocate another 14675 * page now. Note that the next_rpi value is used because 14676 * it represents how many are actually in use whereas max_rpi notes 14677 * how many are supported max by the device. 
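 * For instance (numbers assumed purely for illustration): with next_rpi at 64
 * and rpi_count at 60 only four logical rpis remain; if that falls below
 * LPFC_RPI_LOW_WATER_MARK another rpi header page is created and posted below.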
14678 */ 14679 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count; 14680 spin_unlock_irq(&phba->hbalock); 14681 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { 14682 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 14683 if (!rpi_hdr) { 14684 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14685 "2002 Error Could not grow rpi " 14686 "count\n"); 14687 } else { 14688 lrpi = rpi_hdr->start_rpi; 14689 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 14690 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr); 14691 } 14692 } 14693 14694 return rpi; 14695 } 14696 14697 /** 14698 * __lpfc_sli4_free_rpi - Release an rpi for reuse. 14699 * @phba: pointer to lpfc hba data structure. * @rpi: the rpi to release. 14700 * 14701 * This routine is invoked to release an rpi to the pool of 14702 * available rpis maintained by the driver. This is the lock-free worker; lpfc_sli4_free_rpi() below wraps it with the hbalock held. 14703 **/ 14704 void 14705 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 14706 { 14707 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) { 14708 phba->sli4_hba.rpi_count--; 14709 phba->sli4_hba.max_cfg_param.rpi_used--; 14710 } 14711 } 14712 14713 /** 14714 * lpfc_sli4_free_rpi - Release an rpi for reuse. 14715 * @phba: pointer to lpfc hba data structure. * @rpi: the rpi to release. 14716 * 14717 * This routine is invoked to release an rpi to the pool of 14718 * available rpis maintained by the driver. 14719 **/ 14720 void 14721 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 14722 { 14723 spin_lock_irq(&phba->hbalock); 14724 __lpfc_sli4_free_rpi(phba, rpi); 14725 spin_unlock_irq(&phba->hbalock); 14726 } 14727 14728 /** 14729 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region 14730 * @phba: pointer to lpfc hba data structure. 14731 * 14732 * This routine is invoked to free the memory regions that 14733 * provide rpis via a bitmask (the rpi bitmask and the rpi id array). 14734 **/ 14735 void 14736 lpfc_sli4_remove_rpis(struct lpfc_hba *phba) 14737 { 14738 kfree(phba->sli4_hba.rpi_bmask); 14739 kfree(phba->sli4_hba.rpi_ids); 14740 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 14741 } 14742 14743 /** 14744 * lpfc_sli4_resume_rpi - Notify the port to resume a node's rpi 14745 * @ndlp: pointer to the node whose rpi is to be resumed. 14746 * 14747 * This routine is invoked to issue a RESUME_RPI mailbox command to the 14748 * port for the rpi associated with @ndlp. 14749 **/ 14750 int 14751 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp) 14752 { 14753 LPFC_MBOXQ_t *mboxq; 14754 struct lpfc_hba *phba = ndlp->phba; 14755 int rc; 14756 14757 /* The port is notified of the rpi resume via a mailbox command. */ 14758 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14759 if (!mboxq) 14760 return -ENOMEM; 14761 14762 /* Construct and issue the RESUME_RPI mailbox command. */ 14763 lpfc_resume_rpi(mboxq, ndlp); 14764 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 14765 if (rc == MBX_NOT_FINISHED) { 14766 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14767 "2010 Resume RPI Mailbox failed " 14768 "status %d, mbxStatus x%x\n", rc, 14769 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 14770 mempool_free(mboxq, phba->mbox_mem_pool); 14771 return -EIO; 14772 } 14773 return 0; 14774 } 14775 14776 /** 14777 * lpfc_sli4_init_vpi - Initialize a vpi with the port 14778 * @vport: Pointer to the vport for which the vpi is being initialized 14779 * 14780 * This routine is invoked to activate a vpi with the port.
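 * The INIT_VPI mailbox command is built with lpfc_init_vpi() and issued
 * synchronously through lpfc_sli_issue_mbox_wait() using the timeout obtained
 * from lpfc_mbox_tmo_val().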
14781 * 14782 * Returns: 14783 * 0 success 14784 * -Evalue otherwise 14785 **/ 14786 int 14787 lpfc_sli4_init_vpi(struct lpfc_vport *vport) 14788 { 14789 LPFC_MBOXQ_t *mboxq; 14790 int rc = 0; 14791 int retval = MBX_SUCCESS; 14792 uint32_t mbox_tmo; 14793 struct lpfc_hba *phba = vport->phba; 14794 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14795 if (!mboxq) 14796 return -ENOMEM; 14797 lpfc_init_vpi(phba, mboxq, vport->vpi); 14798 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 14799 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 14800 if (rc != MBX_SUCCESS) { 14801 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 14802 "2022 INIT VPI Mailbox failed " 14803 "status %d, mbxStatus x%x\n", rc, 14804 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 14805 retval = -EIO; 14806 } 14807 if (rc != MBX_TIMEOUT) 14808 mempool_free(mboxq, vport->phba->mbox_mem_pool); 14809 14810 return retval; 14811 } 14812 14813 /** 14814 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. 14815 * @phba: pointer to lpfc hba data structure. 14816 * @mboxq: Pointer to mailbox object. 14817 * 14818 * This routine is invoked to manually add a single FCF record. The caller 14819 * must pass a completely initialized FCF_Record. This routine takes 14820 * care of the nonembedded mailbox operations. 14821 **/ 14822 static void 14823 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 14824 { 14825 void *virt_addr; 14826 union lpfc_sli4_cfg_shdr *shdr; 14827 uint32_t shdr_status, shdr_add_status; 14828 14829 virt_addr = mboxq->sge_array->addr[0]; 14830 /* The IOCTL status is embedded in the mailbox subheader. */ 14831 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; 14832 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14833 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14834 14835 if ((shdr_status || shdr_add_status) && 14836 (shdr_status != STATUS_FCF_IN_USE)) 14837 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14838 "2558 ADD_FCF_RECORD mailbox failed with " 14839 "status x%x add_status x%x\n", 14840 shdr_status, shdr_add_status); 14841 14842 lpfc_sli4_mbox_cmd_free(phba, mboxq); 14843 } 14844 14845 /** 14846 * lpfc_sli4_add_fcf_record - Manually add an FCF Record. 14847 * @phba: pointer to lpfc hba data structure. 14848 * @fcf_record: pointer to the initialized fcf record to add. 14849 * 14850 * This routine is invoked to manually add a single FCF record. The caller 14851 * must pass a completely initialized FCF_Record. This routine takes 14852 * care of the nonembedded mailbox operations. 
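 * The non-embedded payload is laid out as the SLI4 config header, one word
 * holding the fcf_index, and then the FCF record itself, copied with
 * lpfc_sli_pcimem_bcopy() so the data is endian correct.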
14853 **/ 14854 int 14855 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) 14856 { 14857 int rc = 0; 14858 LPFC_MBOXQ_t *mboxq; 14859 uint8_t *bytep; 14860 void *virt_addr; 14861 dma_addr_t phys_addr; 14862 struct lpfc_mbx_sge sge; 14863 uint32_t alloc_len, req_len; 14864 uint32_t fcfindex; 14865 14866 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14867 if (!mboxq) { 14868 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14869 "2009 Failed to allocate mbox for ADD_FCF cmd\n"); 14870 return -ENOMEM; 14871 } 14872 14873 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + 14874 sizeof(uint32_t); 14875 14876 /* Allocate DMA memory and set up the non-embedded mailbox command */ 14877 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 14878 LPFC_MBOX_OPCODE_FCOE_ADD_FCF, 14879 req_len, LPFC_SLI4_MBX_NEMBED); 14880 if (alloc_len < req_len) { 14881 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14882 "2523 Allocated DMA memory size (x%x) is " 14883 "less than the requested DMA memory " 14884 "size (x%x)\n", alloc_len, req_len); 14885 lpfc_sli4_mbox_cmd_free(phba, mboxq); 14886 return -ENOMEM; 14887 } 14888 14889 /* 14890 * Get the first SGE entry from the non-embedded DMA memory. This 14891 * routine only uses a single SGE. 14892 */ 14893 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 14894 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); 14895 virt_addr = mboxq->sge_array->addr[0]; 14896 /* 14897 * Configure the FCF record for FCFI 0. This is the driver's 14898 * hardcoded default and gets used in nonFIP mode. 14899 */ 14900 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); 14901 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 14902 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); 14903 14904 /* 14905 * Copy the fcf_index and the FCF Record Data. The data starts after 14906 * the FCoE header plus word10. The data copy needs to be endian 14907 * correct. 14908 */ 14909 bytep += sizeof(uint32_t); 14910 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); 14911 mboxq->vport = phba->pport; 14912 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; 14913 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 14914 if (rc == MBX_NOT_FINISHED) { 14915 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14916 "2515 ADD_FCF_RECORD mailbox failed with " 14917 "status 0x%x\n", rc); 14918 lpfc_sli4_mbox_cmd_free(phba, mboxq); 14919 rc = -EIO; 14920 } else 14921 rc = 0; 14922 14923 return rc; 14924 } 14925 14926 /** 14927 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. 14928 * @phba: pointer to lpfc hba data structure. 14929 * @fcf_record: pointer to the fcf record to write the default data. 14930 * @fcf_index: FCF table entry index. 14931 * 14932 * This routine is invoked to build the driver's default FCF record. The 14933 * values used are hardcoded. This routine handles memory initialization. 
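 * A minimal usage sketch (the pairing below is assumed for illustration, not
 * taken from this file):
 *     struct fcf_record fcf_record;
 *     lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, 0);
 *     lpfc_sli4_add_fcf_record(phba, &fcf_record);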
14934 * 14935 **/ 14936 void 14937 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, 14938 struct fcf_record *fcf_record, 14939 uint16_t fcf_index) 14940 { 14941 memset(fcf_record, 0, sizeof(struct fcf_record)); 14942 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE; 14943 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER; 14944 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY; 14945 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]); 14946 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]); 14947 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]); 14948 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3); 14949 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4); 14950 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5); 14951 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]); 14952 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); 14953 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); 14954 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); 14955 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1); 14956 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); 14957 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, 14958 LPFC_FCF_FPMA | LPFC_FCF_SPMA); 14959 /* Set the VLAN bit map */ 14960 if (phba->valid_vlan) { 14961 fcf_record->vlan_bitmap[phba->vlan_id / 8] 14962 = 1 << (phba->vlan_id % 8); 14963 } 14964 } 14965 14966 /** 14967 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan. 14968 * @phba: pointer to lpfc hba data structure. 14969 * @fcf_index: FCF table entry offset. 14970 * 14971 * This routine is invoked to scan the entire FCF table by reading FCF 14972 * record and processing it one at a time starting from the @fcf_index 14973 * for initial FCF discovery or fast FCF failover rediscovery. 14974 * 14975 * Return 0 if the mailbox command is submitted successfully, none 0 14976 * otherwise. 
14977 **/ 14978 int 14979 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 14980 { 14981 int rc = 0, error; 14982 LPFC_MBOXQ_t *mboxq; 14983 14984 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; 14985 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14986 if (!mboxq) { 14987 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14988 "2000 Failed to allocate mbox for " 14989 "READ_FCF cmd\n"); 14990 error = -ENOMEM; 14991 goto fail_fcf_scan; 14992 } 14993 /* Construct the read FCF record mailbox command */ 14994 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 14995 if (rc) { 14996 error = -EINVAL; 14997 goto fail_fcf_scan; 14998 } 14999 /* Issue the mailbox command asynchronously */ 15000 mboxq->vport = phba->pport; 15001 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 15002 15003 spin_lock_irq(&phba->hbalock); 15004 phba->hba_flag |= FCF_TS_INPROG; 15005 spin_unlock_irq(&phba->hbalock); 15006 15007 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 15008 if (rc == MBX_NOT_FINISHED) 15009 error = -EIO; 15010 else { 15011 /* Reset eligible FCF count for new scan */ 15012 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 15013 phba->fcf.eligible_fcf_cnt = 0; 15014 error = 0; 15015 } 15016 fail_fcf_scan: 15017 if (error) { 15018 if (mboxq) 15019 lpfc_sli4_mbox_cmd_free(phba, mboxq); 15020 /* FCF scan failed, clear FCF_TS_INPROG flag */ 15021 spin_lock_irq(&phba->hbalock); 15022 phba->hba_flag &= ~FCF_TS_INPROG; 15023 spin_unlock_irq(&phba->hbalock); 15024 } 15025 return error; 15026 } 15027 15028 /** 15029 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. 15030 * @phba: pointer to lpfc hba data structure. 15031 * @fcf_index: FCF table entry offset. 15032 * 15033 * This routine is invoked to read an FCF record indicated by @fcf_index 15034 * and to use it for FLOGI roundrobin FCF failover. 15035 * 15036 * Return 0 if the mailbox command is submitted successfully, none 0 15037 * otherwise. 15038 **/ 15039 int 15040 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 15041 { 15042 int rc = 0, error; 15043 LPFC_MBOXQ_t *mboxq; 15044 15045 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15046 if (!mboxq) { 15047 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 15048 "2763 Failed to allocate mbox for " 15049 "READ_FCF cmd\n"); 15050 error = -ENOMEM; 15051 goto fail_fcf_read; 15052 } 15053 /* Construct the read FCF record mailbox command */ 15054 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 15055 if (rc) { 15056 error = -EINVAL; 15057 goto fail_fcf_read; 15058 } 15059 /* Issue the mailbox command asynchronously */ 15060 mboxq->vport = phba->pport; 15061 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; 15062 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 15063 if (rc == MBX_NOT_FINISHED) 15064 error = -EIO; 15065 else 15066 error = 0; 15067 15068 fail_fcf_read: 15069 if (error && mboxq) 15070 lpfc_sli4_mbox_cmd_free(phba, mboxq); 15071 return error; 15072 } 15073 15074 /** 15075 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. 15076 * @phba: pointer to lpfc hba data structure. 15077 * @fcf_index: FCF table entry offset. 15078 * 15079 * This routine is invoked to read an FCF record indicated by @fcf_index to 15080 * determine whether it's eligible for FLOGI roundrobin failover list. 15081 * 15082 * Return 0 if the mailbox command is submitted successfully, none 0 15083 * otherwise. 
15084 **/ 15085 int 15086 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 15087 { 15088 int rc = 0, error; 15089 LPFC_MBOXQ_t *mboxq; 15090 15091 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15092 if (!mboxq) { 15093 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 15094 "2758 Failed to allocate mbox for " 15095 "READ_FCF cmd\n"); 15096 error = -ENOMEM; 15097 goto fail_fcf_read; 15098 } 15099 /* Construct the read FCF record mailbox command */ 15100 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 15101 if (rc) { 15102 error = -EINVAL; 15103 goto fail_fcf_read; 15104 } 15105 /* Issue the mailbox command asynchronously */ 15106 mboxq->vport = phba->pport; 15107 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec; 15108 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 15109 if (rc == MBX_NOT_FINISHED) 15110 error = -EIO; 15111 else 15112 error = 0; 15113 15114 fail_fcf_read: 15115 if (error && mboxq) 15116 lpfc_sli4_mbox_cmd_free(phba, mboxq); 15117 return error; 15118 } 15119 15120 /** 15121 * lpfc_check_next_fcf_pri_level - Repopulate rr_bmask with the next FCF priority group 15122 * @phba: pointer to the lpfc_hba struct for this port. 15123 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get 15124 * routine when the rr_bmask is empty. The FCF indices are put into the 15125 * rr_bmask based on their priority level, starting from the highest priority 15126 * and working down to the lowest. The most likely FCF candidate will be in the highest 15127 * priority group. When this routine is called it searches the fcf_pri list for the 15128 * next lowest priority group and repopulates the rr_bmask with only those 15129 * fcf_indexes. 15130 * Returns: 15131 * 1=success 0=failure 15132 **/ 15133 int 15134 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba) 15135 { 15136 uint16_t next_fcf_pri; 15137 uint16_t last_index; 15138 struct lpfc_fcf_pri *fcf_pri; 15139 int rc; 15140 int ret = 0; 15141 15142 last_index = find_first_bit(phba->fcf.fcf_rr_bmask, 15143 LPFC_SLI4_FCF_TBL_INDX_MAX); 15144 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 15145 "3060 Last IDX %d\n", last_index); 15146 if (list_empty(&phba->fcf.fcf_pri_list)) { 15147 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 15148 "3061 Last IDX %d\n", last_index); 15149 return 0; /* Empty rr list */ 15150 } 15151 next_fcf_pri = 0; 15152 /* 15153 * Clear the rr_bmask and set all of the bits that are at this 15154 * priority. 15155 */ 15156 memset(phba->fcf.fcf_rr_bmask, 0, 15157 sizeof(*phba->fcf.fcf_rr_bmask)); 15158 spin_lock_irq(&phba->hbalock); 15159 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 15160 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED) 15161 continue; 15162 /* 15163 * The first priority that has not failed FLOGI 15164 * will be the highest. 15165 */ 15166 if (!next_fcf_pri) 15167 next_fcf_pri = fcf_pri->fcf_rec.priority; 15168 spin_unlock_irq(&phba->hbalock); 15169 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 15170 rc = lpfc_sli4_fcf_rr_index_set(phba, 15171 fcf_pri->fcf_rec.fcf_index); 15172 if (rc) 15173 return 0; 15174 } 15175 spin_lock_irq(&phba->hbalock); 15176 } 15177 /* 15178 * If next_fcf_pri was not set above and the list is not empty then 15179 * FLOGI has failed on all of them. So reset the FLOGI-failed flag 15180 * and start at the beginning. 15181 */ 15182 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) { 15183 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 15184 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED; 15185 /* 15186 * The first priority that has not failed FLOGI 15187 * will be the highest.
15188 */ 15189 if (!next_fcf_pri) 15190 next_fcf_pri = fcf_pri->fcf_rec.priority; 15191 spin_unlock_irq(&phba->hbalock); 15192 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 15193 rc = lpfc_sli4_fcf_rr_index_set(phba, 15194 fcf_pri->fcf_rec.fcf_index); 15195 if (rc) 15196 return 0; 15197 } 15198 spin_lock_irq(&phba->hbalock); 15199 } 15200 } else 15201 ret = 1; 15202 spin_unlock_irq(&phba->hbalock); 15203 15204 return ret; 15205 } 15206 /** 15207 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index 15208 * @phba: pointer to lpfc hba data structure. 15209 * 15210 * This routine is to get the next eligible FCF record index in a round 15211 * robin fashion. If the next eligible FCF record index equals to the 15212 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 15213 * shall be returned, otherwise, the next eligible FCF record's index 15214 * shall be returned. 15215 **/ 15216 uint16_t 15217 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) 15218 { 15219 uint16_t next_fcf_index; 15220 15221 /* Search start from next bit of currently registered FCF index */ 15222 next_priority: 15223 next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) % 15224 LPFC_SLI4_FCF_TBL_INDX_MAX; 15225 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 15226 LPFC_SLI4_FCF_TBL_INDX_MAX, 15227 next_fcf_index); 15228 15229 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 15230 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 15231 /* 15232 * If we have wrapped then we need to clear the bits that 15233 * have been tested so that we can detect when we should 15234 * change the priority level. 15235 */ 15236 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 15237 LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 15238 } 15239 15240 15241 /* Check roundrobin failover list empty condition */ 15242 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX || 15243 next_fcf_index == phba->fcf.current_rec.fcf_indx) { 15244 /* 15245 * If next fcf index is not found check if there are lower 15246 * Priority level fcf's in the fcf_priority list. 15247 * Set up the rr_bmask with all of the avaiable fcf bits 15248 * at that level and continue the selection process. 15249 */ 15250 if (lpfc_check_next_fcf_pri_level(phba)) 15251 goto next_priority; 15252 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 15253 "2844 No roundrobin failover FCF available\n"); 15254 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) 15255 return LPFC_FCOE_FCF_NEXT_NONE; 15256 else { 15257 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 15258 "3063 Only FCF available idx %d, flag %x\n", 15259 next_fcf_index, 15260 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag); 15261 return next_fcf_index; 15262 } 15263 } 15264 15265 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX && 15266 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag & 15267 LPFC_FCF_FLOGI_FAILED) 15268 goto next_priority; 15269 15270 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 15271 "2845 Get next roundrobin failover FCF (x%x)\n", 15272 next_fcf_index); 15273 15274 return next_fcf_index; 15275 } 15276 15277 /** 15278 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index 15279 * @phba: pointer to lpfc hba data structure. 15280 * 15281 * This routine sets the FCF record index in to the eligible bmask for 15282 * roundrobin failover search. It checks to make sure that the index 15283 * does not go beyond the range of the driver allocated bmask dimension 15284 * before setting the bit. 

/**
 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get the next eligible FCF record index in a round
 * robin fashion. If the next eligible FCF record index equals the
 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
 * shall be returned, otherwise, the next eligible FCF record's index
 * shall be returned.
 **/
uint16_t
lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
{
	uint16_t next_fcf_index;

	/* Search start from next bit of currently registered FCF index */
next_priority:
	next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
					LPFC_SLI4_FCF_TBL_INDX_MAX;
	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
				       LPFC_SLI4_FCF_TBL_INDX_MAX,
				       next_fcf_index);

	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		/*
		 * If we have wrapped then we need to clear the bits that
		 * have been tested so that we can detect when we should
		 * change the priority level.
		 */
		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
	}

	/* Check roundrobin failover list empty condition */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
		next_fcf_index == phba->fcf.current_rec.fcf_indx) {
		/*
		 * If next fcf index is not found check if there are lower
		 * Priority level fcf's in the fcf_priority list.
		 * Set up the rr_bmask with all of the available fcf bits
		 * at that level and continue the selection process.
		 */
		if (lpfc_check_next_fcf_pri_level(phba))
			goto next_priority;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2844 No roundrobin failover FCF available\n");
		if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
			return LPFC_FCOE_FCF_NEXT_NONE;
		else {
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"3063 Only FCF available idx %d, flag %x\n",
				next_fcf_index,
				phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
			return next_fcf_index;
		}
	}

	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
		phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
		LPFC_FCF_FLOGI_FAILED)
		goto next_priority;

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2845 Get next roundrobin failover FCF (x%x)\n",
			next_fcf_index);

	return next_fcf_index;
}

/**
 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the FCF record index to set in the roundrobin bmask.
 *
 * This routine sets the FCF record index into the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before setting the bit.
 *
 * Returns 0 if the index bit is successfully set, otherwise, it returns
 * -EINVAL.
 **/
int
lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
{
	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2610 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return -EINVAL;
	}
	/* Set the eligible FCF record index bmask */
	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2790 Set FCF (x%x) to roundrobin FCF failover "
			"bmask\n", fcf_index);

	return 0;
}

/**
 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the FCF record index to clear from the roundrobin bmask.
 *
 * This routine clears the FCF record index from the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before clearing the bit.
 **/
void
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *fcf_pri;

	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2762 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return;
	}
	/* Clear the eligible FCF record index bmask */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
		if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
			list_del_init(&fcf_pri->list);
			break;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2791 Clear FCF (x%x) from roundrobin failover "
			"bmask\n", fcf_index);
}
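
/*
 * Illustrative sketch (not built with the driver): the round-robin
 * next-index search above in miniature. Search forward from the slot after
 * the current one, wrap to the start of the bitmap if nothing is found, and
 * report "none" when the only candidate left is the current index itself.
 * A single unsigned long stands in for the driver's fcf_rr_bmask, EX_NONE
 * for LPFC_FCOE_FCF_NEXT_NONE, and ex_find_next_bit() for find_next_bit().
 */
#if 0
#include <stdint.h>

#define EX_TBL_MAX	64
#define EX_NONE		0xFFFF

static int ex_find_next_bit(unsigned long mask, int start)
{
	int i;

	for (i = start; i < EX_TBL_MAX; i++)
		if (mask & (1UL << i))
			return i;
	return EX_TBL_MAX;	/* like find_next_bit(): size when not found */
}

static uint16_t ex_rr_next_index(unsigned long mask, uint16_t current_idx)
{
	int next = ex_find_next_bit(mask, (current_idx + 1) % EX_TBL_MAX);

	if (next >= EX_TBL_MAX)		/* wrap around to the beginning */
		next = ex_find_next_bit(mask, 0);
	if (next >= EX_TBL_MAX || next == current_idx)
		return EX_NONE;		/* no other eligible index exists */
	return (uint16_t)next;
}
#endif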

/**
 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to the rediscover FCF table mailbox command.
 *
 * This routine is the completion routine for the rediscover FCF table mailbox
 * command. If the mailbox command returned failure, it will try to stop the
 * FCF rediscover wait timer.
 **/
void
lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	uint32_t shdr_status, shdr_add_status;

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;

	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &redisc_fcf->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
			     &redisc_fcf->header.cfg_shdr.response);
	if (shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2746 Requesting for FCF rediscovery failed "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);
		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * CVL event triggered FCF rediscover request failed;
			 * as a last resort, retry the currently registered
			 * FCF entry.
			 */
			lpfc_retry_pport_discovery(phba);
		} else {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * DEAD FCF event triggered FCF rediscover request
			 * failed; as a last resort, fail over as a link down
			 * to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		}
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2775 Start FCF rediscover quiescent timer\n");
		/*
		 * Start the FCF rediscovery wait timer for pending FCF
		 * before rescanning the FCF record table.
		 */
		lpfc_fcf_redisc_wait_start_timer(phba);
	}

	mempool_free(mbox, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request rediscovery of the entire FCF table
 * by the port.
 **/
int
lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	int rc, length;

	/* Cancel retry delay timers to all vports before FCF rediscover */
	lpfc_cancel_all_vport_retry_delay_timer(phba);

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2745 Failed to allocate mbox for "
				"requesting FCF rediscover.\n");
		return -ENOMEM;
	}

	length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
			 length, LPFC_SLI4_MBX_EMBED);

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
	/* Set count to 0 for invalidating the entire FCF database */
	bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);

	/* Issue the mailbox command asynchronously */
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}

/**
 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
 * @phba: pointer to lpfc hba data structure.
 *
 * This function is the failover routine as a last resort to the FCF DEAD
 * event when the driver failed to perform a fast FCF failover.
 **/
void
lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
{
	uint32_t link_state;

	/*
	 * Last resort as FCF DEAD event failover will treat this as
	 * a link down, but save the link state because we don't want
	 * it to be changed to Link Down unless it is already down.
	 */
	link_state = phba->link_state;
	lpfc_linkdown(phba);
	phba->link_state = link_state;

	/* Unregister FCF if no devices connected to it */
	lpfc_unregister_unused_fcf(phba);
}

/**
 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI3 port configuration region 23 data through the
 * memory dump mailbox command. When it successfully retrieves data, the size
 * of the data will be returned; otherwise, 0 will be returned.
 **/
static uint32_t
lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	uint32_t offset = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2600 failed to allocate mailbox memory\n");
		return 0;
	}
	mb = &pmb->u.mb;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2601 failed to read config "
					"region 23, rc 0x%x Status 0x%x\n",
					rc, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/*
		 * dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;

		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      rgn23_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);

	mempool_free(pmb, phba->mbox_mem_pool);
	return offset;
}

/**
 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI4 port configuration region 23 data through the
 * memory dump mailbox command. When it successfully retrieves data, the size
 * of the data will be returned; otherwise, 0 will be returned.
 **/
static uint32_t
lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_dmabuf *mp = NULL;
	struct lpfc_mqe *mqe;
	uint32_t data_length = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3105 failed to allocate mailbox memory\n");
		return 0;
	}

	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
		goto out;
	mqe = &mboxq->u.mqe;
	mp = (struct lpfc_dmabuf *) mboxq->context1;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc)
		goto out;
	data_length = mqe->un.mb_words[5];
	if (data_length == 0)
		goto out;
	if (data_length > DMP_RGN23_SIZE) {
		data_length = 0;
		goto out;
	}
	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	return data_length;
}
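
/*
 * Illustrative sketch (not built with the driver): the chunked dump loop
 * above, reduced to its accumulation logic. ex_read_chunk() is a
 * hypothetical stand-in for one DUMP_MEMORY mailbox round trip and returns
 * how many bytes it produced; the sample data it copies is made up. The loop
 * stops on a zero-length chunk or once the region buffer is full, and the
 * total copied so far is what the reader finally reports.
 */
#if 0
#include <stdint.h>
#include <string.h>

#define EX_RGN_SIZE 256

/* Hypothetical stand-in for one DUMP_MEMORY mailbox round trip */
static uint32_t ex_read_chunk(uint32_t offset, uint8_t *chunk, uint32_t max)
{
	static const uint8_t fw_region[EX_RGN_SIZE] = { 0x52, 0x47, 0x32, 0x33 };
	uint32_t left = EX_RGN_SIZE - offset;
	uint32_t cnt = left < max ? left : max;

	memcpy(chunk, fw_region + offset, cnt);
	return cnt;
}

static uint32_t ex_read_region(uint8_t *rgn_data)
{
	uint8_t chunk[64];
	uint32_t offset = 0, cnt;

	do {
		cnt = ex_read_chunk(offset, chunk, sizeof(chunk));
		if (cnt == 0)
			break;			/* finished or error */
		if (cnt > EX_RGN_SIZE - offset)
			cnt = EX_RGN_SIZE - offset;	/* clamp to buffer */
		memcpy(rgn_data + offset, chunk, cnt);
		offset += cnt;
	} while (cnt && offset < EX_RGN_SIZE);

	return offset;		/* total bytes retrieved, 0 on failure */
}
#endif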

/**
 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads region 23 and parses the TLVs for the port state to
 * decide whether the user disabled the port. If the TLV indicates the
 * port is disabled, the hba_flag is set accordingly.
 **/
void
lpfc_sli_read_link_ste(struct lpfc_hba *phba)
{
	uint8_t *rgn23_data = NULL;
	uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
	uint32_t offset = 0;

	/* Get adapter Region 23 data */
	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
	if (!rgn23_data)
		goto out;

	if (phba->sli_rev < LPFC_SLI_REV4)
		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
	else {
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
			goto out;
		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
	}

	if (!data_size)
		goto out;

	/* Check the region signature first */
	if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2619 Config region 23 has bad signature\n");
		goto out;
	}
	offset += 4;

	/* Check the data structure version */
	if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2620 Config region 23 has bad version\n");
		goto out;
	}
	offset += 4;

	/* Parse TLV entries in the region */
	while (offset < data_size) {
		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
			break;
		/*
		 * If the TLV is not a driver-specific TLV or the driver id is
		 * not the Linux driver id, skip the record.
		 */
		if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
		    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
		    (rgn23_data[offset + 3] != 0)) {
			offset += rgn23_data[offset + 1] * 4 + 4;
			continue;
		}

		/* Driver found a driver specific TLV in the config region */
		sub_tlv_len = rgn23_data[offset + 1] * 4;
		offset += 4;
		tlv_offset = 0;

		/*
		 * Search for configured port state sub-TLV.
		 */
		while ((offset < data_size) &&
			(tlv_offset < sub_tlv_len)) {
			if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
				offset += 4;
				tlv_offset += 4;
				break;
			}
			if (rgn23_data[offset] != PORT_STE_TYPE) {
				offset += rgn23_data[offset + 1] * 4 + 4;
				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
				continue;
			}

			/* This HBA contains PORT_STE configured */
			if (!rgn23_data[offset + 2])
				phba->hba_flag |= LINK_DISABLED;

			goto out;
		}
	}

out:
	kfree(rgn23_data);
	return;
}
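
/*
 * Illustrative sketch (not built with the driver): the region 23 TLV walk
 * above, reduced to its record arithmetic. Each record starts with a type
 * byte and a length byte expressed in 32-bit words, so the next record
 * begins (length * 4 + 4) bytes after the current one. The constants and the
 * assumed payload position are hypothetical sample values for the sketch,
 * not real region 23 contents.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

#define EX_LAST_REC	0xff

/* Return the payload offset of the first record of the wanted type, or -1 */
static int ex_find_tlv(const uint8_t *data, size_t size, uint8_t type)
{
	size_t offset = 0;

	while (offset + 4 <= size) {
		if (data[offset] == EX_LAST_REC)
			break;
		if (data[offset] == type)
			return (int)(offset + 2);	/* payload starts here */
		/* skip the 4-byte header plus (length words * 4) payload */
		offset += data[offset + 1] * 4 + 4;
	}
	return -1;
}
#endif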

/**
 * lpfc_wr_object - write an object to the firmware
 * @phba: HBA structure that indicates port to create a queue on.
 * @dmabuf_list: list of dmabufs to write to the port.
 * @size: the total byte value of the objects to write to the port.
 * @offset: the current offset to be used to start the transfer.
 *
 * This routine will create a wr_object mailbox command to send to the port.
 * The mailbox command will be constructed using the dma buffers described in
 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
 * BDEs as the embedded mailbox can support. The @offset variable will be
 * used to indicate the starting offset of the transfer and will also return
 * the offset after the write object mailbox has completed. @size is used to
 * determine the end of the object and whether the eof bit should be set.
 *
 * Returns 0 if successful; @offset will contain the new offset to use
 * for the next write.
 * Returns a negative value for error cases.
 **/
int
lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
	       uint32_t size, uint32_t *offset)
{
	struct lpfc_mbx_wr_object *wr_object;
	LPFC_MBOXQ_t *mbox;
	int rc = 0, i = 0;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_dmabuf *dmabuf;
	uint32_t written = 0;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_WRITE_OBJECT,
			sizeof(struct lpfc_mbx_wr_object) -
			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
	wr_object->u.request.write_offset = *offset;
	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
	wr_object->u.request.object_name[0] =
		cpu_to_le32(wr_object->u.request.object_name[0]);
	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
	list_for_each_entry(dmabuf, dmabuf_list, list) {
		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
			break;
		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
		wr_object->u.request.bde[i].addrHigh =
			putPaddrHigh(dmabuf->phys);
		if (written + SLI4_PAGE_SIZE >= size) {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				(size - written);
			written += (size - written);
			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
		} else {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				SLI4_PAGE_SIZE;
			written += SLI4_PAGE_SIZE;
		}
		i++;
	}
	wr_object->u.request.bde_count = i;
	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3025 Write Object mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	} else
		*offset += wr_object->u.response.actual_write_length;
	return rc;
}
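
/*
 * Illustrative sketch (not built with the driver): how a caller might use a
 * write-object style helper that advances an offset on each call.
 * ex_write_piece() is a hypothetical stand-in for lpfc_wr_object(); the point
 * is that the caller keeps handing in the running offset, and the helper both
 * consumes it as the starting position and updates it with the amount
 * actually accepted, until the whole image has been written.
 */
#if 0
#include <stdint.h>

#define EX_IMAGE_SIZE	(3 * 4096 + 100)
#define EX_MAX_PIECE	4096

/* Pretend the port accepts up to EX_MAX_PIECE bytes per command */
static int ex_write_piece(uint32_t size, uint32_t *offset)
{
	uint32_t left = size - *offset;
	uint32_t written = left < EX_MAX_PIECE ? left : EX_MAX_PIECE;

	*offset += written;	/* mirrors "*offset += actual_write_length" */
	return 0;
}

static int ex_download_image(void)
{
	uint32_t offset = 0;
	int rc = 0;

	while (offset < EX_IMAGE_SIZE && !rc)
		rc = ex_write_piece(EX_IMAGE_SIZE, &offset);
	return rc;
}
#endif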

/**
 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
 * @vport: pointer to vport data structure.
 *
 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 * and REG_VPI mailbox commands associated with the vport. This function
 * is called when the driver wants to restart discovery of the vport due to
 * a Clear Virtual Link event.
 **/
void
lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *act_mbx_ndlp = NULL;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LIST_HEAD(mbox_cmd_list);
	uint8_t restart_loop;

	/* Clean up internally queued mailbox commands with the vport */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if (mb->vport != vport)
			continue;

		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
			(mb->u.mb.mbxCommand != MBX_REG_VPI))
			continue;

		list_del(&mb->list);
		list_add_tail(&mb->list, &mbox_cmd_list);
	}
	/* Clean up active mailbox command with the vport */
	mb = phba->sli.mbox_active;
	if (mb && (mb->vport == vport)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
			(mb->u.mb.mbxCommand == MBX_REG_VPI))
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
			/* Put reference count for delayed processing */
			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
			/* Unregister the RPI when mailbox complete */
			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
		}
	}
	/* Cleanup any mailbox completions which are not yet processed */
	do {
		restart_loop = 0;
		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
			/*
			 * If this mailbox is already processed or it is
			 * for another vport, ignore it.
			 */
			if ((mb->vport != vport) ||
				(mb->mbox_flag & LPFC_MBX_IMED_UNREG))
				continue;

			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
				(mb->u.mb.mbxCommand != MBX_REG_VPI))
				continue;

			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
				ndlp = (struct lpfc_nodelist *)mb->context2;
				/* Unregister the RPI when mailbox complete */
				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
				restart_loop = 1;
				spin_unlock_irq(&phba->hbalock);
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				spin_lock_irq(&phba->hbalock);
				break;
			}
		}
	} while (restart_loop);

	spin_unlock_irq(&phba->hbalock);

	/* Release the cleaned-up mailbox commands */
	while (!list_empty(&mbox_cmd_list)) {
		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			ndlp = (struct lpfc_nodelist *) mb->context2;
			mb->context2 = NULL;
			if (ndlp) {
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				lpfc_nlp_put(ndlp);
			}
		}
		mempool_free(mb, phba->mbox_mem_pool);
	}

	/* Release the ndlp with the cleaned-up active mailbox command */
	if (act_mbx_ndlp) {
		spin_lock(shost->host_lock);
		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock(shost->host_lock);
		lpfc_nlp_put(act_mbx_ndlp);
	}
}
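
/*
 * Illustrative sketch (not built with the driver): the "collect under the
 * lock, release outside the lock" pattern used above, with a hypothetical
 * singly linked list and a plain pthread mutex standing in for the driver's
 * mailbox queue and hbalock. Matching entries are unlinked onto a private
 * list while the lock is held, and only then freed, so no release work
 * happens with the lock held.
 */
#if 0
#include <pthread.h>
#include <stdlib.h>

struct ex_cmd {
	int vport_id;
	struct ex_cmd *next;
};

static void ex_cleanup_vport(struct ex_cmd **queue, pthread_mutex_t *lock,
			     int vport_id)
{
	struct ex_cmd *claimed = NULL, **pp, *cmd;

	pthread_mutex_lock(lock);
	pp = queue;
	while ((cmd = *pp) != NULL) {
		if (cmd->vport_id == vport_id) {
			*pp = cmd->next;		/* unlink from queue */
			cmd->next = claimed;		/* push on private list */
			claimed = cmd;
		} else {
			pp = &cmd->next;
		}
	}
	pthread_mutex_unlock(lock);

	while (claimed) {			/* free with no lock held */
		cmd = claimed;
		claimed = claimed->next;
		free(cmd);
	}
}
#endif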

/**
 * lpfc_drain_txq - Drain the txq
 * @phba: Pointer to HBA context object.
 *
 * This function attempts to submit IOCBs on the txq
 * to the adapter. For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no available SGLs. This congestion can occur with large
 * vport counts during node discovery.
 **/

uint32_t
lpfc_drain_txq(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct lpfc_iocbq *piocbq = NULL;
	unsigned long iflags = 0;
	char *fail_msg = NULL;
	struct lpfc_sglq *sglq;
	union lpfc_wqe wqe;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (pring->txq_cnt > pring->txq_max)
		pring->txq_max = pring->txq_cnt;

	spin_unlock_irqrestore(&phba->hbalock, iflags);

	while (pring->txq_cnt) {
		spin_lock_irqsave(&phba->hbalock, iflags);

		piocbq = lpfc_sli_ringtx_get(phba, pring);
		sglq = __lpfc_sli_get_sglq(phba, piocbq);
		if (!sglq) {
			__lpfc_sli_ringtx_put(phba, pring, piocbq);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			break;
		} else {
			if (!piocbq) {
				/* The txq_cnt is out of sync. This should
				 * never happen.
				 */
				sglq = __lpfc_clear_active_sglq(phba,
						 sglq->sli4_lxritag);
				spin_unlock_irqrestore(&phba->hbalock, iflags);
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2823 txq empty and txq_cnt is %d\n ",
					pring->txq_cnt);
				break;
			}
		}

		/* The XRI and IOCB resources are secured;
		 * attempt to issue the request.
		 */
		piocbq->sli4_lxritag = sglq->sli4_lxritag;
		piocbq->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
			fail_msg = "to convert bpl to sgl";
		else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
			fail_msg = "to convert iocb to wqe";
		else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
			fail_msg = " - Wq is full";
		else
			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);

		if (fail_msg) {
			/* Failed means we can't issue and need to cancel */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2822 IOCB failed %s iotag 0x%x "
					"xri 0x%x\n",
					fail_msg,
					piocbq->iotag, piocbq->sli4_xritag);
			list_add_tail(&piocbq->list, &completions);
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Cancel all the IOCBs that cannot be issued */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return pring->txq_cnt;
}