1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 22 #include <linux/blkdev.h> 23 #include <linux/pci.h> 24 #include <linux/interrupt.h> 25 #include <linux/delay.h> 26 #include <linux/slab.h> 27 28 #include <scsi/scsi.h> 29 #include <scsi/scsi_cmnd.h> 30 #include <scsi/scsi_device.h> 31 #include <scsi/scsi_host.h> 32 #include <scsi/scsi_transport_fc.h> 33 #include <scsi/fc/fc_fs.h> 34 #include <linux/aer.h> 35 36 #include "lpfc_hw4.h" 37 #include "lpfc_hw.h" 38 #include "lpfc_sli.h" 39 #include "lpfc_sli4.h" 40 #include "lpfc_nl.h" 41 #include "lpfc_disc.h" 42 #include "lpfc_scsi.h" 43 #include "lpfc.h" 44 #include "lpfc_crtn.h" 45 #include "lpfc_logmsg.h" 46 #include "lpfc_compat.h" 47 #include "lpfc_debugfs.h" 48 #include "lpfc_vport.h" 49 50 /* There are only four IOCB completion types. */ 51 typedef enum _lpfc_iocb_type { 52 LPFC_UNKNOWN_IOCB, 53 LPFC_UNSOL_IOCB, 54 LPFC_SOL_IOCB, 55 LPFC_ABORT_IOCB 56 } lpfc_iocb_type; 57 58 59 /* Provide function prototypes local to this module. */ 60 static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *, 61 uint32_t); 62 static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *, 63 uint8_t *, uint32_t *); 64 static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *, 65 struct lpfc_iocbq *); 66 static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, 67 struct hbq_dmabuf *); 68 static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *, 69 struct lpfc_cqe *); 70 static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *, 71 int); 72 static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *, 73 uint32_t); 74 static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba); 75 static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba); 76 77 static IOCB_t * 78 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) 79 { 80 return &iocbq->iocb; 81 } 82 83 /** 84 * lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue 85 * @q: The Work Queue to operate on. 86 * @wqe: The work Queue Entry to put on the Work queue. 87 * 88 * This routine will copy the contents of @wqe to the next available entry on 89 * the @q. This function will then ring the Work Queue Doorbell to signal the 90 * HBA to start processing the Work Queue Entry. This function returns 0 if 91 * successful. If no entries are available on @q then this function will return 92 * -ENOMEM. 
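 * -EINVAL is returned if the doorbell format of @q is not recognized.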
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = q->qe[q->host_index].wqe;

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -ENOMEM;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->entry_repost))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
		bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the MQ Doorbell to signal the
 * HBA to start processing the Mailbox Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
184 **/ 185 static uint32_t 186 lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe) 187 { 188 struct lpfc_mqe *temp_mqe; 189 struct lpfc_register doorbell; 190 uint32_t host_index; 191 192 /* sanity check on queue memory */ 193 if (unlikely(!q)) 194 return -ENOMEM; 195 temp_mqe = q->qe[q->host_index].mqe; 196 197 /* If the host has not yet processed the next entry then we are done */ 198 if (((q->host_index + 1) % q->entry_count) == q->hba_index) 199 return -ENOMEM; 200 lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size); 201 /* Save off the mailbox pointer for completion */ 202 q->phba->mbox = (MAILBOX_t *)temp_mqe; 203 204 /* Update the host index before invoking device */ 205 host_index = q->host_index; 206 q->host_index = ((q->host_index + 1) % q->entry_count); 207 208 /* Ring Doorbell */ 209 doorbell.word0 = 0; 210 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1); 211 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id); 212 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr); 213 return 0; 214 } 215 216 /** 217 * lpfc_sli4_mq_release - Updates internal hba index for MQ 218 * @q: The Mailbox Queue to operate on. 219 * 220 * This routine will update the HBA index of a queue to reflect consumption of 221 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed 222 * an entry the host calls this function to update the queue's internal 223 * pointers. This routine returns the number of entries that were consumed by 224 * the HBA. 225 **/ 226 static uint32_t 227 lpfc_sli4_mq_release(struct lpfc_queue *q) 228 { 229 /* sanity check on queue memory */ 230 if (unlikely(!q)) 231 return 0; 232 233 /* Clear the mailbox pointer for completion */ 234 q->phba->mbox = NULL; 235 q->hba_index = ((q->hba_index + 1) % q->entry_count); 236 return 1; 237 } 238 239 /** 240 * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ 241 * @q: The Event Queue to get the first valid EQE from 242 * 243 * This routine will get the first valid Event Queue Entry from @q, update 244 * the queue's internal hba index, and return the EQE. If no valid EQEs are in 245 * the Queue (no more work to do), or the Queue is full of EQEs that have been 246 * processed, but not popped back to the HBA then this routine will return NULL. 
247 **/ 248 static struct lpfc_eqe * 249 lpfc_sli4_eq_get(struct lpfc_queue *q) 250 { 251 struct lpfc_eqe *eqe; 252 uint32_t idx; 253 254 /* sanity check on queue memory */ 255 if (unlikely(!q)) 256 return NULL; 257 eqe = q->qe[q->hba_index].eqe; 258 259 /* If the next EQE is not valid then we are done */ 260 if (!bf_get_le32(lpfc_eqe_valid, eqe)) 261 return NULL; 262 /* If the host has not yet processed the next entry then we are done */ 263 idx = ((q->hba_index + 1) % q->entry_count); 264 if (idx == q->host_index) 265 return NULL; 266 267 q->hba_index = idx; 268 return eqe; 269 } 270 271 /** 272 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ 273 * @q: The Event Queue to disable interrupts 274 * 275 **/ 276 static inline void 277 lpfc_sli4_eq_clr_intr(struct lpfc_queue *q) 278 { 279 struct lpfc_register doorbell; 280 281 doorbell.word0 = 0; 282 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); 283 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); 284 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell, 285 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); 286 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); 287 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr); 288 } 289 290 /** 291 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ 292 * @q: The Event Queue that the host has completed processing for. 293 * @arm: Indicates whether the host wants to arms this CQ. 294 * 295 * This routine will mark all Event Queue Entries on @q, from the last 296 * known completed entry to the last entry that was processed, as completed 297 * by clearing the valid bit for each completion queue entry. Then it will 298 * notify the HBA, by ringing the doorbell, that the EQEs have been processed. 299 * The internal host index in the @q will be updated by this routine to indicate 300 * that the host has finished processing the entries. The @arm parameter 301 * indicates that the queue should be rearmed when ringing the doorbell. 302 * 303 * This function will return the number of EQEs that were popped. 
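 * When @arm is set the doorbell is rung even if no entries were popped,
 * so that the EQ is re-armed.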
304 **/ 305 uint32_t 306 lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm) 307 { 308 uint32_t released = 0; 309 struct lpfc_eqe *temp_eqe; 310 struct lpfc_register doorbell; 311 312 /* sanity check on queue memory */ 313 if (unlikely(!q)) 314 return 0; 315 316 /* while there are valid entries */ 317 while (q->hba_index != q->host_index) { 318 temp_eqe = q->qe[q->host_index].eqe; 319 bf_set_le32(lpfc_eqe_valid, temp_eqe, 0); 320 released++; 321 q->host_index = ((q->host_index + 1) % q->entry_count); 322 } 323 if (unlikely(released == 0 && !arm)) 324 return 0; 325 326 /* ring doorbell for number popped */ 327 doorbell.word0 = 0; 328 if (arm) { 329 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); 330 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); 331 } 332 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released); 333 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); 334 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell, 335 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); 336 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); 337 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr); 338 /* PCI read to flush PCI pipeline on re-arming for INTx mode */ 339 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) 340 readl(q->phba->sli4_hba.EQCQDBregaddr); 341 return released; 342 } 343 344 /** 345 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ 346 * @q: The Completion Queue to get the first valid CQE from 347 * 348 * This routine will get the first valid Completion Queue Entry from @q, update 349 * the queue's internal hba index, and return the CQE. If no valid CQEs are in 350 * the Queue (no more work to do), or the Queue is full of CQEs that have been 351 * processed, but not popped back to the HBA then this routine will return NULL. 352 **/ 353 static struct lpfc_cqe * 354 lpfc_sli4_cq_get(struct lpfc_queue *q) 355 { 356 struct lpfc_cqe *cqe; 357 uint32_t idx; 358 359 /* sanity check on queue memory */ 360 if (unlikely(!q)) 361 return NULL; 362 363 /* If the next CQE is not valid then we are done */ 364 if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe)) 365 return NULL; 366 /* If the host has not yet processed the next entry then we are done */ 367 idx = ((q->hba_index + 1) % q->entry_count); 368 if (idx == q->host_index) 369 return NULL; 370 371 cqe = q->qe[q->hba_index].cqe; 372 q->hba_index = idx; 373 return cqe; 374 } 375 376 /** 377 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ 378 * @q: The Completion Queue that the host has completed processing for. 379 * @arm: Indicates whether the host wants to arms this CQ. 380 * 381 * This routine will mark all Completion queue entries on @q, from the last 382 * known completed entry to the last entry that was processed, as completed 383 * by clearing the valid bit for each completion queue entry. Then it will 384 * notify the HBA, by ringing the doorbell, that the CQEs have been processed. 385 * The internal host index in the @q will be updated by this routine to indicate 386 * that the host has finished processing the entries. The @arm parameter 387 * indicates that the queue should be rearmed when ringing the doorbell. 388 * 389 * This function will return the number of CQEs that were released. 
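 * When @arm is set the doorbell is rung even if no entries were released,
 * so that the CQ is re-armed.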
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
			(q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Receive queue.
 * @drqe: The Data Receive Queue Entry to put on the Receive queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next available
 * entries on @hq and @dq. This function will then ring the Receive Queue
 * Doorbell to signal the HBA to start processing the Receive Queue Entries.
 * This function returns the index that the rqe was copied to if successful.
 * If no entries are available on the queues then this function will return
 * -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	put_index = hq->host_index;
	temp_hrqe = hq->qe[hq->host_index].rqe;
	temp_drqe = dq->qe[dq->host_index].rqe;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq->host_index != dq->host_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
	dq->host_index = ((dq->host_index + 1) % dq->entry_count);

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->entry_repost)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return put_index;
}

/**
 *
lpfc_sli4_rq_release - Updates internal hba index for RQ 489 * @q: The Header Receive Queue to operate on. 490 * 491 * This routine will update the HBA index of a queue to reflect consumption of 492 * one Receive Queue Entry by the HBA. When the HBA indicates that it has 493 * consumed an entry the host calls this function to update the queue's 494 * internal pointers. This routine returns the number of entries that were 495 * consumed by the HBA. 496 **/ 497 static uint32_t 498 lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq) 499 { 500 /* sanity check on queue memory */ 501 if (unlikely(!hq) || unlikely(!dq)) 502 return 0; 503 504 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ)) 505 return 0; 506 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count); 507 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count); 508 return 1; 509 } 510 511 /** 512 * lpfc_cmd_iocb - Get next command iocb entry in the ring 513 * @phba: Pointer to HBA context object. 514 * @pring: Pointer to driver SLI ring object. 515 * 516 * This function returns pointer to next command iocb entry 517 * in the command ring. The caller must hold hbalock to prevent 518 * other threads consume the next command iocb. 519 * SLI-2/SLI-3 provide different sized iocbs. 520 **/ 521 static inline IOCB_t * 522 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 523 { 524 return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) + 525 pring->sli.sli3.cmdidx * phba->iocb_cmd_size); 526 } 527 528 /** 529 * lpfc_resp_iocb - Get next response iocb entry in the ring 530 * @phba: Pointer to HBA context object. 531 * @pring: Pointer to driver SLI ring object. 532 * 533 * This function returns pointer to next response iocb entry 534 * in the response ring. The caller must hold hbalock to make sure 535 * that no other thread consume the next response iocb. 536 * SLI-2/SLI-3 provide different sized iocbs. 537 **/ 538 static inline IOCB_t * 539 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 540 { 541 return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) + 542 pring->sli.sli3.rspidx * phba->iocb_rsp_size); 543 } 544 545 /** 546 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool 547 * @phba: Pointer to HBA context object. 548 * 549 * This function is called with hbalock held. This function 550 * allocates a new driver iocb object from the iocb pool. If the 551 * allocation is successful, it returns pointer to the newly 552 * allocated iocb object else it returns NULL. 553 **/ 554 struct lpfc_iocbq * 555 __lpfc_sli_get_iocbq(struct lpfc_hba *phba) 556 { 557 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list; 558 struct lpfc_iocbq * iocbq = NULL; 559 560 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list); 561 if (iocbq) 562 phba->iocb_cnt++; 563 if (phba->iocb_cnt > phba->iocb_max) 564 phba->iocb_max = phba->iocb_cnt; 565 return iocbq; 566 } 567 568 /** 569 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI. 570 * @phba: Pointer to HBA context object. 571 * @xritag: XRI value. 572 * 573 * This function clears the sglq pointer from the array of acive 574 * sglq's. The xritag that is passed in is used to index into the 575 * array. Before the xritag can be used it needs to be adjusted 576 * by subtracting the xribase. 577 * 578 * Returns sglq ponter = success, NULL = Failure. 
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. It checks whether
 * stop_time (ratov from setting rrq active) has been reached; if it
 * has and the send_rrq flag is set then it will call lpfc_send_rrq.
 * If the send_rrq flag is not set then it will just call the routine
 * to clear the rrq and free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
659 * 660 **/ 661 void 662 lpfc_handle_rrq_active(struct lpfc_hba *phba) 663 { 664 struct lpfc_node_rrq *rrq; 665 struct lpfc_node_rrq *nextrrq; 666 unsigned long next_time; 667 unsigned long iflags; 668 LIST_HEAD(send_rrq); 669 670 spin_lock_irqsave(&phba->hbalock, iflags); 671 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 672 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); 673 list_for_each_entry_safe(rrq, nextrrq, 674 &phba->active_rrq_list, list) { 675 if (time_after(jiffies, rrq->rrq_stop_time)) 676 list_move(&rrq->list, &send_rrq); 677 else if (time_before(rrq->rrq_stop_time, next_time)) 678 next_time = rrq->rrq_stop_time; 679 } 680 spin_unlock_irqrestore(&phba->hbalock, iflags); 681 if (!list_empty(&phba->active_rrq_list)) 682 mod_timer(&phba->rrq_tmr, next_time); 683 list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) { 684 list_del(&rrq->list); 685 if (!rrq->send_rrq) 686 /* this call will free the rrq */ 687 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 688 else if (lpfc_send_rrq(phba, rrq)) { 689 /* if we send the rrq then the completion handler 690 * will clear the bit in the xribitmap. 691 */ 692 lpfc_clr_rrq_active(phba, rrq->xritag, 693 rrq); 694 } 695 } 696 } 697 698 /** 699 * lpfc_get_active_rrq - Get the active RRQ for this exchange. 700 * @vport: Pointer to vport context object. 701 * @xri: The xri used in the exchange. 702 * @did: The targets DID for this exchange. 703 * 704 * returns NULL = rrq not found in the phba->active_rrq_list. 705 * rrq = rrq for this xri and target. 706 **/ 707 struct lpfc_node_rrq * 708 lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did) 709 { 710 struct lpfc_hba *phba = vport->phba; 711 struct lpfc_node_rrq *rrq; 712 struct lpfc_node_rrq *nextrrq; 713 unsigned long iflags; 714 715 if (phba->sli_rev != LPFC_SLI_REV4) 716 return NULL; 717 spin_lock_irqsave(&phba->hbalock, iflags); 718 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { 719 if (rrq->vport == vport && rrq->xritag == xri && 720 rrq->nlp_DID == did){ 721 list_del(&rrq->list); 722 spin_unlock_irqrestore(&phba->hbalock, iflags); 723 return rrq; 724 } 725 } 726 spin_unlock_irqrestore(&phba->hbalock, iflags); 727 return NULL; 728 } 729 730 /** 731 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport. 732 * @vport: Pointer to vport context object. 733 * @ndlp: Pointer to the lpfc_node_list structure. 734 * If ndlp is NULL Remove all active RRQs for this vport from the 735 * phba->active_rrq_list and clear the rrq. 736 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp. 
737 **/ 738 void 739 lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 740 741 { 742 struct lpfc_hba *phba = vport->phba; 743 struct lpfc_node_rrq *rrq; 744 struct lpfc_node_rrq *nextrrq; 745 unsigned long iflags; 746 LIST_HEAD(rrq_list); 747 748 if (phba->sli_rev != LPFC_SLI_REV4) 749 return; 750 if (!ndlp) { 751 lpfc_sli4_vport_delete_els_xri_aborted(vport); 752 lpfc_sli4_vport_delete_fcp_xri_aborted(vport); 753 } 754 spin_lock_irqsave(&phba->hbalock, iflags); 755 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) 756 if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp)) 757 list_move(&rrq->list, &rrq_list); 758 spin_unlock_irqrestore(&phba->hbalock, iflags); 759 760 list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) { 761 list_del(&rrq->list); 762 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 763 } 764 } 765 766 /** 767 * lpfc_cleanup_wt_rrqs - Remove all rrq's from the active list. 768 * @phba: Pointer to HBA context object. 769 * 770 * Remove all rrqs from the phba->active_rrq_list and free them by 771 * calling __lpfc_clr_active_rrq 772 * 773 **/ 774 void 775 lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba) 776 { 777 struct lpfc_node_rrq *rrq; 778 struct lpfc_node_rrq *nextrrq; 779 unsigned long next_time; 780 unsigned long iflags; 781 LIST_HEAD(rrq_list); 782 783 if (phba->sli_rev != LPFC_SLI_REV4) 784 return; 785 spin_lock_irqsave(&phba->hbalock, iflags); 786 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 787 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)); 788 list_splice_init(&phba->active_rrq_list, &rrq_list); 789 spin_unlock_irqrestore(&phba->hbalock, iflags); 790 791 list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) { 792 list_del(&rrq->list); 793 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 794 } 795 if (!list_empty(&phba->active_rrq_list)) 796 mod_timer(&phba->rrq_tmr, next_time); 797 } 798 799 800 /** 801 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap. 802 * @phba: Pointer to HBA context object. 803 * @ndlp: Targets nodelist pointer for this exchange. 804 * @xritag the xri in the bitmap to test. 805 * 806 * This function is called with hbalock held. This function 807 * returns 0 = rrq not active for this xri 808 * 1 = rrq is valid for this xri. 809 **/ 810 int 811 lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 812 uint16_t xritag) 813 { 814 if (!ndlp) 815 return 0; 816 if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap)) 817 return 1; 818 else 819 return 0; 820 } 821 822 /** 823 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap. 824 * @phba: Pointer to HBA context object. 825 * @ndlp: nodelist pointer for this target. 826 * @xritag: xri used in this exchange. 827 * @rxid: Remote Exchange ID. 828 * @send_rrq: Flag used to determine if we should send rrq els cmd. 829 * 830 * This function takes the hbalock. 831 * The active bit is always set in the active rrq xri_bitmap even 832 * if there is no slot avaiable for the other rrq information. 833 * 834 * returns 0 rrq actived for this xri 835 * < 0 No memory or invalid ndlp. 
836 **/ 837 int 838 lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 839 uint16_t xritag, uint16_t rxid, uint16_t send_rrq) 840 { 841 unsigned long iflags; 842 struct lpfc_node_rrq *rrq; 843 int empty; 844 845 if (!ndlp) 846 return -EINVAL; 847 848 if (!phba->cfg_enable_rrq) 849 return -EINVAL; 850 851 spin_lock_irqsave(&phba->hbalock, iflags); 852 if (phba->pport->load_flag & FC_UNLOADING) { 853 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 854 goto out; 855 } 856 857 /* 858 * set the active bit even if there is no mem available. 859 */ 860 if (NLP_CHK_FREE_REQ(ndlp)) 861 goto out; 862 863 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING)) 864 goto out; 865 866 if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap)) 867 goto out; 868 869 spin_unlock_irqrestore(&phba->hbalock, iflags); 870 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); 871 if (!rrq) { 872 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 873 "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x" 874 " DID:0x%x Send:%d\n", 875 xritag, rxid, ndlp->nlp_DID, send_rrq); 876 return -EINVAL; 877 } 878 if (phba->cfg_enable_rrq == 1) 879 rrq->send_rrq = send_rrq; 880 else 881 rrq->send_rrq = 0; 882 rrq->xritag = xritag; 883 rrq->rrq_stop_time = jiffies + 884 msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); 885 rrq->ndlp = ndlp; 886 rrq->nlp_DID = ndlp->nlp_DID; 887 rrq->vport = ndlp->vport; 888 rrq->rxid = rxid; 889 spin_lock_irqsave(&phba->hbalock, iflags); 890 empty = list_empty(&phba->active_rrq_list); 891 list_add_tail(&rrq->list, &phba->active_rrq_list); 892 phba->hba_flag |= HBA_RRQ_ACTIVE; 893 if (empty) 894 lpfc_worker_wake_up(phba); 895 spin_unlock_irqrestore(&phba->hbalock, iflags); 896 return 0; 897 out: 898 spin_unlock_irqrestore(&phba->hbalock, iflags); 899 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 900 "2921 Can't set rrq active xri:0x%x rxid:0x%x" 901 " DID:0x%x Send:%d\n", 902 xritag, rxid, ndlp->nlp_DID, send_rrq); 903 return -EINVAL; 904 } 905 906 /** 907 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool 908 * @phba: Pointer to HBA context object. 909 * @piocb: Pointer to the iocbq. 910 * 911 * This function is called with hbalock held. This function 912 * gets a new driver sglq object from the sglq list. If the 913 * list is not empty then it is successful, it returns pointer to the newly 914 * allocated sglq object else it returns NULL. 915 **/ 916 static struct lpfc_sglq * 917 __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) 918 { 919 struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list; 920 struct lpfc_sglq *sglq = NULL; 921 struct lpfc_sglq *start_sglq = NULL; 922 struct lpfc_scsi_buf *lpfc_cmd; 923 struct lpfc_nodelist *ndlp; 924 int found = 0; 925 926 if (piocbq->iocb_flag & LPFC_IO_FCP) { 927 lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1; 928 ndlp = lpfc_cmd->rdata->pnode; 929 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && 930 !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) 931 ndlp = piocbq->context_un.ndlp; 932 else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) 933 ndlp = piocbq->context_un.ndlp; 934 else 935 ndlp = piocbq->context1; 936 937 list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list); 938 start_sglq = sglq; 939 while (!found) { 940 if (!sglq) 941 return NULL; 942 if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag)) { 943 /* This xri has an rrq outstanding for this DID. 944 * put it back in the list and get another xri. 
945 */ 946 list_add_tail(&sglq->list, lpfc_sgl_list); 947 sglq = NULL; 948 list_remove_head(lpfc_sgl_list, sglq, 949 struct lpfc_sglq, list); 950 if (sglq == start_sglq) { 951 sglq = NULL; 952 break; 953 } else 954 continue; 955 } 956 sglq->ndlp = ndlp; 957 found = 1; 958 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq; 959 sglq->state = SGL_ALLOCATED; 960 } 961 return sglq; 962 } 963 964 /** 965 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool 966 * @phba: Pointer to HBA context object. 967 * 968 * This function is called with no lock held. This function 969 * allocates a new driver iocb object from the iocb pool. If the 970 * allocation is successful, it returns pointer to the newly 971 * allocated iocb object else it returns NULL. 972 **/ 973 struct lpfc_iocbq * 974 lpfc_sli_get_iocbq(struct lpfc_hba *phba) 975 { 976 struct lpfc_iocbq * iocbq = NULL; 977 unsigned long iflags; 978 979 spin_lock_irqsave(&phba->hbalock, iflags); 980 iocbq = __lpfc_sli_get_iocbq(phba); 981 spin_unlock_irqrestore(&phba->hbalock, iflags); 982 return iocbq; 983 } 984 985 /** 986 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool 987 * @phba: Pointer to HBA context object. 988 * @iocbq: Pointer to driver iocb object. 989 * 990 * This function is called with hbalock held to release driver 991 * iocb object to the iocb pool. The iotag in the iocb object 992 * does not change for each use of the iocb object. This function 993 * clears all other fields of the iocb object when it is freed. 994 * The sqlq structure that holds the xritag and phys and virtual 995 * mappings for the scatter gather list is retrieved from the 996 * active array of sglq. The get of the sglq pointer also clears 997 * the entry in the array. If the status of the IO indiactes that 998 * this IO was aborted then the sglq entry it put on the 999 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the 1000 * IO has good status or fails for any other reason then the sglq 1001 * entry is added to the free list (lpfc_sgl_list). 1002 **/ 1003 static void 1004 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1005 { 1006 struct lpfc_sglq *sglq; 1007 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 1008 unsigned long iflag = 0; 1009 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 1010 1011 if (iocbq->sli4_xritag == NO_XRI) 1012 sglq = NULL; 1013 else 1014 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag); 1015 1016 1017 if (sglq) { 1018 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && 1019 (sglq->state != SGL_XRI_ABORTED)) { 1020 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, 1021 iflag); 1022 list_add(&sglq->list, 1023 &phba->sli4_hba.lpfc_abts_els_sgl_list); 1024 spin_unlock_irqrestore( 1025 &phba->sli4_hba.abts_sgl_list_lock, iflag); 1026 } else { 1027 sglq->state = SGL_FREED; 1028 sglq->ndlp = NULL; 1029 list_add_tail(&sglq->list, 1030 &phba->sli4_hba.lpfc_sgl_list); 1031 1032 /* Check if TXQ queue needs to be serviced */ 1033 if (!list_empty(&pring->txq)) 1034 lpfc_worker_wake_up(phba); 1035 } 1036 } 1037 1038 1039 /* 1040 * Clean all volatile data fields, preserve iotag and node struct. 1041 */ 1042 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 1043 iocbq->sli4_lxritag = NO_XRI; 1044 iocbq->sli4_xritag = NO_XRI; 1045 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 1046 } 1047 1048 1049 /** 1050 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool 1051 * @phba: Pointer to HBA context object. 
1052 * @iocbq: Pointer to driver iocb object. 1053 * 1054 * This function is called with hbalock held to release driver 1055 * iocb object to the iocb pool. The iotag in the iocb object 1056 * does not change for each use of the iocb object. This function 1057 * clears all other fields of the iocb object when it is freed. 1058 **/ 1059 static void 1060 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1061 { 1062 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 1063 1064 1065 /* 1066 * Clean all volatile data fields, preserve iotag and node struct. 1067 */ 1068 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 1069 iocbq->sli4_xritag = NO_XRI; 1070 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 1071 } 1072 1073 /** 1074 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool 1075 * @phba: Pointer to HBA context object. 1076 * @iocbq: Pointer to driver iocb object. 1077 * 1078 * This function is called with hbalock held to release driver 1079 * iocb object to the iocb pool. The iotag in the iocb object 1080 * does not change for each use of the iocb object. This function 1081 * clears all other fields of the iocb object when it is freed. 1082 **/ 1083 static void 1084 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1085 { 1086 phba->__lpfc_sli_release_iocbq(phba, iocbq); 1087 phba->iocb_cnt--; 1088 } 1089 1090 /** 1091 * lpfc_sli_release_iocbq - Release iocb to the iocb pool 1092 * @phba: Pointer to HBA context object. 1093 * @iocbq: Pointer to driver iocb object. 1094 * 1095 * This function is called with no lock held to release the iocb to 1096 * iocb pool. 1097 **/ 1098 void 1099 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1100 { 1101 unsigned long iflags; 1102 1103 /* 1104 * Clean all volatile data fields, preserve iotag and node struct. 1105 */ 1106 spin_lock_irqsave(&phba->hbalock, iflags); 1107 __lpfc_sli_release_iocbq(phba, iocbq); 1108 spin_unlock_irqrestore(&phba->hbalock, iflags); 1109 } 1110 1111 /** 1112 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list. 1113 * @phba: Pointer to HBA context object. 1114 * @iocblist: List of IOCBs. 1115 * @ulpstatus: ULP status in IOCB command field. 1116 * @ulpWord4: ULP word-4 in IOCB command field. 1117 * 1118 * This function is called with a list of IOCBs to cancel. It cancels the IOCB 1119 * on the list by invoking the complete callback function associated with the 1120 * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond 1121 * fields. 1122 **/ 1123 void 1124 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist, 1125 uint32_t ulpstatus, uint32_t ulpWord4) 1126 { 1127 struct lpfc_iocbq *piocb; 1128 1129 while (!list_empty(iocblist)) { 1130 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list); 1131 if (!piocb->iocb_cmpl) 1132 lpfc_sli_release_iocbq(phba, piocb); 1133 else { 1134 piocb->iocb.ulpStatus = ulpstatus; 1135 piocb->iocb.un.ulpWord[4] = ulpWord4; 1136 (piocb->iocb_cmpl) (phba, piocb, piocb); 1137 } 1138 } 1139 return; 1140 } 1141 1142 /** 1143 * lpfc_sli_iocb_cmd_type - Get the iocb type 1144 * @iocb_cmnd: iocb command code. 1145 * 1146 * This function is called by ring event handler function to get the iocb type. 1147 * This function translates the iocb command to an iocb command type used to 1148 * decide the final disposition of each completed IOCB. 
1149 * The function returns 1150 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb 1151 * LPFC_SOL_IOCB if it is a solicited iocb completion 1152 * LPFC_ABORT_IOCB if it is an abort iocb 1153 * LPFC_UNSOL_IOCB if it is an unsolicited iocb 1154 * 1155 * The caller is not required to hold any lock. 1156 **/ 1157 static lpfc_iocb_type 1158 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) 1159 { 1160 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB; 1161 1162 if (iocb_cmnd > CMD_MAX_IOCB_CMD) 1163 return 0; 1164 1165 switch (iocb_cmnd) { 1166 case CMD_XMIT_SEQUENCE_CR: 1167 case CMD_XMIT_SEQUENCE_CX: 1168 case CMD_XMIT_BCAST_CN: 1169 case CMD_XMIT_BCAST_CX: 1170 case CMD_ELS_REQUEST_CR: 1171 case CMD_ELS_REQUEST_CX: 1172 case CMD_CREATE_XRI_CR: 1173 case CMD_CREATE_XRI_CX: 1174 case CMD_GET_RPI_CN: 1175 case CMD_XMIT_ELS_RSP_CX: 1176 case CMD_GET_RPI_CR: 1177 case CMD_FCP_IWRITE_CR: 1178 case CMD_FCP_IWRITE_CX: 1179 case CMD_FCP_IREAD_CR: 1180 case CMD_FCP_IREAD_CX: 1181 case CMD_FCP_ICMND_CR: 1182 case CMD_FCP_ICMND_CX: 1183 case CMD_FCP_TSEND_CX: 1184 case CMD_FCP_TRSP_CX: 1185 case CMD_FCP_TRECEIVE_CX: 1186 case CMD_FCP_AUTO_TRSP_CX: 1187 case CMD_ADAPTER_MSG: 1188 case CMD_ADAPTER_DUMP: 1189 case CMD_XMIT_SEQUENCE64_CR: 1190 case CMD_XMIT_SEQUENCE64_CX: 1191 case CMD_XMIT_BCAST64_CN: 1192 case CMD_XMIT_BCAST64_CX: 1193 case CMD_ELS_REQUEST64_CR: 1194 case CMD_ELS_REQUEST64_CX: 1195 case CMD_FCP_IWRITE64_CR: 1196 case CMD_FCP_IWRITE64_CX: 1197 case CMD_FCP_IREAD64_CR: 1198 case CMD_FCP_IREAD64_CX: 1199 case CMD_FCP_ICMND64_CR: 1200 case CMD_FCP_ICMND64_CX: 1201 case CMD_FCP_TSEND64_CX: 1202 case CMD_FCP_TRSP64_CX: 1203 case CMD_FCP_TRECEIVE64_CX: 1204 case CMD_GEN_REQUEST64_CR: 1205 case CMD_GEN_REQUEST64_CX: 1206 case CMD_XMIT_ELS_RSP64_CX: 1207 case DSSCMD_IWRITE64_CR: 1208 case DSSCMD_IWRITE64_CX: 1209 case DSSCMD_IREAD64_CR: 1210 case DSSCMD_IREAD64_CX: 1211 type = LPFC_SOL_IOCB; 1212 break; 1213 case CMD_ABORT_XRI_CN: 1214 case CMD_ABORT_XRI_CX: 1215 case CMD_CLOSE_XRI_CN: 1216 case CMD_CLOSE_XRI_CX: 1217 case CMD_XRI_ABORTED_CX: 1218 case CMD_ABORT_MXRI64_CN: 1219 case CMD_XMIT_BLS_RSP64_CX: 1220 type = LPFC_ABORT_IOCB; 1221 break; 1222 case CMD_RCV_SEQUENCE_CX: 1223 case CMD_RCV_ELS_REQ_CX: 1224 case CMD_RCV_SEQUENCE64_CX: 1225 case CMD_RCV_ELS_REQ64_CX: 1226 case CMD_ASYNC_STATUS: 1227 case CMD_IOCB_RCV_SEQ64_CX: 1228 case CMD_IOCB_RCV_ELS64_CX: 1229 case CMD_IOCB_RCV_CONT64_CX: 1230 case CMD_IOCB_RET_XRI64_CX: 1231 type = LPFC_UNSOL_IOCB; 1232 break; 1233 case CMD_IOCB_XMIT_MSEQ64_CR: 1234 case CMD_IOCB_XMIT_MSEQ64_CX: 1235 case CMD_IOCB_RCV_SEQ_LIST64_CX: 1236 case CMD_IOCB_RCV_ELS_LIST64_CX: 1237 case CMD_IOCB_CLOSE_EXTENDED_CN: 1238 case CMD_IOCB_ABORT_EXTENDED_CN: 1239 case CMD_IOCB_RET_HBQE64_CN: 1240 case CMD_IOCB_FCP_IBIDIR64_CR: 1241 case CMD_IOCB_FCP_IBIDIR64_CX: 1242 case CMD_IOCB_FCP_ITASKMGT64_CX: 1243 case CMD_IOCB_LOGENTRY_CN: 1244 case CMD_IOCB_LOGENTRY_ASYNC_CN: 1245 printk("%s - Unhandled SLI-3 Command x%x\n", 1246 __func__, iocb_cmnd); 1247 type = LPFC_UNKNOWN_IOCB; 1248 break; 1249 default: 1250 type = LPFC_UNKNOWN_IOCB; 1251 break; 1252 } 1253 1254 return type; 1255 } 1256 1257 /** 1258 * lpfc_sli_ring_map - Issue config_ring mbox for all rings 1259 * @phba: Pointer to HBA context object. 1260 * 1261 * This function is called from SLI initialization code 1262 * to configure every ring of the HBA's SLI interface. The 1263 * caller is not required to hold any lock. This function issues 1264 * a config_ring mailbox command for each ring. 
1265 * This function returns zero if successful else returns a negative 1266 * error code. 1267 **/ 1268 static int 1269 lpfc_sli_ring_map(struct lpfc_hba *phba) 1270 { 1271 struct lpfc_sli *psli = &phba->sli; 1272 LPFC_MBOXQ_t *pmb; 1273 MAILBOX_t *pmbox; 1274 int i, rc, ret = 0; 1275 1276 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1277 if (!pmb) 1278 return -ENOMEM; 1279 pmbox = &pmb->u.mb; 1280 phba->link_state = LPFC_INIT_MBX_CMDS; 1281 for (i = 0; i < psli->num_rings; i++) { 1282 lpfc_config_ring(phba, i, pmb); 1283 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 1284 if (rc != MBX_SUCCESS) { 1285 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1286 "0446 Adapter failed to init (%d), " 1287 "mbxCmd x%x CFG_RING, mbxStatus x%x, " 1288 "ring %d\n", 1289 rc, pmbox->mbxCommand, 1290 pmbox->mbxStatus, i); 1291 phba->link_state = LPFC_HBA_ERROR; 1292 ret = -ENXIO; 1293 break; 1294 } 1295 } 1296 mempool_free(pmb, phba->mbox_mem_pool); 1297 return ret; 1298 } 1299 1300 /** 1301 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq 1302 * @phba: Pointer to HBA context object. 1303 * @pring: Pointer to driver SLI ring object. 1304 * @piocb: Pointer to the driver iocb object. 1305 * 1306 * This function is called with hbalock held. The function adds the 1307 * new iocb to txcmplq of the given ring. This function always returns 1308 * 0. If this function is called for ELS ring, this function checks if 1309 * there is a vport associated with the ELS command. This function also 1310 * starts els_tmofunc timer if this is an ELS command. 1311 **/ 1312 static int 1313 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1314 struct lpfc_iocbq *piocb) 1315 { 1316 list_add_tail(&piocb->list, &pring->txcmplq); 1317 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ; 1318 1319 if ((unlikely(pring->ringno == LPFC_ELS_RING)) && 1320 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 1321 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 1322 if (!piocb->vport) 1323 BUG(); 1324 else 1325 mod_timer(&piocb->vport->els_tmofunc, 1326 jiffies + 1327 msecs_to_jiffies(1000 * (phba->fc_ratov << 1))); 1328 } 1329 1330 1331 return 0; 1332 } 1333 1334 /** 1335 * lpfc_sli_ringtx_get - Get first element of the txq 1336 * @phba: Pointer to HBA context object. 1337 * @pring: Pointer to driver SLI ring object. 1338 * 1339 * This function is called with hbalock held to get next 1340 * iocb in txq of the given ring. If there is any iocb in 1341 * the txq, the function returns first iocb in the list after 1342 * removing the iocb from the list, else it returns NULL. 1343 **/ 1344 struct lpfc_iocbq * 1345 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1346 { 1347 struct lpfc_iocbq *cmd_iocb; 1348 1349 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); 1350 return cmd_iocb; 1351 } 1352 1353 /** 1354 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring 1355 * @phba: Pointer to HBA context object. 1356 * @pring: Pointer to driver SLI ring object. 1357 * 1358 * This function is called with hbalock held and the caller must post the 1359 * iocb without releasing the lock. If the caller releases the lock, 1360 * iocb slot returned by the function is not guaranteed to be available. 1361 * The function returns pointer to the next available iocb slot if there 1362 * is available slot in the ring, else it returns NULL. 
1363 * If the get index of the ring is ahead of the put index, the function 1364 * will post an error attention event to the worker thread to take the 1365 * HBA to offline state. 1366 **/ 1367 static IOCB_t * 1368 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1369 { 1370 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 1371 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb; 1372 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) && 1373 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx)) 1374 pring->sli.sli3.next_cmdidx = 0; 1375 1376 if (unlikely(pring->sli.sli3.local_getidx == 1377 pring->sli.sli3.next_cmdidx)) { 1378 1379 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 1380 1381 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) { 1382 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1383 "0315 Ring %d issue: portCmdGet %d " 1384 "is bigger than cmd ring %d\n", 1385 pring->ringno, 1386 pring->sli.sli3.local_getidx, 1387 max_cmd_idx); 1388 1389 phba->link_state = LPFC_HBA_ERROR; 1390 /* 1391 * All error attention handlers are posted to 1392 * worker thread 1393 */ 1394 phba->work_ha |= HA_ERATT; 1395 phba->work_hs = HS_FFER3; 1396 1397 lpfc_worker_wake_up(phba); 1398 1399 return NULL; 1400 } 1401 1402 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx) 1403 return NULL; 1404 } 1405 1406 return lpfc_cmd_iocb(phba, pring); 1407 } 1408 1409 /** 1410 * lpfc_sli_next_iotag - Get an iotag for the iocb 1411 * @phba: Pointer to HBA context object. 1412 * @iocbq: Pointer to driver iocb object. 1413 * 1414 * This function gets an iotag for the iocb. If there is no unused iotag and 1415 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup 1416 * array and assigns a new iotag. 1417 * The function returns the allocated iotag if successful, else returns zero. 1418 * Zero is not a valid iotag. 1419 * The caller is not required to hold any lock. 
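 * The hbalock is taken and released internally while the iotag is
 * assigned and the lookup array is grown.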
1420 **/ 1421 uint16_t 1422 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1423 { 1424 struct lpfc_iocbq **new_arr; 1425 struct lpfc_iocbq **old_arr; 1426 size_t new_len; 1427 struct lpfc_sli *psli = &phba->sli; 1428 uint16_t iotag; 1429 1430 spin_lock_irq(&phba->hbalock); 1431 iotag = psli->last_iotag; 1432 if(++iotag < psli->iocbq_lookup_len) { 1433 psli->last_iotag = iotag; 1434 psli->iocbq_lookup[iotag] = iocbq; 1435 spin_unlock_irq(&phba->hbalock); 1436 iocbq->iotag = iotag; 1437 return iotag; 1438 } else if (psli->iocbq_lookup_len < (0xffff 1439 - LPFC_IOCBQ_LOOKUP_INCREMENT)) { 1440 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; 1441 spin_unlock_irq(&phba->hbalock); 1442 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *), 1443 GFP_KERNEL); 1444 if (new_arr) { 1445 spin_lock_irq(&phba->hbalock); 1446 old_arr = psli->iocbq_lookup; 1447 if (new_len <= psli->iocbq_lookup_len) { 1448 /* highly unprobable case */ 1449 kfree(new_arr); 1450 iotag = psli->last_iotag; 1451 if(++iotag < psli->iocbq_lookup_len) { 1452 psli->last_iotag = iotag; 1453 psli->iocbq_lookup[iotag] = iocbq; 1454 spin_unlock_irq(&phba->hbalock); 1455 iocbq->iotag = iotag; 1456 return iotag; 1457 } 1458 spin_unlock_irq(&phba->hbalock); 1459 return 0; 1460 } 1461 if (psli->iocbq_lookup) 1462 memcpy(new_arr, old_arr, 1463 ((psli->last_iotag + 1) * 1464 sizeof (struct lpfc_iocbq *))); 1465 psli->iocbq_lookup = new_arr; 1466 psli->iocbq_lookup_len = new_len; 1467 psli->last_iotag = iotag; 1468 psli->iocbq_lookup[iotag] = iocbq; 1469 spin_unlock_irq(&phba->hbalock); 1470 iocbq->iotag = iotag; 1471 kfree(old_arr); 1472 return iotag; 1473 } 1474 } else 1475 spin_unlock_irq(&phba->hbalock); 1476 1477 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1478 "0318 Failed to allocate IOTAG.last IOTAG is %d\n", 1479 psli->last_iotag); 1480 1481 return 0; 1482 } 1483 1484 /** 1485 * lpfc_sli_submit_iocb - Submit an iocb to the firmware 1486 * @phba: Pointer to HBA context object. 1487 * @pring: Pointer to driver SLI ring object. 1488 * @iocb: Pointer to iocb slot in the ring. 1489 * @nextiocb: Pointer to driver iocb object which need to be 1490 * posted to firmware. 1491 * 1492 * This function is called with hbalock held to post a new iocb to 1493 * the firmware. This function copies the new iocb to ring iocb slot and 1494 * updates the ring pointers. It adds the new iocb to txcmplq if there is 1495 * a completion call back for this iocb else the function will free the 1496 * iocb object. 1497 **/ 1498 static void 1499 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1500 IOCB_t *iocb, struct lpfc_iocbq *nextiocb) 1501 { 1502 /* 1503 * Set up an iotag 1504 */ 1505 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0; 1506 1507 1508 if (pring->ringno == LPFC_ELS_RING) { 1509 lpfc_debugfs_slow_ring_trc(phba, 1510 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x", 1511 *(((uint32_t *) &nextiocb->iocb) + 4), 1512 *(((uint32_t *) &nextiocb->iocb) + 6), 1513 *(((uint32_t *) &nextiocb->iocb) + 7)); 1514 } 1515 1516 /* 1517 * Issue iocb command to adapter 1518 */ 1519 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size); 1520 wmb(); 1521 pring->stats.iocb_cmd++; 1522 1523 /* 1524 * If there is no completion routine to call, we can release the 1525 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF, 1526 * that have no rsp ring completion, iocb_cmpl MUST be NULL. 
1527 */ 1528 if (nextiocb->iocb_cmpl) 1529 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 1530 else 1531 __lpfc_sli_release_iocbq(phba, nextiocb); 1532 1533 /* 1534 * Let the HBA know what IOCB slot will be the next one the 1535 * driver will put a command into. 1536 */ 1537 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx; 1538 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); 1539 } 1540 1541 /** 1542 * lpfc_sli_update_full_ring - Update the chip attention register 1543 * @phba: Pointer to HBA context object. 1544 * @pring: Pointer to driver SLI ring object. 1545 * 1546 * The caller is not required to hold any lock for calling this function. 1547 * This function updates the chip attention bits for the ring to inform firmware 1548 * that there are pending work to be done for this ring and requests an 1549 * interrupt when there is space available in the ring. This function is 1550 * called when the driver is unable to post more iocbs to the ring due 1551 * to unavailability of space in the ring. 1552 **/ 1553 static void 1554 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1555 { 1556 int ringno = pring->ringno; 1557 1558 pring->flag |= LPFC_CALL_RING_AVAILABLE; 1559 1560 wmb(); 1561 1562 /* 1563 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register. 1564 * The HBA will tell us when an IOCB entry is available. 1565 */ 1566 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr); 1567 readl(phba->CAregaddr); /* flush */ 1568 1569 pring->stats.iocb_cmd_full++; 1570 } 1571 1572 /** 1573 * lpfc_sli_update_ring - Update chip attention register 1574 * @phba: Pointer to HBA context object. 1575 * @pring: Pointer to driver SLI ring object. 1576 * 1577 * This function updates the chip attention register bit for the 1578 * given ring to inform HBA that there is more work to be done 1579 * in this ring. The caller is not required to hold any lock. 1580 **/ 1581 static void 1582 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1583 { 1584 int ringno = pring->ringno; 1585 1586 /* 1587 * Tell the HBA that there is work to do in this ring. 1588 */ 1589 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) { 1590 wmb(); 1591 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); 1592 readl(phba->CAregaddr); /* flush */ 1593 } 1594 } 1595 1596 /** 1597 * lpfc_sli_resume_iocb - Process iocbs in the txq 1598 * @phba: Pointer to HBA context object. 1599 * @pring: Pointer to driver SLI ring object. 1600 * 1601 * This function is called with hbalock held to post pending iocbs 1602 * in the txq to the firmware. This function is called when driver 1603 * detects space available in the ring. 1604 **/ 1605 static void 1606 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1607 { 1608 IOCB_t *iocb; 1609 struct lpfc_iocbq *nextiocb; 1610 1611 /* 1612 * Check to see if: 1613 * (a) there is anything on the txq to send 1614 * (b) link is up 1615 * (c) link attention events can be processed (fcp ring only) 1616 * (d) IOCB processing is not blocked by the outstanding mbox command. 
1617 */ 1618 1619 if (lpfc_is_link_up(phba) && 1620 (!list_empty(&pring->txq)) && 1621 (pring->ringno != phba->sli.fcp_ring || 1622 phba->sli.sli_flag & LPFC_PROCESS_LA)) { 1623 1624 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 1625 (nextiocb = lpfc_sli_ringtx_get(phba, pring))) 1626 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 1627 1628 if (iocb) 1629 lpfc_sli_update_ring(phba, pring); 1630 else 1631 lpfc_sli_update_full_ring(phba, pring); 1632 } 1633 1634 return; 1635 } 1636 1637 /** 1638 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ 1639 * @phba: Pointer to HBA context object. 1640 * @hbqno: HBQ number. 1641 * 1642 * This function is called with hbalock held to get the next 1643 * available slot for the given HBQ. If there is free slot 1644 * available for the HBQ it will return pointer to the next available 1645 * HBQ entry else it will return NULL. 1646 **/ 1647 static struct lpfc_hbq_entry * 1648 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) 1649 { 1650 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1651 1652 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && 1653 ++hbqp->next_hbqPutIdx >= hbqp->entry_count) 1654 hbqp->next_hbqPutIdx = 0; 1655 1656 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { 1657 uint32_t raw_index = phba->hbq_get[hbqno]; 1658 uint32_t getidx = le32_to_cpu(raw_index); 1659 1660 hbqp->local_hbqGetIdx = getidx; 1661 1662 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { 1663 lpfc_printf_log(phba, KERN_ERR, 1664 LOG_SLI | LOG_VPORT, 1665 "1802 HBQ %d: local_hbqGetIdx " 1666 "%u is > than hbqp->entry_count %u\n", 1667 hbqno, hbqp->local_hbqGetIdx, 1668 hbqp->entry_count); 1669 1670 phba->link_state = LPFC_HBA_ERROR; 1671 return NULL; 1672 } 1673 1674 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) 1675 return NULL; 1676 } 1677 1678 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt + 1679 hbqp->hbqPutIdx; 1680 } 1681 1682 /** 1683 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers 1684 * @phba: Pointer to HBA context object. 1685 * 1686 * This function is called with no lock held to free all the 1687 * hbq buffers while uninitializing the SLI interface. It also 1688 * frees the HBQ buffers returned by the firmware but not yet 1689 * processed by the upper layers. 
1690  **/
1691 void
1692 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1693 {
1694 	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1695 	struct hbq_dmabuf *hbq_buf;
1696 	unsigned long flags;
1697 	int i, hbq_count;
1698 	uint32_t hbqno;
1699 
1700 	hbq_count = lpfc_sli_hbq_count();
1701 	/* Return all memory used by all HBQs */
1702 	spin_lock_irqsave(&phba->hbalock, flags);
1703 	for (i = 0; i < hbq_count; ++i) {
1704 		list_for_each_entry_safe(dmabuf, next_dmabuf,
1705 				&phba->hbqs[i].hbq_buffer_list, list) {
1706 			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1707 			list_del(&hbq_buf->dbuf.list);
1708 			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1709 		}
1710 		phba->hbqs[i].buffer_count = 0;
1711 	}
1712 	/* Return all HBQ buffers that are still in flight */
1713 	list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
1714 				 list) {
1715 		hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1716 		list_del(&hbq_buf->dbuf.list);
1717 		if (hbq_buf->tag == -1) {
1718 			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1719 				(phba, hbq_buf);
1720 		} else {
1721 			hbqno = hbq_buf->tag >> 16;
1722 			if (hbqno >= LPFC_MAX_HBQS)
1723 				(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1724 					(phba, hbq_buf);
1725 			else
1726 				(phba->hbqs[hbqno].hbq_free_buffer)(phba,
1727 					hbq_buf);
1728 		}
1729 	}
1730 
1731 	/* Mark the HBQs not in use */
1732 	phba->hbq_in_use = 0;
1733 	spin_unlock_irqrestore(&phba->hbalock, flags);
1734 }
1735 
1736 /**
1737  * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
1738  * @phba: Pointer to HBA context object.
1739  * @hbqno: HBQ number.
1740  * @hbq_buf: Pointer to HBQ buffer.
1741  *
1742  * This function is called with the hbalock held to post a
1743  * hbq buffer to the firmware. If the function finds an empty
1744  * slot in the HBQ, it will post the buffer. The function will
1745  * return zero if it successfully posts the buffer, otherwise it
1746  * will return an error.
1747  **/
1748 static int
1749 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
1750 			 struct hbq_dmabuf *hbq_buf)
1751 {
1752 	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1753 }
1754 
1755 /**
1756  * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1757  * @phba: Pointer to HBA context object.
1758  * @hbqno: HBQ number.
1759  * @hbq_buf: Pointer to HBQ buffer.
1760  *
1761  * This function is called with the hbalock held to post a hbq buffer to the
1762  * firmware. If the function finds an empty slot in the HBQ, it will post the
1763  * buffer and place it on the hbq_buffer_list. The function will return zero
1764  * if it successfully posts the buffer, else it will return an error.
1765 **/ 1766 static int 1767 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, 1768 struct hbq_dmabuf *hbq_buf) 1769 { 1770 struct lpfc_hbq_entry *hbqe; 1771 dma_addr_t physaddr = hbq_buf->dbuf.phys; 1772 1773 /* Get next HBQ entry slot to use */ 1774 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); 1775 if (hbqe) { 1776 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1777 1778 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 1779 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr)); 1780 hbqe->bde.tus.f.bdeSize = hbq_buf->size; 1781 hbqe->bde.tus.f.bdeFlags = 0; 1782 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w); 1783 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag); 1784 /* Sync SLIM */ 1785 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx; 1786 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno); 1787 /* flush */ 1788 readl(phba->hbq_put + hbqno); 1789 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); 1790 return 0; 1791 } else 1792 return -ENOMEM; 1793 } 1794 1795 /** 1796 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware 1797 * @phba: Pointer to HBA context object. 1798 * @hbqno: HBQ number. 1799 * @hbq_buf: Pointer to HBQ buffer. 1800 * 1801 * This function is called with the hbalock held to post an RQE to the SLI4 1802 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to 1803 * the hbq_buffer_list and return zero, otherwise it will return an error. 1804 **/ 1805 static int 1806 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, 1807 struct hbq_dmabuf *hbq_buf) 1808 { 1809 int rc; 1810 struct lpfc_rqe hrqe; 1811 struct lpfc_rqe drqe; 1812 1813 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); 1814 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); 1815 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); 1816 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys); 1817 rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 1818 &hrqe, &drqe); 1819 if (rc < 0) 1820 return rc; 1821 hbq_buf->tag = rc; 1822 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list); 1823 return 0; 1824 } 1825 1826 /* HBQ for ELS and CT traffic. */ 1827 static struct lpfc_hbq_init lpfc_els_hbq = { 1828 .rn = 1, 1829 .entry_count = 256, 1830 .mask_count = 0, 1831 .profile = 0, 1832 .ring_mask = (1 << LPFC_ELS_RING), 1833 .buffer_count = 0, 1834 .init_count = 40, 1835 .add_count = 40, 1836 }; 1837 1838 /* HBQ for the extra ring if needed */ 1839 static struct lpfc_hbq_init lpfc_extra_hbq = { 1840 .rn = 1, 1841 .entry_count = 200, 1842 .mask_count = 0, 1843 .profile = 0, 1844 .ring_mask = (1 << LPFC_EXTRA_RING), 1845 .buffer_count = 0, 1846 .init_count = 0, 1847 .add_count = 5, 1848 }; 1849 1850 /* Array of HBQs */ 1851 struct lpfc_hbq_init *lpfc_hbq_defs[] = { 1852 &lpfc_els_hbq, 1853 &lpfc_extra_hbq, 1854 }; 1855 1856 /** 1857 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ 1858 * @phba: Pointer to HBA context object. 1859 * @hbqno: HBQ number. 1860 * @count: Number of HBQ buffers to be posted. 1861 * 1862 * This function is called with no lock held to post more hbq buffers to the 1863 * given HBQ. The function returns the number of HBQ buffers successfully 1864 * posted. 
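 *
 * Each buffer posted here is stamped with a tag that packs the HBQ number
 * into the upper 16 bits and the current buffer count into the lower 16
 * bits, so later lookups can recover the owning HBQ. As a sketch of the
 * encoding used in the loop below:
 *
 *	tag   = buffer_count | (hbqno << 16);   // assigned when posting
 *	hbqno = tag >> 16;                       // recovered by lpfc_sli_hbqbuf_find()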
1865 **/ 1866 static int 1867 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) 1868 { 1869 uint32_t i, posted = 0; 1870 unsigned long flags; 1871 struct hbq_dmabuf *hbq_buffer; 1872 LIST_HEAD(hbq_buf_list); 1873 if (!phba->hbqs[hbqno].hbq_alloc_buffer) 1874 return 0; 1875 1876 if ((phba->hbqs[hbqno].buffer_count + count) > 1877 lpfc_hbq_defs[hbqno]->entry_count) 1878 count = lpfc_hbq_defs[hbqno]->entry_count - 1879 phba->hbqs[hbqno].buffer_count; 1880 if (!count) 1881 return 0; 1882 /* Allocate HBQ entries */ 1883 for (i = 0; i < count; i++) { 1884 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 1885 if (!hbq_buffer) 1886 break; 1887 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list); 1888 } 1889 /* Check whether HBQ is still in use */ 1890 spin_lock_irqsave(&phba->hbalock, flags); 1891 if (!phba->hbq_in_use) 1892 goto err; 1893 while (!list_empty(&hbq_buf_list)) { 1894 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 1895 dbuf.list); 1896 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 1897 (hbqno << 16)); 1898 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1899 phba->hbqs[hbqno].buffer_count++; 1900 posted++; 1901 } else 1902 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1903 } 1904 spin_unlock_irqrestore(&phba->hbalock, flags); 1905 return posted; 1906 err: 1907 spin_unlock_irqrestore(&phba->hbalock, flags); 1908 while (!list_empty(&hbq_buf_list)) { 1909 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 1910 dbuf.list); 1911 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1912 } 1913 return 0; 1914 } 1915 1916 /** 1917 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware 1918 * @phba: Pointer to HBA context object. 1919 * @qno: HBQ number. 1920 * 1921 * This function posts more buffers to the HBQ. This function 1922 * is called with no lock held. The function returns the number of HBQ entries 1923 * successfully allocated. 1924 **/ 1925 int 1926 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 1927 { 1928 if (phba->sli_rev == LPFC_SLI_REV4) 1929 return 0; 1930 else 1931 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1932 lpfc_hbq_defs[qno]->add_count); 1933 } 1934 1935 /** 1936 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ 1937 * @phba: Pointer to HBA context object. 1938 * @qno: HBQ queue number. 1939 * 1940 * This function is called from SLI initialization code path with 1941 * no lock held to post initial HBQ buffers to firmware. The 1942 * function returns the number of HBQ entries successfully allocated. 1943 **/ 1944 static int 1945 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) 1946 { 1947 if (phba->sli_rev == LPFC_SLI_REV4) 1948 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1949 lpfc_hbq_defs[qno]->entry_count); 1950 else 1951 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1952 lpfc_hbq_defs[qno]->init_count); 1953 } 1954 1955 /** 1956 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list 1957 * @phba: Pointer to HBA context object. 1958 * @hbqno: HBQ number. 1959 * 1960 * This function removes the first hbq buffer on an hbq list and returns a 1961 * pointer to that buffer. If it finds no buffers on the list it returns NULL. 
1962  **/
1963 static struct hbq_dmabuf *
1964 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
1965 {
1966 	struct lpfc_dmabuf *d_buf;
1967 
1968 	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
1969 	if (!d_buf)
1970 		return NULL;
1971 	return container_of(d_buf, struct hbq_dmabuf, dbuf);
1972 }
1973 
1974 /**
1975  * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
1976  * @phba: Pointer to HBA context object.
1977  * @tag: Tag of the hbq buffer.
1978  *
1979  * This function searches for the hbq buffer associated with the given tag
1980  * in the hbq buffer list, acquiring the hbalock while it walks the list.
1981  * If it finds the hbq buffer, it returns the hbq_buffer, otherwise it
1982  * returns NULL.
1983  **/
1984 static struct hbq_dmabuf *
1985 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
1986 {
1987 	struct lpfc_dmabuf *d_buf;
1988 	struct hbq_dmabuf *hbq_buf;
1989 	uint32_t hbqno;
1990 
1991 	hbqno = tag >> 16;
1992 	if (hbqno >= LPFC_MAX_HBQS)
1993 		return NULL;
1994 
1995 	spin_lock_irq(&phba->hbalock);
1996 	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
1997 		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
1998 		if (hbq_buf->tag == tag) {
1999 			spin_unlock_irq(&phba->hbalock);
2000 			return hbq_buf;
2001 		}
2002 	}
2003 	spin_unlock_irq(&phba->hbalock);
2004 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
2005 			"1803 Bad hbq tag. Data: x%x x%x\n",
2006 			tag, phba->hbqs[tag >> 16].buffer_count);
2007 	return NULL;
2008 }
2009 
2010 /**
2011  * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2012  * @phba: Pointer to HBA context object.
2013  * @hbq_buffer: Pointer to HBQ buffer.
2014  *
2015  * This function is called with the hbalock held. This function gives back
2016  * the hbq buffer to firmware. If the HBQ does not have space to
2017  * post the buffer, it will free the buffer.
2018  **/
2019 void
2020 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2021 {
2022 	uint32_t hbqno;
2023 
2024 	if (hbq_buffer) {
2025 		hbqno = hbq_buffer->tag >> 16;
2026 		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2027 			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2028 	}
2029 }
2030 
2031 /**
2032  * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2033  * @mbxCommand: mailbox command code.
2034  *
2035  * This function is called by the mailbox event handler function to verify
2036  * that the completed mailbox command is a legitimate mailbox command. If the
2037  * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2038  * and the mailbox event handler will take the HBA offline.
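 *
 * A minimal usage sketch, mirroring the check made by
 * lpfc_sli_handle_mb_event() further below:
 *
 *	if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == MBX_SHUTDOWN) {
 *		phba->link_state = LPFC_HBA_ERROR;
 *		phba->work_hs = HS_FFER3;
 *		lpfc_handle_eratt(phba);
 *	}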
2039 **/ 2040 static int 2041 lpfc_sli_chk_mbx_command(uint8_t mbxCommand) 2042 { 2043 uint8_t ret; 2044 2045 switch (mbxCommand) { 2046 case MBX_LOAD_SM: 2047 case MBX_READ_NV: 2048 case MBX_WRITE_NV: 2049 case MBX_WRITE_VPARMS: 2050 case MBX_RUN_BIU_DIAG: 2051 case MBX_INIT_LINK: 2052 case MBX_DOWN_LINK: 2053 case MBX_CONFIG_LINK: 2054 case MBX_CONFIG_RING: 2055 case MBX_RESET_RING: 2056 case MBX_READ_CONFIG: 2057 case MBX_READ_RCONFIG: 2058 case MBX_READ_SPARM: 2059 case MBX_READ_STATUS: 2060 case MBX_READ_RPI: 2061 case MBX_READ_XRI: 2062 case MBX_READ_REV: 2063 case MBX_READ_LNK_STAT: 2064 case MBX_REG_LOGIN: 2065 case MBX_UNREG_LOGIN: 2066 case MBX_CLEAR_LA: 2067 case MBX_DUMP_MEMORY: 2068 case MBX_DUMP_CONTEXT: 2069 case MBX_RUN_DIAGS: 2070 case MBX_RESTART: 2071 case MBX_UPDATE_CFG: 2072 case MBX_DOWN_LOAD: 2073 case MBX_DEL_LD_ENTRY: 2074 case MBX_RUN_PROGRAM: 2075 case MBX_SET_MASK: 2076 case MBX_SET_VARIABLE: 2077 case MBX_UNREG_D_ID: 2078 case MBX_KILL_BOARD: 2079 case MBX_CONFIG_FARP: 2080 case MBX_BEACON: 2081 case MBX_LOAD_AREA: 2082 case MBX_RUN_BIU_DIAG64: 2083 case MBX_CONFIG_PORT: 2084 case MBX_READ_SPARM64: 2085 case MBX_READ_RPI64: 2086 case MBX_REG_LOGIN64: 2087 case MBX_READ_TOPOLOGY: 2088 case MBX_WRITE_WWN: 2089 case MBX_SET_DEBUG: 2090 case MBX_LOAD_EXP_ROM: 2091 case MBX_ASYNCEVT_ENABLE: 2092 case MBX_REG_VPI: 2093 case MBX_UNREG_VPI: 2094 case MBX_HEARTBEAT: 2095 case MBX_PORT_CAPABILITIES: 2096 case MBX_PORT_IOV_CONTROL: 2097 case MBX_SLI4_CONFIG: 2098 case MBX_SLI4_REQ_FTRS: 2099 case MBX_REG_FCFI: 2100 case MBX_UNREG_FCFI: 2101 case MBX_REG_VFI: 2102 case MBX_UNREG_VFI: 2103 case MBX_INIT_VPI: 2104 case MBX_INIT_VFI: 2105 case MBX_RESUME_RPI: 2106 case MBX_READ_EVENT_LOG_STATUS: 2107 case MBX_READ_EVENT_LOG: 2108 case MBX_SECURITY_MGMT: 2109 case MBX_AUTH_PORT: 2110 case MBX_ACCESS_VDATA: 2111 ret = mbxCommand; 2112 break; 2113 default: 2114 ret = MBX_SHUTDOWN; 2115 break; 2116 } 2117 return ret; 2118 } 2119 2120 /** 2121 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler 2122 * @phba: Pointer to HBA context object. 2123 * @pmboxq: Pointer to mailbox command. 2124 * 2125 * This is completion handler function for mailbox commands issued from 2126 * lpfc_sli_issue_mbox_wait function. This function is called by the 2127 * mailbox event handler function with no lock held. This function 2128 * will wake up thread waiting on the wait queue pointed by context1 2129 * of the mailbox. 2130 **/ 2131 void 2132 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 2133 { 2134 wait_queue_head_t *pdone_q; 2135 unsigned long drvr_flag; 2136 2137 /* 2138 * If pdone_q is empty, the driver thread gave up waiting and 2139 * continued running. 2140 */ 2141 pmboxq->mbox_flag |= LPFC_MBX_WAKE; 2142 spin_lock_irqsave(&phba->hbalock, drvr_flag); 2143 pdone_q = (wait_queue_head_t *) pmboxq->context1; 2144 if (pdone_q) 2145 wake_up_interruptible(pdone_q); 2146 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2147 return; 2148 } 2149 2150 2151 /** 2152 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler 2153 * @phba: Pointer to HBA context object. 2154 * @pmb: Pointer to mailbox object. 2155 * 2156 * This function is the default mailbox completion handler. It 2157 * frees the memory resources associated with the completed mailbox 2158 * command. If the completed command is a REG_LOGIN mailbox command, 2159 * this function will issue a UREG_LOGIN to re-claim the RPI. 
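 *
 * The RPI reclaim reuses the just-completed mailbox rather than allocating
 * a new one; a condensed sketch of the sequence in the body below (the
 * reissued mailbox completes back into this same handler):
 *
 *	lpfc_unreg_login(phba, vpi, rpi, pmb);
 *	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 *	if (rc != MBX_NOT_FINISHED)
 *		return;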
2160  **/
2161 void
2162 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2163 {
2164 	struct lpfc_vport *vport = pmb->vport;
2165 	struct lpfc_dmabuf *mp;
2166 	struct lpfc_nodelist *ndlp;
2167 	struct Scsi_Host *shost;
2168 	uint16_t rpi, vpi;
2169 	int rc;
2170 
2171 	mp = (struct lpfc_dmabuf *) (pmb->context1);
2172 
2173 	if (mp) {
2174 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
2175 		kfree(mp);
2176 	}
2177 
2178 	/*
2179 	 * If a REG_LOGIN succeeded after the node was destroyed or the node
2180 	 * is in re-discovery, the driver needs to clean up the RPI.
2181 	 */
2182 	if (!(phba->pport->load_flag & FC_UNLOADING) &&
2183 	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2184 	    !pmb->u.mb.mbxStatus) {
2185 		rpi = pmb->u.mb.un.varWords[0];
2186 		vpi = pmb->u.mb.un.varRegLogin.vpi;
2187 		lpfc_unreg_login(phba, vpi, rpi, pmb);
2188 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2189 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2190 		if (rc != MBX_NOT_FINISHED)
2191 			return;
2192 	}
2193 
2194 	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2195 	    !(phba->pport->load_flag & FC_UNLOADING) &&
2196 	    !pmb->u.mb.mbxStatus) {
2197 		shost = lpfc_shost_from_vport(vport);
2198 		spin_lock_irq(shost->host_lock);
2199 		vport->vpi_state |= LPFC_VPI_REGISTERED;
2200 		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2201 		spin_unlock_irq(shost->host_lock);
2202 	}
2203 
2204 	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2205 		ndlp = (struct lpfc_nodelist *)pmb->context2;
2206 		lpfc_nlp_put(ndlp);
2207 		pmb->context2 = NULL;
2208 	}
2209 
2210 	/* Check security permission status on INIT_LINK mailbox command */
2211 	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2212 	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2213 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2214 				"2860 SLI authentication is required "
2215 				"for INIT_LINK but has not done yet\n");
2216 
2217 	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2218 		lpfc_sli4_mbox_cmd_free(phba, pmb);
2219 	else
2220 		mempool_free(pmb, phba->mbox_mem_pool);
2221 }
2222 
2223 /**
2224  * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2225  * @phba: Pointer to HBA context object.
2226  *
2227  * This function is called with no lock held. This function processes all
2228  * the completed mailbox commands and gives them to the upper layers. The
2229  * interrupt service routine processes the mailbox completion interrupt and
2230  * adds completed mailbox commands to the mboxq_cmpl queue and signals the
2231  * worker thread. The worker thread calls lpfc_sli_handle_mb_event, which
2232  * will return the completed mailbox commands in the mboxq_cmpl queue to the
2233  * upper layers. This function returns the mailbox commands to the upper
2234  * layer by calling the completion handler function of each mailbox.
2235 **/ 2236 int 2237 lpfc_sli_handle_mb_event(struct lpfc_hba *phba) 2238 { 2239 MAILBOX_t *pmbox; 2240 LPFC_MBOXQ_t *pmb; 2241 int rc; 2242 LIST_HEAD(cmplq); 2243 2244 phba->sli.slistat.mbox_event++; 2245 2246 /* Get all completed mailboxe buffers into the cmplq */ 2247 spin_lock_irq(&phba->hbalock); 2248 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq); 2249 spin_unlock_irq(&phba->hbalock); 2250 2251 /* Get a Mailbox buffer to setup mailbox commands for callback */ 2252 do { 2253 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list); 2254 if (pmb == NULL) 2255 break; 2256 2257 pmbox = &pmb->u.mb; 2258 2259 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 2260 if (pmb->vport) { 2261 lpfc_debugfs_disc_trc(pmb->vport, 2262 LPFC_DISC_TRC_MBOX_VPORT, 2263 "MBOX cmpl vport: cmd:x%x mb:x%x x%x", 2264 (uint32_t)pmbox->mbxCommand, 2265 pmbox->un.varWords[0], 2266 pmbox->un.varWords[1]); 2267 } 2268 else { 2269 lpfc_debugfs_disc_trc(phba->pport, 2270 LPFC_DISC_TRC_MBOX, 2271 "MBOX cmpl: cmd:x%x mb:x%x x%x", 2272 (uint32_t)pmbox->mbxCommand, 2273 pmbox->un.varWords[0], 2274 pmbox->un.varWords[1]); 2275 } 2276 } 2277 2278 /* 2279 * It is a fatal error if unknown mbox command completion. 2280 */ 2281 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == 2282 MBX_SHUTDOWN) { 2283 /* Unknown mailbox command compl */ 2284 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2285 "(%d):0323 Unknown Mailbox command " 2286 "x%x (x%x/x%x) Cmpl\n", 2287 pmb->vport ? pmb->vport->vpi : 0, 2288 pmbox->mbxCommand, 2289 lpfc_sli_config_mbox_subsys_get(phba, 2290 pmb), 2291 lpfc_sli_config_mbox_opcode_get(phba, 2292 pmb)); 2293 phba->link_state = LPFC_HBA_ERROR; 2294 phba->work_hs = HS_FFER3; 2295 lpfc_handle_eratt(phba); 2296 continue; 2297 } 2298 2299 if (pmbox->mbxStatus) { 2300 phba->sli.slistat.mbox_stat_err++; 2301 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 2302 /* Mbox cmd cmpl error - RETRYing */ 2303 lpfc_printf_log(phba, KERN_INFO, 2304 LOG_MBOX | LOG_SLI, 2305 "(%d):0305 Mbox cmd cmpl " 2306 "error - RETRYing Data: x%x " 2307 "(x%x/x%x) x%x x%x x%x\n", 2308 pmb->vport ? pmb->vport->vpi : 0, 2309 pmbox->mbxCommand, 2310 lpfc_sli_config_mbox_subsys_get(phba, 2311 pmb), 2312 lpfc_sli_config_mbox_opcode_get(phba, 2313 pmb), 2314 pmbox->mbxStatus, 2315 pmbox->un.varWords[0], 2316 pmb->vport->port_state); 2317 pmbox->mbxStatus = 0; 2318 pmbox->mbxOwner = OWN_HOST; 2319 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2320 if (rc != MBX_NOT_FINISHED) 2321 continue; 2322 } 2323 } 2324 2325 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 2326 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2327 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " 2328 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 2329 "x%x x%x x%x\n", 2330 pmb->vport ? pmb->vport->vpi : 0, 2331 pmbox->mbxCommand, 2332 lpfc_sli_config_mbox_subsys_get(phba, pmb), 2333 lpfc_sli_config_mbox_opcode_get(phba, pmb), 2334 pmb->mbox_cmpl, 2335 *((uint32_t *) pmbox), 2336 pmbox->un.varWords[0], 2337 pmbox->un.varWords[1], 2338 pmbox->un.varWords[2], 2339 pmbox->un.varWords[3], 2340 pmbox->un.varWords[4], 2341 pmbox->un.varWords[5], 2342 pmbox->un.varWords[6], 2343 pmbox->un.varWords[7], 2344 pmbox->un.varWords[8], 2345 pmbox->un.varWords[9], 2346 pmbox->un.varWords[10]); 2347 2348 if (pmb->mbox_cmpl) 2349 pmb->mbox_cmpl(phba,pmb); 2350 } while (1); 2351 return 0; 2352 } 2353 2354 /** 2355 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag 2356 * @phba: Pointer to HBA context object. 2357 * @pring: Pointer to driver SLI ring object. 2358 * @tag: buffer tag. 
2359 * 2360 * This function is called with no lock held. When QUE_BUFTAG_BIT bit 2361 * is set in the tag the buffer is posted for a particular exchange, 2362 * the function will return the buffer without replacing the buffer. 2363 * If the buffer is for unsolicited ELS or CT traffic, this function 2364 * returns the buffer and also posts another buffer to the firmware. 2365 **/ 2366 static struct lpfc_dmabuf * 2367 lpfc_sli_get_buff(struct lpfc_hba *phba, 2368 struct lpfc_sli_ring *pring, 2369 uint32_t tag) 2370 { 2371 struct hbq_dmabuf *hbq_entry; 2372 2373 if (tag & QUE_BUFTAG_BIT) 2374 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); 2375 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); 2376 if (!hbq_entry) 2377 return NULL; 2378 return &hbq_entry->dbuf; 2379 } 2380 2381 /** 2382 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence 2383 * @phba: Pointer to HBA context object. 2384 * @pring: Pointer to driver SLI ring object. 2385 * @saveq: Pointer to the iocbq struct representing the sequence starting frame. 2386 * @fch_r_ctl: the r_ctl for the first frame of the sequence. 2387 * @fch_type: the type for the first frame of the sequence. 2388 * 2389 * This function is called with no lock held. This function uses the r_ctl and 2390 * type of the received sequence to find the correct callback function to call 2391 * to process the sequence. 2392 **/ 2393 static int 2394 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2395 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, 2396 uint32_t fch_type) 2397 { 2398 int i; 2399 2400 /* unSolicited Responses */ 2401 if (pring->prt[0].profile) { 2402 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 2403 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, 2404 saveq); 2405 return 1; 2406 } 2407 /* We must search, based on rctl / type 2408 for the right routine */ 2409 for (i = 0; i < pring->num_mask; i++) { 2410 if ((pring->prt[i].rctl == fch_r_ctl) && 2411 (pring->prt[i].type == fch_type)) { 2412 if (pring->prt[i].lpfc_sli_rcv_unsol_event) 2413 (pring->prt[i].lpfc_sli_rcv_unsol_event) 2414 (phba, pring, saveq); 2415 return 1; 2416 } 2417 } 2418 return 0; 2419 } 2420 2421 /** 2422 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 2423 * @phba: Pointer to HBA context object. 2424 * @pring: Pointer to driver SLI ring object. 2425 * @saveq: Pointer to the unsolicited iocb. 2426 * 2427 * This function is called with no lock held by the ring event handler 2428 * when there is an unsolicited iocb posted to the response ring by the 2429 * firmware. This function gets the buffer associated with the iocbs 2430 * and calls the event handler for the ring. This function handles both 2431 * qring buffers and hbq buffers. 2432 * When the function returns 1 the caller can free the iocb object otherwise 2433 * upper layer functions will free the iocb objects. 
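 *
 * Delivery to the upper layer is keyed on the sequence's R_CTL/TYPE pair;
 * a condensed sketch of the matching done by lpfc_complete_unsol_iocb()
 * above:
 *
 *	for (i = 0; i < pring->num_mask; i++)
 *		if (pring->prt[i].rctl == fch_r_ctl &&
 *		    pring->prt[i].type == fch_type)
 *			(pring->prt[i].lpfc_sli_rcv_unsol_event)
 *						(phba, pring, saveq);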
2434 **/ 2435 static int 2436 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2437 struct lpfc_iocbq *saveq) 2438 { 2439 IOCB_t * irsp; 2440 WORD5 * w5p; 2441 uint32_t Rctl, Type; 2442 uint32_t match; 2443 struct lpfc_iocbq *iocbq; 2444 struct lpfc_dmabuf *dmzbuf; 2445 2446 match = 0; 2447 irsp = &(saveq->iocb); 2448 2449 if (irsp->ulpCommand == CMD_ASYNC_STATUS) { 2450 if (pring->lpfc_sli_rcv_async_status) 2451 pring->lpfc_sli_rcv_async_status(phba, pring, saveq); 2452 else 2453 lpfc_printf_log(phba, 2454 KERN_WARNING, 2455 LOG_SLI, 2456 "0316 Ring %d handler: unexpected " 2457 "ASYNC_STATUS iocb received evt_code " 2458 "0x%x\n", 2459 pring->ringno, 2460 irsp->un.asyncstat.evt_code); 2461 return 1; 2462 } 2463 2464 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && 2465 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { 2466 if (irsp->ulpBdeCount > 0) { 2467 dmzbuf = lpfc_sli_get_buff(phba, pring, 2468 irsp->un.ulpWord[3]); 2469 lpfc_in_buf_free(phba, dmzbuf); 2470 } 2471 2472 if (irsp->ulpBdeCount > 1) { 2473 dmzbuf = lpfc_sli_get_buff(phba, pring, 2474 irsp->unsli3.sli3Words[3]); 2475 lpfc_in_buf_free(phba, dmzbuf); 2476 } 2477 2478 if (irsp->ulpBdeCount > 2) { 2479 dmzbuf = lpfc_sli_get_buff(phba, pring, 2480 irsp->unsli3.sli3Words[7]); 2481 lpfc_in_buf_free(phba, dmzbuf); 2482 } 2483 2484 return 1; 2485 } 2486 2487 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2488 if (irsp->ulpBdeCount != 0) { 2489 saveq->context2 = lpfc_sli_get_buff(phba, pring, 2490 irsp->un.ulpWord[3]); 2491 if (!saveq->context2) 2492 lpfc_printf_log(phba, 2493 KERN_ERR, 2494 LOG_SLI, 2495 "0341 Ring %d Cannot find buffer for " 2496 "an unsolicited iocb. tag 0x%x\n", 2497 pring->ringno, 2498 irsp->un.ulpWord[3]); 2499 } 2500 if (irsp->ulpBdeCount == 2) { 2501 saveq->context3 = lpfc_sli_get_buff(phba, pring, 2502 irsp->unsli3.sli3Words[7]); 2503 if (!saveq->context3) 2504 lpfc_printf_log(phba, 2505 KERN_ERR, 2506 LOG_SLI, 2507 "0342 Ring %d Cannot find buffer for an" 2508 " unsolicited iocb. tag 0x%x\n", 2509 pring->ringno, 2510 irsp->unsli3.sli3Words[7]); 2511 } 2512 list_for_each_entry(iocbq, &saveq->list, list) { 2513 irsp = &(iocbq->iocb); 2514 if (irsp->ulpBdeCount != 0) { 2515 iocbq->context2 = lpfc_sli_get_buff(phba, pring, 2516 irsp->un.ulpWord[3]); 2517 if (!iocbq->context2) 2518 lpfc_printf_log(phba, 2519 KERN_ERR, 2520 LOG_SLI, 2521 "0343 Ring %d Cannot find " 2522 "buffer for an unsolicited iocb" 2523 ". tag 0x%x\n", pring->ringno, 2524 irsp->un.ulpWord[3]); 2525 } 2526 if (irsp->ulpBdeCount == 2) { 2527 iocbq->context3 = lpfc_sli_get_buff(phba, pring, 2528 irsp->unsli3.sli3Words[7]); 2529 if (!iocbq->context3) 2530 lpfc_printf_log(phba, 2531 KERN_ERR, 2532 LOG_SLI, 2533 "0344 Ring %d Cannot find " 2534 "buffer for an unsolicited " 2535 "iocb. 
tag 0x%x\n", 2536 pring->ringno, 2537 irsp->unsli3.sli3Words[7]); 2538 } 2539 } 2540 } 2541 if (irsp->ulpBdeCount != 0 && 2542 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || 2543 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { 2544 int found = 0; 2545 2546 /* search continue save q for same XRI */ 2547 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { 2548 if (iocbq->iocb.unsli3.rcvsli3.ox_id == 2549 saveq->iocb.unsli3.rcvsli3.ox_id) { 2550 list_add_tail(&saveq->list, &iocbq->list); 2551 found = 1; 2552 break; 2553 } 2554 } 2555 if (!found) 2556 list_add_tail(&saveq->clist, 2557 &pring->iocb_continue_saveq); 2558 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { 2559 list_del_init(&iocbq->clist); 2560 saveq = iocbq; 2561 irsp = &(saveq->iocb); 2562 } else 2563 return 0; 2564 } 2565 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || 2566 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || 2567 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { 2568 Rctl = FC_RCTL_ELS_REQ; 2569 Type = FC_TYPE_ELS; 2570 } else { 2571 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); 2572 Rctl = w5p->hcsw.Rctl; 2573 Type = w5p->hcsw.Type; 2574 2575 /* Firmware Workaround */ 2576 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 2577 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 2578 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 2579 Rctl = FC_RCTL_ELS_REQ; 2580 Type = FC_TYPE_ELS; 2581 w5p->hcsw.Rctl = Rctl; 2582 w5p->hcsw.Type = Type; 2583 } 2584 } 2585 2586 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) 2587 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2588 "0313 Ring %d handler: unexpected Rctl x%x " 2589 "Type x%x received\n", 2590 pring->ringno, Rctl, Type); 2591 2592 return 1; 2593 } 2594 2595 /** 2596 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb 2597 * @phba: Pointer to HBA context object. 2598 * @pring: Pointer to driver SLI ring object. 2599 * @prspiocb: Pointer to response iocb object. 2600 * 2601 * This function looks up the iocb_lookup table to get the command iocb 2602 * corresponding to the given response iocb using the iotag of the 2603 * response iocb. This function is called with the hbalock held. 2604 * This function returns the command iocb object if it finds the command 2605 * iocb else returns NULL. 2606 **/ 2607 static struct lpfc_iocbq * 2608 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, 2609 struct lpfc_sli_ring *pring, 2610 struct lpfc_iocbq *prspiocb) 2611 { 2612 struct lpfc_iocbq *cmd_iocb = NULL; 2613 uint16_t iotag; 2614 2615 iotag = prspiocb->iocb.ulpIoTag; 2616 2617 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2618 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2619 list_del_init(&cmd_iocb->list); 2620 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 2621 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 2622 } 2623 return cmd_iocb; 2624 } 2625 2626 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2627 "0317 iotag x%x is out off " 2628 "range: max iotag x%x wd0 x%x\n", 2629 iotag, phba->sli.last_iotag, 2630 *(((uint32_t *) &prspiocb->iocb) + 7)); 2631 return NULL; 2632 } 2633 2634 /** 2635 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag 2636 * @phba: Pointer to HBA context object. 2637 * @pring: Pointer to driver SLI ring object. 2638 * @iotag: IOCB tag. 2639 * 2640 * This function looks up the iocb_lookup table to get the command iocb 2641 * corresponding to the given iotag. This function is called with the 2642 * hbalock held. 2643 * This function returns the command iocb object if it finds the command 2644 * iocb else returns NULL. 
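 *
 * For illustration, the lookup itself is a direct array index; a sketch of
 * the range check and array access performed below (the txcmplq flag
 * handling is omitted here):
 *
 *	if (iotag != 0 && iotag <= phba->sli.last_iotag)
 *		cmd_iocb = phba->sli.iocbq_lookup[iotag];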
2645 **/ 2646 static struct lpfc_iocbq * 2647 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, 2648 struct lpfc_sli_ring *pring, uint16_t iotag) 2649 { 2650 struct lpfc_iocbq *cmd_iocb; 2651 2652 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2653 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2654 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 2655 /* remove from txcmpl queue list */ 2656 list_del_init(&cmd_iocb->list); 2657 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 2658 return cmd_iocb; 2659 } 2660 } 2661 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2662 "0372 iotag x%x is out off range: max iotag (x%x)\n", 2663 iotag, phba->sli.last_iotag); 2664 return NULL; 2665 } 2666 2667 /** 2668 * lpfc_sli_process_sol_iocb - process solicited iocb completion 2669 * @phba: Pointer to HBA context object. 2670 * @pring: Pointer to driver SLI ring object. 2671 * @saveq: Pointer to the response iocb to be processed. 2672 * 2673 * This function is called by the ring event handler for non-fcp 2674 * rings when there is a new response iocb in the response ring. 2675 * The caller is not required to hold any locks. This function 2676 * gets the command iocb associated with the response iocb and 2677 * calls the completion handler for the command iocb. If there 2678 * is no completion handler, the function will free the resources 2679 * associated with command iocb. If the response iocb is for 2680 * an already aborted command iocb, the status of the completion 2681 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. 2682 * This function always returns 1. 2683 **/ 2684 static int 2685 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2686 struct lpfc_iocbq *saveq) 2687 { 2688 struct lpfc_iocbq *cmdiocbp; 2689 int rc = 1; 2690 unsigned long iflag; 2691 2692 /* Based on the iotag field, get the cmd IOCB from the txcmplq */ 2693 spin_lock_irqsave(&phba->hbalock, iflag); 2694 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 2695 spin_unlock_irqrestore(&phba->hbalock, iflag); 2696 2697 if (cmdiocbp) { 2698 if (cmdiocbp->iocb_cmpl) { 2699 /* 2700 * If an ELS command failed send an event to mgmt 2701 * application. 2702 */ 2703 if (saveq->iocb.ulpStatus && 2704 (pring->ringno == LPFC_ELS_RING) && 2705 (cmdiocbp->iocb.ulpCommand == 2706 CMD_ELS_REQUEST64_CR)) 2707 lpfc_send_els_failure_event(phba, 2708 cmdiocbp, saveq); 2709 2710 /* 2711 * Post all ELS completions to the worker thread. 2712 * All other are passed to the completion callback. 2713 */ 2714 if (pring->ringno == LPFC_ELS_RING) { 2715 if ((phba->sli_rev < LPFC_SLI_REV4) && 2716 (cmdiocbp->iocb_flag & 2717 LPFC_DRIVER_ABORTED)) { 2718 spin_lock_irqsave(&phba->hbalock, 2719 iflag); 2720 cmdiocbp->iocb_flag &= 2721 ~LPFC_DRIVER_ABORTED; 2722 spin_unlock_irqrestore(&phba->hbalock, 2723 iflag); 2724 saveq->iocb.ulpStatus = 2725 IOSTAT_LOCAL_REJECT; 2726 saveq->iocb.un.ulpWord[4] = 2727 IOERR_SLI_ABORTED; 2728 2729 /* Firmware could still be in progress 2730 * of DMAing payload, so don't free data 2731 * buffer till after a hbeat. 2732 */ 2733 spin_lock_irqsave(&phba->hbalock, 2734 iflag); 2735 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 2736 spin_unlock_irqrestore(&phba->hbalock, 2737 iflag); 2738 } 2739 if (phba->sli_rev == LPFC_SLI_REV4) { 2740 if (saveq->iocb_flag & 2741 LPFC_EXCHANGE_BUSY) { 2742 /* Set cmdiocb flag for the 2743 * exchange busy so sgl (xri) 2744 * will not be released until 2745 * the abort xri is received 2746 * from hba. 
2747 */ 2748 spin_lock_irqsave( 2749 &phba->hbalock, iflag); 2750 cmdiocbp->iocb_flag |= 2751 LPFC_EXCHANGE_BUSY; 2752 spin_unlock_irqrestore( 2753 &phba->hbalock, iflag); 2754 } 2755 if (cmdiocbp->iocb_flag & 2756 LPFC_DRIVER_ABORTED) { 2757 /* 2758 * Clear LPFC_DRIVER_ABORTED 2759 * bit in case it was driver 2760 * initiated abort. 2761 */ 2762 spin_lock_irqsave( 2763 &phba->hbalock, iflag); 2764 cmdiocbp->iocb_flag &= 2765 ~LPFC_DRIVER_ABORTED; 2766 spin_unlock_irqrestore( 2767 &phba->hbalock, iflag); 2768 cmdiocbp->iocb.ulpStatus = 2769 IOSTAT_LOCAL_REJECT; 2770 cmdiocbp->iocb.un.ulpWord[4] = 2771 IOERR_ABORT_REQUESTED; 2772 /* 2773 * For SLI4, irsiocb contains 2774 * NO_XRI in sli_xritag, it 2775 * shall not affect releasing 2776 * sgl (xri) process. 2777 */ 2778 saveq->iocb.ulpStatus = 2779 IOSTAT_LOCAL_REJECT; 2780 saveq->iocb.un.ulpWord[4] = 2781 IOERR_SLI_ABORTED; 2782 spin_lock_irqsave( 2783 &phba->hbalock, iflag); 2784 saveq->iocb_flag |= 2785 LPFC_DELAY_MEM_FREE; 2786 spin_unlock_irqrestore( 2787 &phba->hbalock, iflag); 2788 } 2789 } 2790 } 2791 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 2792 } else 2793 lpfc_sli_release_iocbq(phba, cmdiocbp); 2794 } else { 2795 /* 2796 * Unknown initiating command based on the response iotag. 2797 * This could be the case on the ELS ring because of 2798 * lpfc_els_abort(). 2799 */ 2800 if (pring->ringno != LPFC_ELS_RING) { 2801 /* 2802 * Ring <ringno> handler: unexpected completion IoTag 2803 * <IoTag> 2804 */ 2805 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2806 "0322 Ring %d handler: " 2807 "unexpected completion IoTag x%x " 2808 "Data: x%x x%x x%x x%x\n", 2809 pring->ringno, 2810 saveq->iocb.ulpIoTag, 2811 saveq->iocb.ulpStatus, 2812 saveq->iocb.un.ulpWord[4], 2813 saveq->iocb.ulpCommand, 2814 saveq->iocb.ulpContext); 2815 } 2816 } 2817 2818 return rc; 2819 } 2820 2821 /** 2822 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler 2823 * @phba: Pointer to HBA context object. 2824 * @pring: Pointer to driver SLI ring object. 2825 * 2826 * This function is called from the iocb ring event handlers when 2827 * put pointer is ahead of the get pointer for a ring. This function signal 2828 * an error attention condition to the worker thread and the worker 2829 * thread will transition the HBA to offline state. 2830 **/ 2831 static void 2832 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 2833 { 2834 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 2835 /* 2836 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 2837 * rsp ring <portRspMax> 2838 */ 2839 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2840 "0312 Ring %d handler: portRspPut %d " 2841 "is bigger than rsp ring %d\n", 2842 pring->ringno, le32_to_cpu(pgp->rspPutInx), 2843 pring->sli.sli3.numRiocb); 2844 2845 phba->link_state = LPFC_HBA_ERROR; 2846 2847 /* 2848 * All error attention handlers are posted to 2849 * worker thread 2850 */ 2851 phba->work_ha |= HA_ERATT; 2852 phba->work_hs = HS_FFER3; 2853 2854 lpfc_worker_wake_up(phba); 2855 2856 return; 2857 } 2858 2859 /** 2860 * lpfc_poll_eratt - Error attention polling timer timeout handler 2861 * @ptr: Pointer to address of HBA context object. 2862 * 2863 * This function is invoked by the Error Attention polling timer when the 2864 * timer times out. It will check the SLI Error Attention register for 2865 * possible attention events. If so, it will post an Error Attention event 2866 * and wake up worker thread to process it. 
Otherwise, it will set up the 2867 * Error Attention polling timer for the next poll. 2868 **/ 2869 void lpfc_poll_eratt(unsigned long ptr) 2870 { 2871 struct lpfc_hba *phba; 2872 uint32_t eratt = 0, rem; 2873 uint64_t sli_intr, cnt; 2874 2875 phba = (struct lpfc_hba *)ptr; 2876 2877 /* Here we will also keep track of interrupts per sec of the hba */ 2878 sli_intr = phba->sli.slistat.sli_intr; 2879 2880 if (phba->sli.slistat.sli_prev_intr > sli_intr) 2881 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) + 2882 sli_intr); 2883 else 2884 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr); 2885 2886 /* 64-bit integer division not supporte on 32-bit x86 - use do_div */ 2887 rem = do_div(cnt, LPFC_ERATT_POLL_INTERVAL); 2888 phba->sli.slistat.sli_ips = cnt; 2889 2890 phba->sli.slistat.sli_prev_intr = sli_intr; 2891 2892 /* Check chip HA register for error event */ 2893 eratt = lpfc_sli_check_eratt(phba); 2894 2895 if (eratt) 2896 /* Tell the worker thread there is work to do */ 2897 lpfc_worker_wake_up(phba); 2898 else 2899 /* Restart the timer for next eratt poll */ 2900 mod_timer(&phba->eratt_poll, 2901 jiffies + 2902 msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); 2903 return; 2904 } 2905 2906 2907 /** 2908 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring 2909 * @phba: Pointer to HBA context object. 2910 * @pring: Pointer to driver SLI ring object. 2911 * @mask: Host attention register mask for this ring. 2912 * 2913 * This function is called from the interrupt context when there is a ring 2914 * event for the fcp ring. The caller does not hold any lock. 2915 * The function processes each response iocb in the response ring until it 2916 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with 2917 * LE bit set. The function will call the completion handler of the command iocb 2918 * if the response iocb indicates a completion for a command iocb or it is 2919 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 2920 * function if this is an unsolicited iocb. 2921 * This routine presumes LPFC_FCP_RING handling and doesn't bother 2922 * to check it explicitly. 2923 */ 2924 int 2925 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 2926 struct lpfc_sli_ring *pring, uint32_t mask) 2927 { 2928 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 2929 IOCB_t *irsp = NULL; 2930 IOCB_t *entry = NULL; 2931 struct lpfc_iocbq *cmdiocbq = NULL; 2932 struct lpfc_iocbq rspiocbq; 2933 uint32_t status; 2934 uint32_t portRspPut, portRspMax; 2935 int rc = 1; 2936 lpfc_iocb_type type; 2937 unsigned long iflag; 2938 uint32_t rsp_cmpl = 0; 2939 2940 spin_lock_irqsave(&phba->hbalock, iflag); 2941 pring->stats.iocb_event++; 2942 2943 /* 2944 * The next available response entry should never exceed the maximum 2945 * entries. If it does, treat it as an adapter hardware error. 2946 */ 2947 portRspMax = pring->sli.sli3.numRiocb; 2948 portRspPut = le32_to_cpu(pgp->rspPutInx); 2949 if (unlikely(portRspPut >= portRspMax)) { 2950 lpfc_sli_rsp_pointers_error(phba, pring); 2951 spin_unlock_irqrestore(&phba->hbalock, iflag); 2952 return 1; 2953 } 2954 if (phba->fcp_ring_in_use) { 2955 spin_unlock_irqrestore(&phba->hbalock, iflag); 2956 return 1; 2957 } else 2958 phba->fcp_ring_in_use = 1; 2959 2960 rmb(); 2961 while (pring->sli.sli3.rspidx != portRspPut) { 2962 /* 2963 * Fetch an entry off the ring and copy it into a local data 2964 * structure. The copy involves a byte-swap since the 2965 * network byte order and pci byte orders are different. 
2966 */ 2967 entry = lpfc_resp_iocb(phba, pring); 2968 phba->last_completion_time = jiffies; 2969 2970 if (++pring->sli.sli3.rspidx >= portRspMax) 2971 pring->sli.sli3.rspidx = 0; 2972 2973 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 2974 (uint32_t *) &rspiocbq.iocb, 2975 phba->iocb_rsp_size); 2976 INIT_LIST_HEAD(&(rspiocbq.list)); 2977 irsp = &rspiocbq.iocb; 2978 2979 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 2980 pring->stats.iocb_rsp++; 2981 rsp_cmpl++; 2982 2983 if (unlikely(irsp->ulpStatus)) { 2984 /* 2985 * If resource errors reported from HBA, reduce 2986 * queuedepths of the SCSI device. 2987 */ 2988 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 2989 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 2990 IOERR_NO_RESOURCES)) { 2991 spin_unlock_irqrestore(&phba->hbalock, iflag); 2992 phba->lpfc_rampdown_queue_depth(phba); 2993 spin_lock_irqsave(&phba->hbalock, iflag); 2994 } 2995 2996 /* Rsp ring <ringno> error: IOCB */ 2997 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2998 "0336 Rsp Ring %d error: IOCB Data: " 2999 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 3000 pring->ringno, 3001 irsp->un.ulpWord[0], 3002 irsp->un.ulpWord[1], 3003 irsp->un.ulpWord[2], 3004 irsp->un.ulpWord[3], 3005 irsp->un.ulpWord[4], 3006 irsp->un.ulpWord[5], 3007 *(uint32_t *)&irsp->un1, 3008 *((uint32_t *)&irsp->un1 + 1)); 3009 } 3010 3011 switch (type) { 3012 case LPFC_ABORT_IOCB: 3013 case LPFC_SOL_IOCB: 3014 /* 3015 * Idle exchange closed via ABTS from port. No iocb 3016 * resources need to be recovered. 3017 */ 3018 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 3019 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3020 "0333 IOCB cmd 0x%x" 3021 " processed. Skipping" 3022 " completion\n", 3023 irsp->ulpCommand); 3024 break; 3025 } 3026 3027 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 3028 &rspiocbq); 3029 if (unlikely(!cmdiocbq)) 3030 break; 3031 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) 3032 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 3033 if (cmdiocbq->iocb_cmpl) { 3034 spin_unlock_irqrestore(&phba->hbalock, iflag); 3035 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 3036 &rspiocbq); 3037 spin_lock_irqsave(&phba->hbalock, iflag); 3038 } 3039 break; 3040 case LPFC_UNSOL_IOCB: 3041 spin_unlock_irqrestore(&phba->hbalock, iflag); 3042 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 3043 spin_lock_irqsave(&phba->hbalock, iflag); 3044 break; 3045 default: 3046 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3047 char adaptermsg[LPFC_MAX_ADPTMSG]; 3048 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3049 memcpy(&adaptermsg[0], (uint8_t *) irsp, 3050 MAX_MSG_DATA); 3051 dev_warn(&((phba->pcidev)->dev), 3052 "lpfc%d: %s\n", 3053 phba->brd_no, adaptermsg); 3054 } else { 3055 /* Unknown IOCB command */ 3056 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3057 "0334 Unknown IOCB command " 3058 "Data: x%x, x%x x%x x%x x%x\n", 3059 type, irsp->ulpCommand, 3060 irsp->ulpStatus, 3061 irsp->ulpIoTag, 3062 irsp->ulpContext); 3063 } 3064 break; 3065 } 3066 3067 /* 3068 * The response IOCB has been processed. Update the ring 3069 * pointer in SLIM. If the port response put pointer has not 3070 * been updated, sync the pgp->rspPutInx and fetch the new port 3071 * response put pointer. 
3072 */ 3073 writel(pring->sli.sli3.rspidx, 3074 &phba->host_gp[pring->ringno].rspGetInx); 3075 3076 if (pring->sli.sli3.rspidx == portRspPut) 3077 portRspPut = le32_to_cpu(pgp->rspPutInx); 3078 } 3079 3080 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) { 3081 pring->stats.iocb_rsp_full++; 3082 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3083 writel(status, phba->CAregaddr); 3084 readl(phba->CAregaddr); 3085 } 3086 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3087 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3088 pring->stats.iocb_cmd_empty++; 3089 3090 /* Force update of the local copy of cmdGetInx */ 3091 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 3092 lpfc_sli_resume_iocb(phba, pring); 3093 3094 if ((pring->lpfc_sli_cmd_available)) 3095 (pring->lpfc_sli_cmd_available) (phba, pring); 3096 3097 } 3098 3099 phba->fcp_ring_in_use = 0; 3100 spin_unlock_irqrestore(&phba->hbalock, iflag); 3101 return rc; 3102 } 3103 3104 /** 3105 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb 3106 * @phba: Pointer to HBA context object. 3107 * @pring: Pointer to driver SLI ring object. 3108 * @rspiocbp: Pointer to driver response IOCB object. 3109 * 3110 * This function is called from the worker thread when there is a slow-path 3111 * response IOCB to process. This function chains all the response iocbs until 3112 * seeing the iocb with the LE bit set. The function will call 3113 * lpfc_sli_process_sol_iocb function if the response iocb indicates a 3114 * completion of a command iocb. The function will call the 3115 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. 3116 * The function frees the resources or calls the completion handler if this 3117 * iocb is an abort completion. The function returns NULL when the response 3118 * iocb has the LE bit set and all the chained iocbs are processed, otherwise 3119 * this function shall chain the iocb on to the iocb_continueq and return the 3120 * response iocb passed in. 3121 **/ 3122 static struct lpfc_iocbq * 3123 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3124 struct lpfc_iocbq *rspiocbp) 3125 { 3126 struct lpfc_iocbq *saveq; 3127 struct lpfc_iocbq *cmdiocbp; 3128 struct lpfc_iocbq *next_iocb; 3129 IOCB_t *irsp = NULL; 3130 uint32_t free_saveq; 3131 uint8_t iocb_cmd_type; 3132 lpfc_iocb_type type; 3133 unsigned long iflag; 3134 int rc; 3135 3136 spin_lock_irqsave(&phba->hbalock, iflag); 3137 /* First add the response iocb to the countinueq list */ 3138 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 3139 pring->iocb_continueq_cnt++; 3140 3141 /* Now, determine whether the list is completed for processing */ 3142 irsp = &rspiocbp->iocb; 3143 if (irsp->ulpLe) { 3144 /* 3145 * By default, the driver expects to free all resources 3146 * associated with this iocb completion. 3147 */ 3148 free_saveq = 1; 3149 saveq = list_get_first(&pring->iocb_continueq, 3150 struct lpfc_iocbq, list); 3151 irsp = &(saveq->iocb); 3152 list_del_init(&pring->iocb_continueq); 3153 pring->iocb_continueq_cnt = 0; 3154 3155 pring->stats.iocb_rsp++; 3156 3157 /* 3158 * If resource errors reported from HBA, reduce 3159 * queuedepths of the SCSI device. 
3160 */ 3161 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3162 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3163 IOERR_NO_RESOURCES)) { 3164 spin_unlock_irqrestore(&phba->hbalock, iflag); 3165 phba->lpfc_rampdown_queue_depth(phba); 3166 spin_lock_irqsave(&phba->hbalock, iflag); 3167 } 3168 3169 if (irsp->ulpStatus) { 3170 /* Rsp ring <ringno> error: IOCB */ 3171 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3172 "0328 Rsp Ring %d error: " 3173 "IOCB Data: " 3174 "x%x x%x x%x x%x " 3175 "x%x x%x x%x x%x " 3176 "x%x x%x x%x x%x " 3177 "x%x x%x x%x x%x\n", 3178 pring->ringno, 3179 irsp->un.ulpWord[0], 3180 irsp->un.ulpWord[1], 3181 irsp->un.ulpWord[2], 3182 irsp->un.ulpWord[3], 3183 irsp->un.ulpWord[4], 3184 irsp->un.ulpWord[5], 3185 *(((uint32_t *) irsp) + 6), 3186 *(((uint32_t *) irsp) + 7), 3187 *(((uint32_t *) irsp) + 8), 3188 *(((uint32_t *) irsp) + 9), 3189 *(((uint32_t *) irsp) + 10), 3190 *(((uint32_t *) irsp) + 11), 3191 *(((uint32_t *) irsp) + 12), 3192 *(((uint32_t *) irsp) + 13), 3193 *(((uint32_t *) irsp) + 14), 3194 *(((uint32_t *) irsp) + 15)); 3195 } 3196 3197 /* 3198 * Fetch the IOCB command type and call the correct completion 3199 * routine. Solicited and Unsolicited IOCBs on the ELS ring 3200 * get freed back to the lpfc_iocb_list by the discovery 3201 * kernel thread. 3202 */ 3203 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 3204 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 3205 switch (type) { 3206 case LPFC_SOL_IOCB: 3207 spin_unlock_irqrestore(&phba->hbalock, iflag); 3208 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); 3209 spin_lock_irqsave(&phba->hbalock, iflag); 3210 break; 3211 3212 case LPFC_UNSOL_IOCB: 3213 spin_unlock_irqrestore(&phba->hbalock, iflag); 3214 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); 3215 spin_lock_irqsave(&phba->hbalock, iflag); 3216 if (!rc) 3217 free_saveq = 0; 3218 break; 3219 3220 case LPFC_ABORT_IOCB: 3221 cmdiocbp = NULL; 3222 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) 3223 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, 3224 saveq); 3225 if (cmdiocbp) { 3226 /* Call the specified completion routine */ 3227 if (cmdiocbp->iocb_cmpl) { 3228 spin_unlock_irqrestore(&phba->hbalock, 3229 iflag); 3230 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, 3231 saveq); 3232 spin_lock_irqsave(&phba->hbalock, 3233 iflag); 3234 } else 3235 __lpfc_sli_release_iocbq(phba, 3236 cmdiocbp); 3237 } 3238 break; 3239 3240 case LPFC_UNKNOWN_IOCB: 3241 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3242 char adaptermsg[LPFC_MAX_ADPTMSG]; 3243 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3244 memcpy(&adaptermsg[0], (uint8_t *)irsp, 3245 MAX_MSG_DATA); 3246 dev_warn(&((phba->pcidev)->dev), 3247 "lpfc%d: %s\n", 3248 phba->brd_no, adaptermsg); 3249 } else { 3250 /* Unknown IOCB command */ 3251 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3252 "0335 Unknown IOCB " 3253 "command Data: x%x " 3254 "x%x x%x x%x\n", 3255 irsp->ulpCommand, 3256 irsp->ulpStatus, 3257 irsp->ulpIoTag, 3258 irsp->ulpContext); 3259 } 3260 break; 3261 } 3262 3263 if (free_saveq) { 3264 list_for_each_entry_safe(rspiocbp, next_iocb, 3265 &saveq->list, list) { 3266 list_del_init(&rspiocbp->list); 3267 __lpfc_sli_release_iocbq(phba, rspiocbp); 3268 } 3269 __lpfc_sli_release_iocbq(phba, saveq); 3270 } 3271 rspiocbp = NULL; 3272 } 3273 spin_unlock_irqrestore(&phba->hbalock, iflag); 3274 return rspiocbp; 3275 } 3276 3277 /** 3278 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs 3279 * @phba: Pointer to HBA context object. 3280 * @pring: Pointer to driver SLI ring object. 
3281 * @mask: Host attention register mask for this ring. 3282 * 3283 * This routine wraps the actual slow_ring event process routine from the 3284 * API jump table function pointer from the lpfc_hba struct. 3285 **/ 3286 void 3287 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 3288 struct lpfc_sli_ring *pring, uint32_t mask) 3289 { 3290 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); 3291 } 3292 3293 /** 3294 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings 3295 * @phba: Pointer to HBA context object. 3296 * @pring: Pointer to driver SLI ring object. 3297 * @mask: Host attention register mask for this ring. 3298 * 3299 * This function is called from the worker thread when there is a ring event 3300 * for non-fcp rings. The caller does not hold any lock. The function will 3301 * remove each response iocb in the response ring and calls the handle 3302 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 3303 **/ 3304 static void 3305 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, 3306 struct lpfc_sli_ring *pring, uint32_t mask) 3307 { 3308 struct lpfc_pgp *pgp; 3309 IOCB_t *entry; 3310 IOCB_t *irsp = NULL; 3311 struct lpfc_iocbq *rspiocbp = NULL; 3312 uint32_t portRspPut, portRspMax; 3313 unsigned long iflag; 3314 uint32_t status; 3315 3316 pgp = &phba->port_gp[pring->ringno]; 3317 spin_lock_irqsave(&phba->hbalock, iflag); 3318 pring->stats.iocb_event++; 3319 3320 /* 3321 * The next available response entry should never exceed the maximum 3322 * entries. If it does, treat it as an adapter hardware error. 3323 */ 3324 portRspMax = pring->sli.sli3.numRiocb; 3325 portRspPut = le32_to_cpu(pgp->rspPutInx); 3326 if (portRspPut >= portRspMax) { 3327 /* 3328 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3329 * rsp ring <portRspMax> 3330 */ 3331 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3332 "0303 Ring %d handler: portRspPut %d " 3333 "is bigger than rsp ring %d\n", 3334 pring->ringno, portRspPut, portRspMax); 3335 3336 phba->link_state = LPFC_HBA_ERROR; 3337 spin_unlock_irqrestore(&phba->hbalock, iflag); 3338 3339 phba->work_hs = HS_FFER3; 3340 lpfc_handle_eratt(phba); 3341 3342 return; 3343 } 3344 3345 rmb(); 3346 while (pring->sli.sli3.rspidx != portRspPut) { 3347 /* 3348 * Build a completion list and call the appropriate handler. 3349 * The process is to get the next available response iocb, get 3350 * a free iocb from the list, copy the response data into the 3351 * free iocb, insert to the continuation list, and update the 3352 * next response index to slim. This process makes response 3353 * iocb's in the ring available to DMA as fast as possible but 3354 * pays a penalty for a copy operation. Since the iocb is 3355 * only 32 bytes, this penalty is considered small relative to 3356 * the PCI reads for register values and a slim write. When 3357 * the ulpLe field is set, the entire Command has been 3358 * received. 3359 */ 3360 entry = lpfc_resp_iocb(phba, pring); 3361 3362 phba->last_completion_time = jiffies; 3363 rspiocbp = __lpfc_sli_get_iocbq(phba); 3364 if (rspiocbp == NULL) { 3365 printk(KERN_ERR "%s: out of buffers! 
Failing " 3366 "completion.\n", __func__); 3367 break; 3368 } 3369 3370 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, 3371 phba->iocb_rsp_size); 3372 irsp = &rspiocbp->iocb; 3373 3374 if (++pring->sli.sli3.rspidx >= portRspMax) 3375 pring->sli.sli3.rspidx = 0; 3376 3377 if (pring->ringno == LPFC_ELS_RING) { 3378 lpfc_debugfs_slow_ring_trc(phba, 3379 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x", 3380 *(((uint32_t *) irsp) + 4), 3381 *(((uint32_t *) irsp) + 6), 3382 *(((uint32_t *) irsp) + 7)); 3383 } 3384 3385 writel(pring->sli.sli3.rspidx, 3386 &phba->host_gp[pring->ringno].rspGetInx); 3387 3388 spin_unlock_irqrestore(&phba->hbalock, iflag); 3389 /* Handle the response IOCB */ 3390 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); 3391 spin_lock_irqsave(&phba->hbalock, iflag); 3392 3393 /* 3394 * If the port response put pointer has not been updated, sync 3395 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port 3396 * response put pointer. 3397 */ 3398 if (pring->sli.sli3.rspidx == portRspPut) { 3399 portRspPut = le32_to_cpu(pgp->rspPutInx); 3400 } 3401 } /* while (pring->sli.sli3.rspidx != portRspPut) */ 3402 3403 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { 3404 /* At least one response entry has been freed */ 3405 pring->stats.iocb_rsp_full++; 3406 /* SET RxRE_RSP in Chip Att register */ 3407 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3408 writel(status, phba->CAregaddr); 3409 readl(phba->CAregaddr); /* flush */ 3410 } 3411 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3412 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3413 pring->stats.iocb_cmd_empty++; 3414 3415 /* Force update of the local copy of cmdGetInx */ 3416 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 3417 lpfc_sli_resume_iocb(phba, pring); 3418 3419 if ((pring->lpfc_sli_cmd_available)) 3420 (pring->lpfc_sli_cmd_available) (phba, pring); 3421 3422 } 3423 3424 spin_unlock_irqrestore(&phba->hbalock, iflag); 3425 return; 3426 } 3427 3428 /** 3429 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events 3430 * @phba: Pointer to HBA context object. 3431 * @pring: Pointer to driver SLI ring object. 3432 * @mask: Host attention register mask for this ring. 3433 * 3434 * This function is called from the worker thread when there is a pending 3435 * ELS response iocb on the driver internal slow-path response iocb worker 3436 * queue. The caller does not hold any lock. The function will remove each 3437 * response iocb from the response worker queue and calls the handle 3438 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 
3439 **/ 3440 static void 3441 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, 3442 struct lpfc_sli_ring *pring, uint32_t mask) 3443 { 3444 struct lpfc_iocbq *irspiocbq; 3445 struct hbq_dmabuf *dmabuf; 3446 struct lpfc_cq_event *cq_event; 3447 unsigned long iflag; 3448 3449 spin_lock_irqsave(&phba->hbalock, iflag); 3450 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 3451 spin_unlock_irqrestore(&phba->hbalock, iflag); 3452 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 3453 /* Get the response iocb from the head of work queue */ 3454 spin_lock_irqsave(&phba->hbalock, iflag); 3455 list_remove_head(&phba->sli4_hba.sp_queue_event, 3456 cq_event, struct lpfc_cq_event, list); 3457 spin_unlock_irqrestore(&phba->hbalock, iflag); 3458 3459 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 3460 case CQE_CODE_COMPL_WQE: 3461 irspiocbq = container_of(cq_event, struct lpfc_iocbq, 3462 cq_event); 3463 /* Translate ELS WCQE to response IOCBQ */ 3464 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, 3465 irspiocbq); 3466 if (irspiocbq) 3467 lpfc_sli_sp_handle_rspiocb(phba, pring, 3468 irspiocbq); 3469 break; 3470 case CQE_CODE_RECEIVE: 3471 case CQE_CODE_RECEIVE_V1: 3472 dmabuf = container_of(cq_event, struct hbq_dmabuf, 3473 cq_event); 3474 lpfc_sli4_handle_received_buffer(phba, dmabuf); 3475 break; 3476 default: 3477 break; 3478 } 3479 } 3480 } 3481 3482 /** 3483 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring 3484 * @phba: Pointer to HBA context object. 3485 * @pring: Pointer to driver SLI ring object. 3486 * 3487 * This function aborts all iocbs in the given ring and frees all the iocb 3488 * objects in txq. This function issues an abort iocb for all the iocb commands 3489 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3490 * the return of this function. The caller is not required to hold any locks. 3491 **/ 3492 void 3493 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3494 { 3495 LIST_HEAD(completions); 3496 struct lpfc_iocbq *iocb, *next_iocb; 3497 3498 if (pring->ringno == LPFC_ELS_RING) { 3499 lpfc_fabric_abort_hba(phba); 3500 } 3501 3502 /* Error everything on txq and txcmplq 3503 * First do the txq. 3504 */ 3505 spin_lock_irq(&phba->hbalock); 3506 list_splice_init(&pring->txq, &completions); 3507 3508 /* Next issue ABTS for everything on the txcmplq */ 3509 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3510 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3511 3512 spin_unlock_irq(&phba->hbalock); 3513 3514 /* Cancel all the IOCBs from the completions list */ 3515 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 3516 IOERR_SLI_ABORTED); 3517 } 3518 3519 /** 3520 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring 3521 * @phba: Pointer to HBA context object. 3522 * 3523 * This function flushes all iocbs in the fcp ring and frees all the iocb 3524 * objects in txq and txcmplq. This function will not issue abort iocbs 3525 * for all the iocb commands in txcmplq, they will just be returned with 3526 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI 3527 * slot has been permanently disabled. 
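 * As shown below, both lists are spliced off under the hbalock, the
 * HBA_FCP_IOQ_FLUSH flag is set to mark the rings as flushed, and the
 * collected iocbs are completed with IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN
 * outside the lock.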
3528 **/ 3529 void 3530 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) 3531 { 3532 LIST_HEAD(txq); 3533 LIST_HEAD(txcmplq); 3534 struct lpfc_sli *psli = &phba->sli; 3535 struct lpfc_sli_ring *pring; 3536 3537 /* Currently, only one fcp ring */ 3538 pring = &psli->ring[psli->fcp_ring]; 3539 3540 spin_lock_irq(&phba->hbalock); 3541 /* Retrieve everything on txq */ 3542 list_splice_init(&pring->txq, &txq); 3543 3544 /* Retrieve everything on the txcmplq */ 3545 list_splice_init(&pring->txcmplq, &txcmplq); 3546 3547 /* Indicate the I/O queues are flushed */ 3548 phba->hba_flag |= HBA_FCP_IOQ_FLUSH; 3549 spin_unlock_irq(&phba->hbalock); 3550 3551 /* Flush the txq */ 3552 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, 3553 IOERR_SLI_DOWN); 3554 3555 /* Flush the txcmpq */ 3556 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, 3557 IOERR_SLI_DOWN); 3558 } 3559 3560 /** 3561 * lpfc_sli_brdready_s3 - Check for sli3 host ready status 3562 * @phba: Pointer to HBA context object. 3563 * @mask: Bit mask to be checked. 3564 * 3565 * This function reads the host status register and compares 3566 * with the provided bit mask to check if HBA completed 3567 * the restart. This function will wait in a loop for the 3568 * HBA to complete restart. If the HBA does not restart within 3569 * 15 iterations, the function will reset the HBA again. The 3570 * function returns 1 when HBA fail to restart otherwise returns 3571 * zero. 3572 **/ 3573 static int 3574 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) 3575 { 3576 uint32_t status; 3577 int i = 0; 3578 int retval = 0; 3579 3580 /* Read the HBA Host Status Register */ 3581 if (lpfc_readl(phba->HSregaddr, &status)) 3582 return 1; 3583 3584 /* 3585 * Check status register every 100ms for 5 retries, then every 3586 * 500ms for 5, then every 2.5 sec for 5, then reset board and 3587 * every 2.5 sec for 4. 3588 * Break our of the loop if errors occurred during init. 3589 */ 3590 while (((status & mask) != mask) && 3591 !(status & HS_FFERM) && 3592 i++ < 20) { 3593 3594 if (i <= 5) 3595 msleep(10); 3596 else if (i <= 10) 3597 msleep(500); 3598 else 3599 msleep(2500); 3600 3601 if (i == 15) { 3602 /* Do post */ 3603 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3604 lpfc_sli_brdrestart(phba); 3605 } 3606 /* Read the HBA Host Status Register */ 3607 if (lpfc_readl(phba->HSregaddr, &status)) { 3608 retval = 1; 3609 break; 3610 } 3611 } 3612 3613 /* Check to see if any errors occurred during init */ 3614 if ((status & HS_FFERM) || (i >= 20)) { 3615 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3616 "2751 Adapter failed to restart, " 3617 "status reg x%x, FW Data: A8 x%x AC x%x\n", 3618 status, 3619 readl(phba->MBslimaddr + 0xa8), 3620 readl(phba->MBslimaddr + 0xac)); 3621 phba->link_state = LPFC_HBA_ERROR; 3622 retval = 1; 3623 } 3624 3625 return retval; 3626 } 3627 3628 /** 3629 * lpfc_sli_brdready_s4 - Check for sli4 host ready status 3630 * @phba: Pointer to HBA context object. 3631 * @mask: Bit mask to be checked. 3632 * 3633 * This function checks the host status register to check if HBA is 3634 * ready. This function will wait in a loop for the HBA to be ready 3635 * If the HBA is not ready , the function will will reset the HBA PCI 3636 * function again. The function returns 1 when HBA fail to be ready 3637 * otherwise returns zero. 
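 * Unlike the SLI3 variant, this routine does not itself poll the HS
 * register; it relies on lpfc_sli4_post_status_check() and makes at
 * most one additional restart attempt before declaring the HBA in
 * error.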
3638 **/ 3639 static int 3640 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask) 3641 { 3642 uint32_t status; 3643 int retval = 0; 3644 3645 /* Read the HBA Host Status Register */ 3646 status = lpfc_sli4_post_status_check(phba); 3647 3648 if (status) { 3649 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3650 lpfc_sli_brdrestart(phba); 3651 status = lpfc_sli4_post_status_check(phba); 3652 } 3653 3654 /* Check to see if any errors occurred during init */ 3655 if (status) { 3656 phba->link_state = LPFC_HBA_ERROR; 3657 retval = 1; 3658 } else 3659 phba->sli4_hba.intr_enable = 0; 3660 3661 return retval; 3662 } 3663 3664 /** 3665 * lpfc_sli_brdready - Wrapper func for checking the hba readyness 3666 * @phba: Pointer to HBA context object. 3667 * @mask: Bit mask to be checked. 3668 * 3669 * This routine wraps the actual SLI3 or SLI4 hba readyness check routine 3670 * from the API jump table function pointer from the lpfc_hba struct. 3671 **/ 3672 int 3673 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) 3674 { 3675 return phba->lpfc_sli_brdready(phba, mask); 3676 } 3677 3678 #define BARRIER_TEST_PATTERN (0xdeadbeef) 3679 3680 /** 3681 * lpfc_reset_barrier - Make HBA ready for HBA reset 3682 * @phba: Pointer to HBA context object. 3683 * 3684 * This function is called before resetting an HBA. This function is called 3685 * with hbalock held and requests HBA to quiesce DMAs before a reset. 3686 **/ 3687 void lpfc_reset_barrier(struct lpfc_hba *phba) 3688 { 3689 uint32_t __iomem *resp_buf; 3690 uint32_t __iomem *mbox_buf; 3691 volatile uint32_t mbox; 3692 uint32_t hc_copy, ha_copy, resp_data; 3693 int i; 3694 uint8_t hdrtype; 3695 3696 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); 3697 if (hdrtype != 0x80 || 3698 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID && 3699 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID)) 3700 return; 3701 3702 /* 3703 * Tell the other part of the chip to suspend temporarily all 3704 * its DMA activity. 
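 * This is done by priming the SLIM word following the mailbox with
 * BARRIER_TEST_PATTERN, posting a chip-owned KILL_BOARD mailbox word,
 * and then waiting for the chip to acknowledge by complementing the
 * test pattern (or by raising an error attention).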
3705 */ 3706 resp_buf = phba->MBslimaddr; 3707 3708 /* Disable the error attention */ 3709 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 3710 return; 3711 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 3712 readl(phba->HCregaddr); /* flush */ 3713 phba->link_flag |= LS_IGNORE_ERATT; 3714 3715 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3716 return; 3717 if (ha_copy & HA_ERATT) { 3718 /* Clear Chip error bit */ 3719 writel(HA_ERATT, phba->HAregaddr); 3720 phba->pport->stopped = 1; 3721 } 3722 3723 mbox = 0; 3724 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 3725 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 3726 3727 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 3728 mbox_buf = phba->MBslimaddr; 3729 writel(mbox, mbox_buf); 3730 3731 for (i = 0; i < 50; i++) { 3732 if (lpfc_readl((resp_buf + 1), &resp_data)) 3733 return; 3734 if (resp_data != ~(BARRIER_TEST_PATTERN)) 3735 mdelay(1); 3736 else 3737 break; 3738 } 3739 resp_data = 0; 3740 if (lpfc_readl((resp_buf + 1), &resp_data)) 3741 return; 3742 if (resp_data != ~(BARRIER_TEST_PATTERN)) { 3743 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || 3744 phba->pport->stopped) 3745 goto restore_hc; 3746 else 3747 goto clear_errat; 3748 } 3749 3750 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 3751 resp_data = 0; 3752 for (i = 0; i < 500; i++) { 3753 if (lpfc_readl(resp_buf, &resp_data)) 3754 return; 3755 if (resp_data != mbox) 3756 mdelay(1); 3757 else 3758 break; 3759 } 3760 3761 clear_errat: 3762 3763 while (++i < 500) { 3764 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3765 return; 3766 if (!(ha_copy & HA_ERATT)) 3767 mdelay(1); 3768 else 3769 break; 3770 } 3771 3772 if (readl(phba->HAregaddr) & HA_ERATT) { 3773 writel(HA_ERATT, phba->HAregaddr); 3774 phba->pport->stopped = 1; 3775 } 3776 3777 restore_hc: 3778 phba->link_flag &= ~LS_IGNORE_ERATT; 3779 writel(hc_copy, phba->HCregaddr); 3780 readl(phba->HCregaddr); /* flush */ 3781 } 3782 3783 /** 3784 * lpfc_sli_brdkill - Issue a kill_board mailbox command 3785 * @phba: Pointer to HBA context object. 3786 * 3787 * This function issues a kill_board mailbox command and waits for 3788 * the error attention interrupt. This function is called for stopping 3789 * the firmware processing. The caller is not required to hold any 3790 * locks. This function calls lpfc_hba_down_post function to free 3791 * any pending commands after the kill. The function will return 1 when it 3792 * fails to kill the board else will return 0. 
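 * Note that KILL_BOARD produces no mailbox completion; the routine
 * polls the HA register for ERATT in 100ms steps for up to 3 seconds
 * and reports success only if the error attention was actually raised.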
3793 **/ 3794 int 3795 lpfc_sli_brdkill(struct lpfc_hba *phba) 3796 { 3797 struct lpfc_sli *psli; 3798 LPFC_MBOXQ_t *pmb; 3799 uint32_t status; 3800 uint32_t ha_copy; 3801 int retval; 3802 int i = 0; 3803 3804 psli = &phba->sli; 3805 3806 /* Kill HBA */ 3807 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3808 "0329 Kill HBA Data: x%x x%x\n", 3809 phba->pport->port_state, psli->sli_flag); 3810 3811 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3812 if (!pmb) 3813 return 1; 3814 3815 /* Disable the error attention */ 3816 spin_lock_irq(&phba->hbalock); 3817 if (lpfc_readl(phba->HCregaddr, &status)) { 3818 spin_unlock_irq(&phba->hbalock); 3819 mempool_free(pmb, phba->mbox_mem_pool); 3820 return 1; 3821 } 3822 status &= ~HC_ERINT_ENA; 3823 writel(status, phba->HCregaddr); 3824 readl(phba->HCregaddr); /* flush */ 3825 phba->link_flag |= LS_IGNORE_ERATT; 3826 spin_unlock_irq(&phba->hbalock); 3827 3828 lpfc_kill_board(phba, pmb); 3829 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3830 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 3831 3832 if (retval != MBX_SUCCESS) { 3833 if (retval != MBX_BUSY) 3834 mempool_free(pmb, phba->mbox_mem_pool); 3835 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3836 "2752 KILL_BOARD command failed retval %d\n", 3837 retval); 3838 spin_lock_irq(&phba->hbalock); 3839 phba->link_flag &= ~LS_IGNORE_ERATT; 3840 spin_unlock_irq(&phba->hbalock); 3841 return 1; 3842 } 3843 3844 spin_lock_irq(&phba->hbalock); 3845 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 3846 spin_unlock_irq(&phba->hbalock); 3847 3848 mempool_free(pmb, phba->mbox_mem_pool); 3849 3850 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 3851 * attention every 100ms for 3 seconds. If we don't get ERATT after 3852 * 3 seconds we still set HBA_ERROR state because the status of the 3853 * board is now undefined. 3854 */ 3855 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3856 return 1; 3857 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 3858 mdelay(100); 3859 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3860 return 1; 3861 } 3862 3863 del_timer_sync(&psli->mbox_tmo); 3864 if (ha_copy & HA_ERATT) { 3865 writel(HA_ERATT, phba->HAregaddr); 3866 phba->pport->stopped = 1; 3867 } 3868 spin_lock_irq(&phba->hbalock); 3869 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3870 psli->mbox_active = NULL; 3871 phba->link_flag &= ~LS_IGNORE_ERATT; 3872 spin_unlock_irq(&phba->hbalock); 3873 3874 lpfc_hba_down_post(phba); 3875 phba->link_state = LPFC_HBA_ERROR; 3876 3877 return ha_copy & HA_ERATT ? 0 : 1; 3878 } 3879 3880 /** 3881 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA 3882 * @phba: Pointer to HBA context object. 3883 * 3884 * This function resets the HBA by writing HC_INITFF to the control 3885 * register. After the HBA resets, this function resets all the iocb ring 3886 * indices. This function disables PCI layer parity checking during 3887 * the reset. 3888 * This function returns 0 always. 3889 * The caller is not required to hold any locks. 
3890 **/ 3891 int 3892 lpfc_sli_brdreset(struct lpfc_hba *phba) 3893 { 3894 struct lpfc_sli *psli; 3895 struct lpfc_sli_ring *pring; 3896 uint16_t cfg_value; 3897 int i; 3898 3899 psli = &phba->sli; 3900 3901 /* Reset HBA */ 3902 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3903 "0325 Reset HBA Data: x%x x%x\n", 3904 phba->pport->port_state, psli->sli_flag); 3905 3906 /* perform board reset */ 3907 phba->fc_eventTag = 0; 3908 phba->link_events = 0; 3909 phba->pport->fc_myDID = 0; 3910 phba->pport->fc_prevDID = 0; 3911 3912 /* Turn off parity checking and serr during the physical reset */ 3913 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 3914 pci_write_config_word(phba->pcidev, PCI_COMMAND, 3915 (cfg_value & 3916 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3917 3918 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA); 3919 3920 /* Now toggle INITFF bit in the Host Control Register */ 3921 writel(HC_INITFF, phba->HCregaddr); 3922 mdelay(1); 3923 readl(phba->HCregaddr); /* flush */ 3924 writel(0, phba->HCregaddr); 3925 readl(phba->HCregaddr); /* flush */ 3926 3927 /* Restore PCI cmd register */ 3928 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 3929 3930 /* Initialize relevant SLI info */ 3931 for (i = 0; i < psli->num_rings; i++) { 3932 pring = &psli->ring[i]; 3933 pring->flag = 0; 3934 pring->sli.sli3.rspidx = 0; 3935 pring->sli.sli3.next_cmdidx = 0; 3936 pring->sli.sli3.local_getidx = 0; 3937 pring->sli.sli3.cmdidx = 0; 3938 pring->missbufcnt = 0; 3939 } 3940 3941 phba->link_state = LPFC_WARM_START; 3942 return 0; 3943 } 3944 3945 /** 3946 * lpfc_sli4_brdreset - Reset a sli-4 HBA 3947 * @phba: Pointer to HBA context object. 3948 * 3949 * This function resets a SLI4 HBA. This function disables PCI layer parity 3950 * checking during resets the device. The caller is not required to hold 3951 * any locks. 3952 * 3953 * This function returns 0 always. 3954 **/ 3955 int 3956 lpfc_sli4_brdreset(struct lpfc_hba *phba) 3957 { 3958 struct lpfc_sli *psli = &phba->sli; 3959 uint16_t cfg_value; 3960 int rc; 3961 3962 /* Reset HBA */ 3963 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3964 "0295 Reset HBA Data: x%x x%x\n", 3965 phba->pport->port_state, psli->sli_flag); 3966 3967 /* perform board reset */ 3968 phba->fc_eventTag = 0; 3969 phba->link_events = 0; 3970 phba->pport->fc_myDID = 0; 3971 phba->pport->fc_prevDID = 0; 3972 3973 spin_lock_irq(&phba->hbalock); 3974 psli->sli_flag &= ~(LPFC_PROCESS_LA); 3975 phba->fcf.fcf_flag = 0; 3976 spin_unlock_irq(&phba->hbalock); 3977 3978 /* Now physically reset the device */ 3979 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3980 "0389 Performing PCI function reset!\n"); 3981 3982 /* Turn off parity checking and serr during the physical reset */ 3983 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 3984 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & 3985 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3986 3987 /* Perform FCoE PCI function reset before freeing queue memory */ 3988 rc = lpfc_pci_function_reset(phba); 3989 lpfc_sli4_queue_destroy(phba); 3990 3991 /* Restore PCI cmd register */ 3992 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 3993 3994 return rc; 3995 } 3996 3997 /** 3998 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba 3999 * @phba: Pointer to HBA context object. 4000 * 4001 * This function is called in the SLI initialization code path to 4002 * restart the HBA. The caller is not required to hold any lock. 
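 * lpfc_reset_barrier() is called first to quiesce in-flight DMA on the
 * adapters that require it.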
4003 * This function writes MBX_RESTART mailbox command to the SLIM and 4004 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post 4005 * function to free any pending commands. The function enables 4006 * POST only during the first initialization. The function returns zero. 4007 * The function does not guarantee completion of MBX_RESTART mailbox 4008 * command before the return of this function. 4009 **/ 4010 static int 4011 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) 4012 { 4013 MAILBOX_t *mb; 4014 struct lpfc_sli *psli; 4015 volatile uint32_t word0; 4016 void __iomem *to_slim; 4017 uint32_t hba_aer_enabled; 4018 4019 spin_lock_irq(&phba->hbalock); 4020 4021 /* Take PCIe device Advanced Error Reporting (AER) state */ 4022 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 4023 4024 psli = &phba->sli; 4025 4026 /* Restart HBA */ 4027 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4028 "0337 Restart HBA Data: x%x x%x\n", 4029 phba->pport->port_state, psli->sli_flag); 4030 4031 word0 = 0; 4032 mb = (MAILBOX_t *) &word0; 4033 mb->mbxCommand = MBX_RESTART; 4034 mb->mbxHc = 1; 4035 4036 lpfc_reset_barrier(phba); 4037 4038 to_slim = phba->MBslimaddr; 4039 writel(*(uint32_t *) mb, to_slim); 4040 readl(to_slim); /* flush */ 4041 4042 /* Only skip post after fc_ffinit is completed */ 4043 if (phba->pport->port_state) 4044 word0 = 1; /* This is really setting up word1 */ 4045 else 4046 word0 = 0; /* This is really setting up word1 */ 4047 to_slim = phba->MBslimaddr + sizeof (uint32_t); 4048 writel(*(uint32_t *) mb, to_slim); 4049 readl(to_slim); /* flush */ 4050 4051 lpfc_sli_brdreset(phba); 4052 phba->pport->stopped = 0; 4053 phba->link_state = LPFC_INIT_START; 4054 phba->hba_flag = 0; 4055 spin_unlock_irq(&phba->hbalock); 4056 4057 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 4058 psli->stats_start = get_seconds(); 4059 4060 /* Give the INITFF and Post time to settle. */ 4061 mdelay(100); 4062 4063 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 4064 if (hba_aer_enabled) 4065 pci_disable_pcie_error_reporting(phba->pcidev); 4066 4067 lpfc_hba_down_post(phba); 4068 4069 return 0; 4070 } 4071 4072 /** 4073 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba 4074 * @phba: Pointer to HBA context object. 4075 * 4076 * This function is called in the SLI initialization code path to restart 4077 * a SLI4 HBA. The caller is not required to hold any lock. 4078 * At the end of the function, it calls lpfc_hba_down_post function to 4079 * free any pending commands. 
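 * Unlike the SLI3 restart path, no MBX_RESTART word is written to SLIM
 * here; the restart is performed through lpfc_sli4_brdreset() (a PCI
 * function reset) and that status is returned to the caller.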
4080 **/ 4081 static int 4082 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) 4083 { 4084 struct lpfc_sli *psli = &phba->sli; 4085 uint32_t hba_aer_enabled; 4086 int rc; 4087 4088 /* Restart HBA */ 4089 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4090 "0296 Restart HBA Data: x%x x%x\n", 4091 phba->pport->port_state, psli->sli_flag); 4092 4093 /* Take PCIe device Advanced Error Reporting (AER) state */ 4094 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 4095 4096 rc = lpfc_sli4_brdreset(phba); 4097 4098 spin_lock_irq(&phba->hbalock); 4099 phba->pport->stopped = 0; 4100 phba->link_state = LPFC_INIT_START; 4101 phba->hba_flag = 0; 4102 spin_unlock_irq(&phba->hbalock); 4103 4104 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 4105 psli->stats_start = get_seconds(); 4106 4107 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 4108 if (hba_aer_enabled) 4109 pci_disable_pcie_error_reporting(phba->pcidev); 4110 4111 lpfc_hba_down_post(phba); 4112 4113 return rc; 4114 } 4115 4116 /** 4117 * lpfc_sli_brdrestart - Wrapper func for restarting hba 4118 * @phba: Pointer to HBA context object. 4119 * 4120 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the 4121 * API jump table function pointer from the lpfc_hba struct. 4122 **/ 4123 int 4124 lpfc_sli_brdrestart(struct lpfc_hba *phba) 4125 { 4126 return phba->lpfc_sli_brdrestart(phba); 4127 } 4128 4129 /** 4130 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart 4131 * @phba: Pointer to HBA context object. 4132 * 4133 * This function is called after a HBA restart to wait for successful 4134 * restart of the HBA. Successful restart of the HBA is indicated by 4135 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15 4136 * iteration, the function will restart the HBA again. The function returns 4137 * zero if HBA successfully restarted else returns negative error code. 4138 **/ 4139 static int 4140 lpfc_sli_chipset_init(struct lpfc_hba *phba) 4141 { 4142 uint32_t status, i = 0; 4143 4144 /* Read the HBA Host Status Register */ 4145 if (lpfc_readl(phba->HSregaddr, &status)) 4146 return -EIO; 4147 4148 /* Check status register to see what current state is */ 4149 i = 0; 4150 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) { 4151 4152 /* Check every 10ms for 10 retries, then every 100ms for 90 4153 * retries, then every 1 sec for 50 retires for a total of 4154 * ~60 seconds before reset the board again and check every 4155 * 1 sec for 50 retries. The up to 60 seconds before the 4156 * board ready is required by the Falcon FIPS zeroization 4157 * complete, and any reset the board in between shall cause 4158 * restart of zeroization, further delay the board ready. 
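 * Worked out against the msleep()/restart logic below, that is roughly
 * 10 x 10ms + 90 x 100ms + 50 x 1s (about 59 seconds) before the
 * restart at iteration 150, and up to 50 x 1s more before the
 * iteration-200 timeout.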
4159 */ 4160 if (i++ >= 200) { 4161 /* Adapter failed to init, timeout, status reg 4162 <status> */ 4163 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4164 "0436 Adapter failed to init, " 4165 "timeout, status reg x%x, " 4166 "FW Data: A8 x%x AC x%x\n", status, 4167 readl(phba->MBslimaddr + 0xa8), 4168 readl(phba->MBslimaddr + 0xac)); 4169 phba->link_state = LPFC_HBA_ERROR; 4170 return -ETIMEDOUT; 4171 } 4172 4173 /* Check to see if any errors occurred during init */ 4174 if (status & HS_FFERM) { 4175 /* ERROR: During chipset initialization */ 4176 /* Adapter failed to init, chipset, status reg 4177 <status> */ 4178 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4179 "0437 Adapter failed to init, " 4180 "chipset, status reg x%x, " 4181 "FW Data: A8 x%x AC x%x\n", status, 4182 readl(phba->MBslimaddr + 0xa8), 4183 readl(phba->MBslimaddr + 0xac)); 4184 phba->link_state = LPFC_HBA_ERROR; 4185 return -EIO; 4186 } 4187 4188 if (i <= 10) 4189 msleep(10); 4190 else if (i <= 100) 4191 msleep(100); 4192 else 4193 msleep(1000); 4194 4195 if (i == 150) { 4196 /* Do post */ 4197 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4198 lpfc_sli_brdrestart(phba); 4199 } 4200 /* Read the HBA Host Status Register */ 4201 if (lpfc_readl(phba->HSregaddr, &status)) 4202 return -EIO; 4203 } 4204 4205 /* Check to see if any errors occurred during init */ 4206 if (status & HS_FFERM) { 4207 /* ERROR: During chipset initialization */ 4208 /* Adapter failed to init, chipset, status reg <status> */ 4209 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4210 "0438 Adapter failed to init, chipset, " 4211 "status reg x%x, " 4212 "FW Data: A8 x%x AC x%x\n", status, 4213 readl(phba->MBslimaddr + 0xa8), 4214 readl(phba->MBslimaddr + 0xac)); 4215 phba->link_state = LPFC_HBA_ERROR; 4216 return -EIO; 4217 } 4218 4219 /* Clear all interrupt enable conditions */ 4220 writel(0, phba->HCregaddr); 4221 readl(phba->HCregaddr); /* flush */ 4222 4223 /* setup host attn register */ 4224 writel(0xffffffff, phba->HAregaddr); 4225 readl(phba->HAregaddr); /* flush */ 4226 return 0; 4227 } 4228 4229 /** 4230 * lpfc_sli_hbq_count - Get the number of HBQs to be configured 4231 * 4232 * This function calculates and returns the number of HBQs required to be 4233 * configured. 4234 **/ 4235 int 4236 lpfc_sli_hbq_count(void) 4237 { 4238 return ARRAY_SIZE(lpfc_hbq_defs); 4239 } 4240 4241 /** 4242 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries 4243 * 4244 * This function adds the number of hbq entries in every HBQ to get 4245 * the total number of hbq entries required for the HBA and returns 4246 * the total count. 4247 **/ 4248 static int 4249 lpfc_sli_hbq_entry_count(void) 4250 { 4251 int hbq_count = lpfc_sli_hbq_count(); 4252 int count = 0; 4253 int i; 4254 4255 for (i = 0; i < hbq_count; ++i) 4256 count += lpfc_hbq_defs[i]->entry_count; 4257 return count; 4258 } 4259 4260 /** 4261 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries 4262 * 4263 * This function calculates amount of memory required for all hbq entries 4264 * to be configured and returns the total memory required. 4265 **/ 4266 int 4267 lpfc_sli_hbq_size(void) 4268 { 4269 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 4270 } 4271 4272 /** 4273 * lpfc_sli_hbq_setup - configure and initialize HBQs 4274 * @phba: Pointer to HBA context object. 4275 * 4276 * This function is called during the SLI initialization to configure 4277 * all the HBQs and post buffers to the HBQ. The caller is not 4278 * required to hold any locks. 
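 * Each HBQ is configured via lpfc_config_hbq() with the mailbox issued
 * in polled mode, and is then seeded with buffers through
 * lpfc_sli_hbqbuf_init_hbqs().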
This function will return zero if successful 4279 * else it will return negative error code. 4280 **/ 4281 static int 4282 lpfc_sli_hbq_setup(struct lpfc_hba *phba) 4283 { 4284 int hbq_count = lpfc_sli_hbq_count(); 4285 LPFC_MBOXQ_t *pmb; 4286 MAILBOX_t *pmbox; 4287 uint32_t hbqno; 4288 uint32_t hbq_entry_index; 4289 4290 /* Get a Mailbox buffer to setup mailbox 4291 * commands for HBA initialization 4292 */ 4293 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4294 4295 if (!pmb) 4296 return -ENOMEM; 4297 4298 pmbox = &pmb->u.mb; 4299 4300 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 4301 phba->link_state = LPFC_INIT_MBX_CMDS; 4302 phba->hbq_in_use = 1; 4303 4304 hbq_entry_index = 0; 4305 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 4306 phba->hbqs[hbqno].next_hbqPutIdx = 0; 4307 phba->hbqs[hbqno].hbqPutIdx = 0; 4308 phba->hbqs[hbqno].local_hbqGetIdx = 0; 4309 phba->hbqs[hbqno].entry_count = 4310 lpfc_hbq_defs[hbqno]->entry_count; 4311 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], 4312 hbq_entry_index, pmb); 4313 hbq_entry_index += phba->hbqs[hbqno].entry_count; 4314 4315 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 4316 /* Adapter failed to init, mbxCmd <cmd> CFG_RING, 4317 mbxStatus <status>, ring <num> */ 4318 4319 lpfc_printf_log(phba, KERN_ERR, 4320 LOG_SLI | LOG_VPORT, 4321 "1805 Adapter failed to init. " 4322 "Data: x%x x%x x%x\n", 4323 pmbox->mbxCommand, 4324 pmbox->mbxStatus, hbqno); 4325 4326 phba->link_state = LPFC_HBA_ERROR; 4327 mempool_free(pmb, phba->mbox_mem_pool); 4328 return -ENXIO; 4329 } 4330 } 4331 phba->hbq_count = hbq_count; 4332 4333 mempool_free(pmb, phba->mbox_mem_pool); 4334 4335 /* Initially populate or replenish the HBQs */ 4336 for (hbqno = 0; hbqno < hbq_count; ++hbqno) 4337 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno); 4338 return 0; 4339 } 4340 4341 /** 4342 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA 4343 * @phba: Pointer to HBA context object. 4344 * 4345 * This function is called during the SLI initialization to configure 4346 * all the HBQs and post buffers to the HBQ. The caller is not 4347 * required to hold any locks. This function will return zero if successful 4348 * else it will return negative error code. 4349 **/ 4350 static int 4351 lpfc_sli4_rb_setup(struct lpfc_hba *phba) 4352 { 4353 phba->hbq_in_use = 1; 4354 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count; 4355 phba->hbq_count = 1; 4356 /* Initially populate or replenish the HBQs */ 4357 lpfc_sli_hbqbuf_init_hbqs(phba, 0); 4358 return 0; 4359 } 4360 4361 /** 4362 * lpfc_sli_config_port - Issue config port mailbox command 4363 * @phba: Pointer to HBA context object. 4364 * @sli_mode: sli mode - 2/3 4365 * 4366 * This function is called by the sli intialization code path 4367 * to issue config_port mailbox command. This function restarts the 4368 * HBA firmware and issues a config_port mailbox command to configure 4369 * the SLI interface in the sli mode specified by sli_mode 4370 * variable. The caller is not required to hold any locks. 4371 * The function returns 0 if successful, else returns negative error 4372 * code. 
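 * A typical caller (see lpfc_sli_hba_setup() below) first requests
 * sli_mode 3 and, if CONFIG_PORT is rejected, retries with sli_mode 2.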
4373 **/ 4374 int 4375 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) 4376 { 4377 LPFC_MBOXQ_t *pmb; 4378 uint32_t resetcount = 0, rc = 0, done = 0; 4379 4380 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4381 if (!pmb) { 4382 phba->link_state = LPFC_HBA_ERROR; 4383 return -ENOMEM; 4384 } 4385 4386 phba->sli_rev = sli_mode; 4387 while (resetcount < 2 && !done) { 4388 spin_lock_irq(&phba->hbalock); 4389 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 4390 spin_unlock_irq(&phba->hbalock); 4391 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4392 lpfc_sli_brdrestart(phba); 4393 rc = lpfc_sli_chipset_init(phba); 4394 if (rc) 4395 break; 4396 4397 spin_lock_irq(&phba->hbalock); 4398 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4399 spin_unlock_irq(&phba->hbalock); 4400 resetcount++; 4401 4402 /* Call pre CONFIG_PORT mailbox command initialization. A 4403 * value of 0 means the call was successful. Any other 4404 * nonzero value is a failure, but if ERESTART is returned, 4405 * the driver may reset the HBA and try again. 4406 */ 4407 rc = lpfc_config_port_prep(phba); 4408 if (rc == -ERESTART) { 4409 phba->link_state = LPFC_LINK_UNKNOWN; 4410 continue; 4411 } else if (rc) 4412 break; 4413 4414 phba->link_state = LPFC_INIT_MBX_CMDS; 4415 lpfc_config_port(phba, pmb); 4416 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 4417 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 4418 LPFC_SLI3_HBQ_ENABLED | 4419 LPFC_SLI3_CRP_ENABLED | 4420 LPFC_SLI3_BG_ENABLED | 4421 LPFC_SLI3_DSS_ENABLED); 4422 if (rc != MBX_SUCCESS) { 4423 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4424 "0442 Adapter failed to init, mbxCmd x%x " 4425 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 4426 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); 4427 spin_lock_irq(&phba->hbalock); 4428 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 4429 spin_unlock_irq(&phba->hbalock); 4430 rc = -ENXIO; 4431 } else { 4432 /* Allow asynchronous mailbox command to go through */ 4433 spin_lock_irq(&phba->hbalock); 4434 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 4435 spin_unlock_irq(&phba->hbalock); 4436 done = 1; 4437 4438 if ((pmb->u.mb.un.varCfgPort.casabt == 1) && 4439 (pmb->u.mb.un.varCfgPort.gasabt == 0)) 4440 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4441 "3110 Port did not grant ASABT\n"); 4442 } 4443 } 4444 if (!done) { 4445 rc = -EINVAL; 4446 goto do_prep_failed; 4447 } 4448 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { 4449 if (!pmb->u.mb.un.varCfgPort.cMA) { 4450 rc = -ENXIO; 4451 goto do_prep_failed; 4452 } 4453 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { 4454 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 4455 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; 4456 phba->max_vports = (phba->max_vpi > phba->max_vports) ? 4457 phba->max_vpi : phba->max_vports; 4458 4459 } else 4460 phba->max_vpi = 0; 4461 phba->fips_level = 0; 4462 phba->fips_spec_rev = 0; 4463 if (pmb->u.mb.un.varCfgPort.gdss) { 4464 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; 4465 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level; 4466 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev; 4467 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4468 "2850 Security Crypto Active. 
FIPS x%d " 4469 "(Spec Rev: x%d)", 4470 phba->fips_level, phba->fips_spec_rev); 4471 } 4472 if (pmb->u.mb.un.varCfgPort.sec_err) { 4473 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4474 "2856 Config Port Security Crypto " 4475 "Error: x%x ", 4476 pmb->u.mb.un.varCfgPort.sec_err); 4477 } 4478 if (pmb->u.mb.un.varCfgPort.gerbm) 4479 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 4480 if (pmb->u.mb.un.varCfgPort.gcrp) 4481 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 4482 4483 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; 4484 phba->port_gp = phba->mbox->us.s3_pgp.port; 4485 4486 if (phba->cfg_enable_bg) { 4487 if (pmb->u.mb.un.varCfgPort.gbg) 4488 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 4489 else 4490 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4491 "0443 Adapter did not grant " 4492 "BlockGuard\n"); 4493 } 4494 } else { 4495 phba->hbq_get = NULL; 4496 phba->port_gp = phba->mbox->us.s2.port; 4497 phba->max_vpi = 0; 4498 } 4499 do_prep_failed: 4500 mempool_free(pmb, phba->mbox_mem_pool); 4501 return rc; 4502 } 4503 4504 4505 /** 4506 * lpfc_sli_hba_setup - SLI intialization function 4507 * @phba: Pointer to HBA context object. 4508 * 4509 * This function is the main SLI intialization function. This function 4510 * is called by the HBA intialization code, HBA reset code and HBA 4511 * error attention handler code. Caller is not required to hold any 4512 * locks. This function issues config_port mailbox command to configure 4513 * the SLI, setup iocb rings and HBQ rings. In the end the function 4514 * calls the config_port_post function to issue init_link mailbox 4515 * command and to start the discovery. The function will return zero 4516 * if successful, else it will return negative error code. 4517 **/ 4518 int 4519 lpfc_sli_hba_setup(struct lpfc_hba *phba) 4520 { 4521 uint32_t rc; 4522 int mode = 3, i; 4523 int longs; 4524 4525 switch (lpfc_sli_mode) { 4526 case 2: 4527 if (phba->cfg_enable_npiv) { 4528 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4529 "1824 NPIV enabled: Override lpfc_sli_mode " 4530 "parameter (%d) to auto (0).\n", 4531 lpfc_sli_mode); 4532 break; 4533 } 4534 mode = 2; 4535 break; 4536 case 0: 4537 case 3: 4538 break; 4539 default: 4540 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4541 "1819 Unrecognized lpfc_sli_mode " 4542 "parameter: %d.\n", lpfc_sli_mode); 4543 4544 break; 4545 } 4546 4547 rc = lpfc_sli_config_port(phba, mode); 4548 4549 if (rc && lpfc_sli_mode == 3) 4550 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4551 "1820 Unable to select SLI-3. 
" 4552 "Not supported by adapter.\n"); 4553 if (rc && mode != 2) 4554 rc = lpfc_sli_config_port(phba, 2); 4555 if (rc) 4556 goto lpfc_sli_hba_setup_error; 4557 4558 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 4559 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 4560 rc = pci_enable_pcie_error_reporting(phba->pcidev); 4561 if (!rc) { 4562 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4563 "2709 This device supports " 4564 "Advanced Error Reporting (AER)\n"); 4565 spin_lock_irq(&phba->hbalock); 4566 phba->hba_flag |= HBA_AER_ENABLED; 4567 spin_unlock_irq(&phba->hbalock); 4568 } else { 4569 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4570 "2708 This device does not support " 4571 "Advanced Error Reporting (AER): %d\n", 4572 rc); 4573 phba->cfg_aer_support = 0; 4574 } 4575 } 4576 4577 if (phba->sli_rev == 3) { 4578 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 4579 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 4580 } else { 4581 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 4582 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 4583 phba->sli3_options = 0; 4584 } 4585 4586 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4587 "0444 Firmware in SLI %x mode. Max_vpi %d\n", 4588 phba->sli_rev, phba->max_vpi); 4589 rc = lpfc_sli_ring_map(phba); 4590 4591 if (rc) 4592 goto lpfc_sli_hba_setup_error; 4593 4594 /* Initialize VPIs. */ 4595 if (phba->sli_rev == LPFC_SLI_REV3) { 4596 /* 4597 * The VPI bitmask and physical ID array are allocated 4598 * and initialized once only - at driver load. A port 4599 * reset doesn't need to reinitialize this memory. 4600 */ 4601 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) { 4602 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG; 4603 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), 4604 GFP_KERNEL); 4605 if (!phba->vpi_bmask) { 4606 rc = -ENOMEM; 4607 goto lpfc_sli_hba_setup_error; 4608 } 4609 4610 phba->vpi_ids = kzalloc( 4611 (phba->max_vpi+1) * sizeof(uint16_t), 4612 GFP_KERNEL); 4613 if (!phba->vpi_ids) { 4614 kfree(phba->vpi_bmask); 4615 rc = -ENOMEM; 4616 goto lpfc_sli_hba_setup_error; 4617 } 4618 for (i = 0; i < phba->max_vpi; i++) 4619 phba->vpi_ids[i] = i; 4620 } 4621 } 4622 4623 /* Init HBQs */ 4624 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 4625 rc = lpfc_sli_hbq_setup(phba); 4626 if (rc) 4627 goto lpfc_sli_hba_setup_error; 4628 } 4629 spin_lock_irq(&phba->hbalock); 4630 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4631 spin_unlock_irq(&phba->hbalock); 4632 4633 rc = lpfc_config_port_post(phba); 4634 if (rc) 4635 goto lpfc_sli_hba_setup_error; 4636 4637 return rc; 4638 4639 lpfc_sli_hba_setup_error: 4640 phba->link_state = LPFC_HBA_ERROR; 4641 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4642 "0445 Firmware initialization failed\n"); 4643 return rc; 4644 } 4645 4646 /** 4647 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region 4648 * @phba: Pointer to HBA context object. 4649 * @mboxq: mailbox pointer. 4650 * This function issue a dump mailbox command to read config region 4651 * 23 and parse the records in the region and populate driver 4652 * data structure. 
4653 **/ 4654 static int 4655 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba) 4656 { 4657 LPFC_MBOXQ_t *mboxq; 4658 struct lpfc_dmabuf *mp; 4659 struct lpfc_mqe *mqe; 4660 uint32_t data_length; 4661 int rc; 4662 4663 /* Program the default value of vlan_id and fc_map */ 4664 phba->valid_vlan = 0; 4665 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 4666 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 4667 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 4668 4669 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4670 if (!mboxq) 4671 return -ENOMEM; 4672 4673 mqe = &mboxq->u.mqe; 4674 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) { 4675 rc = -ENOMEM; 4676 goto out_free_mboxq; 4677 } 4678 4679 mp = (struct lpfc_dmabuf *) mboxq->context1; 4680 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4681 4682 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 4683 "(%d):2571 Mailbox cmd x%x Status x%x " 4684 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 4685 "x%x x%x x%x x%x x%x x%x x%x x%x x%x " 4686 "CQ: x%x x%x x%x x%x\n", 4687 mboxq->vport ? mboxq->vport->vpi : 0, 4688 bf_get(lpfc_mqe_command, mqe), 4689 bf_get(lpfc_mqe_status, mqe), 4690 mqe->un.mb_words[0], mqe->un.mb_words[1], 4691 mqe->un.mb_words[2], mqe->un.mb_words[3], 4692 mqe->un.mb_words[4], mqe->un.mb_words[5], 4693 mqe->un.mb_words[6], mqe->un.mb_words[7], 4694 mqe->un.mb_words[8], mqe->un.mb_words[9], 4695 mqe->un.mb_words[10], mqe->un.mb_words[11], 4696 mqe->un.mb_words[12], mqe->un.mb_words[13], 4697 mqe->un.mb_words[14], mqe->un.mb_words[15], 4698 mqe->un.mb_words[16], mqe->un.mb_words[50], 4699 mboxq->mcqe.word0, 4700 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 4701 mboxq->mcqe.trailer); 4702 4703 if (rc) { 4704 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4705 kfree(mp); 4706 rc = -EIO; 4707 goto out_free_mboxq; 4708 } 4709 data_length = mqe->un.mb_words[5]; 4710 if (data_length > DMP_RGN23_SIZE) { 4711 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4712 kfree(mp); 4713 rc = -EIO; 4714 goto out_free_mboxq; 4715 } 4716 4717 lpfc_parse_fcoe_conf(phba, mp->virt, data_length); 4718 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4719 kfree(mp); 4720 rc = 0; 4721 4722 out_free_mboxq: 4723 mempool_free(mboxq, phba->mbox_mem_pool); 4724 return rc; 4725 } 4726 4727 /** 4728 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data 4729 * @phba: pointer to lpfc hba data structure. 4730 * @mboxq: pointer to the LPFC_MBOXQ_t structure. 4731 * @vpd: pointer to the memory to hold resulting port vpd data. 4732 * @vpd_size: On input, the number of bytes allocated to @vpd. 4733 * On output, the number of data bytes in @vpd. 4734 * 4735 * This routine executes a READ_REV SLI4 mailbox command. In 4736 * addition, this routine gets the port vpd data. 4737 * 4738 * Return codes 4739 * 0 - successful 4740 * -ENOMEM - could not allocated memory. 4741 **/ 4742 static int 4743 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 4744 uint8_t *vpd, uint32_t *vpd_size) 4745 { 4746 int rc = 0; 4747 uint32_t dma_size; 4748 struct lpfc_dmabuf *dmabuf; 4749 struct lpfc_mqe *mqe; 4750 4751 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4752 if (!dmabuf) 4753 return -ENOMEM; 4754 4755 /* 4756 * Get a DMA buffer for the vpd data resulting from the READ_REV 4757 * mailbox command. 
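 * The buffer is sized from the caller's *vpd_size, its DMA address is
 * passed to the port in vpd_paddr_high/low, and after completion
 * *vpd_size is clipped to the avail_vpd_len the port reports.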
4758 */ 4759 dma_size = *vpd_size; 4760 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 4761 dma_size, 4762 &dmabuf->phys, 4763 GFP_KERNEL); 4764 if (!dmabuf->virt) { 4765 kfree(dmabuf); 4766 return -ENOMEM; 4767 } 4768 memset(dmabuf->virt, 0, dma_size); 4769 4770 /* 4771 * The SLI4 implementation of READ_REV conflicts at word1, 4772 * bits 31:16 and SLI4 adds vpd functionality not present 4773 * in SLI3. This code corrects the conflicts. 4774 */ 4775 lpfc_read_rev(phba, mboxq); 4776 mqe = &mboxq->u.mqe; 4777 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); 4778 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); 4779 mqe->un.read_rev.word1 &= 0x0000FFFF; 4780 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); 4781 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); 4782 4783 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4784 if (rc) { 4785 dma_free_coherent(&phba->pcidev->dev, dma_size, 4786 dmabuf->virt, dmabuf->phys); 4787 kfree(dmabuf); 4788 return -EIO; 4789 } 4790 4791 /* 4792 * The available vpd length cannot be bigger than the 4793 * DMA buffer passed to the port. Catch the less than 4794 * case and update the caller's size. 4795 */ 4796 if (mqe->un.read_rev.avail_vpd_len < *vpd_size) 4797 *vpd_size = mqe->un.read_rev.avail_vpd_len; 4798 4799 memcpy(vpd, dmabuf->virt, *vpd_size); 4800 4801 dma_free_coherent(&phba->pcidev->dev, dma_size, 4802 dmabuf->virt, dmabuf->phys); 4803 kfree(dmabuf); 4804 return 0; 4805 } 4806 4807 /** 4808 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name 4809 * @phba: pointer to lpfc hba data structure. 4810 * 4811 * This routine retrieves SLI4 device physical port name this PCI function 4812 * is attached to. 4813 * 4814 * Return codes 4815 * 0 - successful 4816 * otherwise - failed to retrieve physical port name 4817 **/ 4818 static int 4819 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba) 4820 { 4821 LPFC_MBOXQ_t *mboxq; 4822 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr; 4823 struct lpfc_controller_attribute *cntl_attr; 4824 struct lpfc_mbx_get_port_name *get_port_name; 4825 void *virtaddr = NULL; 4826 uint32_t alloclen, reqlen; 4827 uint32_t shdr_status, shdr_add_status; 4828 union lpfc_sli4_cfg_shdr *shdr; 4829 char cport_name = 0; 4830 int rc; 4831 4832 /* We assume nothing at this point */ 4833 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 4834 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON; 4835 4836 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4837 if (!mboxq) 4838 return -ENOMEM; 4839 /* obtain link type and link number via READ_CONFIG */ 4840 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 4841 lpfc_sli4_read_config(phba); 4842 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) 4843 goto retrieve_ppname; 4844 4845 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */ 4846 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes); 4847 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 4848 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen, 4849 LPFC_SLI4_MBX_NEMBED); 4850 if (alloclen < reqlen) { 4851 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4852 "3084 Allocated DMA memory size (%d) is " 4853 "less than the requested DMA memory size " 4854 "(%d)\n", alloclen, reqlen); 4855 rc = -ENOMEM; 4856 goto out_free_mboxq; 4857 } 4858 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4859 virtaddr = mboxq->sge_array->addr[0]; 4860 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr; 4861 shdr = 
&mbx_cntl_attr->cfg_shdr; 4862 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 4863 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 4864 if (shdr_status || shdr_add_status || rc) { 4865 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 4866 "3085 Mailbox x%x (x%x/x%x) failed, " 4867 "rc:x%x, status:x%x, add_status:x%x\n", 4868 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4869 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 4870 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 4871 rc, shdr_status, shdr_add_status); 4872 rc = -ENXIO; 4873 goto out_free_mboxq; 4874 } 4875 cntl_attr = &mbx_cntl_attr->cntl_attr; 4876 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 4877 phba->sli4_hba.lnk_info.lnk_tp = 4878 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr); 4879 phba->sli4_hba.lnk_info.lnk_no = 4880 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr); 4881 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4882 "3086 lnk_type:%d, lnk_numb:%d\n", 4883 phba->sli4_hba.lnk_info.lnk_tp, 4884 phba->sli4_hba.lnk_info.lnk_no); 4885 4886 retrieve_ppname: 4887 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 4888 LPFC_MBOX_OPCODE_GET_PORT_NAME, 4889 sizeof(struct lpfc_mbx_get_port_name) - 4890 sizeof(struct lpfc_sli4_cfg_mhdr), 4891 LPFC_SLI4_MBX_EMBED); 4892 get_port_name = &mboxq->u.mqe.un.get_port_name; 4893 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr; 4894 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1); 4895 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request, 4896 phba->sli4_hba.lnk_info.lnk_tp); 4897 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4898 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 4899 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 4900 if (shdr_status || shdr_add_status || rc) { 4901 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 4902 "3087 Mailbox x%x (x%x/x%x) failed: " 4903 "rc:x%x, status:x%x, add_status:x%x\n", 4904 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4905 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 4906 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 4907 rc, shdr_status, shdr_add_status); 4908 rc = -ENXIO; 4909 goto out_free_mboxq; 4910 } 4911 switch (phba->sli4_hba.lnk_info.lnk_no) { 4912 case LPFC_LINK_NUMBER_0: 4913 cport_name = bf_get(lpfc_mbx_get_port_name_name0, 4914 &get_port_name->u.response); 4915 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 4916 break; 4917 case LPFC_LINK_NUMBER_1: 4918 cport_name = bf_get(lpfc_mbx_get_port_name_name1, 4919 &get_port_name->u.response); 4920 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 4921 break; 4922 case LPFC_LINK_NUMBER_2: 4923 cport_name = bf_get(lpfc_mbx_get_port_name_name2, 4924 &get_port_name->u.response); 4925 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 4926 break; 4927 case LPFC_LINK_NUMBER_3: 4928 cport_name = bf_get(lpfc_mbx_get_port_name_name3, 4929 &get_port_name->u.response); 4930 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 4931 break; 4932 default: 4933 break; 4934 } 4935 4936 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) { 4937 phba->Port[0] = cport_name; 4938 phba->Port[1] = '\0'; 4939 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4940 "3091 SLI get port name: %s\n", phba->Port); 4941 } 4942 4943 out_free_mboxq: 4944 if (rc != MBX_TIMEOUT) { 4945 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) 4946 lpfc_sli4_mbox_cmd_free(phba, mboxq); 4947 else 4948 mempool_free(mboxq, phba->mbox_mem_pool); 4949 } 4950 return rc; 4951 } 4952 4953 /** 4954 * 
lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues 4955 * @phba: pointer to lpfc hba data structure. 4956 * 4957 * This routine is called to explicitly arm the SLI4 device's completion and 4958 * event queues 4959 **/ 4960 static void 4961 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) 4962 { 4963 int fcp_eqidx; 4964 4965 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); 4966 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); 4967 fcp_eqidx = 0; 4968 if (phba->sli4_hba.fcp_cq) { 4969 do { 4970 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], 4971 LPFC_QUEUE_REARM); 4972 } while (++fcp_eqidx < phba->cfg_fcp_io_channel); 4973 } 4974 if (phba->sli4_hba.hba_eq) { 4975 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; 4976 fcp_eqidx++) 4977 lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx], 4978 LPFC_QUEUE_REARM); 4979 } 4980 } 4981 4982 /** 4983 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count. 4984 * @phba: Pointer to HBA context object. 4985 * @type: The resource extent type. 4986 * @extnt_count: buffer to hold port available extent count. 4987 * @extnt_size: buffer to hold element count per extent. 4988 * 4989 * This function calls the port and retrievs the number of available 4990 * extents and their size for a particular extent type. 4991 * 4992 * Returns: 0 if successful. Nonzero otherwise. 4993 **/ 4994 int 4995 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type, 4996 uint16_t *extnt_count, uint16_t *extnt_size) 4997 { 4998 int rc = 0; 4999 uint32_t length; 5000 uint32_t mbox_tmo; 5001 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info; 5002 LPFC_MBOXQ_t *mbox; 5003 5004 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5005 if (!mbox) 5006 return -ENOMEM; 5007 5008 /* Find out how many extents are available for this resource type */ 5009 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) - 5010 sizeof(struct lpfc_sli4_cfg_mhdr)); 5011 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5012 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO, 5013 length, LPFC_SLI4_MBX_EMBED); 5014 5015 /* Send an extents count of 0 - the GET doesn't use it. 
*/ 5016 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 5017 LPFC_SLI4_MBX_EMBED); 5018 if (unlikely(rc)) { 5019 rc = -EIO; 5020 goto err_exit; 5021 } 5022 5023 if (!phba->sli4_hba.intr_enable) 5024 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5025 else { 5026 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5027 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5028 } 5029 if (unlikely(rc)) { 5030 rc = -EIO; 5031 goto err_exit; 5032 } 5033 5034 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info; 5035 if (bf_get(lpfc_mbox_hdr_status, 5036 &rsrc_info->header.cfg_shdr.response)) { 5037 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5038 "2930 Failed to get resource extents " 5039 "Status 0x%x Add'l Status 0x%x\n", 5040 bf_get(lpfc_mbox_hdr_status, 5041 &rsrc_info->header.cfg_shdr.response), 5042 bf_get(lpfc_mbox_hdr_add_status, 5043 &rsrc_info->header.cfg_shdr.response)); 5044 rc = -EIO; 5045 goto err_exit; 5046 } 5047 5048 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt, 5049 &rsrc_info->u.rsp); 5050 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size, 5051 &rsrc_info->u.rsp); 5052 5053 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5054 "3162 Retrieved extents type-%d from port: count:%d, " 5055 "size:%d\n", type, *extnt_count, *extnt_size); 5056 5057 err_exit: 5058 mempool_free(mbox, phba->mbox_mem_pool); 5059 return rc; 5060 } 5061 5062 /** 5063 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents. 5064 * @phba: Pointer to HBA context object. 5065 * @type: The extent type to check. 5066 * 5067 * This function reads the current available extents from the port and checks 5068 * if the extent count or extent size has changed since the last access. 5069 * Callers use this routine post port reset to understand if there is a 5070 * extent reprovisioning requirement. 5071 * 5072 * Returns: 5073 * -Error: error indicates problem. 5074 * 1: Extent count or size has changed. 5075 * 0: No changes. 5076 **/ 5077 static int 5078 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type) 5079 { 5080 uint16_t curr_ext_cnt, rsrc_ext_cnt; 5081 uint16_t size_diff, rsrc_ext_size; 5082 int rc = 0; 5083 struct lpfc_rsrc_blks *rsrc_entry; 5084 struct list_head *rsrc_blk_list = NULL; 5085 5086 size_diff = 0; 5087 curr_ext_cnt = 0; 5088 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5089 &rsrc_ext_cnt, 5090 &rsrc_ext_size); 5091 if (unlikely(rc)) 5092 return -EIO; 5093 5094 switch (type) { 5095 case LPFC_RSC_TYPE_FCOE_RPI: 5096 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5097 break; 5098 case LPFC_RSC_TYPE_FCOE_VPI: 5099 rsrc_blk_list = &phba->lpfc_vpi_blk_list; 5100 break; 5101 case LPFC_RSC_TYPE_FCOE_XRI: 5102 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5103 break; 5104 case LPFC_RSC_TYPE_FCOE_VFI: 5105 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5106 break; 5107 default: 5108 break; 5109 } 5110 5111 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) { 5112 curr_ext_cnt++; 5113 if (rsrc_entry->rsrc_size != rsrc_ext_size) 5114 size_diff++; 5115 } 5116 5117 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0) 5118 rc = 1; 5119 5120 return rc; 5121 } 5122 5123 /** 5124 * lpfc_sli4_cfg_post_extnts - 5125 * @phba: Pointer to HBA context object. 5126 * @extnt_cnt - number of available extents. 5127 * @type - the extent type (rpi, xri, vfi, vpi). 5128 * @emb - buffer to hold either MBX_EMBED or MBX_NEMBED operation. 5129 * @mbox - pointer to the caller's allocated mailbox structure. 
5130 * 5131 * This function executes the extents allocation request. It also 5132 * takes care of the amount of memory needed to allocate or get the 5133 * allocated extents. It is the caller's responsibility to evaluate 5134 * the response. 5135 * 5136 * Returns: 5137 * -Error: Error value describes the condition found. 5138 * 0: if successful 5139 **/ 5140 static int 5141 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt, 5142 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox) 5143 { 5144 int rc = 0; 5145 uint32_t req_len; 5146 uint32_t emb_len; 5147 uint32_t alloc_len, mbox_tmo; 5148 5149 /* Calculate the total requested length of the dma memory */ 5150 req_len = extnt_cnt * sizeof(uint16_t); 5151 5152 /* 5153 * Calculate the size of an embedded mailbox. The uint32_t 5154 * accounts for extents-specific word. 5155 */ 5156 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 5157 sizeof(uint32_t); 5158 5159 /* 5160 * Presume the allocation and response will fit into an embedded 5161 * mailbox. If not true, reconfigure to a non-embedded mailbox. 5162 */ 5163 *emb = LPFC_SLI4_MBX_EMBED; 5164 if (req_len > emb_len) { 5165 req_len = extnt_cnt * sizeof(uint16_t) + 5166 sizeof(union lpfc_sli4_cfg_shdr) + 5167 sizeof(uint32_t); 5168 *emb = LPFC_SLI4_MBX_NEMBED; 5169 } 5170 5171 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5172 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT, 5173 req_len, *emb); 5174 if (alloc_len < req_len) { 5175 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5176 "2982 Allocated DMA memory size (x%x) is " 5177 "less than the requested DMA memory " 5178 "size (x%x)\n", alloc_len, req_len); 5179 return -ENOMEM; 5180 } 5181 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb); 5182 if (unlikely(rc)) 5183 return -EIO; 5184 5185 if (!phba->sli4_hba.intr_enable) 5186 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5187 else { 5188 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5189 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5190 } 5191 5192 if (unlikely(rc)) 5193 rc = -EIO; 5194 return rc; 5195 } 5196 5197 /** 5198 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent. 5199 * @phba: Pointer to HBA context object. 5200 * @type: The resource extent type to allocate. 5201 * 5202 * This function allocates the number of elements for the specified 5203 * resource type. 
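 * For each extent returned by the port, a struct lpfc_rsrc_blks entry
 * recording {rsrc_start, rsrc_size} is queued on the per-type block
 * list, a bitmask tracks which ids are in use, and a flat id array
 * records the port-assigned ids. For example, a response of two
 * extents of 64 elements each would yield 128 usable resource ids.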
5204 **/ 5205 static int 5206 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) 5207 { 5208 bool emb = false; 5209 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size; 5210 uint16_t rsrc_id, rsrc_start, j, k; 5211 uint16_t *ids; 5212 int i, rc; 5213 unsigned long longs; 5214 unsigned long *bmask; 5215 struct lpfc_rsrc_blks *rsrc_blks; 5216 LPFC_MBOXQ_t *mbox; 5217 uint32_t length; 5218 struct lpfc_id_range *id_array = NULL; 5219 void *virtaddr = NULL; 5220 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 5221 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 5222 struct list_head *ext_blk_list; 5223 5224 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5225 &rsrc_cnt, 5226 &rsrc_size); 5227 if (unlikely(rc)) 5228 return -EIO; 5229 5230 if ((rsrc_cnt == 0) || (rsrc_size == 0)) { 5231 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5232 "3009 No available Resource Extents " 5233 "for resource type 0x%x: Count: 0x%x, " 5234 "Size 0x%x\n", type, rsrc_cnt, 5235 rsrc_size); 5236 return -ENOMEM; 5237 } 5238 5239 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI, 5240 "2903 Post resource extents type-0x%x: " 5241 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size); 5242 5243 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5244 if (!mbox) 5245 return -ENOMEM; 5246 5247 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox); 5248 if (unlikely(rc)) { 5249 rc = -EIO; 5250 goto err_exit; 5251 } 5252 5253 /* 5254 * Figure out where the response is located. Then get local pointers 5255 * to the response data. The port does not guarantee to respond to 5256 * all extents counts request so update the local variable with the 5257 * allocated count from the port. 5258 */ 5259 if (emb == LPFC_SLI4_MBX_EMBED) { 5260 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 5261 id_array = &rsrc_ext->u.rsp.id[0]; 5262 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 5263 } else { 5264 virtaddr = mbox->sge_array->addr[0]; 5265 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 5266 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 5267 id_array = &n_rsrc->id; 5268 } 5269 5270 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG; 5271 rsrc_id_cnt = rsrc_cnt * rsrc_size; 5272 5273 /* 5274 * Based on the resource size and count, correct the base and max 5275 * resource values. 5276 */ 5277 length = sizeof(struct lpfc_rsrc_blks); 5278 switch (type) { 5279 case LPFC_RSC_TYPE_FCOE_RPI: 5280 phba->sli4_hba.rpi_bmask = kzalloc(longs * 5281 sizeof(unsigned long), 5282 GFP_KERNEL); 5283 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 5284 rc = -ENOMEM; 5285 goto err_exit; 5286 } 5287 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt * 5288 sizeof(uint16_t), 5289 GFP_KERNEL); 5290 if (unlikely(!phba->sli4_hba.rpi_ids)) { 5291 kfree(phba->sli4_hba.rpi_bmask); 5292 rc = -ENOMEM; 5293 goto err_exit; 5294 } 5295 5296 /* 5297 * The next_rpi was initialized with the maximum available 5298 * count but the port may allocate a smaller number. Catch 5299 * that case and update the next_rpi. 5300 */ 5301 phba->sli4_hba.next_rpi = rsrc_id_cnt; 5302 5303 /* Initialize local ptrs for common extent processing later. 
*/ 5304 bmask = phba->sli4_hba.rpi_bmask; 5305 ids = phba->sli4_hba.rpi_ids; 5306 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5307 break; 5308 case LPFC_RSC_TYPE_FCOE_VPI: 5309 phba->vpi_bmask = kzalloc(longs * 5310 sizeof(unsigned long), 5311 GFP_KERNEL); 5312 if (unlikely(!phba->vpi_bmask)) { 5313 rc = -ENOMEM; 5314 goto err_exit; 5315 } 5316 phba->vpi_ids = kzalloc(rsrc_id_cnt * 5317 sizeof(uint16_t), 5318 GFP_KERNEL); 5319 if (unlikely(!phba->vpi_ids)) { 5320 kfree(phba->vpi_bmask); 5321 rc = -ENOMEM; 5322 goto err_exit; 5323 } 5324 5325 /* Initialize local ptrs for common extent processing later. */ 5326 bmask = phba->vpi_bmask; 5327 ids = phba->vpi_ids; 5328 ext_blk_list = &phba->lpfc_vpi_blk_list; 5329 break; 5330 case LPFC_RSC_TYPE_FCOE_XRI: 5331 phba->sli4_hba.xri_bmask = kzalloc(longs * 5332 sizeof(unsigned long), 5333 GFP_KERNEL); 5334 if (unlikely(!phba->sli4_hba.xri_bmask)) { 5335 rc = -ENOMEM; 5336 goto err_exit; 5337 } 5338 phba->sli4_hba.max_cfg_param.xri_used = 0; 5339 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt * 5340 sizeof(uint16_t), 5341 GFP_KERNEL); 5342 if (unlikely(!phba->sli4_hba.xri_ids)) { 5343 kfree(phba->sli4_hba.xri_bmask); 5344 rc = -ENOMEM; 5345 goto err_exit; 5346 } 5347 5348 /* Initialize local ptrs for common extent processing later. */ 5349 bmask = phba->sli4_hba.xri_bmask; 5350 ids = phba->sli4_hba.xri_ids; 5351 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5352 break; 5353 case LPFC_RSC_TYPE_FCOE_VFI: 5354 phba->sli4_hba.vfi_bmask = kzalloc(longs * 5355 sizeof(unsigned long), 5356 GFP_KERNEL); 5357 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 5358 rc = -ENOMEM; 5359 goto err_exit; 5360 } 5361 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt * 5362 sizeof(uint16_t), 5363 GFP_KERNEL); 5364 if (unlikely(!phba->sli4_hba.vfi_ids)) { 5365 kfree(phba->sli4_hba.vfi_bmask); 5366 rc = -ENOMEM; 5367 goto err_exit; 5368 } 5369 5370 /* Initialize local ptrs for common extent processing later. */ 5371 bmask = phba->sli4_hba.vfi_bmask; 5372 ids = phba->sli4_hba.vfi_ids; 5373 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5374 break; 5375 default: 5376 /* Unsupported Opcode. Fail call. */ 5377 id_array = NULL; 5378 bmask = NULL; 5379 ids = NULL; 5380 ext_blk_list = NULL; 5381 goto err_exit; 5382 } 5383 5384 /* 5385 * Complete initializing the extent configuration with the 5386 * allocated ids assigned to this function. The bitmask serves 5387 * as an index into the array and manages the available ids. The 5388 * array just stores the ids communicated to the port via the wqes. 5389 */ 5390 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) { 5391 if ((i % 2) == 0) 5392 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0, 5393 &id_array[k]); 5394 else 5395 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1, 5396 &id_array[k]); 5397 5398 rsrc_blks = kzalloc(length, GFP_KERNEL); 5399 if (unlikely(!rsrc_blks)) { 5400 rc = -ENOMEM; 5401 kfree(bmask); 5402 kfree(ids); 5403 goto err_exit; 5404 } 5405 rsrc_blks->rsrc_start = rsrc_id; 5406 rsrc_blks->rsrc_size = rsrc_size; 5407 list_add_tail(&rsrc_blks->list, ext_blk_list); 5408 rsrc_start = rsrc_id; 5409 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) 5410 phba->sli4_hba.scsi_xri_start = rsrc_start + 5411 lpfc_sli4_get_els_iocb_cnt(phba); 5412 5413 while (rsrc_id < (rsrc_start + rsrc_size)) { 5414 ids[j] = rsrc_id; 5415 rsrc_id++; 5416 j++; 5417 } 5418 /* Entire word processed. 
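Each response word carries two 16-bit resource IDs, so the word index advances only on odd iterations.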
Get next word.*/ 5419 if ((i % 2) == 1) 5420 k++; 5421 } 5422 err_exit: 5423 lpfc_sli4_mbox_cmd_free(phba, mbox); 5424 return rc; 5425 } 5426 5427 /** 5428 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent. 5429 * @phba: Pointer to HBA context object. 5430 * @type: the extent's type. 5431 * 5432 * This function deallocates all extents of a particular resource type. 5433 * SLI4 does not allow for deallocating a particular extent range. It 5434 * is the caller's responsibility to release all kernel memory resources. 5435 **/ 5436 static int 5437 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) 5438 { 5439 int rc; 5440 uint32_t length, mbox_tmo = 0; 5441 LPFC_MBOXQ_t *mbox; 5442 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc; 5443 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next; 5444 5445 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5446 if (!mbox) 5447 return -ENOMEM; 5448 5449 /* 5450 * This function sends an embedded mailbox because it only sends the 5451 * the resource type. All extents of this type are released by the 5452 * port. 5453 */ 5454 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) - 5455 sizeof(struct lpfc_sli4_cfg_mhdr)); 5456 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5457 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT, 5458 length, LPFC_SLI4_MBX_EMBED); 5459 5460 /* Send an extents count of 0 - the dealloc doesn't use it. */ 5461 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 5462 LPFC_SLI4_MBX_EMBED); 5463 if (unlikely(rc)) { 5464 rc = -EIO; 5465 goto out_free_mbox; 5466 } 5467 if (!phba->sli4_hba.intr_enable) 5468 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5469 else { 5470 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5471 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5472 } 5473 if (unlikely(rc)) { 5474 rc = -EIO; 5475 goto out_free_mbox; 5476 } 5477 5478 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents; 5479 if (bf_get(lpfc_mbox_hdr_status, 5480 &dealloc_rsrc->header.cfg_shdr.response)) { 5481 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5482 "2919 Failed to release resource extents " 5483 "for type %d - Status 0x%x Add'l Status 0x%x. " 5484 "Resource memory not released.\n", 5485 type, 5486 bf_get(lpfc_mbox_hdr_status, 5487 &dealloc_rsrc->header.cfg_shdr.response), 5488 bf_get(lpfc_mbox_hdr_add_status, 5489 &dealloc_rsrc->header.cfg_shdr.response)); 5490 rc = -EIO; 5491 goto out_free_mbox; 5492 } 5493 5494 /* Release kernel memory resources for the specific type. 
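Each extent block descriptor queued on the per-type list is also removed and freed.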
*/ 5495 switch (type) { 5496 case LPFC_RSC_TYPE_FCOE_VPI: 5497 kfree(phba->vpi_bmask); 5498 kfree(phba->vpi_ids); 5499 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5500 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5501 &phba->lpfc_vpi_blk_list, list) { 5502 list_del_init(&rsrc_blk->list); 5503 kfree(rsrc_blk); 5504 } 5505 phba->sli4_hba.max_cfg_param.vpi_used = 0; 5506 break; 5507 case LPFC_RSC_TYPE_FCOE_XRI: 5508 kfree(phba->sli4_hba.xri_bmask); 5509 kfree(phba->sli4_hba.xri_ids); 5510 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5511 &phba->sli4_hba.lpfc_xri_blk_list, list) { 5512 list_del_init(&rsrc_blk->list); 5513 kfree(rsrc_blk); 5514 } 5515 break; 5516 case LPFC_RSC_TYPE_FCOE_VFI: 5517 kfree(phba->sli4_hba.vfi_bmask); 5518 kfree(phba->sli4_hba.vfi_ids); 5519 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5520 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5521 &phba->sli4_hba.lpfc_vfi_blk_list, list) { 5522 list_del_init(&rsrc_blk->list); 5523 kfree(rsrc_blk); 5524 } 5525 break; 5526 case LPFC_RSC_TYPE_FCOE_RPI: 5527 /* RPI bitmask and physical id array are cleaned up earlier. */ 5528 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5529 &phba->sli4_hba.lpfc_rpi_blk_list, list) { 5530 list_del_init(&rsrc_blk->list); 5531 kfree(rsrc_blk); 5532 } 5533 break; 5534 default: 5535 break; 5536 } 5537 5538 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5539 5540 out_free_mbox: 5541 mempool_free(mbox, phba->mbox_mem_pool); 5542 return rc; 5543 } 5544 5545 /** 5546 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents. 5547 * @phba: Pointer to HBA context object. 5548 * 5549 * This function allocates all SLI4 resource identifiers. 5550 **/ 5551 int 5552 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) 5553 { 5554 int i, rc, error = 0; 5555 uint16_t count, base; 5556 unsigned long longs; 5557 5558 if (!phba->sli4_hba.rpi_hdrs_in_use) 5559 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 5560 if (phba->sli4_hba.extents_in_use) { 5561 /* 5562 * The port supports resource extents. The XRI, VPI, VFI, RPI 5563 * resource extent count must be read and allocated before 5564 * provisioning the resource id arrays. 5565 */ 5566 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 5567 LPFC_IDX_RSRC_RDY) { 5568 /* 5569 * Extent-based resources are set - the driver could 5570 * be in a port reset. Figure out if any corrective 5571 * actions need to be taken. 5572 */ 5573 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5574 LPFC_RSC_TYPE_FCOE_VFI); 5575 if (rc != 0) 5576 error++; 5577 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5578 LPFC_RSC_TYPE_FCOE_VPI); 5579 if (rc != 0) 5580 error++; 5581 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5582 LPFC_RSC_TYPE_FCOE_XRI); 5583 if (rc != 0) 5584 error++; 5585 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5586 LPFC_RSC_TYPE_FCOE_RPI); 5587 if (rc != 0) 5588 error++; 5589 5590 /* 5591 * It's possible that the number of resources 5592 * provided to this port instance changed between 5593 * resets. Detect this condition and reallocate 5594 * resources. Otherwise, there is no action. 5595 */ 5596 if (error) { 5597 lpfc_printf_log(phba, KERN_INFO, 5598 LOG_MBOX | LOG_INIT, 5599 "2931 Detected extent resource " 5600 "change. 
Reallocating all " 5601 "extents.\n"); 5602 rc = lpfc_sli4_dealloc_extent(phba, 5603 LPFC_RSC_TYPE_FCOE_VFI); 5604 rc = lpfc_sli4_dealloc_extent(phba, 5605 LPFC_RSC_TYPE_FCOE_VPI); 5606 rc = lpfc_sli4_dealloc_extent(phba, 5607 LPFC_RSC_TYPE_FCOE_XRI); 5608 rc = lpfc_sli4_dealloc_extent(phba, 5609 LPFC_RSC_TYPE_FCOE_RPI); 5610 } else 5611 return 0; 5612 } 5613 5614 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 5615 if (unlikely(rc)) 5616 goto err_exit; 5617 5618 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 5619 if (unlikely(rc)) 5620 goto err_exit; 5621 5622 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 5623 if (unlikely(rc)) 5624 goto err_exit; 5625 5626 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 5627 if (unlikely(rc)) 5628 goto err_exit; 5629 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 5630 LPFC_IDX_RSRC_RDY); 5631 return rc; 5632 } else { 5633 /* 5634 * The port does not support resource extents. The XRI, VPI, 5635 * VFI, RPI resource ids were determined from READ_CONFIG. 5636 * Just allocate the bitmasks and provision the resource id 5637 * arrays. If a port reset is active, the resources don't 5638 * need any action - just exit. 5639 */ 5640 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 5641 LPFC_IDX_RSRC_RDY) { 5642 lpfc_sli4_dealloc_resource_identifiers(phba); 5643 lpfc_sli4_remove_rpis(phba); 5644 } 5645 /* RPIs. */ 5646 count = phba->sli4_hba.max_cfg_param.max_rpi; 5647 if (count <= 0) { 5648 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5649 "3279 Invalid provisioning of " 5650 "rpi:%d\n", count); 5651 rc = -EINVAL; 5652 goto err_exit; 5653 } 5654 base = phba->sli4_hba.max_cfg_param.rpi_base; 5655 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5656 phba->sli4_hba.rpi_bmask = kzalloc(longs * 5657 sizeof(unsigned long), 5658 GFP_KERNEL); 5659 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 5660 rc = -ENOMEM; 5661 goto err_exit; 5662 } 5663 phba->sli4_hba.rpi_ids = kzalloc(count * 5664 sizeof(uint16_t), 5665 GFP_KERNEL); 5666 if (unlikely(!phba->sli4_hba.rpi_ids)) { 5667 rc = -ENOMEM; 5668 goto free_rpi_bmask; 5669 } 5670 5671 for (i = 0; i < count; i++) 5672 phba->sli4_hba.rpi_ids[i] = base + i; 5673 5674 /* VPIs. */ 5675 count = phba->sli4_hba.max_cfg_param.max_vpi; 5676 if (count <= 0) { 5677 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5678 "3280 Invalid provisioning of " 5679 "vpi:%d\n", count); 5680 rc = -EINVAL; 5681 goto free_rpi_ids; 5682 } 5683 base = phba->sli4_hba.max_cfg_param.vpi_base; 5684 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5685 phba->vpi_bmask = kzalloc(longs * 5686 sizeof(unsigned long), 5687 GFP_KERNEL); 5688 if (unlikely(!phba->vpi_bmask)) { 5689 rc = -ENOMEM; 5690 goto free_rpi_ids; 5691 } 5692 phba->vpi_ids = kzalloc(count * 5693 sizeof(uint16_t), 5694 GFP_KERNEL); 5695 if (unlikely(!phba->vpi_ids)) { 5696 rc = -ENOMEM; 5697 goto free_vpi_bmask; 5698 } 5699 5700 for (i = 0; i < count; i++) 5701 phba->vpi_ids[i] = base + i; 5702 5703 /* XRIs. 
*/ 5704 count = phba->sli4_hba.max_cfg_param.max_xri; 5705 if (count <= 0) { 5706 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5707 "3281 Invalid provisioning of " 5708 "xri:%d\n", count); 5709 rc = -EINVAL; 5710 goto free_vpi_ids; 5711 } 5712 base = phba->sli4_hba.max_cfg_param.xri_base; 5713 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5714 phba->sli4_hba.xri_bmask = kzalloc(longs * 5715 sizeof(unsigned long), 5716 GFP_KERNEL); 5717 if (unlikely(!phba->sli4_hba.xri_bmask)) { 5718 rc = -ENOMEM; 5719 goto free_vpi_ids; 5720 } 5721 phba->sli4_hba.max_cfg_param.xri_used = 0; 5722 phba->sli4_hba.xri_ids = kzalloc(count * 5723 sizeof(uint16_t), 5724 GFP_KERNEL); 5725 if (unlikely(!phba->sli4_hba.xri_ids)) { 5726 rc = -ENOMEM; 5727 goto free_xri_bmask; 5728 } 5729 5730 for (i = 0; i < count; i++) 5731 phba->sli4_hba.xri_ids[i] = base + i; 5732 5733 /* VFIs. */ 5734 count = phba->sli4_hba.max_cfg_param.max_vfi; 5735 if (count <= 0) { 5736 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5737 "3282 Invalid provisioning of " 5738 "vfi:%d\n", count); 5739 rc = -EINVAL; 5740 goto free_xri_ids; 5741 } 5742 base = phba->sli4_hba.max_cfg_param.vfi_base; 5743 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5744 phba->sli4_hba.vfi_bmask = kzalloc(longs * 5745 sizeof(unsigned long), 5746 GFP_KERNEL); 5747 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 5748 rc = -ENOMEM; 5749 goto free_xri_ids; 5750 } 5751 phba->sli4_hba.vfi_ids = kzalloc(count * 5752 sizeof(uint16_t), 5753 GFP_KERNEL); 5754 if (unlikely(!phba->sli4_hba.vfi_ids)) { 5755 rc = -ENOMEM; 5756 goto free_vfi_bmask; 5757 } 5758 5759 for (i = 0; i < count; i++) 5760 phba->sli4_hba.vfi_ids[i] = base + i; 5761 5762 /* 5763 * Mark all resources ready. An HBA reset doesn't need 5764 * to reset the initialization. 5765 */ 5766 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 5767 LPFC_IDX_RSRC_RDY); 5768 return 0; 5769 } 5770 5771 free_vfi_bmask: 5772 kfree(phba->sli4_hba.vfi_bmask); 5773 free_xri_ids: 5774 kfree(phba->sli4_hba.xri_ids); 5775 free_xri_bmask: 5776 kfree(phba->sli4_hba.xri_bmask); 5777 free_vpi_ids: 5778 kfree(phba->vpi_ids); 5779 free_vpi_bmask: 5780 kfree(phba->vpi_bmask); 5781 free_rpi_ids: 5782 kfree(phba->sli4_hba.rpi_ids); 5783 free_rpi_bmask: 5784 kfree(phba->sli4_hba.rpi_bmask); 5785 err_exit: 5786 return rc; 5787 } 5788 5789 /** 5790 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents. 5791 * @phba: Pointer to HBA context object. 5792 * 5793 * This function allocates the number of elements for the specified 5794 * resource type. 5795 **/ 5796 int 5797 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba) 5798 { 5799 if (phba->sli4_hba.extents_in_use) { 5800 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 5801 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 5802 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 5803 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 5804 } else { 5805 kfree(phba->vpi_bmask); 5806 phba->sli4_hba.max_cfg_param.vpi_used = 0; 5807 kfree(phba->vpi_ids); 5808 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5809 kfree(phba->sli4_hba.xri_bmask); 5810 kfree(phba->sli4_hba.xri_ids); 5811 kfree(phba->sli4_hba.vfi_bmask); 5812 kfree(phba->sli4_hba.vfi_ids); 5813 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5814 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5815 } 5816 5817 return 0; 5818 } 5819 5820 /** 5821 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents. 
5822 * @phba: Pointer to HBA context object. 5823 * @type: The resource extent type. 5824 * @extnt_count: buffer to hold port extent count response 5825 * @extnt_size: buffer to hold port extent size response. 5826 * 5827 * This function calls the port to read the host allocated extents 5828 * for a particular type. 5829 **/ 5830 int 5831 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type, 5832 uint16_t *extnt_cnt, uint16_t *extnt_size) 5833 { 5834 bool emb; 5835 int rc = 0; 5836 uint16_t curr_blks = 0; 5837 uint32_t req_len, emb_len; 5838 uint32_t alloc_len, mbox_tmo; 5839 struct list_head *blk_list_head; 5840 struct lpfc_rsrc_blks *rsrc_blk; 5841 LPFC_MBOXQ_t *mbox; 5842 void *virtaddr = NULL; 5843 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 5844 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 5845 union lpfc_sli4_cfg_shdr *shdr; 5846 5847 switch (type) { 5848 case LPFC_RSC_TYPE_FCOE_VPI: 5849 blk_list_head = &phba->lpfc_vpi_blk_list; 5850 break; 5851 case LPFC_RSC_TYPE_FCOE_XRI: 5852 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list; 5853 break; 5854 case LPFC_RSC_TYPE_FCOE_VFI: 5855 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list; 5856 break; 5857 case LPFC_RSC_TYPE_FCOE_RPI: 5858 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list; 5859 break; 5860 default: 5861 return -EIO; 5862 } 5863 5864 /* Count the number of extents currently allocatd for this type. */ 5865 list_for_each_entry(rsrc_blk, blk_list_head, list) { 5866 if (curr_blks == 0) { 5867 /* 5868 * The GET_ALLOCATED mailbox does not return the size, 5869 * just the count. The size should be just the size 5870 * stored in the current allocated block and all sizes 5871 * for an extent type are the same so set the return 5872 * value now. 5873 */ 5874 *extnt_size = rsrc_blk->rsrc_size; 5875 } 5876 curr_blks++; 5877 } 5878 5879 /* Calculate the total requested length of the dma memory. */ 5880 req_len = curr_blks * sizeof(uint16_t); 5881 5882 /* 5883 * Calculate the size of an embedded mailbox. The uint32_t 5884 * accounts for extents-specific word. 5885 */ 5886 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 5887 sizeof(uint32_t); 5888 5889 /* 5890 * Presume the allocation and response will fit into an embedded 5891 * mailbox. If not true, reconfigure to a non-embedded mailbox. 
5892 */ 5893 emb = LPFC_SLI4_MBX_EMBED; 5894 req_len = emb_len; 5895 if (req_len > emb_len) { 5896 req_len = curr_blks * sizeof(uint16_t) + 5897 sizeof(union lpfc_sli4_cfg_shdr) + 5898 sizeof(uint32_t); 5899 emb = LPFC_SLI4_MBX_NEMBED; 5900 } 5901 5902 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5903 if (!mbox) 5904 return -ENOMEM; 5905 memset(mbox, 0, sizeof(LPFC_MBOXQ_t)); 5906 5907 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5908 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT, 5909 req_len, emb); 5910 if (alloc_len < req_len) { 5911 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5912 "2983 Allocated DMA memory size (x%x) is " 5913 "less than the requested DMA memory " 5914 "size (x%x)\n", alloc_len, req_len); 5915 rc = -ENOMEM; 5916 goto err_exit; 5917 } 5918 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb); 5919 if (unlikely(rc)) { 5920 rc = -EIO; 5921 goto err_exit; 5922 } 5923 5924 if (!phba->sli4_hba.intr_enable) 5925 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5926 else { 5927 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5928 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5929 } 5930 5931 if (unlikely(rc)) { 5932 rc = -EIO; 5933 goto err_exit; 5934 } 5935 5936 /* 5937 * Figure out where the response is located. Then get local pointers 5938 * to the response data. The port does not guarantee to respond to 5939 * all extents counts request so update the local variable with the 5940 * allocated count from the port. 5941 */ 5942 if (emb == LPFC_SLI4_MBX_EMBED) { 5943 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 5944 shdr = &rsrc_ext->header.cfg_shdr; 5945 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 5946 } else { 5947 virtaddr = mbox->sge_array->addr[0]; 5948 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 5949 shdr = &n_rsrc->cfg_shdr; 5950 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 5951 } 5952 5953 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) { 5954 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5955 "2984 Failed to read allocated resources " 5956 "for type %d - Status 0x%x Add'l Status 0x%x.\n", 5957 type, 5958 bf_get(lpfc_mbox_hdr_status, &shdr->response), 5959 bf_get(lpfc_mbox_hdr_add_status, &shdr->response)); 5960 rc = -EIO; 5961 goto err_exit; 5962 } 5963 err_exit: 5964 lpfc_sli4_mbox_cmd_free(phba, mbox); 5965 return rc; 5966 } 5967 5968 /** 5969 * lpfc_sli4_repost_els_sgl_list - Repsot the els buffers sgl pages as block 5970 * @phba: pointer to lpfc hba data structure. 5971 * 5972 * This routine walks the list of els buffers that have been allocated and 5973 * repost them to the port by using SGL block post. This is needed after a 5974 * pci_function_reset/warm_start or start. It attempts to construct blocks 5975 * of els buffer sgls which contains contiguous xris and uses the non-embedded 5976 * SGL block post mailbox commands to post them to the port. For single els 5977 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post 5978 * mailbox command for posting. 5979 * 5980 * Returns: 0 = success, non-zero failure. 
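 * The hbalock is taken and released internally while the sgl entries are
 * moved between the driver lists.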
5981 **/ 5982 static int 5983 lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) 5984 { 5985 struct lpfc_sglq *sglq_entry = NULL; 5986 struct lpfc_sglq *sglq_entry_next = NULL; 5987 struct lpfc_sglq *sglq_entry_first = NULL; 5988 int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0; 5989 int last_xritag = NO_XRI; 5990 LIST_HEAD(prep_sgl_list); 5991 LIST_HEAD(blck_sgl_list); 5992 LIST_HEAD(allc_sgl_list); 5993 LIST_HEAD(post_sgl_list); 5994 LIST_HEAD(free_sgl_list); 5995 5996 spin_lock_irq(&phba->hbalock); 5997 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list); 5998 spin_unlock_irq(&phba->hbalock); 5999 6000 total_cnt = phba->sli4_hba.els_xri_cnt; 6001 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 6002 &allc_sgl_list, list) { 6003 list_del_init(&sglq_entry->list); 6004 block_cnt++; 6005 if ((last_xritag != NO_XRI) && 6006 (sglq_entry->sli4_xritag != last_xritag + 1)) { 6007 /* a hole in xri block, form a sgl posting block */ 6008 list_splice_init(&prep_sgl_list, &blck_sgl_list); 6009 post_cnt = block_cnt - 1; 6010 /* prepare list for next posting block */ 6011 list_add_tail(&sglq_entry->list, &prep_sgl_list); 6012 block_cnt = 1; 6013 } else { 6014 /* prepare list for next posting block */ 6015 list_add_tail(&sglq_entry->list, &prep_sgl_list); 6016 /* enough sgls for non-embed sgl mbox command */ 6017 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { 6018 list_splice_init(&prep_sgl_list, 6019 &blck_sgl_list); 6020 post_cnt = block_cnt; 6021 block_cnt = 0; 6022 } 6023 } 6024 num_posted++; 6025 6026 /* keep track of last sgl's xritag */ 6027 last_xritag = sglq_entry->sli4_xritag; 6028 6029 /* end of repost sgl list condition for els buffers */ 6030 if (num_posted == phba->sli4_hba.els_xri_cnt) { 6031 if (post_cnt == 0) { 6032 list_splice_init(&prep_sgl_list, 6033 &blck_sgl_list); 6034 post_cnt = block_cnt; 6035 } else if (block_cnt == 1) { 6036 status = lpfc_sli4_post_sgl(phba, 6037 sglq_entry->phys, 0, 6038 sglq_entry->sli4_xritag); 6039 if (!status) { 6040 /* successful, put sgl to posted list */ 6041 list_add_tail(&sglq_entry->list, 6042 &post_sgl_list); 6043 } else { 6044 /* Failure, put sgl to free list */ 6045 lpfc_printf_log(phba, KERN_WARNING, 6046 LOG_SLI, 6047 "3159 Failed to post els " 6048 "sgl, xritag:x%x\n", 6049 sglq_entry->sli4_xritag); 6050 list_add_tail(&sglq_entry->list, 6051 &free_sgl_list); 6052 total_cnt--; 6053 } 6054 } 6055 } 6056 6057 /* continue until a nembed page worth of sgls */ 6058 if (post_cnt == 0) 6059 continue; 6060 6061 /* post the els buffer list sgls as a block */ 6062 status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list, 6063 post_cnt); 6064 6065 if (!status) { 6066 /* success, put sgl list to posted sgl list */ 6067 list_splice_init(&blck_sgl_list, &post_sgl_list); 6068 } else { 6069 /* Failure, put sgl list to free sgl list */ 6070 sglq_entry_first = list_first_entry(&blck_sgl_list, 6071 struct lpfc_sglq, 6072 list); 6073 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6074 "3160 Failed to post els sgl-list, " 6075 "xritag:x%x-x%x\n", 6076 sglq_entry_first->sli4_xritag, 6077 (sglq_entry_first->sli4_xritag + 6078 post_cnt - 1)); 6079 list_splice_init(&blck_sgl_list, &free_sgl_list); 6080 total_cnt -= post_cnt; 6081 } 6082 6083 /* don't reset xirtag due to hole in xri block */ 6084 if (block_cnt == 0) 6085 last_xritag = NO_XRI; 6086 6087 /* reset els sgl post count for next round of posting */ 6088 post_cnt = 0; 6089 } 6090 /* update the number of XRIs posted for ELS */ 6091 phba->sli4_hba.els_xri_cnt = total_cnt; 6092 
6093 /* free the els sgls failed to post */ 6094 lpfc_free_sgl_list(phba, &free_sgl_list); 6095 6096 /* push els sgls posted to the availble list */ 6097 if (!list_empty(&post_sgl_list)) { 6098 spin_lock_irq(&phba->hbalock); 6099 list_splice_init(&post_sgl_list, 6100 &phba->sli4_hba.lpfc_sgl_list); 6101 spin_unlock_irq(&phba->hbalock); 6102 } else { 6103 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6104 "3161 Failure to post els sgl to port.\n"); 6105 return -EIO; 6106 } 6107 return 0; 6108 } 6109 6110 /** 6111 * lpfc_sli4_hba_setup - SLI4 device intialization PCI function 6112 * @phba: Pointer to HBA context object. 6113 * 6114 * This function is the main SLI4 device intialization PCI function. This 6115 * function is called by the HBA intialization code, HBA reset code and 6116 * HBA error attention handler code. Caller is not required to hold any 6117 * locks. 6118 **/ 6119 int 6120 lpfc_sli4_hba_setup(struct lpfc_hba *phba) 6121 { 6122 int rc; 6123 LPFC_MBOXQ_t *mboxq; 6124 struct lpfc_mqe *mqe; 6125 uint8_t *vpd; 6126 uint32_t vpd_size; 6127 uint32_t ftr_rsp = 0; 6128 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); 6129 struct lpfc_vport *vport = phba->pport; 6130 struct lpfc_dmabuf *mp; 6131 6132 /* Perform a PCI function reset to start from clean */ 6133 rc = lpfc_pci_function_reset(phba); 6134 if (unlikely(rc)) 6135 return -ENODEV; 6136 6137 /* Check the HBA Host Status Register for readyness */ 6138 rc = lpfc_sli4_post_status_check(phba); 6139 if (unlikely(rc)) 6140 return -ENODEV; 6141 else { 6142 spin_lock_irq(&phba->hbalock); 6143 phba->sli.sli_flag |= LPFC_SLI_ACTIVE; 6144 spin_unlock_irq(&phba->hbalock); 6145 } 6146 6147 /* 6148 * Allocate a single mailbox container for initializing the 6149 * port. 6150 */ 6151 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6152 if (!mboxq) 6153 return -ENOMEM; 6154 6155 /* Issue READ_REV to collect vpd and FW information. */ 6156 vpd_size = SLI4_PAGE_SIZE; 6157 vpd = kzalloc(vpd_size, GFP_KERNEL); 6158 if (!vpd) { 6159 rc = -ENOMEM; 6160 goto out_free_mbox; 6161 } 6162 6163 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); 6164 if (unlikely(rc)) { 6165 kfree(vpd); 6166 goto out_free_mbox; 6167 } 6168 6169 mqe = &mboxq->u.mqe; 6170 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 6171 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) 6172 phba->hba_flag |= HBA_FCOE_MODE; 6173 else 6174 phba->hba_flag &= ~HBA_FCOE_MODE; 6175 6176 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == 6177 LPFC_DCBX_CEE_MODE) 6178 phba->hba_flag |= HBA_FIP_SUPPORT; 6179 else 6180 phba->hba_flag &= ~HBA_FIP_SUPPORT; 6181 6182 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH; 6183 6184 if (phba->sli_rev != LPFC_SLI_REV4) { 6185 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6186 "0376 READ_REV Error. SLI Level %d " 6187 "FCoE enabled %d\n", 6188 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE); 6189 rc = -EIO; 6190 kfree(vpd); 6191 goto out_free_mbox; 6192 } 6193 6194 /* 6195 * Continue initialization with default values even if driver failed 6196 * to read FCoE param config regions, only read parameters if the 6197 * board is FCoE 6198 */ 6199 if (phba->hba_flag & HBA_FCOE_MODE && 6200 lpfc_sli4_read_fcoe_params(phba)) 6201 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT, 6202 "2570 Failed to read FCoE parameters\n"); 6203 6204 /* 6205 * Retrieve sli4 device physical port name, failure of doing it 6206 * is considered as non-fatal. 
6207 */ 6208 rc = lpfc_sli4_retrieve_pport_name(phba); 6209 if (!rc) 6210 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6211 "3080 Successful retrieving SLI4 device " 6212 "physical port name: %s.\n", phba->Port); 6213 6214 /* 6215 * Evaluate the read rev and vpd data. Populate the driver 6216 * state with the results. If this routine fails, the failure 6217 * is not fatal as the driver will use generic values. 6218 */ 6219 rc = lpfc_parse_vpd(phba, vpd, vpd_size); 6220 if (unlikely(!rc)) { 6221 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6222 "0377 Error %d parsing vpd. " 6223 "Using defaults.\n", rc); 6224 rc = 0; 6225 } 6226 kfree(vpd); 6227 6228 /* Save information as VPD data */ 6229 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; 6230 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; 6231 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; 6232 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, 6233 &mqe->un.read_rev); 6234 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, 6235 &mqe->un.read_rev); 6236 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, 6237 &mqe->un.read_rev); 6238 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, 6239 &mqe->un.read_rev); 6240 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; 6241 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); 6242 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; 6243 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); 6244 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; 6245 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); 6246 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6247 "(%d):0380 READ_REV Status x%x " 6248 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", 6249 mboxq->vport ? mboxq->vport->vpi : 0, 6250 bf_get(lpfc_mqe_status, mqe), 6251 phba->vpd.rev.opFwName, 6252 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, 6253 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); 6254 6255 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ 6256 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3); 6257 if (phba->pport->cfg_lun_queue_depth > rc) { 6258 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6259 "3362 LUN queue depth changed from %d to %d\n", 6260 phba->pport->cfg_lun_queue_depth, rc); 6261 phba->pport->cfg_lun_queue_depth = rc; 6262 } 6263 6264 6265 /* 6266 * Discover the port's supported feature set and match it against the 6267 * hosts requests. 6268 */ 6269 lpfc_request_features(phba, mboxq); 6270 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6271 if (unlikely(rc)) { 6272 rc = -EIO; 6273 goto out_free_mbox; 6274 } 6275 6276 /* 6277 * The port must support FCP initiator mode as this is the 6278 * only mode running in the host. 6279 */ 6280 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { 6281 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 6282 "0378 No support for fcpi mode.\n"); 6283 ftr_rsp++; 6284 } 6285 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) 6286 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; 6287 else 6288 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED; 6289 /* 6290 * If the port cannot support the host's requested features 6291 * then turn off the global config parameters to disable the 6292 * feature in the driver. This is not a fatal error. 
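 * Block guard (BG) and NPIV are the features negotiated here.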
6293 */ 6294 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 6295 if (phba->cfg_enable_bg) { 6296 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)) 6297 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 6298 else 6299 ftr_rsp++; 6300 } 6301 6302 if (phba->max_vpi && phba->cfg_enable_npiv && 6303 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 6304 ftr_rsp++; 6305 6306 if (ftr_rsp) { 6307 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 6308 "0379 Feature Mismatch Data: x%08x %08x " 6309 "x%x x%x x%x\n", mqe->un.req_ftrs.word2, 6310 mqe->un.req_ftrs.word3, phba->cfg_enable_bg, 6311 phba->cfg_enable_npiv, phba->max_vpi); 6312 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 6313 phba->cfg_enable_bg = 0; 6314 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 6315 phba->cfg_enable_npiv = 0; 6316 } 6317 6318 /* These SLI3 features are assumed in SLI4 */ 6319 spin_lock_irq(&phba->hbalock); 6320 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 6321 spin_unlock_irq(&phba->hbalock); 6322 6323 /* 6324 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent 6325 * calls depends on these resources to complete port setup. 6326 */ 6327 rc = lpfc_sli4_alloc_resource_identifiers(phba); 6328 if (rc) { 6329 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6330 "2920 Failed to alloc Resource IDs " 6331 "rc = x%x\n", rc); 6332 goto out_free_mbox; 6333 } 6334 6335 /* Read the port's service parameters. */ 6336 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 6337 if (rc) { 6338 phba->link_state = LPFC_HBA_ERROR; 6339 rc = -ENOMEM; 6340 goto out_free_mbox; 6341 } 6342 6343 mboxq->vport = vport; 6344 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6345 mp = (struct lpfc_dmabuf *) mboxq->context1; 6346 if (rc == MBX_SUCCESS) { 6347 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); 6348 rc = 0; 6349 } 6350 6351 /* 6352 * This memory was allocated by the lpfc_read_sparam routine. Release 6353 * it to the mbuf pool. 6354 */ 6355 lpfc_mbuf_free(phba, mp->virt, mp->phys); 6356 kfree(mp); 6357 mboxq->context1 = NULL; 6358 if (unlikely(rc)) { 6359 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6360 "0382 READ_SPARAM command failed " 6361 "status %d, mbxStatus x%x\n", 6362 rc, bf_get(lpfc_mqe_status, mqe)); 6363 phba->link_state = LPFC_HBA_ERROR; 6364 rc = -EIO; 6365 goto out_free_mbox; 6366 } 6367 6368 lpfc_update_vport_wwn(vport); 6369 6370 /* Update the fc_host data structures with new wwn. 
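The node and port names are exposed through the fc_transport host attributes.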
*/ 6371 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 6372 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 6373 6374 /* update host els and scsi xri-sgl sizes and mappings */ 6375 rc = lpfc_sli4_xri_sgl_update(phba); 6376 if (unlikely(rc)) { 6377 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6378 "1400 Failed to update xri-sgl size and " 6379 "mapping: %d\n", rc); 6380 goto out_free_mbox; 6381 } 6382 6383 /* register the els sgl pool to the port */ 6384 rc = lpfc_sli4_repost_els_sgl_list(phba); 6385 if (unlikely(rc)) { 6386 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6387 "0582 Error %d during els sgl post " 6388 "operation\n", rc); 6389 rc = -ENODEV; 6390 goto out_free_mbox; 6391 } 6392 6393 /* register the allocated scsi sgl pool to the port */ 6394 rc = lpfc_sli4_repost_scsi_sgl_list(phba); 6395 if (unlikely(rc)) { 6396 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6397 "0383 Error %d during scsi sgl post " 6398 "operation\n", rc); 6399 /* Some Scsi buffers were moved to the abort scsi list */ 6400 /* A pci function reset will repost them */ 6401 rc = -ENODEV; 6402 goto out_free_mbox; 6403 } 6404 6405 /* Post the rpi header region to the device. */ 6406 rc = lpfc_sli4_post_all_rpi_hdrs(phba); 6407 if (unlikely(rc)) { 6408 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6409 "0393 Error %d during rpi post operation\n", 6410 rc); 6411 rc = -ENODEV; 6412 goto out_free_mbox; 6413 } 6414 lpfc_sli4_node_prep(phba); 6415 6416 /* Create all the SLI4 queues */ 6417 rc = lpfc_sli4_queue_create(phba); 6418 if (rc) { 6419 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6420 "3089 Failed to allocate queues\n"); 6421 rc = -ENODEV; 6422 goto out_stop_timers; 6423 } 6424 /* Set up all the queues to the device */ 6425 rc = lpfc_sli4_queue_setup(phba); 6426 if (unlikely(rc)) { 6427 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6428 "0381 Error %d during queue setup.\n ", rc); 6429 goto out_destroy_queue; 6430 } 6431 6432 /* Arm the CQs and then EQs on device */ 6433 lpfc_sli4_arm_cqeq_intr(phba); 6434 6435 /* Indicate device interrupt mode */ 6436 phba->sli4_hba.intr_enable = 1; 6437 6438 /* Allow asynchronous mailbox command to go through */ 6439 spin_lock_irq(&phba->hbalock); 6440 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 6441 spin_unlock_irq(&phba->hbalock); 6442 6443 /* Post receive buffers to the device */ 6444 lpfc_sli4_rb_setup(phba); 6445 6446 /* Reset HBA FCF states after HBA reset */ 6447 phba->fcf.fcf_flag = 0; 6448 phba->fcf.current_rec.flag = 0; 6449 6450 /* Start the ELS watchdog timer */ 6451 mod_timer(&vport->els_tmofunc, 6452 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2))); 6453 6454 /* Start heart beat timer */ 6455 mod_timer(&phba->hb_tmofunc, 6456 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 6457 phba->hb_outstanding = 0; 6458 phba->last_completion_time = jiffies; 6459 6460 /* Start error attention (ERATT) polling timer */ 6461 mod_timer(&phba->eratt_poll, 6462 jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); 6463 6464 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 6465 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 6466 rc = pci_enable_pcie_error_reporting(phba->pcidev); 6467 if (!rc) { 6468 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6469 "2829 This device supports " 6470 "Advanced Error Reporting (AER)\n"); 6471 spin_lock_irq(&phba->hbalock); 6472 phba->hba_flag |= HBA_AER_ENABLED; 6473 spin_unlock_irq(&phba->hbalock); 6474 } else { 
6475 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6476 "2830 This device does not support " 6477 "Advanced Error Reporting (AER)\n"); 6478 phba->cfg_aer_support = 0; 6479 } 6480 rc = 0; 6481 } 6482 6483 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 6484 /* 6485 * The FC Port needs to register FCFI (index 0) 6486 */ 6487 lpfc_reg_fcfi(phba, mboxq); 6488 mboxq->vport = phba->pport; 6489 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6490 if (rc != MBX_SUCCESS) 6491 goto out_unset_queue; 6492 rc = 0; 6493 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, 6494 &mboxq->u.mqe.un.reg_fcfi); 6495 6496 /* Check if the port is configured to be disabled */ 6497 lpfc_sli_read_link_ste(phba); 6498 } 6499 6500 /* 6501 * The port is ready, set the host's link state to LINK_DOWN 6502 * in preparation for link interrupts. 6503 */ 6504 spin_lock_irq(&phba->hbalock); 6505 phba->link_state = LPFC_LINK_DOWN; 6506 spin_unlock_irq(&phba->hbalock); 6507 if (!(phba->hba_flag & HBA_FCOE_MODE) && 6508 (phba->hba_flag & LINK_DISABLED)) { 6509 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 6510 "3103 Adapter Link is disabled.\n"); 6511 lpfc_down_link(phba, mboxq); 6512 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6513 if (rc != MBX_SUCCESS) { 6514 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 6515 "3104 Adapter failed to issue " 6516 "DOWN_LINK mbox cmd, rc:x%x\n", rc); 6517 goto out_unset_queue; 6518 } 6519 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 6520 /* don't perform init_link on SLI4 FC port loopback test */ 6521 if (!(phba->link_flag & LS_LOOPBACK_MODE)) { 6522 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 6523 if (rc) 6524 goto out_unset_queue; 6525 } 6526 } 6527 mempool_free(mboxq, phba->mbox_mem_pool); 6528 return rc; 6529 out_unset_queue: 6530 /* Unset all the queues set up in this routine when error out */ 6531 lpfc_sli4_queue_unset(phba); 6532 out_destroy_queue: 6533 lpfc_sli4_queue_destroy(phba); 6534 out_stop_timers: 6535 lpfc_stop_hba_timers(phba); 6536 out_free_mbox: 6537 mempool_free(mboxq, phba->mbox_mem_pool); 6538 return rc; 6539 } 6540 6541 /** 6542 * lpfc_mbox_timeout - Timeout call back function for mbox timer 6543 * @ptr: context object - pointer to hba structure. 6544 * 6545 * This is the callback function for mailbox timer. The mailbox 6546 * timer is armed when a new mailbox command is issued and the timer 6547 * is deleted when the mailbox complete. The function is called by 6548 * the kernel timer code when a mailbox does not complete within 6549 * expected time. This function wakes up the worker thread to 6550 * process the mailbox timeout and returns. All the processing is 6551 * done by the worker thread function lpfc_mbox_timeout_handler. 6552 **/ 6553 void 6554 lpfc_mbox_timeout(unsigned long ptr) 6555 { 6556 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 6557 unsigned long iflag; 6558 uint32_t tmo_posted; 6559 6560 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 6561 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; 6562 if (!tmo_posted) 6563 phba->pport->work_port_events |= WORKER_MBOX_TMO; 6564 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 6565 6566 if (!tmo_posted) 6567 lpfc_worker_wake_up(phba); 6568 return; 6569 } 6570 6571 /** 6572 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions 6573 * are pending 6574 * @phba: Pointer to HBA context object. 6575 * 6576 * This function checks if any mailbox completions are present on the mailbox 6577 * completion queue. 
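 * Returns true if at least one completed, non-asynchronous mailbox CQE is
 * pending on the queue.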
6578 **/ 6579 bool 6580 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba) 6581 { 6582 6583 uint32_t idx; 6584 struct lpfc_queue *mcq; 6585 struct lpfc_mcqe *mcqe; 6586 bool pending_completions = false; 6587 6588 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 6589 return false; 6590 6591 /* Check for completions on mailbox completion queue */ 6592 6593 mcq = phba->sli4_hba.mbx_cq; 6594 idx = mcq->hba_index; 6595 while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) { 6596 mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe; 6597 if (bf_get_le32(lpfc_trailer_completed, mcqe) && 6598 (!bf_get_le32(lpfc_trailer_async, mcqe))) { 6599 pending_completions = true; 6600 break; 6601 } 6602 idx = (idx + 1) % mcq->entry_count; 6603 if (mcq->hba_index == idx) 6604 break; 6605 } 6606 return pending_completions; 6607 6608 } 6609 6610 /** 6611 * lpfc_sli4_process_missed_mbox_completions - process mbox completions 6612 * that were missed. 6613 * @phba: Pointer to HBA context object. 6614 * 6615 * For sli4, it is possible to miss an interrupt. As such mbox completions 6616 * maybe missed causing erroneous mailbox timeouts to occur. This function 6617 * checks to see if mbox completions are on the mailbox completion queue 6618 * and will process all the completions associated with the eq for the 6619 * mailbox completion queue. 6620 **/ 6621 bool 6622 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) 6623 { 6624 6625 uint32_t eqidx; 6626 struct lpfc_queue *fpeq = NULL; 6627 struct lpfc_eqe *eqe; 6628 bool mbox_pending; 6629 6630 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 6631 return false; 6632 6633 /* Find the eq associated with the mcq */ 6634 6635 if (phba->sli4_hba.hba_eq) 6636 for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++) 6637 if (phba->sli4_hba.hba_eq[eqidx]->queue_id == 6638 phba->sli4_hba.mbx_cq->assoc_qid) { 6639 fpeq = phba->sli4_hba.hba_eq[eqidx]; 6640 break; 6641 } 6642 if (!fpeq) 6643 return false; 6644 6645 /* Turn off interrupts from this EQ */ 6646 6647 lpfc_sli4_eq_clr_intr(fpeq); 6648 6649 /* Check to see if a mbox completion is pending */ 6650 6651 mbox_pending = lpfc_sli4_mbox_completions_pending(phba); 6652 6653 /* 6654 * If a mbox completion is pending, process all the events on EQ 6655 * associated with the mbox completion queue (this could include 6656 * mailbox commands, async events, els commands, receive queue data 6657 * and fcp commands) 6658 */ 6659 6660 if (mbox_pending) 6661 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 6662 lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx); 6663 fpeq->EQ_processed++; 6664 } 6665 6666 /* Always clear and re-arm the EQ */ 6667 6668 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); 6669 6670 return mbox_pending; 6671 6672 } 6673 6674 /** 6675 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout 6676 * @phba: Pointer to HBA context object. 6677 * 6678 * This function is called from worker thread when a mailbox command times out. 6679 * The caller is not required to hold any locks. This function will reset the 6680 * HBA and recover all the pending commands. 6681 **/ 6682 void 6683 lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 6684 { 6685 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 6686 MAILBOX_t *mb = &pmbox->u.mb; 6687 struct lpfc_sli *psli = &phba->sli; 6688 struct lpfc_sli_ring *pring; 6689 6690 /* If the mailbox completed, process the completion and return */ 6691 if (lpfc_sli4_process_missed_mbox_completions(phba)) 6692 return; 6693 6694 /* Check the pmbox pointer first. 
There is a race condition 6695 * between the mbox timeout handler getting executed in the 6696 * worklist and the mailbox actually completing. When this 6697 * race condition occurs, the mbox_active will be NULL. 6698 */ 6699 spin_lock_irq(&phba->hbalock); 6700 if (pmbox == NULL) { 6701 lpfc_printf_log(phba, KERN_WARNING, 6702 LOG_MBOX | LOG_SLI, 6703 "0353 Active Mailbox cleared - mailbox timeout " 6704 "exiting\n"); 6705 spin_unlock_irq(&phba->hbalock); 6706 return; 6707 } 6708 6709 /* Mbox cmd <mbxCommand> timeout */ 6710 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6711 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 6712 mb->mbxCommand, 6713 phba->pport->port_state, 6714 phba->sli.sli_flag, 6715 phba->sli.mbox_active); 6716 spin_unlock_irq(&phba->hbalock); 6717 6718 /* Setting state unknown so lpfc_sli_abort_iocb_ring 6719 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 6720 * it to fail all outstanding SCSI IO. 6721 */ 6722 spin_lock_irq(&phba->pport->work_port_lock); 6723 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 6724 spin_unlock_irq(&phba->pport->work_port_lock); 6725 spin_lock_irq(&phba->hbalock); 6726 phba->link_state = LPFC_LINK_UNKNOWN; 6727 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 6728 spin_unlock_irq(&phba->hbalock); 6729 6730 pring = &psli->ring[psli->fcp_ring]; 6731 lpfc_sli_abort_iocb_ring(phba, pring); 6732 6733 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6734 "0345 Resetting board due to mailbox timeout\n"); 6735 6736 /* Reset the HBA device */ 6737 lpfc_reset_hba(phba); 6738 } 6739 6740 /** 6741 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware 6742 * @phba: Pointer to HBA context object. 6743 * @pmbox: Pointer to mailbox object. 6744 * @flag: Flag indicating how the mailbox need to be processed. 6745 * 6746 * This function is called by discovery code and HBA management code 6747 * to submit a mailbox command to firmware with SLI-3 interface spec. This 6748 * function gets the hbalock to protect the data structures. 6749 * The mailbox command can be submitted in polling mode, in which case 6750 * this function will wait in a polling loop for the completion of the 6751 * mailbox. 6752 * If the mailbox is submitted in no_wait mode (not polling) the 6753 * function will submit the command and returns immediately without waiting 6754 * for the mailbox completion. The no_wait is supported only when HBA 6755 * is in SLI2/SLI3 mode - interrupts are enabled. 6756 * The SLI interface allows only one mailbox pending at a time. If the 6757 * mailbox is issued in polling mode and there is already a mailbox 6758 * pending, then the function will return an error. If the mailbox is issued 6759 * in NO_WAIT mode and there is a mailbox pending already, the function 6760 * will return MBX_BUSY after queuing the mailbox into mailbox queue. 6761 * The sli layer owns the mailbox object until the completion of mailbox 6762 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other 6763 * return codes the caller owns the mailbox command after the return of 6764 * the function. 
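 * Possible return values are MBX_SUCCESS, MBX_BUSY, MBX_NOT_FINISHED, or,
 * for polled commands, the mailbox status reported by the adapter.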
6765 **/ 6766 static int 6767 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, 6768 uint32_t flag) 6769 { 6770 MAILBOX_t *mbx; 6771 struct lpfc_sli *psli = &phba->sli; 6772 uint32_t status, evtctr; 6773 uint32_t ha_copy, hc_copy; 6774 int i; 6775 unsigned long timeout; 6776 unsigned long drvr_flag = 0; 6777 uint32_t word0, ldata; 6778 void __iomem *to_slim; 6779 int processing_queue = 0; 6780 6781 spin_lock_irqsave(&phba->hbalock, drvr_flag); 6782 if (!pmbox) { 6783 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6784 /* processing mbox queue from intr_handler */ 6785 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 6786 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6787 return MBX_SUCCESS; 6788 } 6789 processing_queue = 1; 6790 pmbox = lpfc_mbox_get(phba); 6791 if (!pmbox) { 6792 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6793 return MBX_SUCCESS; 6794 } 6795 } 6796 6797 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && 6798 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { 6799 if(!pmbox->vport) { 6800 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6801 lpfc_printf_log(phba, KERN_ERR, 6802 LOG_MBOX | LOG_VPORT, 6803 "1806 Mbox x%x failed. No vport\n", 6804 pmbox->u.mb.mbxCommand); 6805 dump_stack(); 6806 goto out_not_finished; 6807 } 6808 } 6809 6810 /* If the PCI channel is in offline state, do not post mbox. */ 6811 if (unlikely(pci_channel_offline(phba->pcidev))) { 6812 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6813 goto out_not_finished; 6814 } 6815 6816 /* If HBA has a deferred error attention, fail the iocb. */ 6817 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 6818 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6819 goto out_not_finished; 6820 } 6821 6822 psli = &phba->sli; 6823 6824 mbx = &pmbox->u.mb; 6825 status = MBX_SUCCESS; 6826 6827 if (phba->link_state == LPFC_HBA_ERROR) { 6828 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6829 6830 /* Mbox command <mbxCommand> cannot issue */ 6831 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6832 "(%d):0311 Mailbox command x%x cannot " 6833 "issue Data: x%x x%x\n", 6834 pmbox->vport ? pmbox->vport->vpi : 0, 6835 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 6836 goto out_not_finished; 6837 } 6838 6839 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { 6840 if (lpfc_readl(phba->HCregaddr, &hc_copy) || 6841 !(hc_copy & HC_MBINT_ENA)) { 6842 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6843 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6844 "(%d):2528 Mailbox command x%x cannot " 6845 "issue Data: x%x x%x\n", 6846 pmbox->vport ? pmbox->vport->vpi : 0, 6847 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 6848 goto out_not_finished; 6849 } 6850 } 6851 6852 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 6853 /* Polling for a mbox command when another one is already active 6854 * is not allowed in SLI. Also, the driver must have established 6855 * SLI2 mode to queue and process multiple mbox commands. 6856 */ 6857 6858 if (flag & MBX_POLL) { 6859 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6860 6861 /* Mbox command <mbxCommand> cannot issue */ 6862 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6863 "(%d):2529 Mailbox command x%x " 6864 "cannot issue Data: x%x x%x\n", 6865 pmbox->vport ? 
pmbox->vport->vpi : 0, 6866 pmbox->u.mb.mbxCommand, 6867 psli->sli_flag, flag); 6868 goto out_not_finished; 6869 } 6870 6871 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { 6872 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6873 /* Mbox command <mbxCommand> cannot issue */ 6874 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6875 "(%d):2530 Mailbox command x%x " 6876 "cannot issue Data: x%x x%x\n", 6877 pmbox->vport ? pmbox->vport->vpi : 0, 6878 pmbox->u.mb.mbxCommand, 6879 psli->sli_flag, flag); 6880 goto out_not_finished; 6881 } 6882 6883 /* Another mailbox command is still being processed, queue this 6884 * command to be processed later. 6885 */ 6886 lpfc_mbox_put(phba, pmbox); 6887 6888 /* Mbox cmd issue - BUSY */ 6889 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6890 "(%d):0308 Mbox cmd issue - BUSY Data: " 6891 "x%x x%x x%x x%x\n", 6892 pmbox->vport ? pmbox->vport->vpi : 0xffffff, 6893 mbx->mbxCommand, phba->pport->port_state, 6894 psli->sli_flag, flag); 6895 6896 psli->slistat.mbox_busy++; 6897 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6898 6899 if (pmbox->vport) { 6900 lpfc_debugfs_disc_trc(pmbox->vport, 6901 LPFC_DISC_TRC_MBOX_VPORT, 6902 "MBOX Bsy vport: cmd:x%x mb:x%x x%x", 6903 (uint32_t)mbx->mbxCommand, 6904 mbx->un.varWords[0], mbx->un.varWords[1]); 6905 } 6906 else { 6907 lpfc_debugfs_disc_trc(phba->pport, 6908 LPFC_DISC_TRC_MBOX, 6909 "MBOX Bsy: cmd:x%x mb:x%x x%x", 6910 (uint32_t)mbx->mbxCommand, 6911 mbx->un.varWords[0], mbx->un.varWords[1]); 6912 } 6913 6914 return MBX_BUSY; 6915 } 6916 6917 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 6918 6919 /* If we are not polling, we MUST be in SLI2 mode */ 6920 if (flag != MBX_POLL) { 6921 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && 6922 (mbx->mbxCommand != MBX_KILL_BOARD)) { 6923 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6924 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6925 /* Mbox command <mbxCommand> cannot issue */ 6926 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6927 "(%d):2531 Mailbox command x%x " 6928 "cannot issue Data: x%x x%x\n", 6929 pmbox->vport ? pmbox->vport->vpi : 0, 6930 pmbox->u.mb.mbxCommand, 6931 psli->sli_flag, flag); 6932 goto out_not_finished; 6933 } 6934 /* timeout active mbox command */ 6935 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 6936 1000); 6937 mod_timer(&psli->mbox_tmo, jiffies + timeout); 6938 } 6939 6940 /* Mailbox cmd <cmd> issue */ 6941 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6942 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " 6943 "x%x\n", 6944 pmbox->vport ? pmbox->vport->vpi : 0, 6945 mbx->mbxCommand, phba->pport->port_state, 6946 psli->sli_flag, flag); 6947 6948 if (mbx->mbxCommand != MBX_HEARTBEAT) { 6949 if (pmbox->vport) { 6950 lpfc_debugfs_disc_trc(pmbox->vport, 6951 LPFC_DISC_TRC_MBOX_VPORT, 6952 "MBOX Send vport: cmd:x%x mb:x%x x%x", 6953 (uint32_t)mbx->mbxCommand, 6954 mbx->un.varWords[0], mbx->un.varWords[1]); 6955 } 6956 else { 6957 lpfc_debugfs_disc_trc(phba->pport, 6958 LPFC_DISC_TRC_MBOX, 6959 "MBOX Send: cmd:x%x mb:x%x x%x", 6960 (uint32_t)mbx->mbxCommand, 6961 mbx->un.varWords[0], mbx->un.varWords[1]); 6962 } 6963 } 6964 6965 psli->slistat.mbox_cmd++; 6966 evtctr = psli->slistat.mbox_event; 6967 6968 /* next set own bit for the adapter and copy over command word */ 6969 mbx->mbxOwner = OWN_CHIP; 6970 6971 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 6972 /* Populate mbox extension offset word. 
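The value stored is the byte offset of the extension area from the start of the mailbox.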
*/ 6973 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { 6974 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 6975 = (uint8_t *)phba->mbox_ext 6976 - (uint8_t *)phba->mbox; 6977 } 6978 6979 /* Copy the mailbox extension data */ 6980 if (pmbox->in_ext_byte_len && pmbox->context2) { 6981 lpfc_sli_pcimem_bcopy(pmbox->context2, 6982 (uint8_t *)phba->mbox_ext, 6983 pmbox->in_ext_byte_len); 6984 } 6985 /* Copy command data to host SLIM area */ 6986 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); 6987 } else { 6988 /* Populate mbox extension offset word. */ 6989 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) 6990 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 6991 = MAILBOX_HBA_EXT_OFFSET; 6992 6993 /* Copy the mailbox extension data */ 6994 if (pmbox->in_ext_byte_len && pmbox->context2) { 6995 lpfc_memcpy_to_slim(phba->MBslimaddr + 6996 MAILBOX_HBA_EXT_OFFSET, 6997 pmbox->context2, pmbox->in_ext_byte_len); 6998 6999 } 7000 if (mbx->mbxCommand == MBX_CONFIG_PORT) { 7001 /* copy command data into host mbox for cmpl */ 7002 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); 7003 } 7004 7005 /* First copy mbox command data to HBA SLIM, skip past first 7006 word */ 7007 to_slim = phba->MBslimaddr + sizeof (uint32_t); 7008 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0], 7009 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 7010 7011 /* Next copy over first word, with mbxOwner set */ 7012 ldata = *((uint32_t *)mbx); 7013 to_slim = phba->MBslimaddr; 7014 writel(ldata, to_slim); 7015 readl(to_slim); /* flush */ 7016 7017 if (mbx->mbxCommand == MBX_CONFIG_PORT) { 7018 /* switch over to host mailbox */ 7019 psli->sli_flag |= LPFC_SLI_ACTIVE; 7020 } 7021 } 7022 7023 wmb(); 7024 7025 switch (flag) { 7026 case MBX_NOWAIT: 7027 /* Set up reference to mailbox command */ 7028 psli->mbox_active = pmbox; 7029 /* Interrupt board to do it */ 7030 writel(CA_MBATT, phba->CAregaddr); 7031 readl(phba->CAregaddr); /* flush */ 7032 /* Don't wait for it to finish, just return */ 7033 break; 7034 7035 case MBX_POLL: 7036 /* Set up null reference to mailbox command */ 7037 psli->mbox_active = NULL; 7038 /* Interrupt board to do it */ 7039 writel(CA_MBATT, phba->CAregaddr); 7040 readl(phba->CAregaddr); /* flush */ 7041 7042 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 7043 /* First read mbox status word */ 7044 word0 = *((uint32_t *)phba->mbox); 7045 word0 = le32_to_cpu(word0); 7046 } else { 7047 /* First read mbox status word */ 7048 if (lpfc_readl(phba->MBslimaddr, &word0)) { 7049 spin_unlock_irqrestore(&phba->hbalock, 7050 drvr_flag); 7051 goto out_not_finished; 7052 } 7053 } 7054 7055 /* Read the HBA Host Attention Register */ 7056 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 7057 spin_unlock_irqrestore(&phba->hbalock, 7058 drvr_flag); 7059 goto out_not_finished; 7060 } 7061 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 7062 1000) + jiffies; 7063 i = 0; 7064 /* Wait for command to complete */ 7065 while (((word0 & OWN_CHIP) == OWN_CHIP) || 7066 (!(ha_copy & HA_MBATT) && 7067 (phba->link_state > LPFC_WARM_START))) { 7068 if (time_after(jiffies, timeout)) { 7069 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7070 spin_unlock_irqrestore(&phba->hbalock, 7071 drvr_flag); 7072 goto out_not_finished; 7073 } 7074 7075 /* Check if we took a mbox interrupt while we were 7076 polling */ 7077 if (((word0 & OWN_CHIP) != OWN_CHIP) 7078 && (evtctr != psli->slistat.mbox_event)) 7079 break; 7080 7081 if (i++ > 10) { 7082 spin_unlock_irqrestore(&phba->hbalock, 7083 drvr_flag); 7084 msleep(1); 7085 
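/* Reacquire the lock dropped around the sleep before re-reading the mailbox state. */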
spin_lock_irqsave(&phba->hbalock, drvr_flag); 7086 } 7087 7088 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 7089 /* First copy command data */ 7090 word0 = *((uint32_t *)phba->mbox); 7091 word0 = le32_to_cpu(word0); 7092 if (mbx->mbxCommand == MBX_CONFIG_PORT) { 7093 MAILBOX_t *slimmb; 7094 uint32_t slimword0; 7095 /* Check real SLIM for any errors */ 7096 slimword0 = readl(phba->MBslimaddr); 7097 slimmb = (MAILBOX_t *) & slimword0; 7098 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 7099 && slimmb->mbxStatus) { 7100 psli->sli_flag &= 7101 ~LPFC_SLI_ACTIVE; 7102 word0 = slimword0; 7103 } 7104 } 7105 } else { 7106 /* First copy command data */ 7107 word0 = readl(phba->MBslimaddr); 7108 } 7109 /* Read the HBA Host Attention Register */ 7110 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 7111 spin_unlock_irqrestore(&phba->hbalock, 7112 drvr_flag); 7113 goto out_not_finished; 7114 } 7115 } 7116 7117 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 7118 /* copy results back to user */ 7119 lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE); 7120 /* Copy the mailbox extension data */ 7121 if (pmbox->out_ext_byte_len && pmbox->context2) { 7122 lpfc_sli_pcimem_bcopy(phba->mbox_ext, 7123 pmbox->context2, 7124 pmbox->out_ext_byte_len); 7125 } 7126 } else { 7127 /* First copy command data */ 7128 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr, 7129 MAILBOX_CMD_SIZE); 7130 /* Copy the mailbox extension data */ 7131 if (pmbox->out_ext_byte_len && pmbox->context2) { 7132 lpfc_memcpy_from_slim(pmbox->context2, 7133 phba->MBslimaddr + 7134 MAILBOX_HBA_EXT_OFFSET, 7135 pmbox->out_ext_byte_len); 7136 } 7137 } 7138 7139 writel(HA_MBATT, phba->HAregaddr); 7140 readl(phba->HAregaddr); /* flush */ 7141 7142 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7143 status = mbx->mbxStatus; 7144 } 7145 7146 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7147 return status; 7148 7149 out_not_finished: 7150 if (processing_queue) { 7151 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; 7152 lpfc_mbox_cmpl_put(phba, pmbox); 7153 } 7154 return MBX_NOT_FINISHED; 7155 } 7156 7157 /** 7158 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command 7159 * @phba: Pointer to HBA context object. 7160 * 7161 * The function blocks the posting of SLI4 asynchronous mailbox commands from 7162 * the driver internal pending mailbox queue. It will then try to wait out the 7163 * possible outstanding mailbox command before return. 7164 * 7165 * Returns: 7166 * 0 - the outstanding mailbox command completed; otherwise, the wait for 7167 * the outstanding mailbox command timed out. 7168 **/ 7169 static int 7170 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) 7171 { 7172 struct lpfc_sli *psli = &phba->sli; 7173 int rc = 0; 7174 unsigned long timeout = 0; 7175 7176 /* Mark the asynchronous mailbox command posting as blocked */ 7177 spin_lock_irq(&phba->hbalock); 7178 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 7179 /* Determine how long we might wait for the active mailbox 7180 * command to be gracefully completed by firmware. 
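	 * For example, a command whose lpfc_mbox_tmo_val() is 30 seconds
	 * would produce a deadline of jiffies + msecs_to_jiffies(30 * 1000)
	 * in the check below (illustrative value only).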
7181 	 */
7182 	if (phba->sli.mbox_active)
7183 		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
7184 						phba->sli.mbox_active) *
7185 						1000) + jiffies;
7186 	spin_unlock_irq(&phba->hbalock);
7187 
7188 	/* Make sure the mailbox is really active */
7189 	if (timeout)
7190 		lpfc_sli4_process_missed_mbox_completions(phba);
7191 
7192 	/* Wait for the outstanding mailbox command to complete */
7193 	while (phba->sli.mbox_active) {
7194 		/* Check active mailbox complete status every 2ms */
7195 		msleep(2);
7196 		if (time_after(jiffies, timeout)) {
7197 			/* Timeout, mark the outstanding cmd not complete */
7198 			rc = 1;
7199 			break;
7200 		}
7201 	}
7202 
7203 	/* Could not cleanly block async mailbox command, fail it */
7204 	if (rc) {
7205 		spin_lock_irq(&phba->hbalock);
7206 		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7207 		spin_unlock_irq(&phba->hbalock);
7208 	}
7209 	return rc;
7210 }
7211 
7212 /**
7213  * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
7214  * @phba: Pointer to HBA context object.
7215  *
7216  * The function unblocks and resumes posting of SLI4 asynchronous mailbox
7217  * commands from the driver internal pending mailbox queue. It makes sure
7218  * that there is no outstanding mailbox command before resuming posting
7219  * asynchronous mailbox commands. If, for any reason, there is an outstanding
7220  * mailbox command, it will try to wait it out before resuming asynchronous
7221  * mailbox command posting.
7222  **/
7223 static void
7224 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
7225 {
7226 	struct lpfc_sli *psli = &phba->sli;
7227 
7228 	spin_lock_irq(&phba->hbalock);
7229 	if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7230 		/* Asynchronous mailbox posting is not blocked, do nothing */
7231 		spin_unlock_irq(&phba->hbalock);
7232 		return;
7233 	}
7234 
7235 	/* The outstanding synchronous mailbox command is guaranteed to be
7236 	 * done, successfully or by timeout; after timing out, the outstanding
7237 	 * mailbox command is always removed, so just unblock posting of async
7238 	 * mailbox commands and resume.
7239 	 */
7240 	psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7241 	spin_unlock_irq(&phba->hbalock);
7242 
7243 	/* wake up worker thread to post asynchronous mailbox command */
7244 	lpfc_worker_wake_up(phba);
7245 }
7246 
7247 /**
7248  * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
7249  * @phba: Pointer to HBA context object.
7250  * @mboxq: Pointer to mailbox object.
7251  *
7252  * The function waits for the bootstrap mailbox register ready bit from the
7253  * port for twice the regular mailbox command timeout value.
7254  *
7255  * Returns:
7256  *	0 - no timeout on waiting for bootstrap mailbox register ready.
7257  *	MBXERR_ERROR - wait for bootstrap mailbox register timed out.
7258  **/
7259 static int
7260 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7261 {
7262 	uint32_t db_ready;
7263 	unsigned long timeout;
7264 	struct lpfc_register bmbx_reg;
7265 
7266 	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
7267 				   * 1000) + jiffies;
7268 
7269 	do {
7270 		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
7271 		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
7272 		if (!db_ready)
7273 			msleep(2);
7274 
7275 		if (time_after(jiffies, timeout))
7276 			return MBXERR_ERROR;
7277 	} while (!db_ready);
7278 
7279 	return 0;
7280 }
7281 
7282 /**
7283  * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
7284  * @phba: Pointer to HBA context object.
7285  * @mboxq: Pointer to mailbox object.
7286  *
7287  * The function posts a mailbox to the port.
The mailbox is expected 7287 * to be comletely filled in and ready for the port to operate on it. 7288 * This routine executes a synchronous completion operation on the 7289 * mailbox by polling for its completion. 7290 * 7291 * The caller must not be holding any locks when calling this routine. 7292 * 7293 * Returns: 7294 * MBX_SUCCESS - mailbox posted successfully 7295 * Any of the MBX error values. 7296 **/ 7297 static int 7298 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 7299 { 7300 int rc = MBX_SUCCESS; 7301 unsigned long iflag; 7302 uint32_t mcqe_status; 7303 uint32_t mbx_cmnd; 7304 struct lpfc_sli *psli = &phba->sli; 7305 struct lpfc_mqe *mb = &mboxq->u.mqe; 7306 struct lpfc_bmbx_create *mbox_rgn; 7307 struct dma_address *dma_address; 7308 7309 /* 7310 * Only one mailbox can be active to the bootstrap mailbox region 7311 * at a time and there is no queueing provided. 7312 */ 7313 spin_lock_irqsave(&phba->hbalock, iflag); 7314 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 7315 spin_unlock_irqrestore(&phba->hbalock, iflag); 7316 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7317 "(%d):2532 Mailbox command x%x (x%x/x%x) " 7318 "cannot issue Data: x%x x%x\n", 7319 mboxq->vport ? mboxq->vport->vpi : 0, 7320 mboxq->u.mb.mbxCommand, 7321 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7322 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7323 psli->sli_flag, MBX_POLL); 7324 return MBXERR_ERROR; 7325 } 7326 /* The server grabs the token and owns it until release */ 7327 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 7328 phba->sli.mbox_active = mboxq; 7329 spin_unlock_irqrestore(&phba->hbalock, iflag); 7330 7331 /* wait for bootstrap mbox register for readyness */ 7332 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 7333 if (rc) 7334 goto exit; 7335 7336 /* 7337 * Initialize the bootstrap memory region to avoid stale data areas 7338 * in the mailbox post. Then copy the caller's mailbox contents to 7339 * the bmbx mailbox region. 7340 */ 7341 mbx_cmnd = bf_get(lpfc_mqe_command, mb); 7342 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create)); 7343 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt, 7344 sizeof(struct lpfc_mqe)); 7345 7346 /* Post the high mailbox dma address to the port and wait for ready. */ 7347 dma_address = &phba->sli4_hba.bmbx.dma_address; 7348 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); 7349 7350 /* wait for bootstrap mbox register for hi-address write done */ 7351 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 7352 if (rc) 7353 goto exit; 7354 7355 /* Post the low mailbox dma address to the port. */ 7356 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); 7357 7358 /* wait for bootstrap mbox register for low address write done */ 7359 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 7360 if (rc) 7361 goto exit; 7362 7363 /* 7364 * Read the CQ to ensure the mailbox has completed. 7365 * If so, update the mailbox status so that the upper layers 7366 * can complete the request normally. 7367 */ 7368 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, 7369 sizeof(struct lpfc_mqe)); 7370 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; 7371 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, 7372 sizeof(struct lpfc_mcqe)); 7373 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); 7374 /* 7375 * When the CQE status indicates a failure and the mailbox status 7376 * indicates success then copy the CQE status into the mailbox status 7377 * (and prefix it with x4000). 
7378 */ 7379 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 7380 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS) 7381 bf_set(lpfc_mqe_status, mb, 7382 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 7383 rc = MBXERR_ERROR; 7384 } else 7385 lpfc_sli4_swap_str(phba, mboxq); 7386 7387 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7388 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x " 7389 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" 7390 " x%x x%x CQ: x%x x%x x%x x%x\n", 7391 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 7392 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7393 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7394 bf_get(lpfc_mqe_status, mb), 7395 mb->un.mb_words[0], mb->un.mb_words[1], 7396 mb->un.mb_words[2], mb->un.mb_words[3], 7397 mb->un.mb_words[4], mb->un.mb_words[5], 7398 mb->un.mb_words[6], mb->un.mb_words[7], 7399 mb->un.mb_words[8], mb->un.mb_words[9], 7400 mb->un.mb_words[10], mb->un.mb_words[11], 7401 mb->un.mb_words[12], mboxq->mcqe.word0, 7402 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 7403 mboxq->mcqe.trailer); 7404 exit: 7405 /* We are holding the token, no needed for lock when release */ 7406 spin_lock_irqsave(&phba->hbalock, iflag); 7407 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7408 phba->sli.mbox_active = NULL; 7409 spin_unlock_irqrestore(&phba->hbalock, iflag); 7410 return rc; 7411 } 7412 7413 /** 7414 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware 7415 * @phba: Pointer to HBA context object. 7416 * @pmbox: Pointer to mailbox object. 7417 * @flag: Flag indicating how the mailbox need to be processed. 7418 * 7419 * This function is called by discovery code and HBA management code to submit 7420 * a mailbox command to firmware with SLI-4 interface spec. 7421 * 7422 * Return codes the caller owns the mailbox command after the return of the 7423 * function. 7424 **/ 7425 static int 7426 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 7427 uint32_t flag) 7428 { 7429 struct lpfc_sli *psli = &phba->sli; 7430 unsigned long iflags; 7431 int rc; 7432 7433 /* dump from issue mailbox command if setup */ 7434 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb); 7435 7436 rc = lpfc_mbox_dev_check(phba); 7437 if (unlikely(rc)) { 7438 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7439 "(%d):2544 Mailbox command x%x (x%x/x%x) " 7440 "cannot issue Data: x%x x%x\n", 7441 mboxq->vport ? mboxq->vport->vpi : 0, 7442 mboxq->u.mb.mbxCommand, 7443 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7444 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7445 psli->sli_flag, flag); 7446 goto out_not_finished; 7447 } 7448 7449 /* Detect polling mode and jump to a handler */ 7450 if (!phba->sli4_hba.intr_enable) { 7451 if (flag == MBX_POLL) 7452 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 7453 else 7454 rc = -EIO; 7455 if (rc != MBX_SUCCESS) 7456 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7457 "(%d):2541 Mailbox command x%x " 7458 "(x%x/x%x) failure: " 7459 "mqe_sta: x%x mcqe_sta: x%x/x%x " 7460 "Data: x%x x%x\n,", 7461 mboxq->vport ? 
mboxq->vport->vpi : 0, 7462 mboxq->u.mb.mbxCommand, 7463 lpfc_sli_config_mbox_subsys_get(phba, 7464 mboxq), 7465 lpfc_sli_config_mbox_opcode_get(phba, 7466 mboxq), 7467 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 7468 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 7469 bf_get(lpfc_mcqe_ext_status, 7470 &mboxq->mcqe), 7471 psli->sli_flag, flag); 7472 return rc; 7473 } else if (flag == MBX_POLL) { 7474 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7475 "(%d):2542 Try to issue mailbox command " 7476 "x%x (x%x/x%x) synchronously ahead of async" 7477 "mailbox command queue: x%x x%x\n", 7478 mboxq->vport ? mboxq->vport->vpi : 0, 7479 mboxq->u.mb.mbxCommand, 7480 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7481 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7482 psli->sli_flag, flag); 7483 /* Try to block the asynchronous mailbox posting */ 7484 rc = lpfc_sli4_async_mbox_block(phba); 7485 if (!rc) { 7486 /* Successfully blocked, now issue sync mbox cmd */ 7487 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 7488 if (rc != MBX_SUCCESS) 7489 lpfc_printf_log(phba, KERN_WARNING, 7490 LOG_MBOX | LOG_SLI, 7491 "(%d):2597 Sync Mailbox command " 7492 "x%x (x%x/x%x) failure: " 7493 "mqe_sta: x%x mcqe_sta: x%x/x%x " 7494 "Data: x%x x%x\n,", 7495 mboxq->vport ? mboxq->vport->vpi : 0, 7496 mboxq->u.mb.mbxCommand, 7497 lpfc_sli_config_mbox_subsys_get(phba, 7498 mboxq), 7499 lpfc_sli_config_mbox_opcode_get(phba, 7500 mboxq), 7501 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 7502 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 7503 bf_get(lpfc_mcqe_ext_status, 7504 &mboxq->mcqe), 7505 psli->sli_flag, flag); 7506 /* Unblock the async mailbox posting afterward */ 7507 lpfc_sli4_async_mbox_unblock(phba); 7508 } 7509 return rc; 7510 } 7511 7512 /* Now, interrupt mode asynchrous mailbox command */ 7513 rc = lpfc_mbox_cmd_check(phba, mboxq); 7514 if (rc) { 7515 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7516 "(%d):2543 Mailbox command x%x (x%x/x%x) " 7517 "cannot issue Data: x%x x%x\n", 7518 mboxq->vport ? mboxq->vport->vpi : 0, 7519 mboxq->u.mb.mbxCommand, 7520 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7521 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7522 psli->sli_flag, flag); 7523 goto out_not_finished; 7524 } 7525 7526 /* Put the mailbox command to the driver internal FIFO */ 7527 psli->slistat.mbox_busy++; 7528 spin_lock_irqsave(&phba->hbalock, iflags); 7529 lpfc_mbox_put(phba, mboxq); 7530 spin_unlock_irqrestore(&phba->hbalock, iflags); 7531 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7532 "(%d):0354 Mbox cmd issue - Enqueue Data: " 7533 "x%x (x%x/x%x) x%x x%x x%x\n", 7534 mboxq->vport ? mboxq->vport->vpi : 0xffffff, 7535 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 7536 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7537 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7538 phba->pport->port_state, 7539 psli->sli_flag, MBX_NOWAIT); 7540 /* Wake up worker thread to transport mailbox command from head */ 7541 lpfc_worker_wake_up(phba); 7542 7543 return MBX_BUSY; 7544 7545 out_not_finished: 7546 return MBX_NOT_FINISHED; 7547 } 7548 7549 /** 7550 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device 7551 * @phba: Pointer to HBA context object. 7552 * 7553 * This function is called by worker thread to send a mailbox command to 7554 * SLI4 HBA firmware. 
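 *
 * Illustrative call pattern only (a sketch, assuming the pending commands
 * are queued on phba->sli.mboxq by lpfc_mbox_put(); this is not the only
 * possible caller):
 *
 *	if (phba->sli_rev == LPFC_SLI_REV4 &&
 *	    !list_empty(&phba->sli.mboxq))
 *		lpfc_sli4_post_async_mbox(phba);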
7555 * 7556 **/ 7557 int 7558 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) 7559 { 7560 struct lpfc_sli *psli = &phba->sli; 7561 LPFC_MBOXQ_t *mboxq; 7562 int rc = MBX_SUCCESS; 7563 unsigned long iflags; 7564 struct lpfc_mqe *mqe; 7565 uint32_t mbx_cmnd; 7566 7567 /* Check interrupt mode before post async mailbox command */ 7568 if (unlikely(!phba->sli4_hba.intr_enable)) 7569 return MBX_NOT_FINISHED; 7570 7571 /* Check for mailbox command service token */ 7572 spin_lock_irqsave(&phba->hbalock, iflags); 7573 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 7574 spin_unlock_irqrestore(&phba->hbalock, iflags); 7575 return MBX_NOT_FINISHED; 7576 } 7577 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 7578 spin_unlock_irqrestore(&phba->hbalock, iflags); 7579 return MBX_NOT_FINISHED; 7580 } 7581 if (unlikely(phba->sli.mbox_active)) { 7582 spin_unlock_irqrestore(&phba->hbalock, iflags); 7583 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7584 "0384 There is pending active mailbox cmd\n"); 7585 return MBX_NOT_FINISHED; 7586 } 7587 /* Take the mailbox command service token */ 7588 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 7589 7590 /* Get the next mailbox command from head of queue */ 7591 mboxq = lpfc_mbox_get(phba); 7592 7593 /* If no more mailbox command waiting for post, we're done */ 7594 if (!mboxq) { 7595 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7596 spin_unlock_irqrestore(&phba->hbalock, iflags); 7597 return MBX_SUCCESS; 7598 } 7599 phba->sli.mbox_active = mboxq; 7600 spin_unlock_irqrestore(&phba->hbalock, iflags); 7601 7602 /* Check device readiness for posting mailbox command */ 7603 rc = lpfc_mbox_dev_check(phba); 7604 if (unlikely(rc)) 7605 /* Driver clean routine will clean up pending mailbox */ 7606 goto out_not_finished; 7607 7608 /* Prepare the mbox command to be posted */ 7609 mqe = &mboxq->u.mqe; 7610 mbx_cmnd = bf_get(lpfc_mqe_command, mqe); 7611 7612 /* Start timer for the mbox_tmo and log some mailbox post messages */ 7613 mod_timer(&psli->mbox_tmo, (jiffies + 7614 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq)))); 7615 7616 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7617 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " 7618 "x%x x%x\n", 7619 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 7620 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7621 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7622 phba->pport->port_state, psli->sli_flag); 7623 7624 if (mbx_cmnd != MBX_HEARTBEAT) { 7625 if (mboxq->vport) { 7626 lpfc_debugfs_disc_trc(mboxq->vport, 7627 LPFC_DISC_TRC_MBOX_VPORT, 7628 "MBOX Send vport: cmd:x%x mb:x%x x%x", 7629 mbx_cmnd, mqe->un.mb_words[0], 7630 mqe->un.mb_words[1]); 7631 } else { 7632 lpfc_debugfs_disc_trc(phba->pport, 7633 LPFC_DISC_TRC_MBOX, 7634 "MBOX Send: cmd:x%x mb:x%x x%x", 7635 mbx_cmnd, mqe->un.mb_words[0], 7636 mqe->un.mb_words[1]); 7637 } 7638 } 7639 psli->slistat.mbox_cmd++; 7640 7641 /* Post the mailbox command to the port */ 7642 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); 7643 if (rc != MBX_SUCCESS) { 7644 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7645 "(%d):2533 Mailbox command x%x (x%x/x%x) " 7646 "cannot issue Data: x%x x%x\n", 7647 mboxq->vport ? 
mboxq->vport->vpi : 0, 7648 mboxq->u.mb.mbxCommand, 7649 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7650 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7651 psli->sli_flag, MBX_NOWAIT); 7652 goto out_not_finished; 7653 } 7654 7655 return rc; 7656 7657 out_not_finished: 7658 spin_lock_irqsave(&phba->hbalock, iflags); 7659 if (phba->sli.mbox_active) { 7660 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 7661 __lpfc_mbox_cmpl_put(phba, mboxq); 7662 /* Release the token */ 7663 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7664 phba->sli.mbox_active = NULL; 7665 } 7666 spin_unlock_irqrestore(&phba->hbalock, iflags); 7667 7668 return MBX_NOT_FINISHED; 7669 } 7670 7671 /** 7672 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command 7673 * @phba: Pointer to HBA context object. 7674 * @pmbox: Pointer to mailbox object. 7675 * @flag: Flag indicating how the mailbox need to be processed. 7676 * 7677 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from 7678 * the API jump table function pointer from the lpfc_hba struct. 7679 * 7680 * Return codes the caller owns the mailbox command after the return of the 7681 * function. 7682 **/ 7683 int 7684 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 7685 { 7686 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); 7687 } 7688 7689 /** 7690 * lpfc_mbox_api_table_setup - Set up mbox api function jump table 7691 * @phba: The hba struct for which this call is being executed. 7692 * @dev_grp: The HBA PCI-Device group number. 7693 * 7694 * This routine sets up the mbox interface API function jump table in @phba 7695 * struct. 7696 * Returns: 0 - success, -ENODEV - failure. 7697 **/ 7698 int 7699 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 7700 { 7701 7702 switch (dev_grp) { 7703 case LPFC_PCI_DEV_LP: 7704 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; 7705 phba->lpfc_sli_handle_slow_ring_event = 7706 lpfc_sli_handle_slow_ring_event_s3; 7707 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; 7708 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; 7709 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; 7710 break; 7711 case LPFC_PCI_DEV_OC: 7712 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; 7713 phba->lpfc_sli_handle_slow_ring_event = 7714 lpfc_sli_handle_slow_ring_event_s4; 7715 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; 7716 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; 7717 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; 7718 break; 7719 default: 7720 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7721 "1420 Invalid HBA PCI-device group: 0x%x\n", 7722 dev_grp); 7723 return -ENODEV; 7724 break; 7725 } 7726 return 0; 7727 } 7728 7729 /** 7730 * __lpfc_sli_ringtx_put - Add an iocb to the txq 7731 * @phba: Pointer to HBA context object. 7732 * @pring: Pointer to driver SLI ring object. 7733 * @piocb: Pointer to address of newly added command iocb. 7734 * 7735 * This function is called with hbalock held to add a command 7736 * iocb to the txq when SLI layer cannot submit the command iocb 7737 * to the ring. 7738 **/ 7739 void 7740 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7741 struct lpfc_iocbq *piocb) 7742 { 7743 /* Insert the caller's iocb in the txq tail for later processing. */ 7744 list_add_tail(&piocb->list, &pring->txq); 7745 } 7746 7747 /** 7748 * lpfc_sli_next_iocb - Get the next iocb in the txq 7749 * @phba: Pointer to HBA context object. 7750 * @pring: Pointer to driver SLI ring object. 
7751 * @piocb: Pointer to address of newly added command iocb. 7752 * 7753 * This function is called with hbalock held before a new 7754 * iocb is submitted to the firmware. This function checks 7755 * txq to flush the iocbs in txq to Firmware before 7756 * submitting new iocbs to the Firmware. 7757 * If there are iocbs in the txq which need to be submitted 7758 * to firmware, lpfc_sli_next_iocb returns the first element 7759 * of the txq after dequeuing it from txq. 7760 * If there is no iocb in the txq then the function will return 7761 * *piocb and *piocb is set to NULL. Caller needs to check 7762 * *piocb to find if there are more commands in the txq. 7763 **/ 7764 static struct lpfc_iocbq * 7765 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7766 struct lpfc_iocbq **piocb) 7767 { 7768 struct lpfc_iocbq * nextiocb; 7769 7770 nextiocb = lpfc_sli_ringtx_get(phba, pring); 7771 if (!nextiocb) { 7772 nextiocb = *piocb; 7773 *piocb = NULL; 7774 } 7775 7776 return nextiocb; 7777 } 7778 7779 /** 7780 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb 7781 * @phba: Pointer to HBA context object. 7782 * @ring_number: SLI ring number to issue iocb on. 7783 * @piocb: Pointer to command iocb. 7784 * @flag: Flag indicating if this command can be put into txq. 7785 * 7786 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue 7787 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is 7788 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT 7789 * flag is turned on, the function returns IOCB_ERROR. When the link is down, 7790 * this function allows only iocbs for posting buffers. This function finds 7791 * next available slot in the command ring and posts the command to the 7792 * available slot and writes the port attention register to request HBA start 7793 * processing new iocb. If there is no slot available in the ring and 7794 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise 7795 * the function returns IOCB_BUSY. 7796 * 7797 * This function is called with hbalock held. The function will return success 7798 * after it successfully submit the iocb to firmware or after adding to the 7799 * txq. 7800 **/ 7801 static int 7802 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, 7803 struct lpfc_iocbq *piocb, uint32_t flag) 7804 { 7805 struct lpfc_iocbq *nextiocb; 7806 IOCB_t *iocb; 7807 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; 7808 7809 if (piocb->iocb_cmpl && (!piocb->vport) && 7810 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 7811 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 7812 lpfc_printf_log(phba, KERN_ERR, 7813 LOG_SLI | LOG_VPORT, 7814 "1807 IOCB x%x failed. No vport\n", 7815 piocb->iocb.ulpCommand); 7816 dump_stack(); 7817 return IOCB_ERROR; 7818 } 7819 7820 7821 /* If the PCI channel is in offline state, do not post iocbs. */ 7822 if (unlikely(pci_channel_offline(phba->pcidev))) 7823 return IOCB_ERROR; 7824 7825 /* If HBA has a deferred error attention, fail the iocb. */ 7826 if (unlikely(phba->hba_flag & DEFER_ERATT)) 7827 return IOCB_ERROR; 7828 7829 /* 7830 * We should never get an IOCB if we are in a < LINK_DOWN state 7831 */ 7832 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 7833 return IOCB_ERROR; 7834 7835 /* 7836 * Check to see if we are blocking IOCB processing because of a 7837 * outstanding event. 
7838 */ 7839 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) 7840 goto iocb_busy; 7841 7842 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { 7843 /* 7844 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF 7845 * can be issued if the link is not up. 7846 */ 7847 switch (piocb->iocb.ulpCommand) { 7848 case CMD_GEN_REQUEST64_CR: 7849 case CMD_GEN_REQUEST64_CX: 7850 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || 7851 (piocb->iocb.un.genreq64.w5.hcsw.Rctl != 7852 FC_RCTL_DD_UNSOL_CMD) || 7853 (piocb->iocb.un.genreq64.w5.hcsw.Type != 7854 MENLO_TRANSPORT_TYPE)) 7855 7856 goto iocb_busy; 7857 break; 7858 case CMD_QUE_RING_BUF_CN: 7859 case CMD_QUE_RING_BUF64_CN: 7860 /* 7861 * For IOCBs, like QUE_RING_BUF, that have no rsp ring 7862 * completion, iocb_cmpl MUST be 0. 7863 */ 7864 if (piocb->iocb_cmpl) 7865 piocb->iocb_cmpl = NULL; 7866 /*FALLTHROUGH*/ 7867 case CMD_CREATE_XRI_CR: 7868 case CMD_CLOSE_XRI_CN: 7869 case CMD_CLOSE_XRI_CX: 7870 break; 7871 default: 7872 goto iocb_busy; 7873 } 7874 7875 /* 7876 * For FCP commands, we must be in a state where we can process link 7877 * attention events. 7878 */ 7879 } else if (unlikely(pring->ringno == phba->sli.fcp_ring && 7880 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { 7881 goto iocb_busy; 7882 } 7883 7884 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 7885 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 7886 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 7887 7888 if (iocb) 7889 lpfc_sli_update_ring(phba, pring); 7890 else 7891 lpfc_sli_update_full_ring(phba, pring); 7892 7893 if (!piocb) 7894 return IOCB_SUCCESS; 7895 7896 goto out_busy; 7897 7898 iocb_busy: 7899 pring->stats.iocb_cmd_delay++; 7900 7901 out_busy: 7902 7903 if (!(flag & SLI_IOCB_RET_IOCB)) { 7904 __lpfc_sli_ringtx_put(phba, pring, piocb); 7905 return IOCB_SUCCESS; 7906 } 7907 7908 return IOCB_BUSY; 7909 } 7910 7911 /** 7912 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl. 7913 * @phba: Pointer to HBA context object. 7914 * @piocb: Pointer to command iocb. 7915 * @sglq: Pointer to the scatter gather queue object. 7916 * 7917 * This routine converts the bpl or bde that is in the IOCB 7918 * to a sgl list for the sli4 hardware. The physical address 7919 * of the bpl/bde is converted back to a virtual address. 7920 * If the IOCB contains a BPL then the list of BDE's is 7921 * converted to sli4_sge's. If the IOCB contains a single 7922 * BDE then it is converted to a single sli_sge. 7923 * The IOCB is still in cpu endianess so the contents of 7924 * the bpl can be used without byte swapping. 7925 * 7926 * Returns valid XRI = Success, NO_XRI = Failure. 
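 *
 * Minimal usage sketch (mirroring the SLI4 issue path later in this file,
 * after an sglq has been resolved for the iocb):
 *
 *	piocb->sli4_lxritag = sglq->sli4_lxritag;
 *	piocb->sli4_xritag = sglq->sli4_xritag;
 *	if (lpfc_sli4_bpl2sgl(phba, piocb, sglq) == NO_XRI)
 *		return IOCB_ERROR;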
7927 **/ 7928 static uint16_t 7929 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, 7930 struct lpfc_sglq *sglq) 7931 { 7932 uint16_t xritag = NO_XRI; 7933 struct ulp_bde64 *bpl = NULL; 7934 struct ulp_bde64 bde; 7935 struct sli4_sge *sgl = NULL; 7936 struct lpfc_dmabuf *dmabuf; 7937 IOCB_t *icmd; 7938 int numBdes = 0; 7939 int i = 0; 7940 uint32_t offset = 0; /* accumulated offset in the sg request list */ 7941 int inbound = 0; /* number of sg reply entries inbound from firmware */ 7942 7943 if (!piocbq || !sglq) 7944 return xritag; 7945 7946 sgl = (struct sli4_sge *)sglq->sgl; 7947 icmd = &piocbq->iocb; 7948 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX) 7949 return sglq->sli4_xritag; 7950 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 7951 numBdes = icmd->un.genreq64.bdl.bdeSize / 7952 sizeof(struct ulp_bde64); 7953 /* The addrHigh and addrLow fields within the IOCB 7954 * have not been byteswapped yet so there is no 7955 * need to swap them back. 7956 */ 7957 if (piocbq->context3) 7958 dmabuf = (struct lpfc_dmabuf *)piocbq->context3; 7959 else 7960 return xritag; 7961 7962 bpl = (struct ulp_bde64 *)dmabuf->virt; 7963 if (!bpl) 7964 return xritag; 7965 7966 for (i = 0; i < numBdes; i++) { 7967 /* Should already be byte swapped. */ 7968 sgl->addr_hi = bpl->addrHigh; 7969 sgl->addr_lo = bpl->addrLow; 7970 7971 sgl->word2 = le32_to_cpu(sgl->word2); 7972 if ((i+1) == numBdes) 7973 bf_set(lpfc_sli4_sge_last, sgl, 1); 7974 else 7975 bf_set(lpfc_sli4_sge_last, sgl, 0); 7976 /* swap the size field back to the cpu so we 7977 * can assign it to the sgl. 7978 */ 7979 bde.tus.w = le32_to_cpu(bpl->tus.w); 7980 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 7981 /* The offsets in the sgl need to be accumulated 7982 * separately for the request and reply lists. 7983 * The request is always first, the reply follows. 7984 */ 7985 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) { 7986 /* add up the reply sg entries */ 7987 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 7988 inbound++; 7989 /* first inbound? reset the offset */ 7990 if (inbound == 1) 7991 offset = 0; 7992 bf_set(lpfc_sli4_sge_offset, sgl, offset); 7993 bf_set(lpfc_sli4_sge_type, sgl, 7994 LPFC_SGE_TYPE_DATA); 7995 offset += bde.tus.f.bdeSize; 7996 } 7997 sgl->word2 = cpu_to_le32(sgl->word2); 7998 bpl++; 7999 sgl++; 8000 } 8001 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) { 8002 /* The addrHigh and addrLow fields of the BDE have not 8003 * been byteswapped yet so they need to be swapped 8004 * before putting them in the sgl. 8005 */ 8006 sgl->addr_hi = 8007 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); 8008 sgl->addr_lo = 8009 cpu_to_le32(icmd->un.genreq64.bdl.addrLow); 8010 sgl->word2 = le32_to_cpu(sgl->word2); 8011 bf_set(lpfc_sli4_sge_last, sgl, 1); 8012 sgl->word2 = cpu_to_le32(sgl->word2); 8013 sgl->sge_len = 8014 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize); 8015 } 8016 return sglq->sli4_xritag; 8017 } 8018 8019 /** 8020 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution 8021 * @phba: Pointer to HBA context object. 8022 * 8023 * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index 8024 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock 8025 * held. 8026 * 8027 * Return: index into SLI4 fast-path FCP queue index. 
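 *
 * Caller-side sketch (as done by lpfc_sli_issue_iocb() later in this file)
 * for steering an FCP command onto one of the fast-path rings:
 *
 *	idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
 *	piocb->fcp_wqidx = idx;
 *	ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;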
8028  **/
8029 static inline uint32_t
8030 lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
8031 {
8032 	struct lpfc_vector_map_info *cpup;
8033 	int chann, cpu;
8034 
8035 	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) {
8036 		cpu = smp_processor_id();
8037 		if (cpu < phba->sli4_hba.num_present_cpu) {
8038 			cpup = phba->sli4_hba.cpu_map;
8039 			cpup += cpu;
8040 			return cpup->channel_id;
8041 		}
8042 		chann = cpu;
8043 	}
8044 	chann = atomic_add_return(1, &phba->fcp_qidx);
8045 	chann = (chann % phba->cfg_fcp_io_channel);
8046 	return chann;
8047 }
8048 
8049 /**
8050  * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
8051  * @phba: Pointer to HBA context object.
8052  * @iocbq: Pointer to command iocb.
8053  * @wqe: Pointer to the work queue entry.
8054  *
8055  * This routine converts the iocb command to its Work Queue Entry
8056  * equivalent. The wqe pointer should not have any fields set when
8057  * this routine is called because it will memcpy over them.
8058  * This routine does not set the CQ_ID or the WQEC bits in the
8059  * wqe.
8060  *
8061  * Returns: 0 = Success, IOCB_ERROR = Failure.
8062  **/
8063 static int
8064 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
8065 		union lpfc_wqe *wqe)
8066 {
8067 	uint32_t xmit_len = 0, total_len = 0;
8068 	uint8_t ct = 0;
8069 	uint32_t fip;
8070 	uint32_t abort_tag;
8071 	uint8_t command_type = ELS_COMMAND_NON_FIP;
8072 	uint8_t cmnd;
8073 	uint16_t xritag;
8074 	uint16_t abrt_iotag;
8075 	struct lpfc_iocbq *abrtiocbq;
8076 	struct ulp_bde64 *bpl = NULL;
8077 	uint32_t els_id = LPFC_ELS_ID_DEFAULT;
8078 	int numBdes, i;
8079 	struct ulp_bde64 bde;
8080 	struct lpfc_nodelist *ndlp;
8081 	uint32_t *pcmd;
8082 	uint32_t if_type;
8083 
8084 	fip = phba->hba_flag & HBA_FIP_SUPPORT;
8085 	/* The fcp commands will set command type */
8086 	if (iocbq->iocb_flag & LPFC_IO_FCP)
8087 		command_type = FCP_COMMAND;
8088 	else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
8089 		command_type = ELS_COMMAND_FIP;
8090 	else
8091 		command_type = ELS_COMMAND_NON_FIP;
8092 
8093 	/* Some of the fields are in the right position already */
8094 	memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
8095 	abort_tag = (uint32_t) iocbq->iotag;
8096 	xritag = iocbq->sli4_xritag;
8097 	wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
8098 	/* words0-2 bpl convert bde */
8099 	if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
8100 		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8101 			sizeof(struct ulp_bde64);
8102 		bpl = (struct ulp_bde64 *)
8103 			((struct lpfc_dmabuf *)iocbq->context3)->virt;
8104 		if (!bpl)
8105 			return IOCB_ERROR;
8106 
8107 		/* Should already be byte swapped. */
8108 		wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
8109 		wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
8110 		/* swap the size field back to the cpu so we
8111 		 * can assign it to the sgl.
8112 */ 8113 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); 8114 xmit_len = wqe->generic.bde.tus.f.bdeSize; 8115 total_len = 0; 8116 for (i = 0; i < numBdes; i++) { 8117 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 8118 total_len += bde.tus.f.bdeSize; 8119 } 8120 } else 8121 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; 8122 8123 iocbq->iocb.ulpIoTag = iocbq->iotag; 8124 cmnd = iocbq->iocb.ulpCommand; 8125 8126 switch (iocbq->iocb.ulpCommand) { 8127 case CMD_ELS_REQUEST64_CR: 8128 if (iocbq->iocb_flag & LPFC_IO_LIBDFC) 8129 ndlp = iocbq->context_un.ndlp; 8130 else 8131 ndlp = (struct lpfc_nodelist *)iocbq->context1; 8132 if (!iocbq->iocb.ulpLe) { 8133 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8134 "2007 Only Limited Edition cmd Format" 8135 " supported 0x%x\n", 8136 iocbq->iocb.ulpCommand); 8137 return IOCB_ERROR; 8138 } 8139 8140 wqe->els_req.payload_len = xmit_len; 8141 /* Els_reguest64 has a TMO */ 8142 bf_set(wqe_tmo, &wqe->els_req.wqe_com, 8143 iocbq->iocb.ulpTimeout); 8144 /* Need a VF for word 4 set the vf bit*/ 8145 bf_set(els_req64_vf, &wqe->els_req, 0); 8146 /* And a VFID for word 12 */ 8147 bf_set(els_req64_vfid, &wqe->els_req, 0); 8148 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 8149 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 8150 iocbq->iocb.ulpContext); 8151 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 8152 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0); 8153 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 8154 if (command_type == ELS_COMMAND_FIP) 8155 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 8156 >> LPFC_FIP_ELS_ID_SHIFT); 8157 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 8158 iocbq->context2)->virt); 8159 if_type = bf_get(lpfc_sli_intf_if_type, 8160 &phba->sli4_hba.sli_intf); 8161 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 8162 if (pcmd && (*pcmd == ELS_CMD_FLOGI || 8163 *pcmd == ELS_CMD_SCR || 8164 *pcmd == ELS_CMD_FDISC || 8165 *pcmd == ELS_CMD_LOGO || 8166 *pcmd == ELS_CMD_PLOGI)) { 8167 bf_set(els_req64_sp, &wqe->els_req, 1); 8168 bf_set(els_req64_sid, &wqe->els_req, 8169 iocbq->vport->fc_myDID); 8170 if ((*pcmd == ELS_CMD_FLOGI) && 8171 !(phba->fc_topology == 8172 LPFC_TOPOLOGY_LOOP)) 8173 bf_set(els_req64_sid, &wqe->els_req, 0); 8174 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); 8175 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 8176 phba->vpi_ids[iocbq->vport->vpi]); 8177 } else if (pcmd && iocbq->context1) { 8178 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0); 8179 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 8180 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 8181 } 8182 } 8183 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, 8184 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 8185 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 8186 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 8187 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 8188 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); 8189 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); 8190 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); 8191 wqe->els_req.max_response_payload_len = total_len - xmit_len; 8192 break; 8193 case CMD_XMIT_SEQUENCE64_CX: 8194 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, 8195 iocbq->iocb.un.ulpWord[3]); 8196 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, 8197 iocbq->iocb.unsli3.rcvsli3.ox_id); 8198 /* The entire sequence is transmitted for this IOCB */ 8199 xmit_len = total_len; 8200 cmnd = CMD_XMIT_SEQUENCE64_CR; 8201 if (phba->link_flag & LS_LOOPBACK_MODE) 8202 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); 8203 case CMD_XMIT_SEQUENCE64_CR: 8204 /* word3 iocb=io_tag32 
wqe=reserved */ 8205 wqe->xmit_sequence.rsvd3 = 0; 8206 /* word4 relative_offset memcpy */ 8207 /* word5 r_ctl/df_ctl memcpy */ 8208 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); 8209 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); 8210 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, 8211 LPFC_WQE_IOD_WRITE); 8212 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, 8213 LPFC_WQE_LENLOC_WORD12); 8214 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 8215 wqe->xmit_sequence.xmit_len = xmit_len; 8216 command_type = OTHER_COMMAND; 8217 break; 8218 case CMD_XMIT_BCAST64_CN: 8219 /* word3 iocb=iotag32 wqe=seq_payload_len */ 8220 wqe->xmit_bcast64.seq_payload_len = xmit_len; 8221 /* word4 iocb=rsvd wqe=rsvd */ 8222 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ 8223 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ 8224 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com, 8225 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 8226 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1); 8227 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE); 8228 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, 8229 LPFC_WQE_LENLOC_WORD3); 8230 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); 8231 break; 8232 case CMD_FCP_IWRITE64_CR: 8233 command_type = FCP_COMMAND_DATA_OUT; 8234 /* word3 iocb=iotag wqe=payload_offset_len */ 8235 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 8236 bf_set(payload_offset_len, &wqe->fcp_iwrite, 8237 xmit_len + sizeof(struct fcp_rsp)); 8238 bf_set(cmd_buff_len, &wqe->fcp_iwrite, 8239 0); 8240 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 8241 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 8242 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, 8243 iocbq->iocb.ulpFCP2Rcvy); 8244 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); 8245 /* Always open the exchange */ 8246 bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0); 8247 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); 8248 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, 8249 LPFC_WQE_LENLOC_WORD4); 8250 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0); 8251 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); 8252 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); 8253 break; 8254 case CMD_FCP_IREAD64_CR: 8255 /* word3 iocb=iotag wqe=payload_offset_len */ 8256 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 8257 bf_set(payload_offset_len, &wqe->fcp_iread, 8258 xmit_len + sizeof(struct fcp_rsp)); 8259 bf_set(cmd_buff_len, &wqe->fcp_iread, 8260 0); 8261 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 8262 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 8263 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, 8264 iocbq->iocb.ulpFCP2Rcvy); 8265 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); 8266 /* Always open the exchange */ 8267 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); 8268 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); 8269 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, 8270 LPFC_WQE_LENLOC_WORD4); 8271 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0); 8272 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 8273 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); 8274 break; 8275 case CMD_FCP_ICMND64_CR: 8276 /* word3 iocb=iotag wqe=payload_offset_len */ 8277 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 8278 bf_set(payload_offset_len, &wqe->fcp_icmd, 8279 xmit_len + sizeof(struct fcp_rsp)); 8280 bf_set(cmd_buff_len, &wqe->fcp_icmd, 8281 0); 8282 /* word3 iocb=IO_TAG wqe=reserved */ 8283 bf_set(wqe_pu, 
&wqe->fcp_icmd.wqe_com, 0); 8284 /* Always open the exchange */ 8285 bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0); 8286 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); 8287 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); 8288 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); 8289 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 8290 LPFC_WQE_LENLOC_NONE); 8291 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); 8292 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, 8293 iocbq->iocb.ulpFCP2Rcvy); 8294 break; 8295 case CMD_GEN_REQUEST64_CR: 8296 /* For this command calculate the xmit length of the 8297 * request bde. 8298 */ 8299 xmit_len = 0; 8300 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 8301 sizeof(struct ulp_bde64); 8302 for (i = 0; i < numBdes; i++) { 8303 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 8304 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 8305 break; 8306 xmit_len += bde.tus.f.bdeSize; 8307 } 8308 /* word3 iocb=IO_TAG wqe=request_payload_len */ 8309 wqe->gen_req.request_payload_len = xmit_len; 8310 /* word4 iocb=parameter wqe=relative_offset memcpy */ 8311 /* word5 [rctl, type, df_ctl, la] copied in memcpy */ 8312 /* word6 context tag copied in memcpy */ 8313 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { 8314 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 8315 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8316 "2015 Invalid CT %x command 0x%x\n", 8317 ct, iocbq->iocb.ulpCommand); 8318 return IOCB_ERROR; 8319 } 8320 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0); 8321 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout); 8322 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU); 8323 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); 8324 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); 8325 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); 8326 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 8327 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 8328 wqe->gen_req.max_response_payload_len = total_len - xmit_len; 8329 command_type = OTHER_COMMAND; 8330 break; 8331 case CMD_XMIT_ELS_RSP64_CX: 8332 ndlp = (struct lpfc_nodelist *)iocbq->context1; 8333 /* words0-2 BDE memcpy */ 8334 /* word3 iocb=iotag32 wqe=response_payload_len */ 8335 wqe->xmit_els_rsp.response_payload_len = xmit_len; 8336 /* word4 */ 8337 wqe->xmit_els_rsp.word4 = 0; 8338 /* word5 iocb=rsvd wge=did */ 8339 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, 8340 iocbq->iocb.un.xseq64.xmit_els_remoteID); 8341 8342 if_type = bf_get(lpfc_sli_intf_if_type, 8343 &phba->sli4_hba.sli_intf); 8344 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 8345 if (iocbq->vport->fc_flag & FC_PT2PT) { 8346 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 8347 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 8348 iocbq->vport->fc_myDID); 8349 if (iocbq->vport->fc_myDID == Fabric_DID) { 8350 bf_set(wqe_els_did, 8351 &wqe->xmit_els_rsp.wqe_dest, 0); 8352 } 8353 } 8354 } 8355 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 8356 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 8357 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); 8358 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 8359 iocbq->iocb.unsli3.rcvsli3.ox_id); 8360 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 8361 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 8362 phba->vpi_ids[iocbq->vport->vpi]); 8363 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1); 8364 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); 8365 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); 8366 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, 8367 LPFC_WQE_LENLOC_WORD3); 8368 
bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 8369 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, 8370 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 8371 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 8372 iocbq->context2)->virt); 8373 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 8374 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 8375 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 8376 iocbq->vport->fc_myDID); 8377 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1); 8378 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 8379 phba->vpi_ids[phba->pport->vpi]); 8380 } 8381 command_type = OTHER_COMMAND; 8382 break; 8383 case CMD_CLOSE_XRI_CN: 8384 case CMD_ABORT_XRI_CN: 8385 case CMD_ABORT_XRI_CX: 8386 /* words 0-2 memcpy should be 0 rserved */ 8387 /* port will send abts */ 8388 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag; 8389 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) { 8390 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag]; 8391 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK; 8392 } else 8393 fip = 0; 8394 8395 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip) 8396 /* 8397 * The link is down, or the command was ELS_FIP 8398 * so the fw does not need to send abts 8399 * on the wire. 8400 */ 8401 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); 8402 else 8403 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 8404 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 8405 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */ 8406 wqe->abort_cmd.rsrvd5 = 0; 8407 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com, 8408 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 8409 abort_tag = iocbq->iocb.un.acxri.abortIoTag; 8410 /* 8411 * The abort handler will send us CMD_ABORT_XRI_CN or 8412 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX 8413 */ 8414 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 8415 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); 8416 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, 8417 LPFC_WQE_LENLOC_NONE); 8418 cmnd = CMD_ABORT_XRI_CX; 8419 command_type = OTHER_COMMAND; 8420 xritag = 0; 8421 break; 8422 case CMD_XMIT_BLS_RSP64_CX: 8423 ndlp = (struct lpfc_nodelist *)iocbq->context1; 8424 /* As BLS ABTS RSP WQE is very different from other WQEs, 8425 * we re-construct this WQE here based on information in 8426 * iocbq from scratch. 8427 */ 8428 memset(wqe, 0, sizeof(union lpfc_wqe)); 8429 /* OX_ID is invariable to who sent ABTS to CT exchange */ 8430 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, 8431 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); 8432 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) == 8433 LPFC_ABTS_UNSOL_INT) { 8434 /* ABTS sent by initiator to CT exchange, the 8435 * RX_ID field will be filled with the newly 8436 * allocated responder XRI. 8437 */ 8438 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 8439 iocbq->sli4_xritag); 8440 } else { 8441 /* ABTS sent by responder to CT exchange, the 8442 * RX_ID field will be filled with the responder 8443 * RX_ID from ABTS. 
8444 */ 8445 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 8446 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp)); 8447 } 8448 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); 8449 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); 8450 8451 /* Use CT=VPI */ 8452 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest, 8453 ndlp->nlp_DID); 8454 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp, 8455 iocbq->iocb.ulpContext); 8456 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1); 8457 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, 8458 phba->vpi_ids[phba->pport->vpi]); 8459 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1); 8460 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com, 8461 LPFC_WQE_LENLOC_NONE); 8462 /* Overwrite the pre-set comnd type with OTHER_COMMAND */ 8463 command_type = OTHER_COMMAND; 8464 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) { 8465 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp, 8466 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp)); 8467 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp, 8468 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp)); 8469 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp, 8470 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); 8471 } 8472 8473 break; 8474 case CMD_XRI_ABORTED_CX: 8475 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 8476 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ 8477 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */ 8478 case CMD_FCP_TRSP64_CX: /* Target mode rcv */ 8479 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */ 8480 default: 8481 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8482 "2014 Invalid command 0x%x\n", 8483 iocbq->iocb.ulpCommand); 8484 return IOCB_ERROR; 8485 break; 8486 } 8487 8488 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS) 8489 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU); 8490 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP) 8491 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP); 8492 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT) 8493 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT); 8494 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP | 8495 LPFC_IO_DIF_INSERT); 8496 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 8497 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 8498 wqe->generic.wqe_com.abort_tag = abort_tag; 8499 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type); 8500 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd); 8501 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass); 8502 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 8503 return 0; 8504 } 8505 8506 /** 8507 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb 8508 * @phba: Pointer to HBA context object. 8509 * @ring_number: SLI ring number to issue iocb on. 8510 * @piocb: Pointer to command iocb. 8511 * @flag: Flag indicating if this command can be put into txq. 8512 * 8513 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue 8514 * an iocb command to an HBA with SLI-4 interface spec. 8515 * 8516 * This function is called with hbalock held. The function will return success 8517 * after it successfully submit the iocb to firmware or after adding to the 8518 * txq. 
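 *
 * Callers normally reach this routine through the jump table wrapper, e.g.
 * (a sketch of the SLI4 branch of lpfc_sli_issue_iocb() below):
 *
 *	spin_lock_irqsave(&pring->ring_lock, iflags);
 *	rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
 *	spin_unlock_irqrestore(&pring->ring_lock, iflags);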
8519 **/ 8520 static int 8521 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, 8522 struct lpfc_iocbq *piocb, uint32_t flag) 8523 { 8524 struct lpfc_sglq *sglq; 8525 union lpfc_wqe wqe; 8526 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; 8527 8528 if (piocb->sli4_xritag == NO_XRI) { 8529 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 8530 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 8531 sglq = NULL; 8532 else { 8533 if (!list_empty(&pring->txq)) { 8534 if (!(flag & SLI_IOCB_RET_IOCB)) { 8535 __lpfc_sli_ringtx_put(phba, 8536 pring, piocb); 8537 return IOCB_SUCCESS; 8538 } else { 8539 return IOCB_BUSY; 8540 } 8541 } else { 8542 sglq = __lpfc_sli_get_sglq(phba, piocb); 8543 if (!sglq) { 8544 if (!(flag & SLI_IOCB_RET_IOCB)) { 8545 __lpfc_sli_ringtx_put(phba, 8546 pring, 8547 piocb); 8548 return IOCB_SUCCESS; 8549 } else 8550 return IOCB_BUSY; 8551 } 8552 } 8553 } 8554 } else if (piocb->iocb_flag & LPFC_IO_FCP) { 8555 /* These IO's already have an XRI and a mapped sgl. */ 8556 sglq = NULL; 8557 } else { 8558 /* 8559 * This is a continuation of a commandi,(CX) so this 8560 * sglq is on the active list 8561 */ 8562 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag); 8563 if (!sglq) 8564 return IOCB_ERROR; 8565 } 8566 8567 if (sglq) { 8568 piocb->sli4_lxritag = sglq->sli4_lxritag; 8569 piocb->sli4_xritag = sglq->sli4_xritag; 8570 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq)) 8571 return IOCB_ERROR; 8572 } 8573 8574 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe)) 8575 return IOCB_ERROR; 8576 8577 if ((piocb->iocb_flag & LPFC_IO_FCP) || 8578 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 8579 if (unlikely(!phba->sli4_hba.fcp_wq)) 8580 return IOCB_ERROR; 8581 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], 8582 &wqe)) 8583 return IOCB_ERROR; 8584 } else { 8585 if (unlikely(!phba->sli4_hba.els_wq)) 8586 return IOCB_ERROR; 8587 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) 8588 return IOCB_ERROR; 8589 } 8590 lpfc_sli_ringtxcmpl_put(phba, pring, piocb); 8591 8592 return 0; 8593 } 8594 8595 /** 8596 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb 8597 * 8598 * This routine wraps the actual lockless version for issusing IOCB function 8599 * pointer from the lpfc_hba struct. 8600 * 8601 * Return codes: 8602 * IOCB_ERROR - Error 8603 * IOCB_SUCCESS - Success 8604 * IOCB_BUSY - Busy 8605 **/ 8606 int 8607 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 8608 struct lpfc_iocbq *piocb, uint32_t flag) 8609 { 8610 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 8611 } 8612 8613 /** 8614 * lpfc_sli_api_table_setup - Set up sli api function jump table 8615 * @phba: The hba struct for which this call is being executed. 8616 * @dev_grp: The HBA PCI-Device group number. 8617 * 8618 * This routine sets up the SLI interface API function jump table in @phba 8619 * struct. 8620 * Returns: 0 - success, -ENODEV - failure. 
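 *
 * Illustrative use only (a sketch; the real caller derives @dev_grp from
 * the probed PCI device during driver set-up):
 *
 *	rc = lpfc_sli_api_table_setup(phba, LPFC_PCI_DEV_OC);
 *	if (rc)
 *		return rc;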
8621 **/ 8622 int 8623 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 8624 { 8625 8626 switch (dev_grp) { 8627 case LPFC_PCI_DEV_LP: 8628 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3; 8629 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3; 8630 break; 8631 case LPFC_PCI_DEV_OC: 8632 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4; 8633 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4; 8634 break; 8635 default: 8636 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8637 "1419 Invalid HBA PCI-device group: 0x%x\n", 8638 dev_grp); 8639 return -ENODEV; 8640 break; 8641 } 8642 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq; 8643 return 0; 8644 } 8645 8646 /** 8647 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb 8648 * @phba: Pointer to HBA context object. 8649 * @pring: Pointer to driver SLI ring object. 8650 * @piocb: Pointer to command iocb. 8651 * @flag: Flag indicating if this command can be put into txq. 8652 * 8653 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb 8654 * function. This function gets the hbalock and calls 8655 * __lpfc_sli_issue_iocb function and will return the error returned 8656 * by __lpfc_sli_issue_iocb function. This wrapper is used by 8657 * functions which do not hold hbalock. 8658 **/ 8659 int 8660 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 8661 struct lpfc_iocbq *piocb, uint32_t flag) 8662 { 8663 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; 8664 struct lpfc_sli_ring *pring; 8665 struct lpfc_queue *fpeq; 8666 struct lpfc_eqe *eqe; 8667 unsigned long iflags; 8668 int rc, idx; 8669 8670 if (phba->sli_rev == LPFC_SLI_REV4) { 8671 if (piocb->iocb_flag & LPFC_IO_FCP) { 8672 if (unlikely(!phba->sli4_hba.fcp_wq)) 8673 return IOCB_ERROR; 8674 idx = lpfc_sli4_scmd_to_wqidx_distr(phba); 8675 piocb->fcp_wqidx = idx; 8676 ring_number = MAX_SLI3_CONFIGURED_RINGS + idx; 8677 8678 pring = &phba->sli.ring[ring_number]; 8679 spin_lock_irqsave(&pring->ring_lock, iflags); 8680 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, 8681 flag); 8682 spin_unlock_irqrestore(&pring->ring_lock, iflags); 8683 8684 if (lpfc_fcp_look_ahead) { 8685 fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx]; 8686 8687 if (atomic_dec_and_test(&fcp_eq_hdl-> 8688 fcp_eq_in_use)) { 8689 8690 /* Get associated EQ with this index */ 8691 fpeq = phba->sli4_hba.hba_eq[idx]; 8692 8693 /* Turn off interrupts from this EQ */ 8694 lpfc_sli4_eq_clr_intr(fpeq); 8695 8696 /* 8697 * Process all the events on FCP EQ 8698 */ 8699 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 8700 lpfc_sli4_hba_handle_eqe(phba, 8701 eqe, idx); 8702 fpeq->EQ_processed++; 8703 } 8704 8705 /* Always clear and re-arm the EQ */ 8706 lpfc_sli4_eq_release(fpeq, 8707 LPFC_QUEUE_REARM); 8708 } 8709 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); 8710 } 8711 } else { 8712 pring = &phba->sli.ring[ring_number]; 8713 spin_lock_irqsave(&pring->ring_lock, iflags); 8714 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, 8715 flag); 8716 spin_unlock_irqrestore(&pring->ring_lock, iflags); 8717 8718 } 8719 } else { 8720 /* For now, SLI2/3 will still use hbalock */ 8721 spin_lock_irqsave(&phba->hbalock, iflags); 8722 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 8723 spin_unlock_irqrestore(&phba->hbalock, iflags); 8724 } 8725 return rc; 8726 } 8727 8728 /** 8729 * lpfc_extra_ring_setup - Extra ring setup function 8730 * @phba: Pointer to HBA context object. 
 *
 * This function is called when the driver attaches to the
 * HBA to set up the extra ring. The extra ring is used
 * only when the driver needs to support target mode functionality
 * or IP over FC functionalities.
 *
 * This function is called with no lock held.
 **/
static int
lpfc_extra_ring_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;

	/* Adjust cmd/rsp ring iocb entries more evenly */

	/* Take some away from the FCP ring */
	pring = &psli->ring[psli->fcp_ring];
	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* and give them to the extra ring */
	pring = &psli->ring[psli->extra_ring];

	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* Setup default profile for this ring */
	pring->iotag_max = 4096;
	pring->num_mask = 1;
	pring->prt[0].profile = 0;      /* Mask 0 */
	pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
	pring->prt[0].type = phba->cfg_multi_ring_type;
	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
	return 0;
}

/**
 * lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to iocb object.
 *
 * The async_event handler calls this routine when it receives
 * an ASYNC_STATUS_CN event from the port. The port generates
 * this event when an Abort Sequence request to an rport fails
 * twice in succession. The abort could be originated by the
 * driver or by the port. The ABTS could have been for an ELS
 * or FCP IO. The port only generates this event when an ABTS
 * fails to complete after one retry.
 **/
static void
lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
			  struct lpfc_iocbq *iocbq)
{
	struct lpfc_nodelist *ndlp = NULL;
	uint16_t rpi = 0, vpi = 0;
	struct lpfc_vport *vport = NULL;

	/* The rpi in the ulpContext is vport-sensitive. */
	vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
	rpi = iocbq->iocb.ulpContext;

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"3092 Port generated ABTS async event "
			"on vpi %d rpi %d status 0x%x\n",
			vpi, rpi, iocbq->iocb.ulpStatus);

	vport = lpfc_find_vport_by_vpid(phba, vpi);
	if (!vport)
		goto err_exit;
	ndlp = lpfc_findnode_rpi(vport, rpi);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		goto err_exit;

	if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
		lpfc_sli_abts_recover_port(vport, ndlp);
	return;

 err_exit:
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3095 Event Context not found, no "
			"action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
			vpi, rpi, iocbq->iocb.ulpStatus,
			iocbq->iocb.un.ulpWord[4]);
}

/**
 * lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
 * @phba: pointer to HBA context object.
 * @ndlp: nodelist pointer for the impacted rport.
 * @axri: pointer to the wcqe containing the failed exchange.
8826 * 8827 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the 8828 * port. The port generates this event when an abort exchange request to an 8829 * rport fails twice in succession with no reply. The abort could be originated 8830 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO. 8831 */ 8832 void 8833 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, 8834 struct lpfc_nodelist *ndlp, 8835 struct sli4_wcqe_xri_aborted *axri) 8836 { 8837 struct lpfc_vport *vport; 8838 uint32_t ext_status = 0; 8839 8840 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 8841 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8842 "3115 Node Context not found, driver " 8843 "ignoring abts err event\n"); 8844 return; 8845 } 8846 8847 vport = ndlp->vport; 8848 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8849 "3116 Port generated FCP XRI ABORT event on " 8850 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n", 8851 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], 8852 bf_get(lpfc_wcqe_xa_xri, axri), 8853 bf_get(lpfc_wcqe_xa_status, axri), 8854 axri->parameter); 8855 8856 /* 8857 * Catch the ABTS protocol failure case. Older OCe FW releases returned 8858 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and 8859 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT. 8860 */ 8861 ext_status = axri->parameter & IOERR_PARAM_MASK; 8862 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && 8863 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) 8864 lpfc_sli_abts_recover_port(vport, ndlp); 8865 } 8866 8867 /** 8868 * lpfc_sli_async_event_handler - ASYNC iocb handler function 8869 * @phba: Pointer to HBA context object. 8870 * @pring: Pointer to driver SLI ring object. 8871 * @iocbq: Pointer to iocb object. 8872 * 8873 * This function is called by the slow ring event handler 8874 * function when there is an ASYNC event iocb in the ring. 8875 * This function is called with no lock held. 8876 * Currently this function handles only temperature related 8877 * ASYNC events. The function decodes the temperature sensor 8878 * event message and posts events for the management applications. 8879 **/ 8880 static void 8881 lpfc_sli_async_event_handler(struct lpfc_hba * phba, 8882 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) 8883 { 8884 IOCB_t *icmd; 8885 uint16_t evt_code; 8886 struct temp_event temp_event_data; 8887 struct Scsi_Host *shost; 8888 uint32_t *iocb_w; 8889 8890 icmd = &iocbq->iocb; 8891 evt_code = icmd->un.asyncstat.evt_code; 8892 8893 switch (evt_code) { 8894 case ASYNC_TEMP_WARN: 8895 case ASYNC_TEMP_SAFE: 8896 temp_event_data.data = (uint32_t) icmd->ulpContext; 8897 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 8898 if (evt_code == ASYNC_TEMP_WARN) { 8899 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 8900 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 8901 "0347 Adapter is very hot, please take " 8902 "corrective action. temperature : %d Celsius\n", 8903 (uint32_t) icmd->ulpContext); 8904 } else { 8905 temp_event_data.event_code = LPFC_NORMAL_TEMP; 8906 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 8907 "0340 Adapter temperature is OK now. 
" 8908 "temperature : %d Celsius\n", 8909 (uint32_t) icmd->ulpContext); 8910 } 8911 8912 /* Send temperature change event to applications */ 8913 shost = lpfc_shost_from_vport(phba->pport); 8914 fc_host_post_vendor_event(shost, fc_get_event_number(), 8915 sizeof(temp_event_data), (char *) &temp_event_data, 8916 LPFC_NL_VENDOR_ID); 8917 break; 8918 case ASYNC_STATUS_CN: 8919 lpfc_sli_abts_err_handler(phba, iocbq); 8920 break; 8921 default: 8922 iocb_w = (uint32_t *) icmd; 8923 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8924 "0346 Ring %d handler: unexpected ASYNC_STATUS" 8925 " evt_code 0x%x\n" 8926 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" 8927 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" 8928 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" 8929 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n", 8930 pring->ringno, icmd->un.asyncstat.evt_code, 8931 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3], 8932 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7], 8933 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11], 8934 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]); 8935 8936 break; 8937 } 8938 } 8939 8940 8941 /** 8942 * lpfc_sli_setup - SLI ring setup function 8943 * @phba: Pointer to HBA context object. 8944 * 8945 * lpfc_sli_setup sets up rings of the SLI interface with 8946 * number of iocbs per ring and iotags. This function is 8947 * called while driver attach to the HBA and before the 8948 * interrupts are enabled. So there is no need for locking. 8949 * 8950 * This function always returns 0. 8951 **/ 8952 int 8953 lpfc_sli_setup(struct lpfc_hba *phba) 8954 { 8955 int i, totiocbsize = 0; 8956 struct lpfc_sli *psli = &phba->sli; 8957 struct lpfc_sli_ring *pring; 8958 8959 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS; 8960 if (phba->sli_rev == LPFC_SLI_REV4) 8961 psli->num_rings += phba->cfg_fcp_io_channel; 8962 psli->sli_flag = 0; 8963 psli->fcp_ring = LPFC_FCP_RING; 8964 psli->next_ring = LPFC_FCP_NEXT_RING; 8965 psli->extra_ring = LPFC_EXTRA_RING; 8966 8967 psli->iocbq_lookup = NULL; 8968 psli->iocbq_lookup_len = 0; 8969 psli->last_iotag = 0; 8970 8971 for (i = 0; i < psli->num_rings; i++) { 8972 pring = &psli->ring[i]; 8973 switch (i) { 8974 case LPFC_FCP_RING: /* ring 0 - FCP */ 8975 /* numCiocb and numRiocb are used in config_port */ 8976 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 8977 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 8978 pring->sli.sli3.numCiocb += 8979 SLI2_IOCB_CMD_R1XTRA_ENTRIES; 8980 pring->sli.sli3.numRiocb += 8981 SLI2_IOCB_RSP_R1XTRA_ENTRIES; 8982 pring->sli.sli3.numCiocb += 8983 SLI2_IOCB_CMD_R3XTRA_ENTRIES; 8984 pring->sli.sli3.numRiocb += 8985 SLI2_IOCB_RSP_R3XTRA_ENTRIES; 8986 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 8987 SLI3_IOCB_CMD_SIZE : 8988 SLI2_IOCB_CMD_SIZE; 8989 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 8990 SLI3_IOCB_RSP_SIZE : 8991 SLI2_IOCB_RSP_SIZE; 8992 pring->iotag_ctr = 0; 8993 pring->iotag_max = 8994 (phba->cfg_hba_queue_depth * 2); 8995 pring->fast_iotag = pring->iotag_max; 8996 pring->num_mask = 0; 8997 break; 8998 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ 8999 /* numCiocb and numRiocb are used in config_port */ 9000 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 9001 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 9002 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 9003 SLI3_IOCB_CMD_SIZE : 9004 SLI2_IOCB_CMD_SIZE; 9005 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 
9006 SLI3_IOCB_RSP_SIZE : 9007 SLI2_IOCB_RSP_SIZE; 9008 pring->iotag_max = phba->cfg_hba_queue_depth; 9009 pring->num_mask = 0; 9010 break; 9011 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 9012 /* numCiocb and numRiocb are used in config_port */ 9013 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 9014 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 9015 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 9016 SLI3_IOCB_CMD_SIZE : 9017 SLI2_IOCB_CMD_SIZE; 9018 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 9019 SLI3_IOCB_RSP_SIZE : 9020 SLI2_IOCB_RSP_SIZE; 9021 pring->fast_iotag = 0; 9022 pring->iotag_ctr = 0; 9023 pring->iotag_max = 4096; 9024 pring->lpfc_sli_rcv_async_status = 9025 lpfc_sli_async_event_handler; 9026 pring->num_mask = LPFC_MAX_RING_MASK; 9027 pring->prt[0].profile = 0; /* Mask 0 */ 9028 pring->prt[0].rctl = FC_RCTL_ELS_REQ; 9029 pring->prt[0].type = FC_TYPE_ELS; 9030 pring->prt[0].lpfc_sli_rcv_unsol_event = 9031 lpfc_els_unsol_event; 9032 pring->prt[1].profile = 0; /* Mask 1 */ 9033 pring->prt[1].rctl = FC_RCTL_ELS_REP; 9034 pring->prt[1].type = FC_TYPE_ELS; 9035 pring->prt[1].lpfc_sli_rcv_unsol_event = 9036 lpfc_els_unsol_event; 9037 pring->prt[2].profile = 0; /* Mask 2 */ 9038 /* NameServer Inquiry */ 9039 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; 9040 /* NameServer */ 9041 pring->prt[2].type = FC_TYPE_CT; 9042 pring->prt[2].lpfc_sli_rcv_unsol_event = 9043 lpfc_ct_unsol_event; 9044 pring->prt[3].profile = 0; /* Mask 3 */ 9045 /* NameServer response */ 9046 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; 9047 /* NameServer */ 9048 pring->prt[3].type = FC_TYPE_CT; 9049 pring->prt[3].lpfc_sli_rcv_unsol_event = 9050 lpfc_ct_unsol_event; 9051 break; 9052 } 9053 totiocbsize += (pring->sli.sli3.numCiocb * 9054 pring->sli.sli3.sizeCiocb) + 9055 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb); 9056 } 9057 if (totiocbsize > MAX_SLIM_IOCB_SIZE) { 9058 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 9059 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in " 9060 "SLI2 SLIM Data: x%x x%lx\n", 9061 phba->brd_no, totiocbsize, 9062 (unsigned long) MAX_SLIM_IOCB_SIZE); 9063 } 9064 if (phba->cfg_multi_ring_support == 2) 9065 lpfc_extra_ring_setup(phba); 9066 9067 return 0; 9068 } 9069 9070 /** 9071 * lpfc_sli_queue_setup - Queue initialization function 9072 * @phba: Pointer to HBA context object. 9073 * 9074 * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each 9075 * ring. This function also initializes ring indices of each ring. 9076 * This function is called during the initialization of the SLI 9077 * interface of an HBA. 9078 * This function is called with no lock held and always returns 9079 * 1. 
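 * Each ring's ring_lock spinlock is also initialized here.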
9080 **/ 9081 int 9082 lpfc_sli_queue_setup(struct lpfc_hba *phba) 9083 { 9084 struct lpfc_sli *psli; 9085 struct lpfc_sli_ring *pring; 9086 int i; 9087 9088 psli = &phba->sli; 9089 spin_lock_irq(&phba->hbalock); 9090 INIT_LIST_HEAD(&psli->mboxq); 9091 INIT_LIST_HEAD(&psli->mboxq_cmpl); 9092 /* Initialize list headers for txq and txcmplq as double linked lists */ 9093 for (i = 0; i < psli->num_rings; i++) { 9094 pring = &psli->ring[i]; 9095 pring->ringno = i; 9096 pring->sli.sli3.next_cmdidx = 0; 9097 pring->sli.sli3.local_getidx = 0; 9098 pring->sli.sli3.cmdidx = 0; 9099 INIT_LIST_HEAD(&pring->txq); 9100 INIT_LIST_HEAD(&pring->txcmplq); 9101 INIT_LIST_HEAD(&pring->iocb_continueq); 9102 INIT_LIST_HEAD(&pring->iocb_continue_saveq); 9103 INIT_LIST_HEAD(&pring->postbufq); 9104 spin_lock_init(&pring->ring_lock); 9105 } 9106 spin_unlock_irq(&phba->hbalock); 9107 return 1; 9108 } 9109 9110 /** 9111 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system 9112 * @phba: Pointer to HBA context object. 9113 * 9114 * This routine flushes the mailbox command subsystem. It will unconditionally 9115 * flush all the mailbox commands in the three possible stages in the mailbox 9116 * command sub-system: pending mailbox command queue; the outstanding mailbox 9117 * command; and completed mailbox command queue. It is caller's responsibility 9118 * to make sure that the driver is in the proper state to flush the mailbox 9119 * command sub-system. Namely, the posting of mailbox commands into the 9120 * pending mailbox command queue from the various clients must be stopped; 9121 * either the HBA is in a state that it will never works on the outstanding 9122 * mailbox command (such as in EEH or ERATT conditions) or the outstanding 9123 * mailbox command has been completed. 9124 **/ 9125 static void 9126 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) 9127 { 9128 LIST_HEAD(completions); 9129 struct lpfc_sli *psli = &phba->sli; 9130 LPFC_MBOXQ_t *pmb; 9131 unsigned long iflag; 9132 9133 /* Flush all the mailbox commands in the mbox system */ 9134 spin_lock_irqsave(&phba->hbalock, iflag); 9135 /* The pending mailbox command queue */ 9136 list_splice_init(&phba->sli.mboxq, &completions); 9137 /* The outstanding active mailbox command */ 9138 if (psli->mbox_active) { 9139 list_add_tail(&psli->mbox_active->list, &completions); 9140 psli->mbox_active = NULL; 9141 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 9142 } 9143 /* The completed mailbox command queue */ 9144 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 9145 spin_unlock_irqrestore(&phba->hbalock, iflag); 9146 9147 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ 9148 while (!list_empty(&completions)) { 9149 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); 9150 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; 9151 if (pmb->mbox_cmpl) 9152 pmb->mbox_cmpl(phba, pmb); 9153 } 9154 } 9155 9156 /** 9157 * lpfc_sli_host_down - Vport cleanup function 9158 * @vport: Pointer to virtual port object. 9159 * 9160 * lpfc_sli_host_down is called to clean up the resources 9161 * associated with a vport before destroying virtual 9162 * port data structures. 9163 * This function does following operations: 9164 * - Free discovery resources associated with this virtual 9165 * port. 9166 * - Free iocbs associated with this virtual port in 9167 * the txq. 9168 * - Send abort for all iocb commands associated with this 9169 * vport in txcmplq. 9170 * 9171 * This function is called with no lock held and always returns 1. 
9172 **/ 9173 int 9174 lpfc_sli_host_down(struct lpfc_vport *vport) 9175 { 9176 LIST_HEAD(completions); 9177 struct lpfc_hba *phba = vport->phba; 9178 struct lpfc_sli *psli = &phba->sli; 9179 struct lpfc_sli_ring *pring; 9180 struct lpfc_iocbq *iocb, *next_iocb; 9181 int i; 9182 unsigned long flags = 0; 9183 uint16_t prev_pring_flag; 9184 9185 lpfc_cleanup_discovery_resources(vport); 9186 9187 spin_lock_irqsave(&phba->hbalock, flags); 9188 for (i = 0; i < psli->num_rings; i++) { 9189 pring = &psli->ring[i]; 9190 prev_pring_flag = pring->flag; 9191 /* Only slow rings */ 9192 if (pring->ringno == LPFC_ELS_RING) { 9193 pring->flag |= LPFC_DEFERRED_RING_EVENT; 9194 /* Set the lpfc data pending flag */ 9195 set_bit(LPFC_DATA_READY, &phba->data_flags); 9196 } 9197 /* 9198 * Error everything on the txq since these iocbs have not been 9199 * given to the FW yet. 9200 */ 9201 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 9202 if (iocb->vport != vport) 9203 continue; 9204 list_move_tail(&iocb->list, &completions); 9205 } 9206 9207 /* Next issue ABTS for everything on the txcmplq */ 9208 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, 9209 list) { 9210 if (iocb->vport != vport) 9211 continue; 9212 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 9213 } 9214 9215 pring->flag = prev_pring_flag; 9216 } 9217 9218 spin_unlock_irqrestore(&phba->hbalock, flags); 9219 9220 /* Cancel all the IOCBs from the completions list */ 9221 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 9222 IOERR_SLI_DOWN); 9223 return 1; 9224 } 9225 9226 /** 9227 * lpfc_sli_hba_down - Resource cleanup function for the HBA 9228 * @phba: Pointer to HBA context object. 9229 * 9230 * This function cleans up all iocb, buffers, mailbox commands 9231 * while shutting down the HBA. This function is called with no 9232 * lock held and always returns 1. 9233 * This function does the following to cleanup driver resources: 9234 * - Free discovery resources for each virtual port 9235 * - Cleanup any pending fabric iocbs 9236 * - Iterate through the iocb txq and free each entry 9237 * in the list. 9238 * - Free up any buffer posted to the HBA 9239 * - Free mailbox commands in the mailbox queue. 9240 **/ 9241 int 9242 lpfc_sli_hba_down(struct lpfc_hba *phba) 9243 { 9244 LIST_HEAD(completions); 9245 struct lpfc_sli *psli = &phba->sli; 9246 struct lpfc_sli_ring *pring; 9247 struct lpfc_dmabuf *buf_ptr; 9248 unsigned long flags = 0; 9249 int i; 9250 9251 /* Shutdown the mailbox command sub-system */ 9252 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT); 9253 9254 lpfc_hba_down_prep(phba); 9255 9256 lpfc_fabric_abort_hba(phba); 9257 9258 spin_lock_irqsave(&phba->hbalock, flags); 9259 for (i = 0; i < psli->num_rings; i++) { 9260 pring = &psli->ring[i]; 9261 /* Only slow rings */ 9262 if (pring->ringno == LPFC_ELS_RING) { 9263 pring->flag |= LPFC_DEFERRED_RING_EVENT; 9264 /* Set the lpfc data pending flag */ 9265 set_bit(LPFC_DATA_READY, &phba->data_flags); 9266 } 9267 9268 /* 9269 * Error everything on the txq since these iocbs have not been 9270 * given to the FW yet. 
9271 */ 9272 list_splice_init(&pring->txq, &completions); 9273 } 9274 spin_unlock_irqrestore(&phba->hbalock, flags); 9275 9276 /* Cancel all the IOCBs from the completions list */ 9277 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 9278 IOERR_SLI_DOWN); 9279 9280 spin_lock_irqsave(&phba->hbalock, flags); 9281 list_splice_init(&phba->elsbuf, &completions); 9282 phba->elsbuf_cnt = 0; 9283 phba->elsbuf_prev_cnt = 0; 9284 spin_unlock_irqrestore(&phba->hbalock, flags); 9285 9286 while (!list_empty(&completions)) { 9287 list_remove_head(&completions, buf_ptr, 9288 struct lpfc_dmabuf, list); 9289 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 9290 kfree(buf_ptr); 9291 } 9292 9293 /* Return any active mbox cmds */ 9294 del_timer_sync(&psli->mbox_tmo); 9295 9296 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 9297 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 9298 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 9299 9300 return 1; 9301 } 9302 9303 /** 9304 * lpfc_sli_pcimem_bcopy - SLI memory copy function 9305 * @srcp: Source memory pointer. 9306 * @destp: Destination memory pointer. 9307 * @cnt: Number of words required to be copied. 9308 * 9309 * This function is used for copying data between driver memory 9310 * and the SLI memory. This function also changes the endianness 9311 * of each word if native endianness is different from SLI 9312 * endianness. This function can be called with or without 9313 * lock. 9314 **/ 9315 void 9316 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) 9317 { 9318 uint32_t *src = srcp; 9319 uint32_t *dest = destp; 9320 uint32_t ldata; 9321 int i; 9322 9323 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { 9324 ldata = *src; 9325 ldata = le32_to_cpu(ldata); 9326 *dest = ldata; 9327 src++; 9328 dest++; 9329 } 9330 } 9331 9332 9333 /** 9334 * lpfc_sli_bemem_bcopy - SLI memory copy function 9335 * @srcp: Source memory pointer. 9336 * @destp: Destination memory pointer. 9337 * @cnt: Number of words required to be copied. 9338 * 9339 * This function is used for copying data between a data structure 9340 * with big endian representation to local endianness. 9341 * This function can be called with or without lock. 9342 **/ 9343 void 9344 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt) 9345 { 9346 uint32_t *src = srcp; 9347 uint32_t *dest = destp; 9348 uint32_t ldata; 9349 int i; 9350 9351 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) { 9352 ldata = *src; 9353 ldata = be32_to_cpu(ldata); 9354 *dest = ldata; 9355 src++; 9356 dest++; 9357 } 9358 } 9359 9360 /** 9361 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq 9362 * @phba: Pointer to HBA context object. 9363 * @pring: Pointer to driver SLI ring object. 9364 * @mp: Pointer to driver buffer object. 9365 * 9366 * This function is called with no lock held. 9367 * It always return zero after adding the buffer to the postbufq 9368 * buffer list. 9369 **/ 9370 int 9371 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9372 struct lpfc_dmabuf *mp) 9373 { 9374 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up 9375 later */ 9376 spin_lock_irq(&phba->hbalock); 9377 list_add_tail(&mp->list, &pring->postbufq); 9378 pring->postbufq_cnt++; 9379 spin_unlock_irq(&phba->hbalock); 9380 return 0; 9381 } 9382 9383 /** 9384 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer 9385 * @phba: Pointer to HBA context object. 
9386 * 9387 * When HBQ is enabled, buffers are searched based on tags. This function 9388 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The 9389 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag 9390 * does not conflict with tags of buffer posted for unsolicited events. 9391 * The function returns the allocated tag. The function is called with 9392 * no locks held. 9393 **/ 9394 uint32_t 9395 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba) 9396 { 9397 spin_lock_irq(&phba->hbalock); 9398 phba->buffer_tag_count++; 9399 /* 9400 * Always set the QUE_BUFTAG_BIT to distiguish between 9401 * a tag assigned by HBQ. 9402 */ 9403 phba->buffer_tag_count |= QUE_BUFTAG_BIT; 9404 spin_unlock_irq(&phba->hbalock); 9405 return phba->buffer_tag_count; 9406 } 9407 9408 /** 9409 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag 9410 * @phba: Pointer to HBA context object. 9411 * @pring: Pointer to driver SLI ring object. 9412 * @tag: Buffer tag. 9413 * 9414 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq 9415 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX 9416 * iocb is posted to the response ring with the tag of the buffer. 9417 * This function searches the pring->postbufq list using the tag 9418 * to find buffer associated with CMD_IOCB_RET_XRI64_CX 9419 * iocb. If the buffer is found then lpfc_dmabuf object of the 9420 * buffer is returned to the caller else NULL is returned. 9421 * This function is called with no lock held. 9422 **/ 9423 struct lpfc_dmabuf * 9424 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9425 uint32_t tag) 9426 { 9427 struct lpfc_dmabuf *mp, *next_mp; 9428 struct list_head *slp = &pring->postbufq; 9429 9430 /* Search postbufq, from the beginning, looking for a match on tag */ 9431 spin_lock_irq(&phba->hbalock); 9432 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 9433 if (mp->buffer_tag == tag) { 9434 list_del_init(&mp->list); 9435 pring->postbufq_cnt--; 9436 spin_unlock_irq(&phba->hbalock); 9437 return mp; 9438 } 9439 } 9440 9441 spin_unlock_irq(&phba->hbalock); 9442 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9443 "0402 Cannot find virtual addr for buffer tag on " 9444 "ring %d Data x%lx x%p x%p x%x\n", 9445 pring->ringno, (unsigned long) tag, 9446 slp->next, slp->prev, pring->postbufq_cnt); 9447 9448 return NULL; 9449 } 9450 9451 /** 9452 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events 9453 * @phba: Pointer to HBA context object. 9454 * @pring: Pointer to driver SLI ring object. 9455 * @phys: DMA address of the buffer. 9456 * 9457 * This function searches the buffer list using the dma_address 9458 * of unsolicited event to find the driver's lpfc_dmabuf object 9459 * corresponding to the dma_address. The function returns the 9460 * lpfc_dmabuf object if a buffer is found else it returns NULL. 9461 * This function is called by the ct and els unsolicited event 9462 * handlers to get the buffer associated with the unsolicited 9463 * event. 9464 * 9465 * This function is called with no lock held. 
9466 **/ 9467 struct lpfc_dmabuf * 9468 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9469 dma_addr_t phys) 9470 { 9471 struct lpfc_dmabuf *mp, *next_mp; 9472 struct list_head *slp = &pring->postbufq; 9473 9474 /* Search postbufq, from the beginning, looking for a match on phys */ 9475 spin_lock_irq(&phba->hbalock); 9476 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 9477 if (mp->phys == phys) { 9478 list_del_init(&mp->list); 9479 pring->postbufq_cnt--; 9480 spin_unlock_irq(&phba->hbalock); 9481 return mp; 9482 } 9483 } 9484 9485 spin_unlock_irq(&phba->hbalock); 9486 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9487 "0410 Cannot find virtual addr for mapped buf on " 9488 "ring %d Data x%llx x%p x%p x%x\n", 9489 pring->ringno, (unsigned long long)phys, 9490 slp->next, slp->prev, pring->postbufq_cnt); 9491 return NULL; 9492 } 9493 9494 /** 9495 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs 9496 * @phba: Pointer to HBA context object. 9497 * @cmdiocb: Pointer to driver command iocb object. 9498 * @rspiocb: Pointer to driver response iocb object. 9499 * 9500 * This function is the completion handler for the abort iocbs for 9501 * ELS commands. This function is called from the ELS ring event 9502 * handler with no lock held. This function frees memory resources 9503 * associated with the abort iocb. 9504 **/ 9505 static void 9506 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 9507 struct lpfc_iocbq *rspiocb) 9508 { 9509 IOCB_t *irsp = &rspiocb->iocb; 9510 uint16_t abort_iotag, abort_context; 9511 struct lpfc_iocbq *abort_iocb = NULL; 9512 9513 if (irsp->ulpStatus) { 9514 9515 /* 9516 * Assume that the port already completed and returned, or 9517 * will return the iocb. Just Log the message. 9518 */ 9519 abort_context = cmdiocb->iocb.un.acxri.abortContextTag; 9520 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 9521 9522 spin_lock_irq(&phba->hbalock); 9523 if (phba->sli_rev < LPFC_SLI_REV4) { 9524 if (abort_iotag != 0 && 9525 abort_iotag <= phba->sli.last_iotag) 9526 abort_iocb = 9527 phba->sli.iocbq_lookup[abort_iotag]; 9528 } else 9529 /* For sli4 the abort_tag is the XRI, 9530 * so the abort routine puts the iotag of the iocb 9531 * being aborted in the context field of the abort 9532 * IOCB. 9533 */ 9534 abort_iocb = phba->sli.iocbq_lookup[abort_context]; 9535 9536 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, 9537 "0327 Cannot abort els iocb %p " 9538 "with tag %x context %x, abort status %x, " 9539 "abort code %x\n", 9540 abort_iocb, abort_iotag, abort_context, 9541 irsp->ulpStatus, irsp->un.ulpWord[4]); 9542 9543 spin_unlock_irq(&phba->hbalock); 9544 } 9545 lpfc_sli_release_iocbq(phba, cmdiocb); 9546 return; 9547 } 9548 9549 /** 9550 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command 9551 * @phba: Pointer to HBA context object. 9552 * @cmdiocb: Pointer to driver command iocb object. 9553 * @rspiocb: Pointer to driver response iocb object. 9554 * 9555 * The function is called from SLI ring event handler with no 9556 * lock held. This function is the completion handler for ELS commands 9557 * which are aborted. The function frees memory resources used for 9558 * the aborted ELS commands. 
9559 **/ 9560 static void 9561 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 9562 struct lpfc_iocbq *rspiocb) 9563 { 9564 IOCB_t *irsp = &rspiocb->iocb; 9565 9566 /* ELS cmd tag <ulpIoTag> completes */ 9567 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9568 "0139 Ignoring ELS cmd tag x%x completion Data: " 9569 "x%x x%x x%x\n", 9570 irsp->ulpIoTag, irsp->ulpStatus, 9571 irsp->un.ulpWord[4], irsp->ulpTimeout); 9572 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) 9573 lpfc_ct_free_iocb(phba, cmdiocb); 9574 else 9575 lpfc_els_free_iocb(phba, cmdiocb); 9576 return; 9577 } 9578 9579 /** 9580 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb 9581 * @phba: Pointer to HBA context object. 9582 * @pring: Pointer to driver SLI ring object. 9583 * @cmdiocb: Pointer to driver command iocb object. 9584 * 9585 * This function issues an abort iocb for the provided command iocb down to 9586 * the port. Other than the case the outstanding command iocb is an abort 9587 * request, this function issues abort out unconditionally. This function is 9588 * called with hbalock held. The function returns 0 when it fails due to 9589 * memory allocation failure or when the command iocb is an abort request. 9590 **/ 9591 static int 9592 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9593 struct lpfc_iocbq *cmdiocb) 9594 { 9595 struct lpfc_vport *vport = cmdiocb->vport; 9596 struct lpfc_iocbq *abtsiocbp; 9597 IOCB_t *icmd = NULL; 9598 IOCB_t *iabt = NULL; 9599 int retval; 9600 unsigned long iflags; 9601 9602 /* 9603 * There are certain command types we don't want to abort. And we 9604 * don't want to abort commands that are already in the process of 9605 * being aborted. 9606 */ 9607 icmd = &cmdiocb->iocb; 9608 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 9609 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 9610 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 9611 return 0; 9612 9613 /* issue ABTS for this IOCB based on iotag */ 9614 abtsiocbp = __lpfc_sli_get_iocbq(phba); 9615 if (abtsiocbp == NULL) 9616 return 0; 9617 9618 /* This signals the response to set the correct status 9619 * before calling the completion handler 9620 */ 9621 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 9622 9623 iabt = &abtsiocbp->iocb; 9624 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 9625 iabt->un.acxri.abortContextTag = icmd->ulpContext; 9626 if (phba->sli_rev == LPFC_SLI_REV4) { 9627 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; 9628 iabt->un.acxri.abortContextTag = cmdiocb->iotag; 9629 } 9630 else 9631 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 9632 iabt->ulpLe = 1; 9633 iabt->ulpClass = icmd->ulpClass; 9634 9635 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 9636 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx; 9637 if (cmdiocb->iocb_flag & LPFC_IO_FCP) 9638 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX; 9639 9640 if (phba->link_state >= LPFC_LINK_UP) 9641 iabt->ulpCommand = CMD_ABORT_XRI_CN; 9642 else 9643 iabt->ulpCommand = CMD_CLOSE_XRI_CN; 9644 9645 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; 9646 9647 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 9648 "0339 Abort xri x%x, original iotag x%x, " 9649 "abort cmd iotag x%x\n", 9650 iabt->un.acxri.abortIoTag, 9651 iabt->un.acxri.abortContextTag, 9652 abtsiocbp->iotag); 9653 9654 if (phba->sli_rev == LPFC_SLI_REV4) { 9655 /* Note: both hbalock and ring_lock need to be set here */ 9656 spin_lock_irqsave(&pring->ring_lock, iflags); 9657 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, 9658 abtsiocbp, 0); 9659 
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
	} else {
		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
					abtsiocbp, 0);
	}

	if (retval)
		__lpfc_sli_release_iocbq(phba, abtsiocbp);

	/*
	 * The caller of this routine should check for IOCB_ERROR
	 * and handle it properly. This routine no longer removes
	 * the iocb from the txcmplq or calls the completion handler
	 * in case of IOCB_ERROR.
	 */
	return retval;
}

/**
 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function issues an abort iocb for the provided command iocb. In case
 * of unloading, the abort iocb will not be issued to commands on the ELS
 * ring. Instead, the callback function shall be changed for those commands
 * so that nothing happens when they finish. This function is called with
 * hbalock held. The function returns 0 when the command iocb is an abort
 * request.
 **/
int
lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	int retval = IOCB_ERROR;
	IOCB_t *icmd = NULL;

	/*
	 * There are certain command types we don't want to abort. And we
	 * don't want to abort commands that are already in the process of
	 * being aborted.
	 */
	icmd = &cmdiocb->iocb;
	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
		return 0;

	/*
	 * If we're unloading, don't abort iocb on the ELS ring, but change
	 * the callback so that nothing happens when it finishes.
	 */
	if ((vport->load_flag & FC_UNLOADING) &&
	    (pring->ringno == LPFC_ELS_RING)) {
		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
		else
			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
		goto abort_iotag_exit;
	}

	/* Now, we try to issue the abort to the cmdiocb out */
	retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);

abort_iotag_exit:
	/*
	 * The caller of this routine should check for IOCB_ERROR
	 * and handle it properly. This routine no longer removes
	 * the iocb from the txcmplq or calls the completion handler
	 * in case of IOCB_ERROR.
	 */
	return retval;
}

/**
 * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function aborts all iocbs in the given ring and frees all the iocb
 * objects in txq. This function issues abort iocbs unconditionally for all
 * the iocb commands in txcmplq. The iocbs in the txcmplq are not guaranteed
 * to complete before the return of this function. The caller is not required
 * to hold any locks.
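 * Iocbs flushed from the txq are completed with IOSTAT_LOCAL_REJECT /
 * IOERR_SLI_ABORTED status.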
9743 **/ 9744 static void 9745 lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 9746 { 9747 LIST_HEAD(completions); 9748 struct lpfc_iocbq *iocb, *next_iocb; 9749 9750 if (pring->ringno == LPFC_ELS_RING) 9751 lpfc_fabric_abort_hba(phba); 9752 9753 spin_lock_irq(&phba->hbalock); 9754 9755 /* Take off all the iocbs on txq for cancelling */ 9756 list_splice_init(&pring->txq, &completions); 9757 pring->txq_cnt = 0; 9758 9759 /* Next issue ABTS for everything on the txcmplq */ 9760 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 9761 lpfc_sli_abort_iotag_issue(phba, pring, iocb); 9762 9763 spin_unlock_irq(&phba->hbalock); 9764 9765 /* Cancel all the IOCBs from the completions list */ 9766 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 9767 IOERR_SLI_ABORTED); 9768 } 9769 9770 /** 9771 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 9772 * @phba: pointer to lpfc HBA data structure. 9773 * 9774 * This routine will abort all pending and outstanding iocbs to an HBA. 9775 **/ 9776 void 9777 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) 9778 { 9779 struct lpfc_sli *psli = &phba->sli; 9780 struct lpfc_sli_ring *pring; 9781 int i; 9782 9783 for (i = 0; i < psli->num_rings; i++) { 9784 pring = &psli->ring[i]; 9785 lpfc_sli_iocb_ring_abort(phba, pring); 9786 } 9787 } 9788 9789 /** 9790 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN 9791 * @iocbq: Pointer to driver iocb object. 9792 * @vport: Pointer to driver virtual port object. 9793 * @tgt_id: SCSI ID of the target. 9794 * @lun_id: LUN ID of the scsi device. 9795 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST 9796 * 9797 * This function acts as an iocb filter for functions which abort or count 9798 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return 9799 * 0 if the filtering criteria is met for the given iocb and will return 9800 * 1 if the filtering criteria is not met. 9801 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the 9802 * given iocb is for the SCSI device specified by vport, tgt_id and 9803 * lun_id parameter. 9804 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the 9805 * given iocb is for the SCSI target specified by vport and tgt_id 9806 * parameters. 9807 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the 9808 * given iocb is for the SCSI host associated with the given vport. 9809 * This function is called with no locks held. 
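 * lpfc_sli_sum_iocb and lpfc_sli_abort_iocb below use this filter while
 * walking the iocbq_lookup array.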
9810 **/ 9811 static int 9812 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, 9813 uint16_t tgt_id, uint64_t lun_id, 9814 lpfc_ctx_cmd ctx_cmd) 9815 { 9816 struct lpfc_scsi_buf *lpfc_cmd; 9817 int rc = 1; 9818 9819 if (!(iocbq->iocb_flag & LPFC_IO_FCP)) 9820 return rc; 9821 9822 if (iocbq->vport != vport) 9823 return rc; 9824 9825 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 9826 9827 if (lpfc_cmd->pCmd == NULL) 9828 return rc; 9829 9830 switch (ctx_cmd) { 9831 case LPFC_CTX_LUN: 9832 if ((lpfc_cmd->rdata->pnode) && 9833 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && 9834 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) 9835 rc = 0; 9836 break; 9837 case LPFC_CTX_TGT: 9838 if ((lpfc_cmd->rdata->pnode) && 9839 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) 9840 rc = 0; 9841 break; 9842 case LPFC_CTX_HOST: 9843 rc = 0; 9844 break; 9845 default: 9846 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 9847 __func__, ctx_cmd); 9848 break; 9849 } 9850 9851 return rc; 9852 } 9853 9854 /** 9855 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending 9856 * @vport: Pointer to virtual port. 9857 * @tgt_id: SCSI ID of the target. 9858 * @lun_id: LUN ID of the scsi device. 9859 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 9860 * 9861 * This function returns number of FCP commands pending for the vport. 9862 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP 9863 * commands pending on the vport associated with SCSI device specified 9864 * by tgt_id and lun_id parameters. 9865 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP 9866 * commands pending on the vport associated with SCSI target specified 9867 * by tgt_id parameter. 9868 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP 9869 * commands pending on the vport. 9870 * This function returns the number of iocbs which satisfy the filter. 9871 * This function is called without any lock held. 9872 **/ 9873 int 9874 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 9875 lpfc_ctx_cmd ctx_cmd) 9876 { 9877 struct lpfc_hba *phba = vport->phba; 9878 struct lpfc_iocbq *iocbq; 9879 int sum, i; 9880 9881 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 9882 iocbq = phba->sli.iocbq_lookup[i]; 9883 9884 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id, 9885 ctx_cmd) == 0) 9886 sum++; 9887 } 9888 9889 return sum; 9890 } 9891 9892 /** 9893 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs 9894 * @phba: Pointer to HBA context object 9895 * @cmdiocb: Pointer to command iocb object. 9896 * @rspiocb: Pointer to response iocb object. 9897 * 9898 * This function is called when an aborted FCP iocb completes. This 9899 * function is called by the ring event handler with no lock held. 9900 * This function frees the iocb. 
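 * It logs the completion status of the ABORT_XRI_CN and then releases the
 * abort iocbq back to the pool.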
9901 **/ 9902 void 9903 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 9904 struct lpfc_iocbq *rspiocb) 9905 { 9906 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9907 "3096 ABORT_XRI_CN completing on rpi x%x " 9908 "original iotag x%x, abort cmd iotag x%x " 9909 "status 0x%x, reason 0x%x\n", 9910 cmdiocb->iocb.un.acxri.abortContextTag, 9911 cmdiocb->iocb.un.acxri.abortIoTag, 9912 cmdiocb->iotag, rspiocb->iocb.ulpStatus, 9913 rspiocb->iocb.un.ulpWord[4]); 9914 lpfc_sli_release_iocbq(phba, cmdiocb); 9915 return; 9916 } 9917 9918 /** 9919 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN 9920 * @vport: Pointer to virtual port. 9921 * @pring: Pointer to driver SLI ring object. 9922 * @tgt_id: SCSI ID of the target. 9923 * @lun_id: LUN ID of the scsi device. 9924 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 9925 * 9926 * This function sends an abort command for every SCSI command 9927 * associated with the given virtual port pending on the ring 9928 * filtered by lpfc_sli_validate_fcp_iocb function. 9929 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the 9930 * FCP iocbs associated with lun specified by tgt_id and lun_id 9931 * parameters 9932 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the 9933 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 9934 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all 9935 * FCP iocbs associated with virtual port. 9936 * This function returns number of iocbs it failed to abort. 9937 * This function is called with no locks held. 9938 **/ 9939 int 9940 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 9941 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) 9942 { 9943 struct lpfc_hba *phba = vport->phba; 9944 struct lpfc_iocbq *iocbq; 9945 struct lpfc_iocbq *abtsiocb; 9946 IOCB_t *cmd = NULL; 9947 int errcnt = 0, ret_val = 0; 9948 int i; 9949 9950 for (i = 1; i <= phba->sli.last_iotag; i++) { 9951 iocbq = phba->sli.iocbq_lookup[i]; 9952 9953 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 9954 abort_cmd) != 0) 9955 continue; 9956 9957 /* 9958 * If the iocbq is already being aborted, don't take a second 9959 * action, but do count it. 9960 */ 9961 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) 9962 continue; 9963 9964 /* issue ABTS for this IOCB based on iotag */ 9965 abtsiocb = lpfc_sli_get_iocbq(phba); 9966 if (abtsiocb == NULL) { 9967 errcnt++; 9968 continue; 9969 } 9970 9971 /* indicate the IO is being aborted by the driver. */ 9972 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; 9973 9974 cmd = &iocbq->iocb; 9975 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 9976 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 9977 if (phba->sli_rev == LPFC_SLI_REV4) 9978 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag; 9979 else 9980 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 9981 abtsiocb->iocb.ulpLe = 1; 9982 abtsiocb->iocb.ulpClass = cmd->ulpClass; 9983 abtsiocb->vport = vport; 9984 9985 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 9986 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; 9987 if (iocbq->iocb_flag & LPFC_IO_FCP) 9988 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; 9989 9990 if (lpfc_is_link_up(phba)) 9991 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 9992 else 9993 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 9994 9995 /* Setup callback routine and issue the command. 
*/ 9996 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 9997 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, 9998 abtsiocb, 0); 9999 if (ret_val == IOCB_ERROR) { 10000 lpfc_sli_release_iocbq(phba, abtsiocb); 10001 errcnt++; 10002 continue; 10003 } 10004 } 10005 10006 return errcnt; 10007 } 10008 10009 /** 10010 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler 10011 * @phba: Pointer to HBA context object. 10012 * @cmdiocbq: Pointer to command iocb. 10013 * @rspiocbq: Pointer to response iocb. 10014 * 10015 * This function is the completion handler for iocbs issued using 10016 * lpfc_sli_issue_iocb_wait function. This function is called by the 10017 * ring event handler function without any lock held. This function 10018 * can be called from both worker thread context and interrupt 10019 * context. This function also can be called from other thread which 10020 * cleans up the SLI layer objects. 10021 * This function copy the contents of the response iocb to the 10022 * response iocb memory object provided by the caller of 10023 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 10024 * sleeps for the iocb completion. 10025 **/ 10026 static void 10027 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, 10028 struct lpfc_iocbq *cmdiocbq, 10029 struct lpfc_iocbq *rspiocbq) 10030 { 10031 wait_queue_head_t *pdone_q; 10032 unsigned long iflags; 10033 struct lpfc_scsi_buf *lpfc_cmd; 10034 10035 spin_lock_irqsave(&phba->hbalock, iflags); 10036 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) { 10037 10038 /* 10039 * A time out has occurred for the iocb. If a time out 10040 * completion handler has been supplied, call it. Otherwise, 10041 * just free the iocbq. 10042 */ 10043 10044 spin_unlock_irqrestore(&phba->hbalock, iflags); 10045 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl; 10046 cmdiocbq->wait_iocb_cmpl = NULL; 10047 if (cmdiocbq->iocb_cmpl) 10048 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL); 10049 else 10050 lpfc_sli_release_iocbq(phba, cmdiocbq); 10051 return; 10052 } 10053 10054 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 10055 if (cmdiocbq->context2 && rspiocbq) 10056 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 10057 &rspiocbq->iocb, sizeof(IOCB_t)); 10058 10059 /* Set the exchange busy flag for task management commands */ 10060 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && 10061 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { 10062 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf, 10063 cur_iocbq); 10064 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; 10065 } 10066 10067 pdone_q = cmdiocbq->context_un.wait_queue; 10068 if (pdone_q) 10069 wake_up(pdone_q); 10070 spin_unlock_irqrestore(&phba->hbalock, iflags); 10071 return; 10072 } 10073 10074 /** 10075 * lpfc_chk_iocb_flg - Test IOCB flag with lock held. 10076 * @phba: Pointer to HBA context object.. 10077 * @piocbq: Pointer to command iocb. 10078 * @flag: Flag to test. 10079 * 10080 * This routine grabs the hbalock and then test the iocb_flag to 10081 * see if the passed in flag is set. 10082 * Returns: 10083 * 1 if flag is set. 10084 * 0 if flag is not set. 
 **/
static int
lpfc_chk_iocb_flg(struct lpfc_hba *phba,
		struct lpfc_iocbq *piocbq, uint32_t flag)
{
	unsigned long iflags;
	int ret;

	spin_lock_irqsave(&phba->hbalock, iflags);
	ret = piocbq->iocb_flag & flag;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;

}

/**
 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @prspiocbq: Pointer to response iocb.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the iocb to firmware and waits for the
 * iocb to complete. The iocb_cmpl field of the iocb shall be used
 * to handle iocbs which time out. If the field is NULL, the
 * function shall free the iocbq structure. If more clean up is
 * needed, the caller is expected to provide a completion function
 * that will provide the needed clean up. If the iocb command is
 * not completed within timeout seconds, the function will either
 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
 * completion function set in the iocb_cmpl field and then return
 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
 * resources if this function returns IOCB_TIMEDOUT.
 * The function waits for the iocb completion using a
 * non-interruptible wait.
 * This function will sleep while waiting for iocb completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. For the same reason, this function
 * cannot be called with interrupts disabled.
 * This function assumes that the iocb completions occur while
 * this function sleeps. So, this function cannot be called from
 * the thread which processes iocb completions for this ring.
 * This function clears the iocb_flag of the iocb object before
 * issuing the iocb and the iocb completion handler sets this
 * flag and wakes this thread when the iocb completes.
 * The contents of the response iocb will be copied to prspiocbq
 * by the completion handler when the command completes.
 * This function returns IOCB_SUCCESS on success.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
			 uint32_t ring_number,
			 struct lpfc_iocbq *piocb,
			 struct lpfc_iocbq *prspiocbq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	long timeleft, timeout_req = 0;
	int retval = IOCB_SUCCESS;
	uint32_t creg_val;
	struct lpfc_iocbq *iocb;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	unsigned long iflags;
	bool iocb_completed = true;

	/*
	 * If the caller has provided a response iocbq buffer, then context2
	 * must be NULL or it is an error.
10157 */ 10158 if (prspiocbq) { 10159 if (piocb->context2) 10160 return IOCB_ERROR; 10161 piocb->context2 = prspiocbq; 10162 } 10163 10164 piocb->wait_iocb_cmpl = piocb->iocb_cmpl; 10165 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; 10166 piocb->context_un.wait_queue = &done_q; 10167 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO); 10168 10169 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 10170 if (lpfc_readl(phba->HCregaddr, &creg_val)) 10171 return IOCB_ERROR; 10172 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 10173 writel(creg_val, phba->HCregaddr); 10174 readl(phba->HCregaddr); /* flush */ 10175 } 10176 10177 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 10178 SLI_IOCB_RET_IOCB); 10179 if (retval == IOCB_SUCCESS) { 10180 timeout_req = msecs_to_jiffies(timeout * 1000); 10181 timeleft = wait_event_timeout(done_q, 10182 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), 10183 timeout_req); 10184 spin_lock_irqsave(&phba->hbalock, iflags); 10185 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) { 10186 10187 /* 10188 * IOCB timed out. Inform the wake iocb wait 10189 * completion function and set local status 10190 */ 10191 10192 iocb_completed = false; 10193 piocb->iocb_flag |= LPFC_IO_WAKE_TMO; 10194 } 10195 spin_unlock_irqrestore(&phba->hbalock, iflags); 10196 if (iocb_completed) { 10197 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10198 "0331 IOCB wake signaled\n"); 10199 /* Note: we are not indicating if the IOCB has a success 10200 * status or not - that's for the caller to check. 10201 * IOCB_SUCCESS means just that the command was sent and 10202 * completed. Not that it completed successfully. 10203 * */ 10204 } else if (timeleft == 0) { 10205 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10206 "0338 IOCB wait timeout error - no " 10207 "wake response Data x%x\n", timeout); 10208 retval = IOCB_TIMEDOUT; 10209 } else { 10210 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10211 "0330 IOCB wake NOT set, " 10212 "Data x%x x%lx\n", 10213 timeout, (timeleft / jiffies)); 10214 retval = IOCB_TIMEDOUT; 10215 } 10216 } else if (retval == IOCB_BUSY) { 10217 if (phba->cfg_log_verbose & LOG_SLI) { 10218 list_for_each_entry(iocb, &pring->txq, list) { 10219 txq_cnt++; 10220 } 10221 list_for_each_entry(iocb, &pring->txcmplq, list) { 10222 txcmplq_cnt++; 10223 } 10224 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10225 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", 10226 phba->iocb_cnt, txq_cnt, txcmplq_cnt); 10227 } 10228 return retval; 10229 } else { 10230 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10231 "0332 IOCB wait issue failed, Data x%x\n", 10232 retval); 10233 retval = IOCB_ERROR; 10234 } 10235 10236 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 10237 if (lpfc_readl(phba->HCregaddr, &creg_val)) 10238 return IOCB_ERROR; 10239 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 10240 writel(creg_val, phba->HCregaddr); 10241 readl(phba->HCregaddr); /* flush */ 10242 } 10243 10244 if (prspiocbq) 10245 piocb->context2 = NULL; 10246 10247 piocb->context_un.wait_queue = NULL; 10248 piocb->iocb_cmpl = NULL; 10249 return retval; 10250 } 10251 10252 /** 10253 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox 10254 * @phba: Pointer to HBA context object. 10255 * @pmboxq: Pointer to driver mailbox object. 10256 * @timeout: Timeout in number of seconds. 10257 * 10258 * This function issues the mailbox to firmware and waits for the 10259 * mailbox command to complete. If the mailbox command is not 10260 * completed within timeout seconds, it returns MBX_TIMEOUT. 
 * The function waits for the mailbox completion using an
 * interruptible wait. If the thread is woken up due to a
 * signal, MBX_TIMEOUT error is returned to the caller. The caller
 * should not free the mailbox resources if this function returns
 * MBX_TIMEOUT.
 * This function will sleep while waiting for mailbox completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. For the same reason, this function
 * cannot be called with interrupts disabled.
 * This function assumes that the mailbox completion occurs while
 * this function sleeps. So, this function cannot be called from
 * the worker thread which processes mailbox completions.
 * This function is called in the context of HBA management
 * applications.
 * This function returns MBX_SUCCESS when successful.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	MAILBOX_t *mb = NULL;
	int retval;
	unsigned long flag;

	/* The caller might set context1 for extended buffer */
	if (pmboxq->context1)
		mb = (MAILBOX_t *)pmboxq->context1;

	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
	/* setup context field to pass wait_queue pointer to wake function */
	pmboxq->context1 = &done_q;

	/* now issue the command */
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
		wait_event_interruptible_timeout(done_q,
				pmboxq->mbox_flag & LPFC_MBX_WAKE,
				msecs_to_jiffies(timeout * 1000));

		spin_lock_irqsave(&phba->hbalock, flag);
		/* restore the possible extended buffer for free resource */
		pmboxq->context1 = (uint8_t *)mb;
		/*
		 * if LPFC_MBX_WAKE flag is set the mailbox is completed
		 * else do not free the resources.
		 */
		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
			retval = MBX_SUCCESS;
		} else {
			retval = MBX_TIMEOUT;
			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
		spin_unlock_irqrestore(&phba->hbalock, flag);
	} else {
		/* restore the possible extended buffer for free resource */
		pmboxq->context1 = (uint8_t *)mb;
	}

	return retval;
}

/**
 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
 * @phba: Pointer to HBA context.
 * @mbx_action: Mailbox shutdown action: LPFC_MBX_WAIT or LPFC_MBX_NO_WAIT.
 *
 * This function is called to shut down the driver's mailbox sub-system.
 * It first marks the mailbox sub-system as blocked to prevent asynchronous
 * mailbox commands from being issued off the pending mailbox command queue.
 * If the mailbox command sub-system shutdown is due to an HBA error
 * condition such as EEH or ERATT, this routine shall invoke the mailbox
 * sub-system flush routine to forcefully bring down the mailbox sub-system.
 * Otherwise, if it is due to a normal condition (such as offline or an HBA
 * function reset), this routine will wait for the outstanding mailbox
 * command to complete before invoking the mailbox sub-system flush routine
 * to gracefully bring down the mailbox sub-system.
10340 **/ 10341 void 10342 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action) 10343 { 10344 struct lpfc_sli *psli = &phba->sli; 10345 unsigned long timeout; 10346 10347 if (mbx_action == LPFC_MBX_NO_WAIT) { 10348 /* delay 100ms for port state */ 10349 msleep(100); 10350 lpfc_sli_mbox_sys_flush(phba); 10351 return; 10352 } 10353 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 10354 10355 spin_lock_irq(&phba->hbalock); 10356 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 10357 10358 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 10359 /* Determine how long we might wait for the active mailbox 10360 * command to be gracefully completed by firmware. 10361 */ 10362 if (phba->sli.mbox_active) 10363 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 10364 phba->sli.mbox_active) * 10365 1000) + jiffies; 10366 spin_unlock_irq(&phba->hbalock); 10367 10368 while (phba->sli.mbox_active) { 10369 /* Check active mailbox complete status every 2ms */ 10370 msleep(2); 10371 if (time_after(jiffies, timeout)) 10372 /* Timeout, let the mailbox flush routine to 10373 * forcefully release active mailbox command 10374 */ 10375 break; 10376 } 10377 } else 10378 spin_unlock_irq(&phba->hbalock); 10379 10380 lpfc_sli_mbox_sys_flush(phba); 10381 } 10382 10383 /** 10384 * lpfc_sli_eratt_read - read sli-3 error attention events 10385 * @phba: Pointer to HBA context. 10386 * 10387 * This function is called to read the SLI3 device error attention registers 10388 * for possible error attention events. The caller must hold the hostlock 10389 * with spin_lock_irq(). 10390 * 10391 * This function returns 1 when there is Error Attention in the Host Attention 10392 * Register and returns 0 otherwise. 10393 **/ 10394 static int 10395 lpfc_sli_eratt_read(struct lpfc_hba *phba) 10396 { 10397 uint32_t ha_copy; 10398 10399 /* Read chip Host Attention (HA) register */ 10400 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 10401 goto unplug_err; 10402 10403 if (ha_copy & HA_ERATT) { 10404 /* Read host status register to retrieve error event */ 10405 if (lpfc_sli_read_hs(phba)) 10406 goto unplug_err; 10407 10408 /* Check if there is a deferred error condition is active */ 10409 if ((HS_FFER1 & phba->work_hs) && 10410 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 10411 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { 10412 phba->hba_flag |= DEFER_ERATT; 10413 /* Clear all interrupt enable conditions */ 10414 writel(0, phba->HCregaddr); 10415 readl(phba->HCregaddr); 10416 } 10417 10418 /* Set the driver HA work bitmap */ 10419 phba->work_ha |= HA_ERATT; 10420 /* Indicate polling handles this ERATT */ 10421 phba->hba_flag |= HBA_ERATT_HANDLED; 10422 return 1; 10423 } 10424 return 0; 10425 10426 unplug_err: 10427 /* Set the driver HS work bitmap */ 10428 phba->work_hs |= UNPLUG_ERR; 10429 /* Set the driver HA work bitmap */ 10430 phba->work_ha |= HA_ERATT; 10431 /* Indicate polling handles this ERATT */ 10432 phba->hba_flag |= HBA_ERATT_HANDLED; 10433 return 1; 10434 } 10435 10436 /** 10437 * lpfc_sli4_eratt_read - read sli-4 error attention events 10438 * @phba: Pointer to HBA context. 10439 * 10440 * This function is called to read the SLI4 device error attention registers 10441 * for possible error attention events. The caller must hold the hostlock 10442 * with spin_lock_irq(). 10443 * 10444 * This function returns 1 when there is Error Attention in the Host Attention 10445 * Register and returns 0 otherwise. 
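*
* For reference, on if_type 0 ports the check below reduces to treating any
* bit that is set in a UERR status register while clear in the corresponding
* ue_mask register as an unrecoverable error, i.e.
*
*     (~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
*     (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)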
10446 **/ 10447 static int 10448 lpfc_sli4_eratt_read(struct lpfc_hba *phba) 10449 { 10450 uint32_t uerr_sta_hi, uerr_sta_lo; 10451 uint32_t if_type, portsmphr; 10452 struct lpfc_register portstat_reg; 10453 10454 /* 10455 * For now, use the SLI4 device internal unrecoverable error 10456 * registers for error attention. This can be changed later. 10457 */ 10458 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10459 switch (if_type) { 10460 case LPFC_SLI_INTF_IF_TYPE_0: 10461 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, 10462 &uerr_sta_lo) || 10463 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, 10464 &uerr_sta_hi)) { 10465 phba->work_hs |= UNPLUG_ERR; 10466 phba->work_ha |= HA_ERATT; 10467 phba->hba_flag |= HBA_ERATT_HANDLED; 10468 return 1; 10469 } 10470 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 10471 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 10472 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10473 "1423 HBA Unrecoverable error: " 10474 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 10475 "ue_mask_lo_reg=0x%x, " 10476 "ue_mask_hi_reg=0x%x\n", 10477 uerr_sta_lo, uerr_sta_hi, 10478 phba->sli4_hba.ue_mask_lo, 10479 phba->sli4_hba.ue_mask_hi); 10480 phba->work_status[0] = uerr_sta_lo; 10481 phba->work_status[1] = uerr_sta_hi; 10482 phba->work_ha |= HA_ERATT; 10483 phba->hba_flag |= HBA_ERATT_HANDLED; 10484 return 1; 10485 } 10486 break; 10487 case LPFC_SLI_INTF_IF_TYPE_2: 10488 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 10489 &portstat_reg.word0) || 10490 lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 10491 &portsmphr)){ 10492 phba->work_hs |= UNPLUG_ERR; 10493 phba->work_ha |= HA_ERATT; 10494 phba->hba_flag |= HBA_ERATT_HANDLED; 10495 return 1; 10496 } 10497 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { 10498 phba->work_status[0] = 10499 readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 10500 phba->work_status[1] = 10501 readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 10502 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10503 "2885 Port Status Event: " 10504 "port status reg 0x%x, " 10505 "port smphr reg 0x%x, " 10506 "error 1=0x%x, error 2=0x%x\n", 10507 portstat_reg.word0, 10508 portsmphr, 10509 phba->work_status[0], 10510 phba->work_status[1]); 10511 phba->work_ha |= HA_ERATT; 10512 phba->hba_flag |= HBA_ERATT_HANDLED; 10513 return 1; 10514 } 10515 break; 10516 case LPFC_SLI_INTF_IF_TYPE_1: 10517 default: 10518 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10519 "2886 HBA Error Attention on unsupported " 10520 "if type %d.", if_type); 10521 return 1; 10522 } 10523 10524 return 0; 10525 } 10526 10527 /** 10528 * lpfc_sli_check_eratt - check error attention events 10529 * @phba: Pointer to HBA context. 10530 * 10531 * This function is called from timer soft interrupt context to check HBA's 10532 * error attention register bit for error attention events. 10533 * 10534 * This function returns 1 when there is Error Attention in the Host Attention 10535 * Register and returns 0 otherwise. 10536 **/ 10537 int 10538 lpfc_sli_check_eratt(struct lpfc_hba *phba) 10539 { 10540 uint32_t ha_copy; 10541 10542 /* If somebody is waiting to handle an eratt, don't process it 10543 * here. The brdkill function will do this. 
10544 */ 10545 if (phba->link_flag & LS_IGNORE_ERATT) 10546 return 0; 10547 10548 /* Check if interrupt handler handles this ERATT */ 10549 spin_lock_irq(&phba->hbalock); 10550 if (phba->hba_flag & HBA_ERATT_HANDLED) { 10551 /* Interrupt handler has handled ERATT */ 10552 spin_unlock_irq(&phba->hbalock); 10553 return 0; 10554 } 10555 10556 /* 10557 * If there is deferred error attention, do not check for error 10558 * attention 10559 */ 10560 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 10561 spin_unlock_irq(&phba->hbalock); 10562 return 0; 10563 } 10564 10565 /* If PCI channel is offline, don't process it */ 10566 if (unlikely(pci_channel_offline(phba->pcidev))) { 10567 spin_unlock_irq(&phba->hbalock); 10568 return 0; 10569 } 10570 10571 switch (phba->sli_rev) { 10572 case LPFC_SLI_REV2: 10573 case LPFC_SLI_REV3: 10574 /* Read chip Host Attention (HA) register */ 10575 ha_copy = lpfc_sli_eratt_read(phba); 10576 break; 10577 case LPFC_SLI_REV4: 10578 /* Read device Uncoverable Error (UERR) registers */ 10579 ha_copy = lpfc_sli4_eratt_read(phba); 10580 break; 10581 default: 10582 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10583 "0299 Invalid SLI revision (%d)\n", 10584 phba->sli_rev); 10585 ha_copy = 0; 10586 break; 10587 } 10588 spin_unlock_irq(&phba->hbalock); 10589 10590 return ha_copy; 10591 } 10592 10593 /** 10594 * lpfc_intr_state_check - Check device state for interrupt handling 10595 * @phba: Pointer to HBA context. 10596 * 10597 * This inline routine checks whether a device or its PCI slot is in a state 10598 * that the interrupt should be handled. 10599 * 10600 * This function returns 0 if the device or the PCI slot is in a state that 10601 * interrupt should be handled, otherwise -EIO. 10602 */ 10603 static inline int 10604 lpfc_intr_state_check(struct lpfc_hba *phba) 10605 { 10606 /* If the pci channel is offline, ignore all the interrupts */ 10607 if (unlikely(pci_channel_offline(phba->pcidev))) 10608 return -EIO; 10609 10610 /* Update device level interrupt statistics */ 10611 phba->sli.slistat.sli_intr++; 10612 10613 /* Ignore all interrupts during initialization. */ 10614 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 10615 return -EIO; 10616 10617 return 0; 10618 } 10619 10620 /** 10621 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device 10622 * @irq: Interrupt number. 10623 * @dev_id: The device context pointer. 10624 * 10625 * This function is directly called from the PCI layer as an interrupt 10626 * service routine when device with SLI-3 interface spec is enabled with 10627 * MSI-X multi-message interrupt mode and there are slow-path events in 10628 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 10629 * interrupt mode, this function is called as part of the device-level 10630 * interrupt handler. When the PCI slot is in error recovery or the HBA 10631 * is undergoing initialization, the interrupt handler will not process 10632 * the interrupt. The link attention and ELS ring attention events are 10633 * handled by the worker thread. The interrupt handler signals the worker 10634 * thread and returns for these events. This function is called without 10635 * any lock held. It gets the hbalock to access and update SLI data 10636 * structures. 10637 * 10638 * This function returns IRQ_HANDLED when interrupt is handled else it 10639 * returns IRQ_NONE. 
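*
* For orientation, a handler like this is registered with the kernel during
* the driver's interrupt setup (sketch only; the msix_vector variable and the
* "lpfc:sp" name string are illustrative placeholders, not the actual setup
* code, which lives elsewhere in the driver):
*
*     rc = request_irq(msix_vector, lpfc_sli_sp_intr_handler, 0,
*                      "lpfc:sp", phba);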
10640 **/ 10641 irqreturn_t 10642 lpfc_sli_sp_intr_handler(int irq, void *dev_id) 10643 { 10644 struct lpfc_hba *phba; 10645 uint32_t ha_copy, hc_copy; 10646 uint32_t work_ha_copy; 10647 unsigned long status; 10648 unsigned long iflag; 10649 uint32_t control; 10650 10651 MAILBOX_t *mbox, *pmbox; 10652 struct lpfc_vport *vport; 10653 struct lpfc_nodelist *ndlp; 10654 struct lpfc_dmabuf *mp; 10655 LPFC_MBOXQ_t *pmb; 10656 int rc; 10657 10658 /* 10659 * Get the driver's phba structure from the dev_id and 10660 * assume the HBA is not interrupting. 10661 */ 10662 phba = (struct lpfc_hba *)dev_id; 10663 10664 if (unlikely(!phba)) 10665 return IRQ_NONE; 10666 10667 /* 10668 * Stuff needs to be attented to when this function is invoked as an 10669 * individual interrupt handler in MSI-X multi-message interrupt mode 10670 */ 10671 if (phba->intr_type == MSIX) { 10672 /* Check device state for handling interrupt */ 10673 if (lpfc_intr_state_check(phba)) 10674 return IRQ_NONE; 10675 /* Need to read HA REG for slow-path events */ 10676 spin_lock_irqsave(&phba->hbalock, iflag); 10677 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 10678 goto unplug_error; 10679 /* If somebody is waiting to handle an eratt don't process it 10680 * here. The brdkill function will do this. 10681 */ 10682 if (phba->link_flag & LS_IGNORE_ERATT) 10683 ha_copy &= ~HA_ERATT; 10684 /* Check the need for handling ERATT in interrupt handler */ 10685 if (ha_copy & HA_ERATT) { 10686 if (phba->hba_flag & HBA_ERATT_HANDLED) 10687 /* ERATT polling has handled ERATT */ 10688 ha_copy &= ~HA_ERATT; 10689 else 10690 /* Indicate interrupt handler handles ERATT */ 10691 phba->hba_flag |= HBA_ERATT_HANDLED; 10692 } 10693 10694 /* 10695 * If there is deferred error attention, do not check for any 10696 * interrupt. 10697 */ 10698 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 10699 spin_unlock_irqrestore(&phba->hbalock, iflag); 10700 return IRQ_NONE; 10701 } 10702 10703 /* Clear up only attention source related to slow-path */ 10704 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 10705 goto unplug_error; 10706 10707 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | 10708 HC_LAINT_ENA | HC_ERINT_ENA), 10709 phba->HCregaddr); 10710 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 10711 phba->HAregaddr); 10712 writel(hc_copy, phba->HCregaddr); 10713 readl(phba->HAregaddr); /* flush */ 10714 spin_unlock_irqrestore(&phba->hbalock, iflag); 10715 } else 10716 ha_copy = phba->ha_copy; 10717 10718 work_ha_copy = ha_copy & phba->work_ha_mask; 10719 10720 if (work_ha_copy) { 10721 if (work_ha_copy & HA_LATT) { 10722 if (phba->sli.sli_flag & LPFC_PROCESS_LA) { 10723 /* 10724 * Turn off Link Attention interrupts 10725 * until CLEAR_LA done 10726 */ 10727 spin_lock_irqsave(&phba->hbalock, iflag); 10728 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 10729 if (lpfc_readl(phba->HCregaddr, &control)) 10730 goto unplug_error; 10731 control &= ~HC_LAINT_ENA; 10732 writel(control, phba->HCregaddr); 10733 readl(phba->HCregaddr); /* flush */ 10734 spin_unlock_irqrestore(&phba->hbalock, iflag); 10735 } 10736 else 10737 work_ha_copy &= ~HA_LATT; 10738 } 10739 10740 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) { 10741 /* 10742 * Turn off Slow Rings interrupts, LPFC_ELS_RING is 10743 * the only slow ring. 
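*
* Each ring owns a 4-bit nibble of attention bits in the HA register, so
* the ELS ring's receive bits are isolated below by masking with
* HA_RXMASK << (4 * LPFC_ELS_RING) and shifting the result back down.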
10744 */ 10745 status = (work_ha_copy & 10746 (HA_RXMASK << (4*LPFC_ELS_RING))); 10747 status >>= (4*LPFC_ELS_RING); 10748 if (status & HA_RXMASK) { 10749 spin_lock_irqsave(&phba->hbalock, iflag); 10750 if (lpfc_readl(phba->HCregaddr, &control)) 10751 goto unplug_error; 10752 10753 lpfc_debugfs_slow_ring_trc(phba, 10754 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", 10755 control, status, 10756 (uint32_t)phba->sli.slistat.sli_intr); 10757 10758 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { 10759 lpfc_debugfs_slow_ring_trc(phba, 10760 "ISR Disable ring:" 10761 "pwork:x%x hawork:x%x wait:x%x", 10762 phba->work_ha, work_ha_copy, 10763 (uint32_t)((unsigned long) 10764 &phba->work_waitq)); 10765 10766 control &= 10767 ~(HC_R0INT_ENA << LPFC_ELS_RING); 10768 writel(control, phba->HCregaddr); 10769 readl(phba->HCregaddr); /* flush */ 10770 } 10771 else { 10772 lpfc_debugfs_slow_ring_trc(phba, 10773 "ISR slow ring: pwork:" 10774 "x%x hawork:x%x wait:x%x", 10775 phba->work_ha, work_ha_copy, 10776 (uint32_t)((unsigned long) 10777 &phba->work_waitq)); 10778 } 10779 spin_unlock_irqrestore(&phba->hbalock, iflag); 10780 } 10781 } 10782 spin_lock_irqsave(&phba->hbalock, iflag); 10783 if (work_ha_copy & HA_ERATT) { 10784 if (lpfc_sli_read_hs(phba)) 10785 goto unplug_error; 10786 /* 10787 * Check if there is a deferred error condition 10788 * is active 10789 */ 10790 if ((HS_FFER1 & phba->work_hs) && 10791 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 10792 HS_FFER6 | HS_FFER7 | HS_FFER8) & 10793 phba->work_hs)) { 10794 phba->hba_flag |= DEFER_ERATT; 10795 /* Clear all interrupt enable conditions */ 10796 writel(0, phba->HCregaddr); 10797 readl(phba->HCregaddr); 10798 } 10799 } 10800 10801 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 10802 pmb = phba->sli.mbox_active; 10803 pmbox = &pmb->u.mb; 10804 mbox = phba->mbox; 10805 vport = pmb->vport; 10806 10807 /* First check out the status word */ 10808 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); 10809 if (pmbox->mbxOwner != OWN_HOST) { 10810 spin_unlock_irqrestore(&phba->hbalock, iflag); 10811 /* 10812 * Stray Mailbox Interrupt, mbxCommand <cmd> 10813 * mbxStatus <status> 10814 */ 10815 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 10816 LOG_SLI, 10817 "(%d):0304 Stray Mailbox " 10818 "Interrupt mbxCommand x%x " 10819 "mbxStatus x%x\n", 10820 (vport ? vport->vpi : 0), 10821 pmbox->mbxCommand, 10822 pmbox->mbxStatus); 10823 /* clear mailbox attention bit */ 10824 work_ha_copy &= ~HA_MBATT; 10825 } else { 10826 phba->sli.mbox_active = NULL; 10827 spin_unlock_irqrestore(&phba->hbalock, iflag); 10828 phba->last_completion_time = jiffies; 10829 del_timer(&phba->sli.mbox_tmo); 10830 if (pmb->mbox_cmpl) { 10831 lpfc_sli_pcimem_bcopy(mbox, pmbox, 10832 MAILBOX_CMD_SIZE); 10833 if (pmb->out_ext_byte_len && 10834 pmb->context2) 10835 lpfc_sli_pcimem_bcopy( 10836 phba->mbox_ext, 10837 pmb->context2, 10838 pmb->out_ext_byte_len); 10839 } 10840 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 10841 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 10842 10843 lpfc_debugfs_disc_trc(vport, 10844 LPFC_DISC_TRC_MBOX_VPORT, 10845 "MBOX dflt rpi: : " 10846 "status:x%x rpi:x%x", 10847 (uint32_t)pmbox->mbxStatus, 10848 pmbox->un.varWords[0], 0); 10849 10850 if (!pmbox->mbxStatus) { 10851 mp = (struct lpfc_dmabuf *) 10852 (pmb->context1); 10853 ndlp = (struct lpfc_nodelist *) 10854 pmb->context2; 10855 10856 /* Reg_LOGIN of dflt RPI was 10857 * successful. new lets get 10858 * rid of the RPI using the 10859 * same mbox buffer. 
10860 */ 10861 lpfc_unreg_login(phba, 10862 vport->vpi, 10863 pmbox->un.varWords[0], 10864 pmb); 10865 pmb->mbox_cmpl = 10866 lpfc_mbx_cmpl_dflt_rpi; 10867 pmb->context1 = mp; 10868 pmb->context2 = ndlp; 10869 pmb->vport = vport; 10870 rc = lpfc_sli_issue_mbox(phba, 10871 pmb, 10872 MBX_NOWAIT); 10873 if (rc != MBX_BUSY) 10874 lpfc_printf_log(phba, 10875 KERN_ERR, 10876 LOG_MBOX | LOG_SLI, 10877 "0350 rc should have" 10878 "been MBX_BUSY\n"); 10879 if (rc != MBX_NOT_FINISHED) 10880 goto send_current_mbox; 10881 } 10882 } 10883 spin_lock_irqsave( 10884 &phba->pport->work_port_lock, 10885 iflag); 10886 phba->pport->work_port_events &= 10887 ~WORKER_MBOX_TMO; 10888 spin_unlock_irqrestore( 10889 &phba->pport->work_port_lock, 10890 iflag); 10891 lpfc_mbox_cmpl_put(phba, pmb); 10892 } 10893 } else 10894 spin_unlock_irqrestore(&phba->hbalock, iflag); 10895 10896 if ((work_ha_copy & HA_MBATT) && 10897 (phba->sli.mbox_active == NULL)) { 10898 send_current_mbox: 10899 /* Process next mailbox command if there is one */ 10900 do { 10901 rc = lpfc_sli_issue_mbox(phba, NULL, 10902 MBX_NOWAIT); 10903 } while (rc == MBX_NOT_FINISHED); 10904 if (rc != MBX_SUCCESS) 10905 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 10906 LOG_SLI, "0349 rc should be " 10907 "MBX_SUCCESS\n"); 10908 } 10909 10910 spin_lock_irqsave(&phba->hbalock, iflag); 10911 phba->work_ha |= work_ha_copy; 10912 spin_unlock_irqrestore(&phba->hbalock, iflag); 10913 lpfc_worker_wake_up(phba); 10914 } 10915 return IRQ_HANDLED; 10916 unplug_error: 10917 spin_unlock_irqrestore(&phba->hbalock, iflag); 10918 return IRQ_HANDLED; 10919 10920 } /* lpfc_sli_sp_intr_handler */ 10921 10922 /** 10923 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device. 10924 * @irq: Interrupt number. 10925 * @dev_id: The device context pointer. 10926 * 10927 * This function is directly called from the PCI layer as an interrupt 10928 * service routine when device with SLI-3 interface spec is enabled with 10929 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 10930 * ring event in the HBA. However, when the device is enabled with either 10931 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 10932 * device-level interrupt handler. When the PCI slot is in error recovery 10933 * or the HBA is undergoing initialization, the interrupt handler will not 10934 * process the interrupt. The SCSI FCP fast-path ring event are handled in 10935 * the intrrupt context. This function is called without any lock held. 10936 * It gets the hbalock to access and update SLI data structures. 10937 * 10938 * This function returns IRQ_HANDLED when interrupt is handled else it 10939 * returns IRQ_NONE. 10940 **/ 10941 irqreturn_t 10942 lpfc_sli_fp_intr_handler(int irq, void *dev_id) 10943 { 10944 struct lpfc_hba *phba; 10945 uint32_t ha_copy; 10946 unsigned long status; 10947 unsigned long iflag; 10948 10949 /* Get the driver's phba structure from the dev_id and 10950 * assume the HBA is not interrupting. 
10951 */ 10952 phba = (struct lpfc_hba *) dev_id; 10953 10954 if (unlikely(!phba)) 10955 return IRQ_NONE; 10956 10957 /* 10958 * Stuff needs to be attented to when this function is invoked as an 10959 * individual interrupt handler in MSI-X multi-message interrupt mode 10960 */ 10961 if (phba->intr_type == MSIX) { 10962 /* Check device state for handling interrupt */ 10963 if (lpfc_intr_state_check(phba)) 10964 return IRQ_NONE; 10965 /* Need to read HA REG for FCP ring and other ring events */ 10966 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 10967 return IRQ_HANDLED; 10968 /* Clear up only attention source related to fast-path */ 10969 spin_lock_irqsave(&phba->hbalock, iflag); 10970 /* 10971 * If there is deferred error attention, do not check for 10972 * any interrupt. 10973 */ 10974 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 10975 spin_unlock_irqrestore(&phba->hbalock, iflag); 10976 return IRQ_NONE; 10977 } 10978 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 10979 phba->HAregaddr); 10980 readl(phba->HAregaddr); /* flush */ 10981 spin_unlock_irqrestore(&phba->hbalock, iflag); 10982 } else 10983 ha_copy = phba->ha_copy; 10984 10985 /* 10986 * Process all events on FCP ring. Take the optimized path for FCP IO. 10987 */ 10988 ha_copy &= ~(phba->work_ha_mask); 10989 10990 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 10991 status >>= (4*LPFC_FCP_RING); 10992 if (status & HA_RXMASK) 10993 lpfc_sli_handle_fast_ring_event(phba, 10994 &phba->sli.ring[LPFC_FCP_RING], 10995 status); 10996 10997 if (phba->cfg_multi_ring_support == 2) { 10998 /* 10999 * Process all events on extra ring. Take the optimized path 11000 * for extra ring IO. 11001 */ 11002 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 11003 status >>= (4*LPFC_EXTRA_RING); 11004 if (status & HA_RXMASK) { 11005 lpfc_sli_handle_fast_ring_event(phba, 11006 &phba->sli.ring[LPFC_EXTRA_RING], 11007 status); 11008 } 11009 } 11010 return IRQ_HANDLED; 11011 } /* lpfc_sli_fp_intr_handler */ 11012 11013 /** 11014 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device 11015 * @irq: Interrupt number. 11016 * @dev_id: The device context pointer. 11017 * 11018 * This function is the HBA device-level interrupt handler to device with 11019 * SLI-3 interface spec, called from the PCI layer when either MSI or 11020 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which 11021 * requires driver attention. This function invokes the slow-path interrupt 11022 * attention handling function and fast-path interrupt attention handling 11023 * function in turn to process the relevant HBA attention events. This 11024 * function is called without any lock held. It gets the hbalock to access 11025 * and update SLI data structures. 11026 * 11027 * This function returns IRQ_HANDLED when interrupt is handled, else it 11028 * returns IRQ_NONE. 11029 **/ 11030 irqreturn_t 11031 lpfc_sli_intr_handler(int irq, void *dev_id) 11032 { 11033 struct lpfc_hba *phba; 11034 irqreturn_t sp_irq_rc, fp_irq_rc; 11035 unsigned long status1, status2; 11036 uint32_t hc_copy; 11037 11038 /* 11039 * Get the driver's phba structure from the dev_id and 11040 * assume the HBA is not interrupting. 
11041 */ 11042 phba = (struct lpfc_hba *) dev_id; 11043 11044 if (unlikely(!phba)) 11045 return IRQ_NONE; 11046 11047 /* Check device state for handling interrupt */ 11048 if (lpfc_intr_state_check(phba)) 11049 return IRQ_NONE; 11050 11051 spin_lock(&phba->hbalock); 11052 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { 11053 spin_unlock(&phba->hbalock); 11054 return IRQ_HANDLED; 11055 } 11056 11057 if (unlikely(!phba->ha_copy)) { 11058 spin_unlock(&phba->hbalock); 11059 return IRQ_NONE; 11060 } else if (phba->ha_copy & HA_ERATT) { 11061 if (phba->hba_flag & HBA_ERATT_HANDLED) 11062 /* ERATT polling has handled ERATT */ 11063 phba->ha_copy &= ~HA_ERATT; 11064 else 11065 /* Indicate interrupt handler handles ERATT */ 11066 phba->hba_flag |= HBA_ERATT_HANDLED; 11067 } 11068 11069 /* 11070 * If there is deferred error attention, do not check for any interrupt. 11071 */ 11072 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 11073 spin_unlock(&phba->hbalock); 11074 return IRQ_NONE; 11075 } 11076 11077 /* Clear attention sources except link and error attentions */ 11078 if (lpfc_readl(phba->HCregaddr, &hc_copy)) { 11079 spin_unlock(&phba->hbalock); 11080 return IRQ_HANDLED; 11081 } 11082 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA 11083 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), 11084 phba->HCregaddr); 11085 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 11086 writel(hc_copy, phba->HCregaddr); 11087 readl(phba->HAregaddr); /* flush */ 11088 spin_unlock(&phba->hbalock); 11089 11090 /* 11091 * Invokes slow-path host attention interrupt handling as appropriate. 11092 */ 11093 11094 /* status of events with mailbox and link attention */ 11095 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); 11096 11097 /* status of events with ELS ring */ 11098 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 11099 status2 >>= (4*LPFC_ELS_RING); 11100 11101 if (status1 || (status2 & HA_RXMASK)) 11102 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); 11103 else 11104 sp_irq_rc = IRQ_NONE; 11105 11106 /* 11107 * Invoke fast-path host attention interrupt handling as appropriate. 11108 */ 11109 11110 /* status of events with FCP ring */ 11111 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 11112 status1 >>= (4*LPFC_FCP_RING); 11113 11114 /* status of events with extra ring */ 11115 if (phba->cfg_multi_ring_support == 2) { 11116 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 11117 status2 >>= (4*LPFC_EXTRA_RING); 11118 } else 11119 status2 = 0; 11120 11121 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 11122 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); 11123 else 11124 fp_irq_rc = IRQ_NONE; 11125 11126 /* Return device-level interrupt handling status */ 11127 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 11128 } /* lpfc_sli_intr_handler */ 11129 11130 /** 11131 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event 11132 * @phba: pointer to lpfc hba data structure. 11133 * 11134 * This routine is invoked by the worker thread to process all the pending 11135 * SLI4 FCP abort XRI events. 
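*
* The handler below follows a simple drain pattern: clear the
* FCP_XRI_ABORT_EVENT flag, then repeatedly pop one event from
* sp_fcp_xri_aborted_work_queue under hbalock, drop the lock while calling
* lpfc_sli4_fcp_xri_aborted() for that event, and release the event back to
* the free pool.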
11136 **/ 11137 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba) 11138 { 11139 struct lpfc_cq_event *cq_event; 11140 11141 /* First, declare the fcp xri abort event has been handled */ 11142 spin_lock_irq(&phba->hbalock); 11143 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT; 11144 spin_unlock_irq(&phba->hbalock); 11145 /* Now, handle all the fcp xri abort events */ 11146 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) { 11147 /* Get the first event from the head of the event queue */ 11148 spin_lock_irq(&phba->hbalock); 11149 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 11150 cq_event, struct lpfc_cq_event, list); 11151 spin_unlock_irq(&phba->hbalock); 11152 /* Notify aborted XRI for FCP work queue */ 11153 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 11154 /* Free the event processed back to the free pool */ 11155 lpfc_sli4_cq_event_release(phba, cq_event); 11156 } 11157 } 11158 11159 /** 11160 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event 11161 * @phba: pointer to lpfc hba data structure. 11162 * 11163 * This routine is invoked by the worker thread to process all the pending 11164 * SLI4 els abort xri events. 11165 **/ 11166 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) 11167 { 11168 struct lpfc_cq_event *cq_event; 11169 11170 /* First, declare the els xri abort event has been handled */ 11171 spin_lock_irq(&phba->hbalock); 11172 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; 11173 spin_unlock_irq(&phba->hbalock); 11174 /* Now, handle all the els xri abort events */ 11175 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { 11176 /* Get the first event from the head of the event queue */ 11177 spin_lock_irq(&phba->hbalock); 11178 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 11179 cq_event, struct lpfc_cq_event, list); 11180 spin_unlock_irq(&phba->hbalock); 11181 /* Notify aborted XRI for ELS work queue */ 11182 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 11183 /* Free the event processed back to the free pool */ 11184 lpfc_sli4_cq_event_release(phba, cq_event); 11185 } 11186 } 11187 11188 /** 11189 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn 11190 * @phba: pointer to lpfc hba data structure 11191 * @pIocbIn: pointer to the rspiocbq 11192 * @pIocbOut: pointer to the cmdiocbq 11193 * @wcqe: pointer to the complete wcqe 11194 * 11195 * This routine transfers the fields of a command iocbq to a response iocbq 11196 * by copying all the IOCB fields from command iocbq and transferring the 11197 * completion status information from the complete wcqe. 
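*
* Note that the copy below starts at offsetof(struct lpfc_iocbq, iocb), so
* the list linkage and other driver bookkeeping ahead of the iocb member are
* preserved while the IOCB payload and trailing fields are taken from the
* command iocbq before the WCQE status is folded in.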
11198 **/ 11199 static void 11200 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, 11201 struct lpfc_iocbq *pIocbIn, 11202 struct lpfc_iocbq *pIocbOut, 11203 struct lpfc_wcqe_complete *wcqe) 11204 { 11205 int numBdes, i; 11206 unsigned long iflags; 11207 uint32_t status, max_response; 11208 struct lpfc_dmabuf *dmabuf; 11209 struct ulp_bde64 *bpl, bde; 11210 size_t offset = offsetof(struct lpfc_iocbq, iocb); 11211 11212 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, 11213 sizeof(struct lpfc_iocbq) - offset); 11214 /* Map WCQE parameters into irspiocb parameters */ 11215 status = bf_get(lpfc_wcqe_c_status, wcqe); 11216 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK); 11217 if (pIocbOut->iocb_flag & LPFC_IO_FCP) 11218 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 11219 pIocbIn->iocb.un.fcpi.fcpi_parm = 11220 pIocbOut->iocb.un.fcpi.fcpi_parm - 11221 wcqe->total_data_placed; 11222 else 11223 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 11224 else { 11225 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 11226 switch (pIocbOut->iocb.ulpCommand) { 11227 case CMD_ELS_REQUEST64_CR: 11228 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 11229 bpl = (struct ulp_bde64 *)dmabuf->virt; 11230 bde.tus.w = le32_to_cpu(bpl[1].tus.w); 11231 max_response = bde.tus.f.bdeSize; 11232 break; 11233 case CMD_GEN_REQUEST64_CR: 11234 max_response = 0; 11235 if (!pIocbOut->context3) 11236 break; 11237 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/ 11238 sizeof(struct ulp_bde64); 11239 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 11240 bpl = (struct ulp_bde64 *)dmabuf->virt; 11241 for (i = 0; i < numBdes; i++) { 11242 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 11243 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 11244 max_response += bde.tus.f.bdeSize; 11245 } 11246 break; 11247 default: 11248 max_response = wcqe->total_data_placed; 11249 break; 11250 } 11251 if (max_response < wcqe->total_data_placed) 11252 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response; 11253 else 11254 pIocbIn->iocb.un.genreq64.bdl.bdeSize = 11255 wcqe->total_data_placed; 11256 } 11257 11258 /* Convert BG errors for completion status */ 11259 if (status == CQE_STATUS_DI_ERROR) { 11260 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; 11261 11262 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe)) 11263 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED; 11264 else 11265 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED; 11266 11267 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0; 11268 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */ 11269 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 11270 BGS_GUARD_ERR_MASK; 11271 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */ 11272 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 11273 BGS_APPTAG_ERR_MASK; 11274 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */ 11275 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 11276 BGS_REFTAG_ERR_MASK; 11277 11278 /* Check to see if there was any good data before the error */ 11279 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) { 11280 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 11281 BGS_HI_WATER_MARK_PRESENT_MASK; 11282 pIocbIn->iocb.unsli3.sli3_bg.bghm = 11283 wcqe->total_data_placed; 11284 } 11285 11286 /* 11287 * Set ALL the error bits to indicate we don't know what 11288 * type of error it is. 
11289 */
11290 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
11291 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11292 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
11293 BGS_GUARD_ERR_MASK);
11294 }
11295
11296 /* Pick up HBA exchange busy condition */
11297 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
11298 spin_lock_irqsave(&phba->hbalock, iflags);
11299 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
11300 spin_unlock_irqrestore(&phba->hbalock, iflags);
11301 }
11302 }
11303
11304 /**
11305 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
11306 * @phba: Pointer to HBA context object.
11307 * @irspiocbq: Pointer to the response iocbq carrying the ELS work-queue completion entry.
11308 *
11309 * This routine handles an ELS work-queue completion event and constructs
11310 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
11311 * discovery engine to handle.
11312 *
11313 * Return: Pointer to the response IOCBQ, NULL otherwise.
11314 **/
11315 static struct lpfc_iocbq *
11316 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
11317 struct lpfc_iocbq *irspiocbq)
11318 {
11319 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
11320 struct lpfc_iocbq *cmdiocbq;
11321 struct lpfc_wcqe_complete *wcqe;
11322 unsigned long iflags;
11323
11324 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
11325 spin_lock_irqsave(&pring->ring_lock, iflags);
11326 pring->stats.iocb_event++;
11327 /* Look up the ELS command IOCB and create pseudo response IOCB */
11328 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
11329 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11330 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11331
11332 if (unlikely(!cmdiocbq)) {
11333 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11334 "0386 ELS complete with no corresponding "
11335 "cmdiocb: iotag (%d)\n",
11336 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11337 lpfc_sli_release_iocbq(phba, irspiocbq);
11338 return NULL;
11339 }
11340
11341 /* Fake the irspiocbq and copy necessary response information */
11342 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
11343
11344 return irspiocbq;
11345 }
11346
11347 /**
11348 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
11349 * @phba: Pointer to HBA context object.
11350 * @mcqe: Pointer to mailbox completion queue entry.
11351 *
11352 * This routine processes a mailbox completion queue entry that carries an
11353 * asynchronous event.
11354 *
11355 * Return: true if work posted to worker thread, otherwise false.
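*
* The event itself is not decoded here: the MCQE is copied into a freshly
* allocated lpfc_cq_event, queued on sp_asynce_work_queue, and the
* ASYNC_EVENT flag is set so the worker thread can pick it up later.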
11356 **/ 11357 static bool 11358 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 11359 { 11360 struct lpfc_cq_event *cq_event; 11361 unsigned long iflags; 11362 11363 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11364 "0392 Async Event: word0:x%x, word1:x%x, " 11365 "word2:x%x, word3:x%x\n", mcqe->word0, 11366 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); 11367 11368 /* Allocate a new internal CQ_EVENT entry */ 11369 cq_event = lpfc_sli4_cq_event_alloc(phba); 11370 if (!cq_event) { 11371 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11372 "0394 Failed to allocate CQ_EVENT entry\n"); 11373 return false; 11374 } 11375 11376 /* Move the CQE into an asynchronous event entry */ 11377 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe)); 11378 spin_lock_irqsave(&phba->hbalock, iflags); 11379 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); 11380 /* Set the async event flag */ 11381 phba->hba_flag |= ASYNC_EVENT; 11382 spin_unlock_irqrestore(&phba->hbalock, iflags); 11383 11384 return true; 11385 } 11386 11387 /** 11388 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event 11389 * @phba: Pointer to HBA context object. 11390 * @cqe: Pointer to mailbox completion queue entry. 11391 * 11392 * This routine process a mailbox completion queue entry with mailbox 11393 * completion event. 11394 * 11395 * Return: true if work posted to worker thread, otherwise false. 11396 **/ 11397 static bool 11398 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 11399 { 11400 uint32_t mcqe_status; 11401 MAILBOX_t *mbox, *pmbox; 11402 struct lpfc_mqe *mqe; 11403 struct lpfc_vport *vport; 11404 struct lpfc_nodelist *ndlp; 11405 struct lpfc_dmabuf *mp; 11406 unsigned long iflags; 11407 LPFC_MBOXQ_t *pmb; 11408 bool workposted = false; 11409 int rc; 11410 11411 /* If not a mailbox complete MCQE, out by checking mailbox consume */ 11412 if (!bf_get(lpfc_trailer_completed, mcqe)) 11413 goto out_no_mqe_complete; 11414 11415 /* Get the reference to the active mbox command */ 11416 spin_lock_irqsave(&phba->hbalock, iflags); 11417 pmb = phba->sli.mbox_active; 11418 if (unlikely(!pmb)) { 11419 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 11420 "1832 No pending MBOX command to handle\n"); 11421 spin_unlock_irqrestore(&phba->hbalock, iflags); 11422 goto out_no_mqe_complete; 11423 } 11424 spin_unlock_irqrestore(&phba->hbalock, iflags); 11425 mqe = &pmb->u.mqe; 11426 pmbox = (MAILBOX_t *)&pmb->u.mqe; 11427 mbox = phba->mbox; 11428 vport = pmb->vport; 11429 11430 /* Reset heartbeat timer */ 11431 phba->last_completion_time = jiffies; 11432 del_timer(&phba->sli.mbox_tmo); 11433 11434 /* Move mbox data to caller's mailbox region, do endian swapping */ 11435 if (pmb->mbox_cmpl && mbox) 11436 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe)); 11437 11438 /* 11439 * For mcqe errors, conditionally move a modified error code to 11440 * the mbox so that the error will not be missed. 
11441 */
11442 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
11443 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
11444 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
11445 bf_set(lpfc_mqe_status, mqe,
11446 (LPFC_MBX_ERROR_RANGE | mcqe_status));
11447 }
11448 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
11449 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
11450 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
11451 "MBOX dflt rpi: status:x%x rpi:x%x",
11452 mcqe_status,
11453 pmbox->un.varWords[0], 0);
11454 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
11455 mp = (struct lpfc_dmabuf *)(pmb->context1);
11456 ndlp = (struct lpfc_nodelist *)pmb->context2;
11457 /* Reg_LOGIN of dflt RPI was successful. Now let's get
11458 * rid of the RPI using the same mbox buffer.
11459 */
11460 lpfc_unreg_login(phba, vport->vpi,
11461 pmbox->un.varWords[0], pmb);
11462 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
11463 pmb->context1 = mp;
11464 pmb->context2 = ndlp;
11465 pmb->vport = vport;
11466 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
11467 if (rc != MBX_BUSY)
11468 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
11469 LOG_SLI, "0385 rc should "
11470 "have been MBX_BUSY\n");
11471 if (rc != MBX_NOT_FINISHED)
11472 goto send_current_mbox;
11473 }
11474 }
11475 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
11476 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
11477 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
11478
11479 /* There is mailbox completion work to do */
11480 spin_lock_irqsave(&phba->hbalock, iflags);
11481 __lpfc_mbox_cmpl_put(phba, pmb);
11482 phba->work_ha |= HA_MBATT;
11483 spin_unlock_irqrestore(&phba->hbalock, iflags);
11484 workposted = true;
11485
11486 send_current_mbox:
11487 spin_lock_irqsave(&phba->hbalock, iflags);
11488 /* Release the mailbox command posting token */
11489 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11490 /* Clearing the active mailbox pointer must be kept in sync with clearing the flag */
11491 phba->sli.mbox_active = NULL;
11492 spin_unlock_irqrestore(&phba->hbalock, iflags);
11493 /* Wake up worker thread to post the next pending mailbox command */
11494 lpfc_worker_wake_up(phba);
11495 out_no_mqe_complete:
11496 if (bf_get(lpfc_trailer_consumed, mcqe))
11497 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
11498 return workposted;
11499 }
11500
11501 /**
11502 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
11503 * @phba: Pointer to HBA context object.
11504 * @cqe: Pointer to mailbox completion queue entry.
11505 *
11506 * This routine processes a mailbox completion queue entry; it invokes the
11507 * proper mailbox completion handling or asynchronous event handling routine
11508 * according to the MCQE's async bit.
11509 *
11510 * Return: true if work posted to worker thread, otherwise false.
11511 **/
11512 static bool
11513 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
11514 {
11515 struct lpfc_mcqe mcqe;
11516 bool workposted;
11517
11518 /* Copy the mailbox MCQE and convert endian order as needed */
11519 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
11520
11521 /* Invoke the proper event handling routine */
11522 if (!bf_get(lpfc_trailer_async, &mcqe))
11523 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
11524 else
11525 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
11526 return workposted;
11527 }
11528
11529 /**
11530 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
11531 * @phba: Pointer to HBA context object.
11532 * @cq: Pointer to associated CQ 11533 * @wcqe: Pointer to work-queue completion queue entry. 11534 * 11535 * This routine handles an ELS work-queue completion event. 11536 * 11537 * Return: true if work posted to worker thread, otherwise false. 11538 **/ 11539 static bool 11540 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 11541 struct lpfc_wcqe_complete *wcqe) 11542 { 11543 struct lpfc_iocbq *irspiocbq; 11544 unsigned long iflags; 11545 struct lpfc_sli_ring *pring = cq->pring; 11546 int txq_cnt = 0; 11547 int txcmplq_cnt = 0; 11548 int fcp_txcmplq_cnt = 0; 11549 11550 /* Get an irspiocbq for later ELS response processing use */ 11551 irspiocbq = lpfc_sli_get_iocbq(phba); 11552 if (!irspiocbq) { 11553 if (!list_empty(&pring->txq)) 11554 txq_cnt++; 11555 if (!list_empty(&pring->txcmplq)) 11556 txcmplq_cnt++; 11557 if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq)) 11558 fcp_txcmplq_cnt++; 11559 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11560 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " 11561 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n", 11562 txq_cnt, phba->iocb_cnt, 11563 fcp_txcmplq_cnt, 11564 txcmplq_cnt); 11565 return false; 11566 } 11567 11568 /* Save off the slow-path queue event for work thread to process */ 11569 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe)); 11570 spin_lock_irqsave(&phba->hbalock, iflags); 11571 list_add_tail(&irspiocbq->cq_event.list, 11572 &phba->sli4_hba.sp_queue_event); 11573 phba->hba_flag |= HBA_SP_QUEUE_EVT; 11574 spin_unlock_irqrestore(&phba->hbalock, iflags); 11575 11576 return true; 11577 } 11578 11579 /** 11580 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event 11581 * @phba: Pointer to HBA context object. 11582 * @wcqe: Pointer to work-queue completion queue entry. 11583 * 11584 * This routine handles slow-path WQ entry comsumed event by invoking the 11585 * proper WQ release routine to the slow-path WQ. 11586 **/ 11587 static void 11588 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, 11589 struct lpfc_wcqe_release *wcqe) 11590 { 11591 /* sanity check on queue memory */ 11592 if (unlikely(!phba->sli4_hba.els_wq)) 11593 return; 11594 /* Check for the slow-path ELS work queue */ 11595 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) 11596 lpfc_sli4_wq_release(phba->sli4_hba.els_wq, 11597 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 11598 else 11599 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11600 "2579 Slow-path wqe consume event carries " 11601 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n", 11602 bf_get(lpfc_wcqe_r_wqe_index, wcqe), 11603 phba->sli4_hba.els_wq->queue_id); 11604 } 11605 11606 /** 11607 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event 11608 * @phba: Pointer to HBA context object. 11609 * @cq: Pointer to a WQ completion queue. 11610 * @wcqe: Pointer to work-queue completion queue entry. 11611 * 11612 * This routine handles an XRI abort event. 11613 * 11614 * Return: true if work posted to worker thread, otherwise false. 
11615 **/ 11616 static bool 11617 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, 11618 struct lpfc_queue *cq, 11619 struct sli4_wcqe_xri_aborted *wcqe) 11620 { 11621 bool workposted = false; 11622 struct lpfc_cq_event *cq_event; 11623 unsigned long iflags; 11624 11625 /* Allocate a new internal CQ_EVENT entry */ 11626 cq_event = lpfc_sli4_cq_event_alloc(phba); 11627 if (!cq_event) { 11628 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11629 "0602 Failed to allocate CQ_EVENT entry\n"); 11630 return false; 11631 } 11632 11633 /* Move the CQE into the proper xri abort event list */ 11634 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); 11635 switch (cq->subtype) { 11636 case LPFC_FCP: 11637 spin_lock_irqsave(&phba->hbalock, iflags); 11638 list_add_tail(&cq_event->list, 11639 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 11640 /* Set the fcp xri abort event flag */ 11641 phba->hba_flag |= FCP_XRI_ABORT_EVENT; 11642 spin_unlock_irqrestore(&phba->hbalock, iflags); 11643 workposted = true; 11644 break; 11645 case LPFC_ELS: 11646 spin_lock_irqsave(&phba->hbalock, iflags); 11647 list_add_tail(&cq_event->list, 11648 &phba->sli4_hba.sp_els_xri_aborted_work_queue); 11649 /* Set the els xri abort event flag */ 11650 phba->hba_flag |= ELS_XRI_ABORT_EVENT; 11651 spin_unlock_irqrestore(&phba->hbalock, iflags); 11652 workposted = true; 11653 break; 11654 default: 11655 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11656 "0603 Invalid work queue CQE subtype (x%x)\n", 11657 cq->subtype); 11658 workposted = false; 11659 break; 11660 } 11661 return workposted; 11662 } 11663 11664 /** 11665 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry 11666 * @phba: Pointer to HBA context object. 11667 * @rcqe: Pointer to receive-queue completion queue entry. 11668 * 11669 * This routine process a receive-queue completion queue entry. 11670 * 11671 * Return: true if work posted to worker thread, otherwise false. 
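*
* Only RCQEs whose RQ id matches the header receive queue (hdr_rq) are
* handled; on success the frame's buffer is moved to sp_queue_event and
* HBA_SP_QUEUE_EVT is set, while the insufficient-buffer statuses simply
* request that more receive buffers be posted (HBA_POST_RECEIVE_BUFFER).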
11672 **/
11673 static bool
11674 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
11675 {
11676 bool workposted = false;
11677 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
11678 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
11679 struct hbq_dmabuf *dma_buf;
11680 uint32_t status, rq_id;
11681 unsigned long iflags;
11682
11683 /* sanity check on queue memory */
11684 if (unlikely(!hrq) || unlikely(!drq))
11685 return workposted;
11686
11687 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
11688 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
11689 else
11690 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
11691 if (rq_id != hrq->queue_id)
11692 goto out;
11693
11694 status = bf_get(lpfc_rcqe_status, rcqe);
11695 switch (status) {
11696 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
11697 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11698 "2537 Receive Frame Truncated!!\n");
11699 hrq->RQ_buf_trunc++;
/* Intentional fall through - the truncated frame is still consumed below */
11700 case FC_STATUS_RQ_SUCCESS:
11701 lpfc_sli4_rq_release(hrq, drq);
11702 spin_lock_irqsave(&phba->hbalock, iflags);
11703 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
11704 if (!dma_buf) {
11705 hrq->RQ_no_buf_found++;
11706 spin_unlock_irqrestore(&phba->hbalock, iflags);
11707 goto out;
11708 }
11709 hrq->RQ_rcv_buf++;
11710 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
11711 /* save off the frame for the work thread to process */
11712 list_add_tail(&dma_buf->cq_event.list,
11713 &phba->sli4_hba.sp_queue_event);
11714 /* Frame received */
11715 phba->hba_flag |= HBA_SP_QUEUE_EVT;
11716 spin_unlock_irqrestore(&phba->hbalock, iflags);
11717 workposted = true;
11718 break;
11719 case FC_STATUS_INSUFF_BUF_NEED_BUF:
11720 case FC_STATUS_INSUFF_BUF_FRM_DISC:
11721 hrq->RQ_no_posted_buf++;
11722 /* Post more buffers if possible */
11723 spin_lock_irqsave(&phba->hbalock, iflags);
11724 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
11725 spin_unlock_irqrestore(&phba->hbalock, iflags);
11726 workposted = true;
11727 break;
11728 }
11729 out:
11730 return workposted;
11731 }
11732
11733 /**
11734 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
11735 * @phba: Pointer to HBA context object.
11736 * @cq: Pointer to the completion queue.
11737 * @cqe: Pointer to a completion queue entry.
11738 *
11739 * This routine processes a slow-path work-queue or receive-queue completion
11740 * queue entry.
11741 *
11742 * Return: true if work posted to worker thread, otherwise false.
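*
* Dispatch summary for the switch below:
*   CQE_CODE_COMPL_WQE     -> lpfc_sli4_sp_handle_els_wcqe()
*   CQE_CODE_RELEASE_WQE   -> lpfc_sli4_sp_handle_rel_wcqe()
*   CQE_CODE_XRI_ABORTED   -> lpfc_sli4_sp_handle_abort_xri_wcqe()
*   CQE_CODE_RECEIVE(_V1)  -> lpfc_sli4_sp_handle_rcqe()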
11743 **/
11744 static bool
11745 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11746 struct lpfc_cqe *cqe)
11747 {
11748 struct lpfc_cqe cqevt;
11749 bool workposted = false;
11750
11751 /* Copy the work queue CQE and convert endian order if needed */
11752 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
11753
11754 /* Check the CQE code and dispatch to the proper handler */
11755 switch (bf_get(lpfc_cqe_code, &cqevt)) {
11756 case CQE_CODE_COMPL_WQE:
11757 /* Process the WQ/RQ complete event */
11758 phba->last_completion_time = jiffies;
11759 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
11760 (struct lpfc_wcqe_complete *)&cqevt);
11761 break;
11762 case CQE_CODE_RELEASE_WQE:
11763 /* Process the WQ release event */
11764 lpfc_sli4_sp_handle_rel_wcqe(phba,
11765 (struct lpfc_wcqe_release *)&cqevt);
11766 break;
11767 case CQE_CODE_XRI_ABORTED:
11768 /* Process the WQ XRI abort event */
11769 phba->last_completion_time = jiffies;
11770 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
11771 (struct sli4_wcqe_xri_aborted *)&cqevt);
11772 break;
11773 case CQE_CODE_RECEIVE:
11774 case CQE_CODE_RECEIVE_V1:
11775 /* Process the RQ event */
11776 phba->last_completion_time = jiffies;
11777 workposted = lpfc_sli4_sp_handle_rcqe(phba,
11778 (struct lpfc_rcqe *)&cqevt);
11779 break;
11780 default:
11781 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11782 "0388 Not a valid WCQE code: x%x\n",
11783 bf_get(lpfc_cqe_code, &cqevt));
11784 break;
11785 }
11786 return workposted;
11787 }
11788
11789 /**
11790 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
11791 * @phba: Pointer to HBA context object.
11792 * @eqe: Pointer to the event queue entry.
* @speq: Pointer to the slow-path event queue.
11793 *
11794 * This routine processes an event queue entry from the slow-path event queue.
11795 * It checks the MajorCode and MinorCode to determine whether this is a
11796 * completion event on a completion queue; if not, an error is logged and the
11797 * routine returns. Otherwise, it finds the corresponding completion
11798 * queue, processes all the entries on that completion queue, rearms the
11799 * completion queue, and then returns.
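*
* The CQ is located by walking speq->child_list for a queue whose queue_id
* matches the CQID carried in the EQE; entries are then consumed with
* lpfc_sli4_cq_get(), with an interim LPFC_QUEUE_NOARM release every
* cq->entry_repost entries and a final LPFC_QUEUE_REARM release at the end.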
11800 * 11801 **/ 11802 static void 11803 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 11804 struct lpfc_queue *speq) 11805 { 11806 struct lpfc_queue *cq = NULL, *childq; 11807 struct lpfc_cqe *cqe; 11808 bool workposted = false; 11809 int ecount = 0; 11810 uint16_t cqid; 11811 11812 /* Get the reference to the corresponding CQ */ 11813 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 11814 11815 list_for_each_entry(childq, &speq->child_list, list) { 11816 if (childq->queue_id == cqid) { 11817 cq = childq; 11818 break; 11819 } 11820 } 11821 if (unlikely(!cq)) { 11822 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 11823 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11824 "0365 Slow-path CQ identifier " 11825 "(%d) does not exist\n", cqid); 11826 return; 11827 } 11828 11829 /* Process all the entries to the CQ */ 11830 switch (cq->type) { 11831 case LPFC_MCQ: 11832 while ((cqe = lpfc_sli4_cq_get(cq))) { 11833 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); 11834 if (!(++ecount % cq->entry_repost)) 11835 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 11836 cq->CQ_mbox++; 11837 } 11838 break; 11839 case LPFC_WCQ: 11840 while ((cqe = lpfc_sli4_cq_get(cq))) { 11841 if (cq->subtype == LPFC_FCP) 11842 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, 11843 cqe); 11844 else 11845 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, 11846 cqe); 11847 if (!(++ecount % cq->entry_repost)) 11848 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 11849 } 11850 11851 /* Track the max number of CQEs processed in 1 EQ */ 11852 if (ecount > cq->CQ_max_cqe) 11853 cq->CQ_max_cqe = ecount; 11854 break; 11855 default: 11856 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11857 "0370 Invalid completion queue type (%d)\n", 11858 cq->type); 11859 return; 11860 } 11861 11862 /* Catch the no cq entry condition, log an error */ 11863 if (unlikely(ecount == 0)) 11864 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11865 "0371 No entry from the CQ: identifier " 11866 "(x%x), type (%d)\n", cq->queue_id, cq->type); 11867 11868 /* In any case, flash and re-arm the RCQ */ 11869 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 11870 11871 /* wake up worker thread if there are works to be done */ 11872 if (workposted) 11873 lpfc_worker_wake_up(phba); 11874 } 11875 11876 /** 11877 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry 11878 * @phba: Pointer to HBA context object. 11879 * @cq: Pointer to associated CQ 11880 * @wcqe: Pointer to work-queue completion queue entry. 11881 * 11882 * This routine process a fast-path work queue completion entry from fast-path 11883 * event queue for FCP command response completion. 11884 **/ 11885 static void 11886 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 11887 struct lpfc_wcqe_complete *wcqe) 11888 { 11889 struct lpfc_sli_ring *pring = cq->pring; 11890 struct lpfc_iocbq *cmdiocbq; 11891 struct lpfc_iocbq irspiocbq; 11892 unsigned long iflags; 11893 11894 /* Check for response status */ 11895 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 11896 /* If resource errors reported from HBA, reduce queue 11897 * depth of the SCSI device. 
11898 */ 11899 if (((bf_get(lpfc_wcqe_c_status, wcqe) == 11900 IOSTAT_LOCAL_REJECT)) && 11901 ((wcqe->parameter & IOERR_PARAM_MASK) == 11902 IOERR_NO_RESOURCES)) 11903 phba->lpfc_rampdown_queue_depth(phba); 11904 11905 /* Log the error status */ 11906 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11907 "0373 FCP complete error: status=x%x, " 11908 "hw_status=x%x, total_data_specified=%d, " 11909 "parameter=x%x, word3=x%x\n", 11910 bf_get(lpfc_wcqe_c_status, wcqe), 11911 bf_get(lpfc_wcqe_c_hw_status, wcqe), 11912 wcqe->total_data_placed, wcqe->parameter, 11913 wcqe->word3); 11914 } 11915 11916 /* Look up the FCP command IOCB and create pseudo response IOCB */ 11917 spin_lock_irqsave(&pring->ring_lock, iflags); 11918 pring->stats.iocb_event++; 11919 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 11920 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 11921 spin_unlock_irqrestore(&pring->ring_lock, iflags); 11922 if (unlikely(!cmdiocbq)) { 11923 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11924 "0374 FCP complete with no corresponding " 11925 "cmdiocb: iotag (%d)\n", 11926 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 11927 return; 11928 } 11929 if (unlikely(!cmdiocbq->iocb_cmpl)) { 11930 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11931 "0375 FCP cmdiocb not callback function " 11932 "iotag: (%d)\n", 11933 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 11934 return; 11935 } 11936 11937 /* Fake the irspiocb and copy necessary response information */ 11938 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); 11939 11940 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 11941 spin_lock_irqsave(&phba->hbalock, iflags); 11942 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 11943 spin_unlock_irqrestore(&phba->hbalock, iflags); 11944 } 11945 11946 /* Pass the cmd_iocb and the rsp state to the upper layer */ 11947 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); 11948 } 11949 11950 /** 11951 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event 11952 * @phba: Pointer to HBA context object. 11953 * @cq: Pointer to completion queue. 11954 * @wcqe: Pointer to work-queue completion queue entry. 11955 * 11956 * This routine handles an fast-path WQ entry comsumed event by invoking the 11957 * proper WQ release routine to the slow-path WQ. 11958 **/ 11959 static void 11960 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 11961 struct lpfc_wcqe_release *wcqe) 11962 { 11963 struct lpfc_queue *childwq; 11964 bool wqid_matched = false; 11965 uint16_t fcp_wqid; 11966 11967 /* Check for fast-path FCP work queue release */ 11968 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); 11969 list_for_each_entry(childwq, &cq->child_list, list) { 11970 if (childwq->queue_id == fcp_wqid) { 11971 lpfc_sli4_wq_release(childwq, 11972 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 11973 wqid_matched = true; 11974 break; 11975 } 11976 } 11977 /* Report warning log message if no match found */ 11978 if (wqid_matched != true) 11979 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11980 "2580 Fast-path wqe consume event carries " 11981 "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid); 11982 } 11983 11984 /** 11985 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry 11986 * @cq: Pointer to the completion queue. 11987 * @eqe: Pointer to fast-path completion queue entry. 11988 * 11989 * This routine process a fast-path work queue completion entry from fast-path 11990 * event queue for FCP command response completion. 
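*
* Unlike the slow-path variant, the WQE-complete case here calls
* lpfc_sli4_fp_handle_fcp_wcqe() directly in interrupt context rather than
* queueing the completion to the worker thread; only the XRI-abort case can
* post work for the worker thread.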
11991 **/
11992 static int
11993 lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11994                          struct lpfc_cqe *cqe)
11995 {
11996         struct lpfc_wcqe_release wcqe;
11997         bool workposted = false;
11998 
11999         /* Copy the work queue CQE and convert endian order if needed */
12000         lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
12001 
12002         /* Check the WCQE type and dispatch accordingly */
12003         switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
12004         case CQE_CODE_COMPL_WQE:
12005                 cq->CQ_wq++;
12006                 /* Process the WQ complete event */
12007                 phba->last_completion_time = jiffies;
12008                 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
12009                                 (struct lpfc_wcqe_complete *)&wcqe);
12010                 break;
12011         case CQE_CODE_RELEASE_WQE:
12012                 cq->CQ_release_wqe++;
12013                 /* Process the WQ release event */
12014                 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
12015                                 (struct lpfc_wcqe_release *)&wcqe);
12016                 break;
12017         case CQE_CODE_XRI_ABORTED:
12018                 cq->CQ_xri_aborted++;
12019                 /* Process the WQ XRI abort event */
12020                 phba->last_completion_time = jiffies;
12021                 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
12022                                 (struct sli4_wcqe_xri_aborted *)&wcqe);
12023                 break;
12024         default:
12025                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12026                                 "0144 Not a valid WCQE code: x%x\n",
12027                                 bf_get(lpfc_wcqe_c_code, &wcqe));
12028                 break;
12029         }
12030         return workposted;
12031 }
12032 
12033 /**
12034  * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
12035  * @phba: Pointer to HBA context object.
12036  * @eqe: Pointer to fast-path event queue entry.
12037  *
12038  * This routine processes an event queue entry from the fast-path event queue.
12039  * It checks the MajorCode and MinorCode to determine whether the entry is a
12040  * completion event for a completion queue; if not, an error is logged and the
12041  * routine returns. Otherwise, it looks up the corresponding completion queue,
12042  * processes all the entries on that completion queue, re-arms the completion
12043  * queue, and returns.
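 *
 * Illustrative caller, mirroring the MSI-X fast-path interrupt handler later
 * in this file (a sketch only; fpeq is the fast-path EQ for this vector,
 * fcp_eqidx is its index, and ecount is a local of the caller):
 *
 *      while ((eqe = lpfc_sli4_eq_get(fpeq))) {
 *              lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx);
 *              if (!(++ecount % fpeq->entry_repost))
 *                      lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
 *      }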
12044 **/ 12045 static void 12046 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 12047 uint32_t qidx) 12048 { 12049 struct lpfc_queue *cq; 12050 struct lpfc_cqe *cqe; 12051 bool workposted = false; 12052 uint16_t cqid; 12053 int ecount = 0; 12054 12055 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 12056 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12057 "0366 Not a valid completion " 12058 "event: majorcode=x%x, minorcode=x%x\n", 12059 bf_get_le32(lpfc_eqe_major_code, eqe), 12060 bf_get_le32(lpfc_eqe_minor_code, eqe)); 12061 return; 12062 } 12063 12064 /* Get the reference to the corresponding CQ */ 12065 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 12066 12067 /* Check if this is a Slow path event */ 12068 if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) { 12069 lpfc_sli4_sp_handle_eqe(phba, eqe, 12070 phba->sli4_hba.hba_eq[qidx]); 12071 return; 12072 } 12073 12074 if (unlikely(!phba->sli4_hba.fcp_cq)) { 12075 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 12076 "3146 Fast-path completion queues " 12077 "does not exist\n"); 12078 return; 12079 } 12080 cq = phba->sli4_hba.fcp_cq[qidx]; 12081 if (unlikely(!cq)) { 12082 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 12083 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12084 "0367 Fast-path completion queue " 12085 "(%d) does not exist\n", qidx); 12086 return; 12087 } 12088 12089 if (unlikely(cqid != cq->queue_id)) { 12090 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12091 "0368 Miss-matched fast-path completion " 12092 "queue identifier: eqcqid=%d, fcpcqid=%d\n", 12093 cqid, cq->queue_id); 12094 return; 12095 } 12096 12097 /* Process all the entries to the CQ */ 12098 while ((cqe = lpfc_sli4_cq_get(cq))) { 12099 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe); 12100 if (!(++ecount % cq->entry_repost)) 12101 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 12102 } 12103 12104 /* Track the max number of CQEs processed in 1 EQ */ 12105 if (ecount > cq->CQ_max_cqe) 12106 cq->CQ_max_cqe = ecount; 12107 12108 /* Catch the no cq entry condition */ 12109 if (unlikely(ecount == 0)) 12110 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12111 "0369 No entry from fast-path completion " 12112 "queue fcpcqid=%d\n", cq->queue_id); 12113 12114 /* In any case, flash and re-arm the CQ */ 12115 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 12116 12117 /* wake up worker thread if there are works to be done */ 12118 if (workposted) 12119 lpfc_worker_wake_up(phba); 12120 } 12121 12122 static void 12123 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) 12124 { 12125 struct lpfc_eqe *eqe; 12126 12127 /* walk all the EQ entries and drop on the floor */ 12128 while ((eqe = lpfc_sli4_eq_get(eq))) 12129 ; 12130 12131 /* Clear and re-arm the EQ */ 12132 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM); 12133 } 12134 12135 /** 12136 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device 12137 * @irq: Interrupt number. 12138 * @dev_id: The device context pointer. 12139 * 12140 * This function is directly called from the PCI layer as an interrupt 12141 * service routine when device with SLI-4 interface spec is enabled with 12142 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 12143 * ring event in the HBA. However, when the device is enabled with either 12144 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 12145 * device-level interrupt handler. 
When the PCI slot is in error recovery 12146 * or the HBA is undergoing initialization, the interrupt handler will not 12147 * process the interrupt. The SCSI FCP fast-path ring event are handled in 12148 * the intrrupt context. This function is called without any lock held. 12149 * It gets the hbalock to access and update SLI data structures. Note that, 12150 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is 12151 * equal to that of FCP CQ index. 12152 * 12153 * The link attention and ELS ring attention events are handled 12154 * by the worker thread. The interrupt handler signals the worker thread 12155 * and returns for these events. This function is called without any lock 12156 * held. It gets the hbalock to access and update SLI data structures. 12157 * 12158 * This function returns IRQ_HANDLED when interrupt is handled else it 12159 * returns IRQ_NONE. 12160 **/ 12161 irqreturn_t 12162 lpfc_sli4_hba_intr_handler(int irq, void *dev_id) 12163 { 12164 struct lpfc_hba *phba; 12165 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; 12166 struct lpfc_queue *fpeq; 12167 struct lpfc_eqe *eqe; 12168 unsigned long iflag; 12169 int ecount = 0; 12170 int fcp_eqidx; 12171 12172 /* Get the driver's phba structure from the dev_id */ 12173 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id; 12174 phba = fcp_eq_hdl->phba; 12175 fcp_eqidx = fcp_eq_hdl->idx; 12176 12177 if (unlikely(!phba)) 12178 return IRQ_NONE; 12179 if (unlikely(!phba->sli4_hba.hba_eq)) 12180 return IRQ_NONE; 12181 12182 /* Get to the EQ struct associated with this vector */ 12183 fpeq = phba->sli4_hba.hba_eq[fcp_eqidx]; 12184 if (unlikely(!fpeq)) 12185 return IRQ_NONE; 12186 12187 if (lpfc_fcp_look_ahead) { 12188 if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use)) 12189 lpfc_sli4_eq_clr_intr(fpeq); 12190 else { 12191 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); 12192 return IRQ_NONE; 12193 } 12194 } 12195 12196 /* Check device state for handling interrupt */ 12197 if (unlikely(lpfc_intr_state_check(phba))) { 12198 fpeq->EQ_badstate++; 12199 /* Check again for link_state with lock held */ 12200 spin_lock_irqsave(&phba->hbalock, iflag); 12201 if (phba->link_state < LPFC_LINK_DOWN) 12202 /* Flush, clear interrupt, and rearm the EQ */ 12203 lpfc_sli4_eq_flush(phba, fpeq); 12204 spin_unlock_irqrestore(&phba->hbalock, iflag); 12205 if (lpfc_fcp_look_ahead) 12206 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); 12207 return IRQ_NONE; 12208 } 12209 12210 /* 12211 * Process all the event on FCP fast-path EQ 12212 */ 12213 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 12214 lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx); 12215 if (!(++ecount % fpeq->entry_repost)) 12216 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); 12217 fpeq->EQ_processed++; 12218 } 12219 12220 /* Track the max number of EQEs processed in 1 intr */ 12221 if (ecount > fpeq->EQ_max_eqe) 12222 fpeq->EQ_max_eqe = ecount; 12223 12224 /* Always clear and re-arm the fast-path EQ */ 12225 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); 12226 12227 if (unlikely(ecount == 0)) { 12228 fpeq->EQ_no_entry++; 12229 12230 if (lpfc_fcp_look_ahead) { 12231 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); 12232 return IRQ_NONE; 12233 } 12234 12235 if (phba->intr_type == MSIX) 12236 /* MSI-X treated interrupt served as no EQ share INT */ 12237 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 12238 "0358 MSI-X interrupt with no EQE\n"); 12239 else 12240 /* Non MSI-X treated on interrupt as EQ share INT */ 12241 return IRQ_NONE; 12242 } 12243 12244 if (lpfc_fcp_look_ahead) 12245 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); 12246 
return IRQ_HANDLED; 12247 } /* lpfc_sli4_fp_intr_handler */ 12248 12249 /** 12250 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device 12251 * @irq: Interrupt number. 12252 * @dev_id: The device context pointer. 12253 * 12254 * This function is the device-level interrupt handler to device with SLI-4 12255 * interface spec, called from the PCI layer when either MSI or Pin-IRQ 12256 * interrupt mode is enabled and there is an event in the HBA which requires 12257 * driver attention. This function invokes the slow-path interrupt attention 12258 * handling function and fast-path interrupt attention handling function in 12259 * turn to process the relevant HBA attention events. This function is called 12260 * without any lock held. It gets the hbalock to access and update SLI data 12261 * structures. 12262 * 12263 * This function returns IRQ_HANDLED when interrupt is handled, else it 12264 * returns IRQ_NONE. 12265 **/ 12266 irqreturn_t 12267 lpfc_sli4_intr_handler(int irq, void *dev_id) 12268 { 12269 struct lpfc_hba *phba; 12270 irqreturn_t hba_irq_rc; 12271 bool hba_handled = false; 12272 int fcp_eqidx; 12273 12274 /* Get the driver's phba structure from the dev_id */ 12275 phba = (struct lpfc_hba *)dev_id; 12276 12277 if (unlikely(!phba)) 12278 return IRQ_NONE; 12279 12280 /* 12281 * Invoke fast-path host attention interrupt handling as appropriate. 12282 */ 12283 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) { 12284 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq, 12285 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]); 12286 if (hba_irq_rc == IRQ_HANDLED) 12287 hba_handled |= true; 12288 } 12289 12290 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE; 12291 } /* lpfc_sli4_intr_handler */ 12292 12293 /** 12294 * lpfc_sli4_queue_free - free a queue structure and associated memory 12295 * @queue: The queue structure to free. 12296 * 12297 * This function frees a queue structure and the DMAable memory used for 12298 * the host resident queue. This function must be called after destroying the 12299 * queue on the HBA. 12300 **/ 12301 void 12302 lpfc_sli4_queue_free(struct lpfc_queue *queue) 12303 { 12304 struct lpfc_dmabuf *dmabuf; 12305 12306 if (!queue) 12307 return; 12308 12309 while (!list_empty(&queue->page_list)) { 12310 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, 12311 list); 12312 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE, 12313 dmabuf->virt, dmabuf->phys); 12314 kfree(dmabuf); 12315 } 12316 kfree(queue); 12317 return; 12318 } 12319 12320 /** 12321 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure 12322 * @phba: The HBA that this queue is being created on. 12323 * @entry_size: The size of each queue entry for this queue. 12324 * @entry count: The number of entries that this queue will handle. 12325 * 12326 * This function allocates a queue structure and the DMAable memory used for 12327 * the host resident queue. This function must be called before creating the 12328 * queue on the HBA. 
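 *
 * Typical pairing with lpfc_sli4_queue_free() (a sketch only; entry_size and
 * entry_count here stand for whatever values the caller has chosen for the
 * queue type being created):
 *
 *      struct lpfc_queue *q;
 *
 *      q = lpfc_sli4_queue_alloc(phba, entry_size, entry_count);
 *      if (!q)
 *              return -ENOMEM;
 *      ... create the queue on the HBA, use it, destroy it on the HBA ...
 *      lpfc_sli4_queue_free(q);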
12329 **/ 12330 struct lpfc_queue * 12331 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, 12332 uint32_t entry_count) 12333 { 12334 struct lpfc_queue *queue; 12335 struct lpfc_dmabuf *dmabuf; 12336 int x, total_qe_count; 12337 void *dma_pointer; 12338 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12339 12340 if (!phba->sli4_hba.pc_sli4_params.supported) 12341 hw_page_size = SLI4_PAGE_SIZE; 12342 12343 queue = kzalloc(sizeof(struct lpfc_queue) + 12344 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL); 12345 if (!queue) 12346 return NULL; 12347 queue->page_count = (ALIGN(entry_size * entry_count, 12348 hw_page_size))/hw_page_size; 12349 INIT_LIST_HEAD(&queue->list); 12350 INIT_LIST_HEAD(&queue->page_list); 12351 INIT_LIST_HEAD(&queue->child_list); 12352 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) { 12353 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 12354 if (!dmabuf) 12355 goto out_fail; 12356 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 12357 hw_page_size, &dmabuf->phys, 12358 GFP_KERNEL); 12359 if (!dmabuf->virt) { 12360 kfree(dmabuf); 12361 goto out_fail; 12362 } 12363 memset(dmabuf->virt, 0, hw_page_size); 12364 dmabuf->buffer_tag = x; 12365 list_add_tail(&dmabuf->list, &queue->page_list); 12366 /* initialize queue's entry array */ 12367 dma_pointer = dmabuf->virt; 12368 for (; total_qe_count < entry_count && 12369 dma_pointer < (hw_page_size + dmabuf->virt); 12370 total_qe_count++, dma_pointer += entry_size) { 12371 queue->qe[total_qe_count].address = dma_pointer; 12372 } 12373 } 12374 queue->entry_size = entry_size; 12375 queue->entry_count = entry_count; 12376 12377 /* 12378 * entry_repost is calculated based on the number of entries in the 12379 * queue. This works out except for RQs. If buffers are NOT initially 12380 * posted for every RQE, entry_repost should be adjusted accordingly. 12381 */ 12382 queue->entry_repost = (entry_count >> 3); 12383 if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST) 12384 queue->entry_repost = LPFC_QUEUE_MIN_REPOST; 12385 queue->phba = phba; 12386 12387 return queue; 12388 out_fail: 12389 lpfc_sli4_queue_free(queue); 12390 return NULL; 12391 } 12392 12393 /** 12394 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory 12395 * @phba: HBA structure that indicates port to create a queue on. 12396 * @pci_barset: PCI BAR set flag. 12397 * 12398 * This function shall perform iomap of the specified PCI BAR address to host 12399 * memory address if not already done so and return it. The returned host 12400 * memory address can be NULL. 12401 */ 12402 static void __iomem * 12403 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) 12404 { 12405 struct pci_dev *pdev; 12406 12407 if (!phba->pcidev) 12408 return NULL; 12409 else 12410 pdev = phba->pcidev; 12411 12412 switch (pci_barset) { 12413 case WQ_PCI_BAR_0_AND_1: 12414 return phba->pci_bar0_memmap_p; 12415 case WQ_PCI_BAR_2_AND_3: 12416 return phba->pci_bar2_memmap_p; 12417 case WQ_PCI_BAR_4_AND_5: 12418 return phba->pci_bar4_memmap_p; 12419 default: 12420 break; 12421 } 12422 return NULL; 12423 } 12424 12425 /** 12426 * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs 12427 * @phba: HBA structure that indicates port to create a queue on. 12428 * @startq: The starting FCP EQ to modify 12429 * 12430 * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA. 12431 * 12432 * The @phba struct is used to send mailbox command to HBA. 
The @startq 12433 * is used to get the starting FCP EQ to change. 12434 * This function is asynchronous and will wait for the mailbox 12435 * command to finish before continuing. 12436 * 12437 * On success this function will return a zero. If unable to allocate enough 12438 * memory this function will return -ENOMEM. If the queue create mailbox command 12439 * fails this function will return -ENXIO. 12440 **/ 12441 uint32_t 12442 lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq) 12443 { 12444 struct lpfc_mbx_modify_eq_delay *eq_delay; 12445 LPFC_MBOXQ_t *mbox; 12446 struct lpfc_queue *eq; 12447 int cnt, rc, length, status = 0; 12448 uint32_t shdr_status, shdr_add_status; 12449 uint32_t result; 12450 int fcp_eqidx; 12451 union lpfc_sli4_cfg_shdr *shdr; 12452 uint16_t dmult; 12453 12454 if (startq >= phba->cfg_fcp_io_channel) 12455 return 0; 12456 12457 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12458 if (!mbox) 12459 return -ENOMEM; 12460 length = (sizeof(struct lpfc_mbx_modify_eq_delay) - 12461 sizeof(struct lpfc_sli4_cfg_mhdr)); 12462 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12463 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY, 12464 length, LPFC_SLI4_MBX_EMBED); 12465 eq_delay = &mbox->u.mqe.un.eq_delay; 12466 12467 /* Calculate delay multiper from maximum interrupt per second */ 12468 result = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel; 12469 if (result > LPFC_DMULT_CONST) 12470 dmult = 0; 12471 else 12472 dmult = LPFC_DMULT_CONST/result - 1; 12473 12474 cnt = 0; 12475 for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel; 12476 fcp_eqidx++) { 12477 eq = phba->sli4_hba.hba_eq[fcp_eqidx]; 12478 if (!eq) 12479 continue; 12480 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; 12481 eq_delay->u.request.eq[cnt].phase = 0; 12482 eq_delay->u.request.eq[cnt].delay_multi = dmult; 12483 cnt++; 12484 if (cnt >= LPFC_MAX_EQ_DELAY) 12485 break; 12486 } 12487 eq_delay->u.request.num_eq = cnt; 12488 12489 mbox->vport = phba->pport; 12490 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12491 mbox->context1 = NULL; 12492 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12493 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr; 12494 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12495 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12496 if (shdr_status || shdr_add_status || rc) { 12497 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12498 "2512 MODIFY_EQ_DELAY mailbox failed with " 12499 "status x%x add_status x%x, mbx status x%x\n", 12500 shdr_status, shdr_add_status, rc); 12501 status = -ENXIO; 12502 } 12503 mempool_free(mbox, phba->mbox_mem_pool); 12504 return status; 12505 } 12506 12507 /** 12508 * lpfc_eq_create - Create an Event Queue on the HBA 12509 * @phba: HBA structure that indicates port to create a queue on. 12510 * @eq: The queue structure to use to create the event queue. 12511 * @imax: The maximum interrupt per second limit. 12512 * 12513 * This function creates an event queue, as detailed in @eq, on a port, 12514 * described by @phba by sending an EQ_CREATE mailbox command to the HBA. 12515 * 12516 * The @phba struct is used to send mailbox command to HBA. The @eq struct 12517 * is used to get the entry count and entry size that are necessary to 12518 * determine the number of pages to allocate and use for this queue. This 12519 * function will send the EQ_CREATE mailbox command to the HBA to setup the 12520 * event queue. 
This function is asynchronous and will wait for the mailbox 12521 * command to finish before continuing. 12522 * 12523 * On success this function will return a zero. If unable to allocate enough 12524 * memory this function will return -ENOMEM. If the queue create mailbox command 12525 * fails this function will return -ENXIO. 12526 **/ 12527 uint32_t 12528 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) 12529 { 12530 struct lpfc_mbx_eq_create *eq_create; 12531 LPFC_MBOXQ_t *mbox; 12532 int rc, length, status = 0; 12533 struct lpfc_dmabuf *dmabuf; 12534 uint32_t shdr_status, shdr_add_status; 12535 union lpfc_sli4_cfg_shdr *shdr; 12536 uint16_t dmult; 12537 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12538 12539 /* sanity check on queue memory */ 12540 if (!eq) 12541 return -ENODEV; 12542 if (!phba->sli4_hba.pc_sli4_params.supported) 12543 hw_page_size = SLI4_PAGE_SIZE; 12544 12545 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12546 if (!mbox) 12547 return -ENOMEM; 12548 length = (sizeof(struct lpfc_mbx_eq_create) - 12549 sizeof(struct lpfc_sli4_cfg_mhdr)); 12550 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12551 LPFC_MBOX_OPCODE_EQ_CREATE, 12552 length, LPFC_SLI4_MBX_EMBED); 12553 eq_create = &mbox->u.mqe.un.eq_create; 12554 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, 12555 eq->page_count); 12556 bf_set(lpfc_eq_context_size, &eq_create->u.request.context, 12557 LPFC_EQE_SIZE); 12558 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); 12559 /* Calculate delay multiper from maximum interrupt per second */ 12560 if (imax > LPFC_DMULT_CONST) 12561 dmult = 0; 12562 else 12563 dmult = LPFC_DMULT_CONST/imax - 1; 12564 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, 12565 dmult); 12566 switch (eq->entry_count) { 12567 default: 12568 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12569 "0360 Unsupported EQ count. 
(%d)\n", 12570 eq->entry_count); 12571 if (eq->entry_count < 256) 12572 return -EINVAL; 12573 /* otherwise default to smallest count (drop through) */ 12574 case 256: 12575 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 12576 LPFC_EQ_CNT_256); 12577 break; 12578 case 512: 12579 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 12580 LPFC_EQ_CNT_512); 12581 break; 12582 case 1024: 12583 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 12584 LPFC_EQ_CNT_1024); 12585 break; 12586 case 2048: 12587 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 12588 LPFC_EQ_CNT_2048); 12589 break; 12590 case 4096: 12591 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 12592 LPFC_EQ_CNT_4096); 12593 break; 12594 } 12595 list_for_each_entry(dmabuf, &eq->page_list, list) { 12596 memset(dmabuf->virt, 0, hw_page_size); 12597 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 12598 putPaddrLow(dmabuf->phys); 12599 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 12600 putPaddrHigh(dmabuf->phys); 12601 } 12602 mbox->vport = phba->pport; 12603 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12604 mbox->context1 = NULL; 12605 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12606 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; 12607 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12608 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12609 if (shdr_status || shdr_add_status || rc) { 12610 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12611 "2500 EQ_CREATE mailbox failed with " 12612 "status x%x add_status x%x, mbx status x%x\n", 12613 shdr_status, shdr_add_status, rc); 12614 status = -ENXIO; 12615 } 12616 eq->type = LPFC_EQ; 12617 eq->subtype = LPFC_NONE; 12618 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); 12619 if (eq->queue_id == 0xFFFF) 12620 status = -ENXIO; 12621 eq->host_index = 0; 12622 eq->hba_index = 0; 12623 12624 mempool_free(mbox, phba->mbox_mem_pool); 12625 return status; 12626 } 12627 12628 /** 12629 * lpfc_cq_create - Create a Completion Queue on the HBA 12630 * @phba: HBA structure that indicates port to create a queue on. 12631 * @cq: The queue structure to use to create the completion queue. 12632 * @eq: The event queue to bind this completion queue to. 12633 * 12634 * This function creates a completion queue, as detailed in @wq, on a port, 12635 * described by @phba by sending a CQ_CREATE mailbox command to the HBA. 12636 * 12637 * The @phba struct is used to send mailbox command to HBA. The @cq struct 12638 * is used to get the entry count and entry size that are necessary to 12639 * determine the number of pages to allocate and use for this queue. The @eq 12640 * is used to indicate which event queue to bind this completion queue to. This 12641 * function will send the CQ_CREATE mailbox command to the HBA to setup the 12642 * completion queue. This function is asynchronous and will wait for the mailbox 12643 * command to finish before continuing. 12644 * 12645 * On success this function will return a zero. If unable to allocate enough 12646 * memory this function will return -ENOMEM. If the queue create mailbox command 12647 * fails this function will return -ENXIO. 
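 *
 * Illustrative use for a fast-path FCP completion queue (a sketch only; @cq
 * and @eq are assumed to have been allocated with lpfc_sli4_queue_alloc()
 * and the EQ already created on the HBA):
 *
 *      rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP);
 *      if (rc)
 *              return rc;
 *
 * On success the CQ is linked onto @eq's child list and @cq->queue_id holds
 * the identifier assigned by the port.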
12648 **/ 12649 uint32_t 12650 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, 12651 struct lpfc_queue *eq, uint32_t type, uint32_t subtype) 12652 { 12653 struct lpfc_mbx_cq_create *cq_create; 12654 struct lpfc_dmabuf *dmabuf; 12655 LPFC_MBOXQ_t *mbox; 12656 int rc, length, status = 0; 12657 uint32_t shdr_status, shdr_add_status; 12658 union lpfc_sli4_cfg_shdr *shdr; 12659 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12660 12661 /* sanity check on queue memory */ 12662 if (!cq || !eq) 12663 return -ENODEV; 12664 if (!phba->sli4_hba.pc_sli4_params.supported) 12665 hw_page_size = SLI4_PAGE_SIZE; 12666 12667 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12668 if (!mbox) 12669 return -ENOMEM; 12670 length = (sizeof(struct lpfc_mbx_cq_create) - 12671 sizeof(struct lpfc_sli4_cfg_mhdr)); 12672 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12673 LPFC_MBOX_OPCODE_CQ_CREATE, 12674 length, LPFC_SLI4_MBX_EMBED); 12675 cq_create = &mbox->u.mqe.un.cq_create; 12676 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; 12677 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, 12678 cq->page_count); 12679 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); 12680 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); 12681 bf_set(lpfc_mbox_hdr_version, &shdr->request, 12682 phba->sli4_hba.pc_sli4_params.cqv); 12683 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { 12684 /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */ 12685 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1); 12686 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, 12687 eq->queue_id); 12688 } else { 12689 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, 12690 eq->queue_id); 12691 } 12692 switch (cq->entry_count) { 12693 default: 12694 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12695 "0361 Unsupported CQ count. (%d)\n", 12696 cq->entry_count); 12697 if (cq->entry_count < 256) { 12698 status = -EINVAL; 12699 goto out; 12700 } 12701 /* otherwise default to smallest count (drop through) */ 12702 case 256: 12703 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 12704 LPFC_CQ_CNT_256); 12705 break; 12706 case 512: 12707 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 12708 LPFC_CQ_CNT_512); 12709 break; 12710 case 1024: 12711 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 12712 LPFC_CQ_CNT_1024); 12713 break; 12714 } 12715 list_for_each_entry(dmabuf, &cq->page_list, list) { 12716 memset(dmabuf->virt, 0, hw_page_size); 12717 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 12718 putPaddrLow(dmabuf->phys); 12719 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 12720 putPaddrHigh(dmabuf->phys); 12721 } 12722 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12723 12724 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 12725 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12726 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12727 if (shdr_status || shdr_add_status || rc) { 12728 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12729 "2501 CQ_CREATE mailbox failed with " 12730 "status x%x add_status x%x, mbx status x%x\n", 12731 shdr_status, shdr_add_status, rc); 12732 status = -ENXIO; 12733 goto out; 12734 } 12735 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 12736 if (cq->queue_id == 0xFFFF) { 12737 status = -ENXIO; 12738 goto out; 12739 } 12740 /* link the cq onto the parent eq child list */ 12741 list_add_tail(&cq->list, &eq->child_list); 12742 /* Set up completion queue's type and subtype */ 12743 cq->type = type; 12744 cq->subtype = subtype; 12745 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 12746 cq->assoc_qid = eq->queue_id; 12747 cq->host_index = 0; 12748 cq->hba_index = 0; 12749 12750 out: 12751 mempool_free(mbox, phba->mbox_mem_pool); 12752 return status; 12753 } 12754 12755 /** 12756 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration 12757 * @phba: HBA structure that indicates port to create a queue on. 12758 * @mq: The queue structure to use to create the mailbox queue. 12759 * @mbox: An allocated pointer to type LPFC_MBOXQ_t 12760 * @cq: The completion queue to associate with this cq. 12761 * 12762 * This function provides failback (fb) functionality when the 12763 * mq_create_ext fails on older FW generations. It's purpose is identical 12764 * to mq_create_ext otherwise. 12765 * 12766 * This routine cannot fail as all attributes were previously accessed and 12767 * initialized in mq_create_ext. 12768 **/ 12769 static void 12770 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, 12771 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq) 12772 { 12773 struct lpfc_mbx_mq_create *mq_create; 12774 struct lpfc_dmabuf *dmabuf; 12775 int length; 12776 12777 length = (sizeof(struct lpfc_mbx_mq_create) - 12778 sizeof(struct lpfc_sli4_cfg_mhdr)); 12779 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12780 LPFC_MBOX_OPCODE_MQ_CREATE, 12781 length, LPFC_SLI4_MBX_EMBED); 12782 mq_create = &mbox->u.mqe.un.mq_create; 12783 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, 12784 mq->page_count); 12785 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, 12786 cq->queue_id); 12787 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); 12788 switch (mq->entry_count) { 12789 case 16: 12790 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 12791 LPFC_MQ_RING_SIZE_16); 12792 break; 12793 case 32: 12794 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 12795 LPFC_MQ_RING_SIZE_32); 12796 break; 12797 case 64: 12798 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 12799 LPFC_MQ_RING_SIZE_64); 12800 break; 12801 case 128: 12802 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 12803 LPFC_MQ_RING_SIZE_128); 12804 break; 12805 } 12806 list_for_each_entry(dmabuf, &mq->page_list, list) { 12807 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 12808 putPaddrLow(dmabuf->phys); 12809 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 12810 putPaddrHigh(dmabuf->phys); 12811 } 12812 } 12813 12814 /** 12815 * lpfc_mq_create - Create a mailbox Queue on the HBA 12816 * @phba: HBA structure that indicates port to create a queue on. 12817 * @mq: The queue structure to use to create the mailbox queue. 
12818 * @cq: The completion queue to associate with this cq. 12819 * @subtype: The queue's subtype. 12820 * 12821 * This function creates a mailbox queue, as detailed in @mq, on a port, 12822 * described by @phba by sending a MQ_CREATE mailbox command to the HBA. 12823 * 12824 * The @phba struct is used to send mailbox command to HBA. The @cq struct 12825 * is used to get the entry count and entry size that are necessary to 12826 * determine the number of pages to allocate and use for this queue. This 12827 * function will send the MQ_CREATE mailbox command to the HBA to setup the 12828 * mailbox queue. This function is asynchronous and will wait for the mailbox 12829 * command to finish before continuing. 12830 * 12831 * On success this function will return a zero. If unable to allocate enough 12832 * memory this function will return -ENOMEM. If the queue create mailbox command 12833 * fails this function will return -ENXIO. 12834 **/ 12835 int32_t 12836 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, 12837 struct lpfc_queue *cq, uint32_t subtype) 12838 { 12839 struct lpfc_mbx_mq_create *mq_create; 12840 struct lpfc_mbx_mq_create_ext *mq_create_ext; 12841 struct lpfc_dmabuf *dmabuf; 12842 LPFC_MBOXQ_t *mbox; 12843 int rc, length, status = 0; 12844 uint32_t shdr_status, shdr_add_status; 12845 union lpfc_sli4_cfg_shdr *shdr; 12846 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12847 12848 /* sanity check on queue memory */ 12849 if (!mq || !cq) 12850 return -ENODEV; 12851 if (!phba->sli4_hba.pc_sli4_params.supported) 12852 hw_page_size = SLI4_PAGE_SIZE; 12853 12854 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12855 if (!mbox) 12856 return -ENOMEM; 12857 length = (sizeof(struct lpfc_mbx_mq_create_ext) - 12858 sizeof(struct lpfc_sli4_cfg_mhdr)); 12859 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12860 LPFC_MBOX_OPCODE_MQ_CREATE_EXT, 12861 length, LPFC_SLI4_MBX_EMBED); 12862 12863 mq_create_ext = &mbox->u.mqe.un.mq_create_ext; 12864 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; 12865 bf_set(lpfc_mbx_mq_create_ext_num_pages, 12866 &mq_create_ext->u.request, mq->page_count); 12867 bf_set(lpfc_mbx_mq_create_ext_async_evt_link, 12868 &mq_create_ext->u.request, 1); 12869 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip, 12870 &mq_create_ext->u.request, 1); 12871 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, 12872 &mq_create_ext->u.request, 1); 12873 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc, 12874 &mq_create_ext->u.request, 1); 12875 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, 12876 &mq_create_ext->u.request, 1); 12877 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); 12878 bf_set(lpfc_mbox_hdr_version, &shdr->request, 12879 phba->sli4_hba.pc_sli4_params.mqv); 12880 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) 12881 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, 12882 cq->queue_id); 12883 else 12884 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, 12885 cq->queue_id); 12886 switch (mq->entry_count) { 12887 default: 12888 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12889 "0362 Unsupported MQ count. 
(%d)\n", 12890 mq->entry_count); 12891 if (mq->entry_count < 16) { 12892 status = -EINVAL; 12893 goto out; 12894 } 12895 /* otherwise default to smallest count (drop through) */ 12896 case 16: 12897 bf_set(lpfc_mq_context_ring_size, 12898 &mq_create_ext->u.request.context, 12899 LPFC_MQ_RING_SIZE_16); 12900 break; 12901 case 32: 12902 bf_set(lpfc_mq_context_ring_size, 12903 &mq_create_ext->u.request.context, 12904 LPFC_MQ_RING_SIZE_32); 12905 break; 12906 case 64: 12907 bf_set(lpfc_mq_context_ring_size, 12908 &mq_create_ext->u.request.context, 12909 LPFC_MQ_RING_SIZE_64); 12910 break; 12911 case 128: 12912 bf_set(lpfc_mq_context_ring_size, 12913 &mq_create_ext->u.request.context, 12914 LPFC_MQ_RING_SIZE_128); 12915 break; 12916 } 12917 list_for_each_entry(dmabuf, &mq->page_list, list) { 12918 memset(dmabuf->virt, 0, hw_page_size); 12919 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = 12920 putPaddrLow(dmabuf->phys); 12921 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = 12922 putPaddrHigh(dmabuf->phys); 12923 } 12924 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12925 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 12926 &mq_create_ext->u.response); 12927 if (rc != MBX_SUCCESS) { 12928 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12929 "2795 MQ_CREATE_EXT failed with " 12930 "status x%x. Failback to MQ_CREATE.\n", 12931 rc); 12932 lpfc_mq_create_fb_init(phba, mq, mbox, cq); 12933 mq_create = &mbox->u.mqe.un.mq_create; 12934 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12935 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; 12936 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 12937 &mq_create->u.response); 12938 } 12939 12940 /* The IOCTL status is embedded in the mailbox subheader. */ 12941 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12942 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12943 if (shdr_status || shdr_add_status || rc) { 12944 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12945 "2502 MQ_CREATE mailbox failed with " 12946 "status x%x add_status x%x, mbx status x%x\n", 12947 shdr_status, shdr_add_status, rc); 12948 status = -ENXIO; 12949 goto out; 12950 } 12951 if (mq->queue_id == 0xFFFF) { 12952 status = -ENXIO; 12953 goto out; 12954 } 12955 mq->type = LPFC_MQ; 12956 mq->assoc_qid = cq->queue_id; 12957 mq->subtype = subtype; 12958 mq->host_index = 0; 12959 mq->hba_index = 0; 12960 12961 /* link the mq onto the parent cq child list */ 12962 list_add_tail(&mq->list, &cq->child_list); 12963 out: 12964 mempool_free(mbox, phba->mbox_mem_pool); 12965 return status; 12966 } 12967 12968 /** 12969 * lpfc_wq_create - Create a Work Queue on the HBA 12970 * @phba: HBA structure that indicates port to create a queue on. 12971 * @wq: The queue structure to use to create the work queue. 12972 * @cq: The completion queue to bind this work queue to. 12973 * @subtype: The subtype of the work queue indicating its functionality. 12974 * 12975 * This function creates a work queue, as detailed in @wq, on a port, described 12976 * by @phba by sending a WQ_CREATE mailbox command to the HBA. 12977 * 12978 * The @phba struct is used to send mailbox command to HBA. The @wq struct 12979 * is used to get the entry count and entry size that are necessary to 12980 * determine the number of pages to allocate and use for this queue. The @cq 12981 * is used to indicate which completion queue to bind this work queue to. This 12982 * function will send the WQ_CREATE mailbox command to the HBA to setup the 12983 * work queue. 
This function is asynchronous and will wait for the mailbox 12984 * command to finish before continuing. 12985 * 12986 * On success this function will return a zero. If unable to allocate enough 12987 * memory this function will return -ENOMEM. If the queue create mailbox command 12988 * fails this function will return -ENXIO. 12989 **/ 12990 uint32_t 12991 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, 12992 struct lpfc_queue *cq, uint32_t subtype) 12993 { 12994 struct lpfc_mbx_wq_create *wq_create; 12995 struct lpfc_dmabuf *dmabuf; 12996 LPFC_MBOXQ_t *mbox; 12997 int rc, length, status = 0; 12998 uint32_t shdr_status, shdr_add_status; 12999 union lpfc_sli4_cfg_shdr *shdr; 13000 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 13001 struct dma_address *page; 13002 void __iomem *bar_memmap_p; 13003 uint32_t db_offset; 13004 uint16_t pci_barset; 13005 13006 /* sanity check on queue memory */ 13007 if (!wq || !cq) 13008 return -ENODEV; 13009 if (!phba->sli4_hba.pc_sli4_params.supported) 13010 hw_page_size = SLI4_PAGE_SIZE; 13011 13012 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13013 if (!mbox) 13014 return -ENOMEM; 13015 length = (sizeof(struct lpfc_mbx_wq_create) - 13016 sizeof(struct lpfc_sli4_cfg_mhdr)); 13017 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 13018 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, 13019 length, LPFC_SLI4_MBX_EMBED); 13020 wq_create = &mbox->u.mqe.un.wq_create; 13021 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; 13022 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, 13023 wq->page_count); 13024 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, 13025 cq->queue_id); 13026 13027 /* wqv is the earliest version supported, NOT the latest */ 13028 bf_set(lpfc_mbox_hdr_version, &shdr->request, 13029 phba->sli4_hba.pc_sli4_params.wqv); 13030 13031 switch (phba->sli4_hba.pc_sli4_params.wqv) { 13032 case LPFC_Q_CREATE_VERSION_0: 13033 switch (wq->entry_size) { 13034 default: 13035 case 64: 13036 /* Nothing to do, version 0 ONLY supports 64 byte */ 13037 page = wq_create->u.request.page; 13038 break; 13039 case 128: 13040 if (!(phba->sli4_hba.pc_sli4_params.wqsize & 13041 LPFC_WQ_SZ128_SUPPORT)) { 13042 status = -ERANGE; 13043 goto out; 13044 } 13045 /* If we get here the HBA MUST also support V1 and 13046 * we MUST use it 13047 */ 13048 bf_set(lpfc_mbox_hdr_version, &shdr->request, 13049 LPFC_Q_CREATE_VERSION_1); 13050 13051 bf_set(lpfc_mbx_wq_create_wqe_count, 13052 &wq_create->u.request_1, wq->entry_count); 13053 bf_set(lpfc_mbx_wq_create_wqe_size, 13054 &wq_create->u.request_1, 13055 LPFC_WQ_WQE_SIZE_128); 13056 bf_set(lpfc_mbx_wq_create_page_size, 13057 &wq_create->u.request_1, 13058 (PAGE_SIZE/SLI4_PAGE_SIZE)); 13059 page = wq_create->u.request_1.page; 13060 break; 13061 } 13062 break; 13063 case LPFC_Q_CREATE_VERSION_1: 13064 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, 13065 wq->entry_count); 13066 switch (wq->entry_size) { 13067 default: 13068 case 64: 13069 bf_set(lpfc_mbx_wq_create_wqe_size, 13070 &wq_create->u.request_1, 13071 LPFC_WQ_WQE_SIZE_64); 13072 break; 13073 case 128: 13074 if (!(phba->sli4_hba.pc_sli4_params.wqsize & 13075 LPFC_WQ_SZ128_SUPPORT)) { 13076 status = -ERANGE; 13077 goto out; 13078 } 13079 bf_set(lpfc_mbx_wq_create_wqe_size, 13080 &wq_create->u.request_1, 13081 LPFC_WQ_WQE_SIZE_128); 13082 break; 13083 } 13084 bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, 13085 (PAGE_SIZE/SLI4_PAGE_SIZE)); 13086 page = wq_create->u.request_1.page; 13087 
break; 13088 default: 13089 status = -ERANGE; 13090 goto out; 13091 } 13092 13093 list_for_each_entry(dmabuf, &wq->page_list, list) { 13094 memset(dmabuf->virt, 0, hw_page_size); 13095 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); 13096 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); 13097 } 13098 13099 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 13100 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1); 13101 13102 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13103 /* The IOCTL status is embedded in the mailbox subheader. */ 13104 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13105 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13106 if (shdr_status || shdr_add_status || rc) { 13107 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13108 "2503 WQ_CREATE mailbox failed with " 13109 "status x%x add_status x%x, mbx status x%x\n", 13110 shdr_status, shdr_add_status, rc); 13111 status = -ENXIO; 13112 goto out; 13113 } 13114 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response); 13115 if (wq->queue_id == 0xFFFF) { 13116 status = -ENXIO; 13117 goto out; 13118 } 13119 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 13120 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format, 13121 &wq_create->u.response); 13122 if ((wq->db_format != LPFC_DB_LIST_FORMAT) && 13123 (wq->db_format != LPFC_DB_RING_FORMAT)) { 13124 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13125 "3265 WQ[%d] doorbell format not " 13126 "supported: x%x\n", wq->queue_id, 13127 wq->db_format); 13128 status = -EINVAL; 13129 goto out; 13130 } 13131 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set, 13132 &wq_create->u.response); 13133 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); 13134 if (!bar_memmap_p) { 13135 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13136 "3263 WQ[%d] failed to memmap pci " 13137 "barset:x%x\n", wq->queue_id, 13138 pci_barset); 13139 status = -ENOMEM; 13140 goto out; 13141 } 13142 db_offset = wq_create->u.response.doorbell_offset; 13143 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) && 13144 (db_offset != LPFC_ULP1_WQ_DOORBELL)) { 13145 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13146 "3252 WQ[%d] doorbell offset not " 13147 "supported: x%x\n", wq->queue_id, 13148 db_offset); 13149 status = -EINVAL; 13150 goto out; 13151 } 13152 wq->db_regaddr = bar_memmap_p + db_offset; 13153 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13154 "3264 WQ[%d]: barset:x%x, offset:x%x, " 13155 "format:x%x\n", wq->queue_id, pci_barset, 13156 db_offset, wq->db_format); 13157 } else { 13158 wq->db_format = LPFC_DB_LIST_FORMAT; 13159 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 13160 } 13161 wq->type = LPFC_WQ; 13162 wq->assoc_qid = cq->queue_id; 13163 wq->subtype = subtype; 13164 wq->host_index = 0; 13165 wq->hba_index = 0; 13166 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL; 13167 13168 /* link the wq onto the parent cq child list */ 13169 list_add_tail(&wq->list, &cq->child_list); 13170 out: 13171 mempool_free(mbox, phba->mbox_mem_pool); 13172 return status; 13173 } 13174 13175 /** 13176 * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ 13177 * @phba: HBA structure that indicates port to create a queue on. 13178 * @rq: The queue structure to use for the receive queue. 13179 * @qno: The associated HBQ number 13180 * 13181 * 13182 * For SLI4 we need to adjust the RQ repost value based on 13183 * the number of buffers that are initially posted to the RQ. 
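 *
 * Worked example of the adjustment done below: an RQ backed by 512 initially
 * posted buffers gets entry_repost = 512 >> 3 = 64; a queue with fewer than
 * 8 * LPFC_QUEUE_MIN_REPOST buffers is clamped up to LPFC_QUEUE_MIN_REPOST.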
13184  */
13185 void
13186 lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
13187 {
13188         uint32_t cnt;
13189 
13190         /* sanity check on queue memory */
13191         if (!rq)
13192                 return;
13193         cnt = lpfc_hbq_defs[qno]->entry_count;
13194 
13195         /* Recalc repost for RQs based on buffers initially posted */
13196         cnt = (cnt >> 3);
13197         if (cnt < LPFC_QUEUE_MIN_REPOST)
13198                 cnt = LPFC_QUEUE_MIN_REPOST;
13199 
13200         rq->entry_repost = cnt;
13201 }
13202 
13203 /**
13204  * lpfc_rq_create - Create a Receive Queue on the HBA
13205  * @phba: HBA structure that indicates port to create a queue on.
13206  * @hrq: The queue structure to use to create the header receive queue.
13207  * @drq: The queue structure to use to create the data receive queue.
13208  * @cq: The completion queue to bind this receive queue pair to.
13209  *
13210  * This function creates a receive buffer queue pair, as detailed in @hrq and
13211  * @drq, on a port, described by @phba, by sending an RQ_CREATE mailbox command
13212  * to the HBA.
13213  *
13214  * The @phba struct is used to send the mailbox command to the HBA. The @drq and
13215  * @hrq structs are used to get the entry count that is necessary to determine
13216  * the number of pages to use for this queue pair. The @cq is used to indicate
13217  * which completion queue to bind received buffers that are posted to these
13218  * queues to. This function will send the RQ_CREATE mailbox command to the HBA
13219  * to set up the receive queue pair. This function is asynchronous and will wait
13220  * for the mailbox command to finish before continuing.
13221  *
13222  * On success this function will return a zero. If unable to allocate enough
13223  * memory this function will return -ENOMEM. If the queue create mailbox command
13224  * fails this function will return -ENXIO.
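 *
 * Illustrative use for the unsolicited receive path (a sketch only; @hrq,
 * @drq and @cq are assumed to be already allocated, @hrq and @drq must have
 * the same entry_count or -EINVAL is returned, and LPFC_USOL names the
 * unsolicited-receive subtype used elsewhere in this driver):
 *
 *      rc = lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
 *      if (rc)
 *              return rc;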
13225 **/ 13226 uint32_t 13227 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, 13228 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) 13229 { 13230 struct lpfc_mbx_rq_create *rq_create; 13231 struct lpfc_dmabuf *dmabuf; 13232 LPFC_MBOXQ_t *mbox; 13233 int rc, length, status = 0; 13234 uint32_t shdr_status, shdr_add_status; 13235 union lpfc_sli4_cfg_shdr *shdr; 13236 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 13237 void __iomem *bar_memmap_p; 13238 uint32_t db_offset; 13239 uint16_t pci_barset; 13240 13241 /* sanity check on queue memory */ 13242 if (!hrq || !drq || !cq) 13243 return -ENODEV; 13244 if (!phba->sli4_hba.pc_sli4_params.supported) 13245 hw_page_size = SLI4_PAGE_SIZE; 13246 13247 if (hrq->entry_count != drq->entry_count) 13248 return -EINVAL; 13249 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13250 if (!mbox) 13251 return -ENOMEM; 13252 length = (sizeof(struct lpfc_mbx_rq_create) - 13253 sizeof(struct lpfc_sli4_cfg_mhdr)); 13254 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 13255 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 13256 length, LPFC_SLI4_MBX_EMBED); 13257 rq_create = &mbox->u.mqe.un.rq_create; 13258 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 13259 bf_set(lpfc_mbox_hdr_version, &shdr->request, 13260 phba->sli4_hba.pc_sli4_params.rqv); 13261 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 13262 bf_set(lpfc_rq_context_rqe_count_1, 13263 &rq_create->u.request.context, 13264 hrq->entry_count); 13265 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; 13266 bf_set(lpfc_rq_context_rqe_size, 13267 &rq_create->u.request.context, 13268 LPFC_RQE_SIZE_8); 13269 bf_set(lpfc_rq_context_page_size, 13270 &rq_create->u.request.context, 13271 (PAGE_SIZE/SLI4_PAGE_SIZE)); 13272 } else { 13273 switch (hrq->entry_count) { 13274 default: 13275 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13276 "2535 Unsupported RQ count. (%d)\n", 13277 hrq->entry_count); 13278 if (hrq->entry_count < 512) { 13279 status = -EINVAL; 13280 goto out; 13281 } 13282 /* otherwise default to smallest count (drop through) */ 13283 case 512: 13284 bf_set(lpfc_rq_context_rqe_count, 13285 &rq_create->u.request.context, 13286 LPFC_RQ_RING_SIZE_512); 13287 break; 13288 case 1024: 13289 bf_set(lpfc_rq_context_rqe_count, 13290 &rq_create->u.request.context, 13291 LPFC_RQ_RING_SIZE_1024); 13292 break; 13293 case 2048: 13294 bf_set(lpfc_rq_context_rqe_count, 13295 &rq_create->u.request.context, 13296 LPFC_RQ_RING_SIZE_2048); 13297 break; 13298 case 4096: 13299 bf_set(lpfc_rq_context_rqe_count, 13300 &rq_create->u.request.context, 13301 LPFC_RQ_RING_SIZE_4096); 13302 break; 13303 } 13304 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 13305 LPFC_HDR_BUF_SIZE); 13306 } 13307 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 13308 cq->queue_id); 13309 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 13310 hrq->page_count); 13311 list_for_each_entry(dmabuf, &hrq->page_list, list) { 13312 memset(dmabuf->virt, 0, hw_page_size); 13313 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 13314 putPaddrLow(dmabuf->phys); 13315 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 13316 putPaddrHigh(dmabuf->phys); 13317 } 13318 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 13319 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 13320 13321 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13322 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 13323 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13324 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13325 if (shdr_status || shdr_add_status || rc) { 13326 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13327 "2504 RQ_CREATE mailbox failed with " 13328 "status x%x add_status x%x, mbx status x%x\n", 13329 shdr_status, shdr_add_status, rc); 13330 status = -ENXIO; 13331 goto out; 13332 } 13333 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 13334 if (hrq->queue_id == 0xFFFF) { 13335 status = -ENXIO; 13336 goto out; 13337 } 13338 13339 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 13340 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format, 13341 &rq_create->u.response); 13342 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) && 13343 (hrq->db_format != LPFC_DB_RING_FORMAT)) { 13344 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13345 "3262 RQ [%d] doorbell format not " 13346 "supported: x%x\n", hrq->queue_id, 13347 hrq->db_format); 13348 status = -EINVAL; 13349 goto out; 13350 } 13351 13352 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set, 13353 &rq_create->u.response); 13354 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); 13355 if (!bar_memmap_p) { 13356 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13357 "3269 RQ[%d] failed to memmap pci " 13358 "barset:x%x\n", hrq->queue_id, 13359 pci_barset); 13360 status = -ENOMEM; 13361 goto out; 13362 } 13363 13364 db_offset = rq_create->u.response.doorbell_offset; 13365 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) && 13366 (db_offset != LPFC_ULP1_RQ_DOORBELL)) { 13367 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13368 "3270 RQ[%d] doorbell offset not " 13369 "supported: x%x\n", hrq->queue_id, 13370 db_offset); 13371 status = -EINVAL; 13372 goto out; 13373 } 13374 hrq->db_regaddr = bar_memmap_p + db_offset; 13375 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13376 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, " 13377 "format:x%x\n", hrq->queue_id, pci_barset, 13378 db_offset, hrq->db_format); 13379 } else { 13380 hrq->db_format = LPFC_DB_RING_FORMAT; 13381 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 13382 } 13383 hrq->type = LPFC_HRQ; 13384 hrq->assoc_qid = cq->queue_id; 13385 hrq->subtype = subtype; 13386 hrq->host_index = 0; 13387 hrq->hba_index = 0; 13388 13389 /* now create the data queue */ 13390 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 13391 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 13392 length, LPFC_SLI4_MBX_EMBED); 13393 bf_set(lpfc_mbox_hdr_version, &shdr->request, 13394 phba->sli4_hba.pc_sli4_params.rqv); 13395 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 13396 bf_set(lpfc_rq_context_rqe_count_1, 13397 &rq_create->u.request.context, hrq->entry_count); 13398 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE; 13399 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, 13400 LPFC_RQE_SIZE_8); 13401 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, 13402 (PAGE_SIZE/SLI4_PAGE_SIZE)); 13403 } else { 13404 switch (drq->entry_count) { 13405 default: 13406 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13407 "2536 Unsupported RQ count. 
(%d)\n", 13408 drq->entry_count); 13409 if (drq->entry_count < 512) { 13410 status = -EINVAL; 13411 goto out; 13412 } 13413 /* otherwise default to smallest count (drop through) */ 13414 case 512: 13415 bf_set(lpfc_rq_context_rqe_count, 13416 &rq_create->u.request.context, 13417 LPFC_RQ_RING_SIZE_512); 13418 break; 13419 case 1024: 13420 bf_set(lpfc_rq_context_rqe_count, 13421 &rq_create->u.request.context, 13422 LPFC_RQ_RING_SIZE_1024); 13423 break; 13424 case 2048: 13425 bf_set(lpfc_rq_context_rqe_count, 13426 &rq_create->u.request.context, 13427 LPFC_RQ_RING_SIZE_2048); 13428 break; 13429 case 4096: 13430 bf_set(lpfc_rq_context_rqe_count, 13431 &rq_create->u.request.context, 13432 LPFC_RQ_RING_SIZE_4096); 13433 break; 13434 } 13435 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 13436 LPFC_DATA_BUF_SIZE); 13437 } 13438 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 13439 cq->queue_id); 13440 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 13441 drq->page_count); 13442 list_for_each_entry(dmabuf, &drq->page_list, list) { 13443 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 13444 putPaddrLow(dmabuf->phys); 13445 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 13446 putPaddrHigh(dmabuf->phys); 13447 } 13448 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 13449 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 13450 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13451 /* The IOCTL status is embedded in the mailbox subheader. */ 13452 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 13453 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13454 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13455 if (shdr_status || shdr_add_status || rc) { 13456 status = -ENXIO; 13457 goto out; 13458 } 13459 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 13460 if (drq->queue_id == 0xFFFF) { 13461 status = -ENXIO; 13462 goto out; 13463 } 13464 drq->type = LPFC_DRQ; 13465 drq->assoc_qid = cq->queue_id; 13466 drq->subtype = subtype; 13467 drq->host_index = 0; 13468 drq->hba_index = 0; 13469 13470 /* link the header and data RQs onto the parent cq child list */ 13471 list_add_tail(&hrq->list, &cq->child_list); 13472 list_add_tail(&drq->list, &cq->child_list); 13473 13474 out: 13475 mempool_free(mbox, phba->mbox_mem_pool); 13476 return status; 13477 } 13478 13479 /** 13480 * lpfc_eq_destroy - Destroy an event Queue on the HBA 13481 * @eq: The queue structure associated with the queue to destroy. 13482 * 13483 * This function destroys a queue, as detailed in @eq by sending an mailbox 13484 * command, specific to the type of queue, to the HBA. 13485 * 13486 * The @eq struct is used to get the queue ID of the queue to destroy. 13487 * 13488 * On success this function will return a zero. If the queue destroy mailbox 13489 * command fails this function will return -ENXIO. 
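 *
 * Illustrative teardown ordering (a sketch only): the queue must be destroyed
 * on the HBA before its host memory is released:
 *
 *      rc = lpfc_eq_destroy(phba, eq);
 *      if (rc)
 *              ... the EQ_DESTROY mailbox failed; rc is -ENXIO ...
 *      lpfc_sli4_queue_free(eq);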
13490 **/ 13491 uint32_t 13492 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) 13493 { 13494 LPFC_MBOXQ_t *mbox; 13495 int rc, length, status = 0; 13496 uint32_t shdr_status, shdr_add_status; 13497 union lpfc_sli4_cfg_shdr *shdr; 13498 13499 /* sanity check on queue memory */ 13500 if (!eq) 13501 return -ENODEV; 13502 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); 13503 if (!mbox) 13504 return -ENOMEM; 13505 length = (sizeof(struct lpfc_mbx_eq_destroy) - 13506 sizeof(struct lpfc_sli4_cfg_mhdr)); 13507 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 13508 LPFC_MBOX_OPCODE_EQ_DESTROY, 13509 length, LPFC_SLI4_MBX_EMBED); 13510 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, 13511 eq->queue_id); 13512 mbox->vport = eq->phba->pport; 13513 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 13514 13515 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); 13516 /* The IOCTL status is embedded in the mailbox subheader. */ 13517 shdr = (union lpfc_sli4_cfg_shdr *) 13518 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; 13519 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13520 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13521 if (shdr_status || shdr_add_status || rc) { 13522 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13523 "2505 EQ_DESTROY mailbox failed with " 13524 "status x%x add_status x%x, mbx status x%x\n", 13525 shdr_status, shdr_add_status, rc); 13526 status = -ENXIO; 13527 } 13528 13529 /* Remove eq from any list */ 13530 list_del_init(&eq->list); 13531 mempool_free(mbox, eq->phba->mbox_mem_pool); 13532 return status; 13533 } 13534 13535 /** 13536 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA 13537 * @cq: The queue structure associated with the queue to destroy. 13538 * 13539 * This function destroys a queue, as detailed in @cq by sending an mailbox 13540 * command, specific to the type of queue, to the HBA. 13541 * 13542 * The @cq struct is used to get the queue ID of the queue to destroy. 13543 * 13544 * On success this function will return a zero. If the queue destroy mailbox 13545 * command fails this function will return -ENXIO. 13546 **/ 13547 uint32_t 13548 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) 13549 { 13550 LPFC_MBOXQ_t *mbox; 13551 int rc, length, status = 0; 13552 uint32_t shdr_status, shdr_add_status; 13553 union lpfc_sli4_cfg_shdr *shdr; 13554 13555 /* sanity check on queue memory */ 13556 if (!cq) 13557 return -ENODEV; 13558 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); 13559 if (!mbox) 13560 return -ENOMEM; 13561 length = (sizeof(struct lpfc_mbx_cq_destroy) - 13562 sizeof(struct lpfc_sli4_cfg_mhdr)); 13563 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 13564 LPFC_MBOX_OPCODE_CQ_DESTROY, 13565 length, LPFC_SLI4_MBX_EMBED); 13566 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, 13567 cq->queue_id); 13568 mbox->vport = cq->phba->pport; 13569 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 13570 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); 13571 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 13572 shdr = (union lpfc_sli4_cfg_shdr *) 13573 &mbox->u.mqe.un.wq_create.header.cfg_shdr; 13574 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13575 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13576 if (shdr_status || shdr_add_status || rc) { 13577 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13578 "2506 CQ_DESTROY mailbox failed with " 13579 "status x%x add_status x%x, mbx status x%x\n", 13580 shdr_status, shdr_add_status, rc); 13581 status = -ENXIO; 13582 } 13583 /* Remove cq from any list */ 13584 list_del_init(&cq->list); 13585 mempool_free(mbox, cq->phba->mbox_mem_pool); 13586 return status; 13587 } 13588 13589 /** 13590 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA 13591 * @qm: The queue structure associated with the queue to destroy. 13592 * 13593 * This function destroys a queue, as detailed in @mq by sending an mailbox 13594 * command, specific to the type of queue, to the HBA. 13595 * 13596 * The @mq struct is used to get the queue ID of the queue to destroy. 13597 * 13598 * On success this function will return a zero. If the queue destroy mailbox 13599 * command fails this function will return -ENXIO. 13600 **/ 13601 uint32_t 13602 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) 13603 { 13604 LPFC_MBOXQ_t *mbox; 13605 int rc, length, status = 0; 13606 uint32_t shdr_status, shdr_add_status; 13607 union lpfc_sli4_cfg_shdr *shdr; 13608 13609 /* sanity check on queue memory */ 13610 if (!mq) 13611 return -ENODEV; 13612 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); 13613 if (!mbox) 13614 return -ENOMEM; 13615 length = (sizeof(struct lpfc_mbx_mq_destroy) - 13616 sizeof(struct lpfc_sli4_cfg_mhdr)); 13617 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 13618 LPFC_MBOX_OPCODE_MQ_DESTROY, 13619 length, LPFC_SLI4_MBX_EMBED); 13620 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, 13621 mq->queue_id); 13622 mbox->vport = mq->phba->pport; 13623 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 13624 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL); 13625 /* The IOCTL status is embedded in the mailbox subheader. */ 13626 shdr = (union lpfc_sli4_cfg_shdr *) 13627 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; 13628 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13629 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13630 if (shdr_status || shdr_add_status || rc) { 13631 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13632 "2507 MQ_DESTROY mailbox failed with " 13633 "status x%x add_status x%x, mbx status x%x\n", 13634 shdr_status, shdr_add_status, rc); 13635 status = -ENXIO; 13636 } 13637 /* Remove mq from any list */ 13638 list_del_init(&mq->list); 13639 mempool_free(mbox, mq->phba->mbox_mem_pool); 13640 return status; 13641 } 13642 13643 /** 13644 * lpfc_wq_destroy - Destroy a Work Queue on the HBA 13645 * @wq: The queue structure associated with the queue to destroy. 13646 * 13647 * This function destroys a queue, as detailed in @wq by sending an mailbox 13648 * command, specific to the type of queue, to the HBA. 13649 * 13650 * The @wq struct is used to get the queue ID of the queue to destroy. 13651 * 13652 * On success this function will return a zero. If the queue destroy mailbox 13653 * command fails this function will return -ENXIO. 
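 *
 * Teardown-order sketch (an assumption for illustration, not code from this
 * file): a WQ is destroyed before the CQ it is associated with, mirroring
 * the create order in reverse:
 *
 *   lpfc_wq_destroy(phba, wq);
 *   lpfc_cq_destroy(phba, cq);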
13654 **/ 13655 uint32_t 13656 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) 13657 { 13658 LPFC_MBOXQ_t *mbox; 13659 int rc, length, status = 0; 13660 uint32_t shdr_status, shdr_add_status; 13661 union lpfc_sli4_cfg_shdr *shdr; 13662 13663 /* sanity check on queue memory */ 13664 if (!wq) 13665 return -ENODEV; 13666 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); 13667 if (!mbox) 13668 return -ENOMEM; 13669 length = (sizeof(struct lpfc_mbx_wq_destroy) - 13670 sizeof(struct lpfc_sli4_cfg_mhdr)); 13671 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 13672 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, 13673 length, LPFC_SLI4_MBX_EMBED); 13674 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, 13675 wq->queue_id); 13676 mbox->vport = wq->phba->pport; 13677 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 13678 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); 13679 shdr = (union lpfc_sli4_cfg_shdr *) 13680 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; 13681 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13682 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13683 if (shdr_status || shdr_add_status || rc) { 13684 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13685 "2508 WQ_DESTROY mailbox failed with " 13686 "status x%x add_status x%x, mbx status x%x\n", 13687 shdr_status, shdr_add_status, rc); 13688 status = -ENXIO; 13689 } 13690 /* Remove wq from any list */ 13691 list_del_init(&wq->list); 13692 mempool_free(mbox, wq->phba->mbox_mem_pool); 13693 return status; 13694 } 13695 13696 /** 13697 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA 13698 * @rq: The queue structure associated with the queue to destroy. 13699 * 13700 * This function destroys a queue, as detailed in @rq by sending an mailbox 13701 * command, specific to the type of queue, to the HBA. 13702 * 13703 * The @rq struct is used to get the queue ID of the queue to destroy. 13704 * 13705 * On success this function will return a zero. If the queue destroy mailbox 13706 * command fails this function will return -ENXIO. 13707 **/ 13708 uint32_t 13709 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, 13710 struct lpfc_queue *drq) 13711 { 13712 LPFC_MBOXQ_t *mbox; 13713 int rc, length, status = 0; 13714 uint32_t shdr_status, shdr_add_status; 13715 union lpfc_sli4_cfg_shdr *shdr; 13716 13717 /* sanity check on queue memory */ 13718 if (!hrq || !drq) 13719 return -ENODEV; 13720 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); 13721 if (!mbox) 13722 return -ENOMEM; 13723 length = (sizeof(struct lpfc_mbx_rq_destroy) - 13724 sizeof(struct lpfc_sli4_cfg_mhdr)); 13725 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 13726 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, 13727 length, LPFC_SLI4_MBX_EMBED); 13728 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 13729 hrq->queue_id); 13730 mbox->vport = hrq->phba->pport; 13731 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 13732 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); 13733 /* The IOCTL status is embedded in the mailbox subheader. 
*/
13734 shdr = (union lpfc_sli4_cfg_shdr *)
13735 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
13736 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13737 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13738 if (shdr_status || shdr_add_status || rc) {
13739 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13740 "2509 RQ_DESTROY mailbox failed with "
13741 "status x%x add_status x%x, mbx status x%x\n",
13742 shdr_status, shdr_add_status, rc);
13743 if (rc != MBX_TIMEOUT)
13744 mempool_free(mbox, hrq->phba->mbox_mem_pool);
13745 return -ENXIO;
13746 }
13747 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
13748 drq->queue_id);
13749 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
13750 shdr = (union lpfc_sli4_cfg_shdr *)
13751 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
13752 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13753 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13754 if (shdr_status || shdr_add_status || rc) {
13755 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13756 "2510 RQ_DESTROY mailbox failed with "
13757 "status x%x add_status x%x, mbx status x%x\n",
13758 shdr_status, shdr_add_status, rc);
13759 status = -ENXIO;
13760 }
13761 list_del_init(&hrq->list);
13762 list_del_init(&drq->list);
13763 mempool_free(mbox, hrq->phba->mbox_mem_pool);
13764 return status;
13765 }
13766
13767 /**
13768 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
13769 * @phba: pointer to lpfc hba data structure.
13770 * @pdma_phys_addr0: Physical address of the 1st SGL page.
13771 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
13772 * @xritag: the xritag that ties this io to the SGL pages.
13773 *
13774 * This routine will post the sgl pages for the IO that has the xritag
13775 * that is in the iocbq structure. The xritag is assigned during iocbq
13776 * creation and persists for as long as the driver is loaded.
13777 * If the caller has fewer than 256 scatter gather segments to map, then
13778 * pdma_phys_addr1 should be 0.
13779 * If the caller needs to map more than 256 scatter gather segments, then
13780 * pdma_phys_addr1 should be a valid physical address.
13781 * Physical addresses for SGLs must be 64 byte aligned.
13782 * If two SGLs are mapped, the first one must have 256 entries and
13783 * the second SGL can have between 1 and 256 entries.
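 *
 * Minimal usage sketch (illustrative only; sglq stands for any driver
 * object that owns one 64 byte aligned SGL page and a valid xritag, as in
 * lpfc_sli4_post_els_sgl_list() later in this file):
 *
 *   rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
 *   if (rc)
 *           return rc;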
13784 * 13785 * Return codes: 13786 * 0 - Success 13787 * -ENXIO, -ENOMEM - Failure 13788 **/ 13789 int 13790 lpfc_sli4_post_sgl(struct lpfc_hba *phba, 13791 dma_addr_t pdma_phys_addr0, 13792 dma_addr_t pdma_phys_addr1, 13793 uint16_t xritag) 13794 { 13795 struct lpfc_mbx_post_sgl_pages *post_sgl_pages; 13796 LPFC_MBOXQ_t *mbox; 13797 int rc; 13798 uint32_t shdr_status, shdr_add_status; 13799 uint32_t mbox_tmo; 13800 union lpfc_sli4_cfg_shdr *shdr; 13801 13802 if (xritag == NO_XRI) { 13803 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13804 "0364 Invalid param:\n"); 13805 return -EINVAL; 13806 } 13807 13808 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13809 if (!mbox) 13810 return -ENOMEM; 13811 13812 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 13813 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 13814 sizeof(struct lpfc_mbx_post_sgl_pages) - 13815 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); 13816 13817 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *) 13818 &mbox->u.mqe.un.post_sgl_pages; 13819 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag); 13820 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1); 13821 13822 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo = 13823 cpu_to_le32(putPaddrLow(pdma_phys_addr0)); 13824 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi = 13825 cpu_to_le32(putPaddrHigh(pdma_phys_addr0)); 13826 13827 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo = 13828 cpu_to_le32(putPaddrLow(pdma_phys_addr1)); 13829 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi = 13830 cpu_to_le32(putPaddrHigh(pdma_phys_addr1)); 13831 if (!phba->sli4_hba.intr_enable) 13832 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13833 else { 13834 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 13835 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 13836 } 13837 /* The IOCTL status is embedded in the mailbox subheader. */ 13838 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr; 13839 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13840 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13841 if (rc != MBX_TIMEOUT) 13842 mempool_free(mbox, phba->mbox_mem_pool); 13843 if (shdr_status || shdr_add_status || rc) { 13844 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13845 "2511 POST_SGL mailbox failed with " 13846 "status x%x add_status x%x, mbx status x%x\n", 13847 shdr_status, shdr_add_status, rc); 13848 rc = -ENXIO; 13849 } 13850 return 0; 13851 } 13852 13853 /** 13854 * lpfc_sli4_alloc_xri - Get an available rpi in the device's range 13855 * @phba: pointer to lpfc hba data structure. 13856 * 13857 * This routine is invoked to post rpi header templates to the 13858 * HBA consistent with the SLI-4 interface spec. This routine 13859 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 13860 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 13861 * 13862 * Returns 13863 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 13864 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 13865 **/ 13866 uint16_t 13867 lpfc_sli4_alloc_xri(struct lpfc_hba *phba) 13868 { 13869 unsigned long xri; 13870 13871 /* 13872 * Fetch the next logical xri. Because this index is logical, 13873 * the driver starts at 0 each time. 
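 * The xri_bmask bitmap is searched and the chosen bit is set under
 * hbalock, so concurrent callers cannot be handed the same logical
 * xri; __lpfc_sli4_free_xri() clears the bit again when the exchange
 * is released.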
*/
13875 spin_lock_irq(&phba->hbalock);
13876 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
13877 phba->sli4_hba.max_cfg_param.max_xri, 0);
13878 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
13879 spin_unlock_irq(&phba->hbalock);
13880 return NO_XRI;
13881 } else {
13882 set_bit(xri, phba->sli4_hba.xri_bmask);
13883 phba->sli4_hba.max_cfg_param.xri_used++;
13884 }
13885 spin_unlock_irq(&phba->hbalock);
13886 return xri;
13887 }
13888
13889 /**
13890 * __lpfc_sli4_free_xri - Release an xri for reuse.
13891 * @phba: pointer to lpfc hba data structure.
13892 *
13893 * This routine is invoked to release an xri to the pool of
13894 * available xris maintained by the driver.
13895 **/
13896 void
13897 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
13898 {
13899 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
13900 phba->sli4_hba.max_cfg_param.xri_used--;
13901 }
13902 }
13903
13904 /**
13905 * lpfc_sli4_free_xri - Release an xri for reuse.
13906 * @phba: pointer to lpfc hba data structure.
13907 *
13908 * This routine is invoked to release an xri to the pool of
13909 * available xris maintained by the driver.
13910 **/
13911 void
13912 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
13913 {
13914 spin_lock_irq(&phba->hbalock);
13915 __lpfc_sli4_free_xri(phba, xri);
13916 spin_unlock_irq(&phba->hbalock);
13917 }
13918
13919 /**
13920 * lpfc_sli4_next_xritag - Get an xritag for the io
13921 * @phba: Pointer to HBA context object.
13922 *
13923 * This function gets an xritag for the iocb. If there is no unused xritag
13924 * it will return NO_XRI (0xffff).
13925 * The function returns the allocated xritag if successful; NO_XRI indicates
13926 * that no unused xritag was available.
13927 * The caller is not required to hold any lock.
13928 **/
13929 uint16_t
13930 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
13931 {
13932 uint16_t xri_index;
13933
13934 xri_index = lpfc_sli4_alloc_xri(phba);
13935 if (xri_index == NO_XRI)
13936 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13937 "2004 Failed to allocate XRI.last XRITAG is %d"
13938 " Max XRI is %d, Used XRI is %d\n",
13939 xri_index,
13940 phba->sli4_hba.max_cfg_param.max_xri,
13941 phba->sli4_hba.max_cfg_param.xri_used);
13942 return xri_index;
13943 }
13944
13945 /**
13946 * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
13947 * @phba: pointer to lpfc hba data structure.
13948 * @post_sgl_list: pointer to els sgl entry list.
13949 * @post_cnt: number of els sgl entries on the list.
13950 *
13951 * This routine is invoked to post a block of driver's sgl pages to the
13952 * HBA using non-embedded mailbox command. No Lock is held. This routine
13953 * is only called when the driver is loading and after all IO has been
13954 * stopped.
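 *
 * Call sketch (illustrative; the list and count names are assumptions, but
 * the entries are expected to be lpfc_sglq structures with valid xritags):
 *
 *   rc = lpfc_sli4_post_els_sgl_list(phba, &els_sgl_list, els_xri_cnt);
 *   if (rc)
 *           return rc;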
13955 **/ 13956 static int 13957 lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba, 13958 struct list_head *post_sgl_list, 13959 int post_cnt) 13960 { 13961 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 13962 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 13963 struct sgl_page_pairs *sgl_pg_pairs; 13964 void *viraddr; 13965 LPFC_MBOXQ_t *mbox; 13966 uint32_t reqlen, alloclen, pg_pairs; 13967 uint32_t mbox_tmo; 13968 uint16_t xritag_start = 0; 13969 int rc = 0; 13970 uint32_t shdr_status, shdr_add_status; 13971 union lpfc_sli4_cfg_shdr *shdr; 13972 13973 reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) + 13974 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 13975 if (reqlen > SLI4_PAGE_SIZE) { 13976 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13977 "2559 Block sgl registration required DMA " 13978 "size (%d) great than a page\n", reqlen); 13979 return -ENOMEM; 13980 } 13981 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13982 if (!mbox) 13983 return -ENOMEM; 13984 13985 /* Allocate DMA memory and set up the non-embedded mailbox command */ 13986 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 13987 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 13988 LPFC_SLI4_MBX_NEMBED); 13989 13990 if (alloclen < reqlen) { 13991 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13992 "0285 Allocated DMA memory size (%d) is " 13993 "less than the requested DMA memory " 13994 "size (%d)\n", alloclen, reqlen); 13995 lpfc_sli4_mbox_cmd_free(phba, mbox); 13996 return -ENOMEM; 13997 } 13998 /* Set up the SGL pages in the non-embedded DMA pages */ 13999 viraddr = mbox->sge_array->addr[0]; 14000 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 14001 sgl_pg_pairs = &sgl->sgl_pg_pairs; 14002 14003 pg_pairs = 0; 14004 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) { 14005 /* Set up the sge entry */ 14006 sgl_pg_pairs->sgl_pg0_addr_lo = 14007 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 14008 sgl_pg_pairs->sgl_pg0_addr_hi = 14009 cpu_to_le32(putPaddrHigh(sglq_entry->phys)); 14010 sgl_pg_pairs->sgl_pg1_addr_lo = 14011 cpu_to_le32(putPaddrLow(0)); 14012 sgl_pg_pairs->sgl_pg1_addr_hi = 14013 cpu_to_le32(putPaddrHigh(0)); 14014 14015 /* Keep the first xritag on the list */ 14016 if (pg_pairs == 0) 14017 xritag_start = sglq_entry->sli4_xritag; 14018 sgl_pg_pairs++; 14019 pg_pairs++; 14020 } 14021 14022 /* Complete initialization and perform endian conversion. 
*/ 14023 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 14024 bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt); 14025 sgl->word0 = cpu_to_le32(sgl->word0); 14026 if (!phba->sli4_hba.intr_enable) 14027 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14028 else { 14029 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 14030 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 14031 } 14032 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 14033 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14034 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14035 if (rc != MBX_TIMEOUT) 14036 lpfc_sli4_mbox_cmd_free(phba, mbox); 14037 if (shdr_status || shdr_add_status || rc) { 14038 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14039 "2513 POST_SGL_BLOCK mailbox command failed " 14040 "status x%x add_status x%x mbx status x%x\n", 14041 shdr_status, shdr_add_status, rc); 14042 rc = -ENXIO; 14043 } 14044 return rc; 14045 } 14046 14047 /** 14048 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware 14049 * @phba: pointer to lpfc hba data structure. 14050 * @sblist: pointer to scsi buffer list. 14051 * @count: number of scsi buffers on the list. 14052 * 14053 * This routine is invoked to post a block of @count scsi sgl pages from a 14054 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command. 14055 * No Lock is held. 14056 * 14057 **/ 14058 int 14059 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, 14060 struct list_head *sblist, 14061 int count) 14062 { 14063 struct lpfc_scsi_buf *psb; 14064 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 14065 struct sgl_page_pairs *sgl_pg_pairs; 14066 void *viraddr; 14067 LPFC_MBOXQ_t *mbox; 14068 uint32_t reqlen, alloclen, pg_pairs; 14069 uint32_t mbox_tmo; 14070 uint16_t xritag_start = 0; 14071 int rc = 0; 14072 uint32_t shdr_status, shdr_add_status; 14073 dma_addr_t pdma_phys_bpl1; 14074 union lpfc_sli4_cfg_shdr *shdr; 14075 14076 /* Calculate the requested length of the dma memory */ 14077 reqlen = count * sizeof(struct sgl_page_pairs) + 14078 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 14079 if (reqlen > SLI4_PAGE_SIZE) { 14080 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 14081 "0217 Block sgl registration required DMA " 14082 "size (%d) great than a page\n", reqlen); 14083 return -ENOMEM; 14084 } 14085 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14086 if (!mbox) { 14087 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14088 "0283 Failed to allocate mbox cmd memory\n"); 14089 return -ENOMEM; 14090 } 14091 14092 /* Allocate DMA memory and set up the non-embedded mailbox command */ 14093 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 14094 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 14095 LPFC_SLI4_MBX_NEMBED); 14096 14097 if (alloclen < reqlen) { 14098 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14099 "2561 Allocated DMA memory size (%d) is " 14100 "less than the requested DMA memory " 14101 "size (%d)\n", alloclen, reqlen); 14102 lpfc_sli4_mbox_cmd_free(phba, mbox); 14103 return -ENOMEM; 14104 } 14105 14106 /* Get the first SGE entry from the non-embedded DMA memory */ 14107 viraddr = mbox->sge_array->addr[0]; 14108 14109 /* Set up the SGL pages in the non-embedded DMA pages */ 14110 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 14111 sgl_pg_pairs = &sgl->sgl_pg_pairs; 14112 14113 pg_pairs = 0; 14114 list_for_each_entry(psb, sblist, list) { 14115 /* Set up the sge entry */ 14116 sgl_pg_pairs->sgl_pg0_addr_lo = 14117 
cpu_to_le32(putPaddrLow(psb->dma_phys_bpl)); 14118 sgl_pg_pairs->sgl_pg0_addr_hi = 14119 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl)); 14120 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 14121 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE; 14122 else 14123 pdma_phys_bpl1 = 0; 14124 sgl_pg_pairs->sgl_pg1_addr_lo = 14125 cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 14126 sgl_pg_pairs->sgl_pg1_addr_hi = 14127 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 14128 /* Keep the first xritag on the list */ 14129 if (pg_pairs == 0) 14130 xritag_start = psb->cur_iocbq.sli4_xritag; 14131 sgl_pg_pairs++; 14132 pg_pairs++; 14133 } 14134 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 14135 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 14136 /* Perform endian conversion if necessary */ 14137 sgl->word0 = cpu_to_le32(sgl->word0); 14138 14139 if (!phba->sli4_hba.intr_enable) 14140 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14141 else { 14142 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 14143 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 14144 } 14145 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 14146 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14147 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14148 if (rc != MBX_TIMEOUT) 14149 lpfc_sli4_mbox_cmd_free(phba, mbox); 14150 if (shdr_status || shdr_add_status || rc) { 14151 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14152 "2564 POST_SGL_BLOCK mailbox command failed " 14153 "status x%x add_status x%x mbx status x%x\n", 14154 shdr_status, shdr_add_status, rc); 14155 rc = -ENXIO; 14156 } 14157 return rc; 14158 } 14159 14160 /** 14161 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 14162 * @phba: pointer to lpfc_hba struct that the frame was received on 14163 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 14164 * 14165 * This function checks the fields in the @fc_hdr to see if the FC frame is a 14166 * valid type of frame that the LPFC driver will handle. This function will 14167 * return a zero if the frame is a valid frame or a non zero value when the 14168 * frame does not pass the check. 
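 *
 * Typical use in the receive path (this mirrors
 * lpfc_sli4_handle_received_buffer() later in this file):
 *
 *   if (lpfc_fc_frame_check(phba, fc_hdr)) {
 *           lpfc_in_buf_free(phba, &dmabuf->dbuf);
 *           return;
 *   }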
14169 **/ 14170 static int 14171 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) 14172 { 14173 /* make rctl_names static to save stack space */ 14174 static char *rctl_names[] = FC_RCTL_NAMES_INIT; 14175 char *type_names[] = FC_TYPE_NAMES_INIT; 14176 struct fc_vft_header *fc_vft_hdr; 14177 uint32_t *header = (uint32_t *) fc_hdr; 14178 14179 switch (fc_hdr->fh_r_ctl) { 14180 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 14181 case FC_RCTL_DD_SOL_DATA: /* solicited data */ 14182 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ 14183 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ 14184 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ 14185 case FC_RCTL_DD_DATA_DESC: /* data descriptor */ 14186 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ 14187 case FC_RCTL_DD_CMD_STATUS: /* command status */ 14188 case FC_RCTL_ELS_REQ: /* extended link services request */ 14189 case FC_RCTL_ELS_REP: /* extended link services reply */ 14190 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ 14191 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ 14192 case FC_RCTL_BA_NOP: /* basic link service NOP */ 14193 case FC_RCTL_BA_ABTS: /* basic link service abort */ 14194 case FC_RCTL_BA_RMC: /* remove connection */ 14195 case FC_RCTL_BA_ACC: /* basic accept */ 14196 case FC_RCTL_BA_RJT: /* basic reject */ 14197 case FC_RCTL_BA_PRMT: 14198 case FC_RCTL_ACK_1: /* acknowledge_1 */ 14199 case FC_RCTL_ACK_0: /* acknowledge_0 */ 14200 case FC_RCTL_P_RJT: /* port reject */ 14201 case FC_RCTL_F_RJT: /* fabric reject */ 14202 case FC_RCTL_P_BSY: /* port busy */ 14203 case FC_RCTL_F_BSY: /* fabric busy to data frame */ 14204 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ 14205 case FC_RCTL_LCR: /* link credit reset */ 14206 case FC_RCTL_END: /* end */ 14207 break; 14208 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ 14209 fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 14210 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; 14211 return lpfc_fc_frame_check(phba, fc_hdr); 14212 default: 14213 goto drop; 14214 } 14215 switch (fc_hdr->fh_type) { 14216 case FC_TYPE_BLS: 14217 case FC_TYPE_ELS: 14218 case FC_TYPE_FCP: 14219 case FC_TYPE_CT: 14220 break; 14221 case FC_TYPE_IP: 14222 case FC_TYPE_ILS: 14223 default: 14224 goto drop; 14225 } 14226 14227 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 14228 "2538 Received frame rctl:%s (x%x), type:%s (x%x), " 14229 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", 14230 rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl, 14231 type_names[fc_hdr->fh_type], fc_hdr->fh_type, 14232 be32_to_cpu(header[0]), be32_to_cpu(header[1]), 14233 be32_to_cpu(header[2]), be32_to_cpu(header[3]), 14234 be32_to_cpu(header[4]), be32_to_cpu(header[5]), 14235 be32_to_cpu(header[6])); 14236 return 0; 14237 drop: 14238 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 14239 "2539 Dropped frame rctl:%s type:%s\n", 14240 rctl_names[fc_hdr->fh_r_ctl], 14241 type_names[fc_hdr->fh_type]); 14242 return 1; 14243 } 14244 14245 /** 14246 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame 14247 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 14248 * 14249 * This function processes the FC header to retrieve the VFI from the VF 14250 * header, if one exists. This function will return the VFI if one exists 14251 * or 0 if no VSAN Header exists. 
14252 **/ 14253 static uint32_t 14254 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) 14255 { 14256 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 14257 14258 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) 14259 return 0; 14260 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); 14261 } 14262 14263 /** 14264 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to 14265 * @phba: Pointer to the HBA structure to search for the vport on 14266 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 14267 * @fcfi: The FC Fabric ID that the frame came from 14268 * 14269 * This function searches the @phba for a vport that matches the content of the 14270 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the 14271 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function 14272 * returns the matching vport pointer or NULL if unable to match frame to a 14273 * vport. 14274 **/ 14275 static struct lpfc_vport * 14276 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, 14277 uint16_t fcfi) 14278 { 14279 struct lpfc_vport **vports; 14280 struct lpfc_vport *vport = NULL; 14281 int i; 14282 uint32_t did = (fc_hdr->fh_d_id[0] << 16 | 14283 fc_hdr->fh_d_id[1] << 8 | 14284 fc_hdr->fh_d_id[2]); 14285 14286 if (did == Fabric_DID) 14287 return phba->pport; 14288 if ((phba->pport->fc_flag & FC_PT2PT) && 14289 !(phba->link_state == LPFC_HBA_READY)) 14290 return phba->pport; 14291 14292 vports = lpfc_create_vport_work_array(phba); 14293 if (vports != NULL) 14294 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 14295 if (phba->fcf.fcfi == fcfi && 14296 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && 14297 vports[i]->fc_myDID == did) { 14298 vport = vports[i]; 14299 break; 14300 } 14301 } 14302 lpfc_destroy_vport_work_array(phba, vports); 14303 return vport; 14304 } 14305 14306 /** 14307 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp 14308 * @vport: The vport to work on. 14309 * 14310 * This function updates the receive sequence time stamp for this vport. The 14311 * receive sequence time stamp indicates the time that the last frame of the 14312 * the sequence that has been idle for the longest amount of time was received. 14313 * the driver uses this time stamp to indicate if any received sequences have 14314 * timed out. 14315 **/ 14316 void 14317 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) 14318 { 14319 struct lpfc_dmabuf *h_buf; 14320 struct hbq_dmabuf *dmabuf = NULL; 14321 14322 /* get the oldest sequence on the rcv list */ 14323 h_buf = list_get_first(&vport->rcv_buffer_list, 14324 struct lpfc_dmabuf, list); 14325 if (!h_buf) 14326 return; 14327 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 14328 vport->rcv_buffer_time_stamp = dmabuf->time_stamp; 14329 } 14330 14331 /** 14332 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. 14333 * @vport: The vport that the received sequences were sent to. 14334 * 14335 * This function cleans up all outstanding received sequences. This is called 14336 * by the driver when a link event or user action invalidates all the received 14337 * sequences. 
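 *
 * Illustrative call (the surrounding link-down handling is an assumption
 * and is not part of this excerpt):
 *
 *   lpfc_cleanup_rcv_buffers(vport);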
14338 **/ 14339 void 14340 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) 14341 { 14342 struct lpfc_dmabuf *h_buf, *hnext; 14343 struct lpfc_dmabuf *d_buf, *dnext; 14344 struct hbq_dmabuf *dmabuf = NULL; 14345 14346 /* start with the oldest sequence on the rcv list */ 14347 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 14348 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 14349 list_del_init(&dmabuf->hbuf.list); 14350 list_for_each_entry_safe(d_buf, dnext, 14351 &dmabuf->dbuf.list, list) { 14352 list_del_init(&d_buf->list); 14353 lpfc_in_buf_free(vport->phba, d_buf); 14354 } 14355 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 14356 } 14357 } 14358 14359 /** 14360 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. 14361 * @vport: The vport that the received sequences were sent to. 14362 * 14363 * This function determines whether any received sequences have timed out by 14364 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp 14365 * indicates that there is at least one timed out sequence this routine will 14366 * go through the received sequences one at a time from most inactive to most 14367 * active to determine which ones need to be cleaned up. Once it has determined 14368 * that a sequence needs to be cleaned up it will simply free up the resources 14369 * without sending an abort. 14370 **/ 14371 void 14372 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) 14373 { 14374 struct lpfc_dmabuf *h_buf, *hnext; 14375 struct lpfc_dmabuf *d_buf, *dnext; 14376 struct hbq_dmabuf *dmabuf = NULL; 14377 unsigned long timeout; 14378 int abort_count = 0; 14379 14380 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 14381 vport->rcv_buffer_time_stamp); 14382 if (list_empty(&vport->rcv_buffer_list) || 14383 time_before(jiffies, timeout)) 14384 return; 14385 /* start with the oldest sequence on the rcv list */ 14386 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 14387 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 14388 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 14389 dmabuf->time_stamp); 14390 if (time_before(jiffies, timeout)) 14391 break; 14392 abort_count++; 14393 list_del_init(&dmabuf->hbuf.list); 14394 list_for_each_entry_safe(d_buf, dnext, 14395 &dmabuf->dbuf.list, list) { 14396 list_del_init(&d_buf->list); 14397 lpfc_in_buf_free(vport->phba, d_buf); 14398 } 14399 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 14400 } 14401 if (abort_count) 14402 lpfc_update_rcv_time_stamp(vport); 14403 } 14404 14405 /** 14406 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences 14407 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame 14408 * 14409 * This function searches through the existing incomplete sequences that have 14410 * been sent to this @vport. If the frame matches one of the incomplete 14411 * sequences then the dbuf in the @dmabuf is added to the list of frames that 14412 * make up that sequence. If no sequence is found that matches this frame then 14413 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list 14414 * This function returns a pointer to the first dmabuf in the sequence list that 14415 * the frame was linked to. 
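 *
 * Usage sketch (mirrors lpfc_sli4_handle_received_buffer() later in this
 * file):
 *
 *   seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
 *   if (!seq_dmabuf) {
 *           lpfc_in_buf_free(phba, &dmabuf->dbuf);
 *           return;
 *   }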
14416 **/
14417 static struct hbq_dmabuf *
14418 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
14419 {
14420 struct fc_frame_header *new_hdr;
14421 struct fc_frame_header *temp_hdr;
14422 struct lpfc_dmabuf *d_buf;
14423 struct lpfc_dmabuf *h_buf;
14424 struct hbq_dmabuf *seq_dmabuf = NULL;
14425 struct hbq_dmabuf *temp_dmabuf = NULL;
14426
14427 INIT_LIST_HEAD(&dmabuf->dbuf.list);
14428 dmabuf->time_stamp = jiffies;
14429 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14430 /* Use the hdr_buf to find the sequence that this frame belongs to */
14431 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
14432 temp_hdr = (struct fc_frame_header *)h_buf->virt;
14433 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
14434 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
14435 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
14436 continue;
14437 /* found a pending sequence that matches this frame */
14438 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14439 break;
14440 }
14441 if (!seq_dmabuf) {
14442 /*
14443 * This indicates first frame received for this sequence.
14444 * Queue the buffer on the vport's rcv_buffer_list.
14445 */
14446 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
14447 lpfc_update_rcv_time_stamp(vport);
14448 return dmabuf;
14449 }
14450 temp_hdr = seq_dmabuf->hbuf.virt;
14451 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
14452 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
14453 list_del_init(&seq_dmabuf->hbuf.list);
14454 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
14455 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
14456 lpfc_update_rcv_time_stamp(vport);
14457 return dmabuf;
14458 }
14459 /* move this sequence to the tail to indicate a young sequence */
14460 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
14461 seq_dmabuf->time_stamp = jiffies;
14462 lpfc_update_rcv_time_stamp(vport);
14463 if (list_empty(&seq_dmabuf->dbuf.list)) {
14464 temp_hdr = dmabuf->hbuf.virt;
14465 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
14466 return seq_dmabuf;
14467 }
14468 /* find the correct place in the sequence to insert this frame */
14469 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
14470 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14471 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
14472 /*
14473 * If the frame's sequence count is greater than the frame on
14474 * the list, then insert the frame right after this frame.
14475 */
14476 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
14477 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
14478 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
14479 return seq_dmabuf;
14480 }
14481 }
14482 return NULL;
14483 }
14484
14485 /**
14486 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
14487 * @vport: pointer to a virtual port
14488 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14489 *
14490 * This function tries to abort the partially assembled sequence described
14491 * by the information from the basic abort @dmabuf. It checks to see whether
14492 * such a partially assembled sequence is held by the driver. If so, it frees
14493 * all the frames from the partially assembled sequence.
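 *
 * Usage sketch (mirrors lpfc_sli4_handle_unsol_abort() later in this file):
 *
 *   aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
 *   if (!aborted)
 *           aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);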
14494 * 14495 * Return 14496 * true -- if there is matching partially assembled sequence present and all 14497 * the frames freed with the sequence; 14498 * false -- if there is no matching partially assembled sequence present so 14499 * nothing got aborted in the lower layer driver 14500 **/ 14501 static bool 14502 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, 14503 struct hbq_dmabuf *dmabuf) 14504 { 14505 struct fc_frame_header *new_hdr; 14506 struct fc_frame_header *temp_hdr; 14507 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf; 14508 struct hbq_dmabuf *seq_dmabuf = NULL; 14509 14510 /* Use the hdr_buf to find the sequence that matches this frame */ 14511 INIT_LIST_HEAD(&dmabuf->dbuf.list); 14512 INIT_LIST_HEAD(&dmabuf->hbuf.list); 14513 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 14514 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 14515 temp_hdr = (struct fc_frame_header *)h_buf->virt; 14516 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 14517 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 14518 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 14519 continue; 14520 /* found a pending sequence that matches this frame */ 14521 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 14522 break; 14523 } 14524 14525 /* Free up all the frames from the partially assembled sequence */ 14526 if (seq_dmabuf) { 14527 list_for_each_entry_safe(d_buf, n_buf, 14528 &seq_dmabuf->dbuf.list, list) { 14529 list_del_init(&d_buf->list); 14530 lpfc_in_buf_free(vport->phba, d_buf); 14531 } 14532 return true; 14533 } 14534 return false; 14535 } 14536 14537 /** 14538 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp 14539 * @vport: pointer to a vitural port 14540 * @dmabuf: pointer to a dmabuf that describes the FC sequence 14541 * 14542 * This function tries to abort from the assembed sequence from upper level 14543 * protocol, described by the information from basic abbort @dmabuf. It 14544 * checks to see whether such pending context exists at upper level protocol. 14545 * If so, it shall clean up the pending context. 14546 * 14547 * Return 14548 * true -- if there is matching pending context of the sequence cleaned 14549 * at ulp; 14550 * false -- if there is no matching pending context of the sequence present 14551 * at ulp. 14552 **/ 14553 static bool 14554 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 14555 { 14556 struct lpfc_hba *phba = vport->phba; 14557 int handled; 14558 14559 /* Accepting abort at ulp with SLI4 only */ 14560 if (phba->sli_rev < LPFC_SLI_REV4) 14561 return false; 14562 14563 /* Register all caring upper level protocols to attend abort */ 14564 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf); 14565 if (handled) 14566 return true; 14567 14568 return false; 14569 } 14570 14571 /** 14572 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler 14573 * @phba: Pointer to HBA context object. 14574 * @cmd_iocbq: pointer to the command iocbq structure. 14575 * @rsp_iocbq: pointer to the response iocbq structure. 14576 * 14577 * This function handles the sequence abort response iocb command complete 14578 * event. It properly releases the memory allocated to the sequence abort 14579 * accept iocb. 
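 *
 * Wiring sketch (mirrors lpfc_sli4_seq_abort_rsp() below): the handler is
 * installed on the BLS response iocb before that iocb is issued:
 *
 *   ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
 *   rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);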
14580 **/ 14581 static void 14582 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, 14583 struct lpfc_iocbq *cmd_iocbq, 14584 struct lpfc_iocbq *rsp_iocbq) 14585 { 14586 struct lpfc_nodelist *ndlp; 14587 14588 if (cmd_iocbq) { 14589 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1; 14590 lpfc_nlp_put(ndlp); 14591 lpfc_nlp_not_used(ndlp); 14592 lpfc_sli_release_iocbq(phba, cmd_iocbq); 14593 } 14594 14595 /* Failure means BLS ABORT RSP did not get delivered to remote node*/ 14596 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus) 14597 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14598 "3154 BLS ABORT RSP failed, data: x%x/x%x\n", 14599 rsp_iocbq->iocb.ulpStatus, 14600 rsp_iocbq->iocb.un.ulpWord[4]); 14601 } 14602 14603 /** 14604 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver. 14605 * @phba: Pointer to HBA context object. 14606 * @xri: xri id in transaction. 14607 * 14608 * This function validates the xri maps to the known range of XRIs allocated an 14609 * used by the driver. 14610 **/ 14611 uint16_t 14612 lpfc_sli4_xri_inrange(struct lpfc_hba *phba, 14613 uint16_t xri) 14614 { 14615 int i; 14616 14617 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) { 14618 if (xri == phba->sli4_hba.xri_ids[i]) 14619 return i; 14620 } 14621 return NO_XRI; 14622 } 14623 14624 /** 14625 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort 14626 * @phba: Pointer to HBA context object. 14627 * @fc_hdr: pointer to a FC frame header. 14628 * 14629 * This function sends a basic response to a previous unsol sequence abort 14630 * event after aborting the sequence handling. 14631 **/ 14632 static void 14633 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, 14634 struct fc_frame_header *fc_hdr, bool aborted) 14635 { 14636 struct lpfc_hba *phba = vport->phba; 14637 struct lpfc_iocbq *ctiocb = NULL; 14638 struct lpfc_nodelist *ndlp; 14639 uint16_t oxid, rxid, xri, lxri; 14640 uint32_t sid, fctl; 14641 IOCB_t *icmd; 14642 int rc; 14643 14644 if (!lpfc_is_link_up(phba)) 14645 return; 14646 14647 sid = sli4_sid_from_fc_hdr(fc_hdr); 14648 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 14649 rxid = be16_to_cpu(fc_hdr->fh_rx_id); 14650 14651 ndlp = lpfc_findnode_did(vport, sid); 14652 if (!ndlp) { 14653 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 14654 if (!ndlp) { 14655 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 14656 "1268 Failed to allocate ndlp for " 14657 "oxid:x%x SID:x%x\n", oxid, sid); 14658 return; 14659 } 14660 lpfc_nlp_init(vport, ndlp, sid); 14661 /* Put ndlp onto pport node list */ 14662 lpfc_enqueue_node(vport, ndlp); 14663 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 14664 /* re-setup ndlp without removing from node list */ 14665 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 14666 if (!ndlp) { 14667 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 14668 "3275 Failed to active ndlp found " 14669 "for oxid:x%x SID:x%x\n", oxid, sid); 14670 return; 14671 } 14672 } 14673 14674 /* Allocate buffer for rsp iocb */ 14675 ctiocb = lpfc_sli_get_iocbq(phba); 14676 if (!ctiocb) 14677 return; 14678 14679 /* Extract the F_CTL field from FC_HDR */ 14680 fctl = sli4_fctl_from_fc_hdr(fc_hdr); 14681 14682 icmd = &ctiocb->iocb; 14683 icmd->un.xseq64.bdl.bdeSize = 0; 14684 icmd->un.xseq64.bdl.ulpIoTag32 = 0; 14685 icmd->un.xseq64.w5.hcsw.Dfctl = 0; 14686 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC; 14687 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS; 14688 14689 /* Fill in the rest of iocb fields */ 14690 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX; 14691 icmd->ulpBdeCount = 0; 14692 icmd->ulpLe 
= 1; 14693 icmd->ulpClass = CLASS3; 14694 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 14695 ctiocb->context1 = lpfc_nlp_get(ndlp); 14696 14697 ctiocb->iocb_cmpl = NULL; 14698 ctiocb->vport = phba->pport; 14699 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; 14700 ctiocb->sli4_lxritag = NO_XRI; 14701 ctiocb->sli4_xritag = NO_XRI; 14702 14703 if (fctl & FC_FC_EX_CTX) 14704 /* Exchange responder sent the abort so we 14705 * own the oxid. 14706 */ 14707 xri = oxid; 14708 else 14709 xri = rxid; 14710 lxri = lpfc_sli4_xri_inrange(phba, xri); 14711 if (lxri != NO_XRI) 14712 lpfc_set_rrq_active(phba, ndlp, lxri, 14713 (xri == oxid) ? rxid : oxid, 0); 14714 /* For BA_ABTS from exchange responder, if the logical xri with 14715 * the oxid maps to the FCP XRI range, the port no longer has 14716 * that exchange context, send a BLS_RJT. Override the IOCB for 14717 * a BA_RJT. 14718 */ 14719 if ((fctl & FC_FC_EX_CTX) && 14720 (lxri > lpfc_sli4_get_els_iocb_cnt(phba))) { 14721 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 14722 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 14723 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 14724 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 14725 } 14726 14727 /* If BA_ABTS failed to abort a partially assembled receive sequence, 14728 * the driver no longer has that exchange, send a BLS_RJT. Override 14729 * the IOCB for a BA_RJT. 14730 */ 14731 if (aborted == false) { 14732 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 14733 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 14734 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 14735 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 14736 } 14737 14738 if (fctl & FC_FC_EX_CTX) { 14739 /* ABTS sent by responder to CT exchange, construction 14740 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG 14741 * field and RX_ID from ABTS for RX_ID field. 14742 */ 14743 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); 14744 } else { 14745 /* ABTS sent by initiator to CT exchange, construction 14746 * of BA_ACC will need to allocate a new XRI as for the 14747 * XRI_TAG field. 14748 */ 14749 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); 14750 } 14751 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); 14752 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); 14753 14754 /* Xmit CT abts response on exchange <xid> */ 14755 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 14756 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", 14757 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); 14758 14759 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 14760 if (rc == IOCB_ERROR) { 14761 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 14762 "2925 Failed to issue CT ABTS RSP x%x on " 14763 "xri x%x, Data x%x\n", 14764 icmd->un.xseq64.w5.hcsw.Rctl, oxid, 14765 phba->link_state); 14766 lpfc_nlp_put(ndlp); 14767 ctiocb->context1 = NULL; 14768 lpfc_sli_release_iocbq(phba, ctiocb); 14769 } 14770 } 14771 14772 /** 14773 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event 14774 * @vport: Pointer to the vport on which this sequence was received 14775 * @dmabuf: pointer to a dmabuf that describes the FC sequence 14776 * 14777 * This function handles an SLI-4 unsolicited abort event. If the unsolicited 14778 * receive sequence is only partially assembed by the driver, it shall abort 14779 * the partially assembled frames for the sequence. 
Otherwise, if the 14780 * unsolicited receive sequence has been completely assembled and passed to 14781 * the Upper Layer Protocol (UPL), it then mark the per oxid status for the 14782 * unsolicited sequence has been aborted. After that, it will issue a basic 14783 * accept to accept the abort. 14784 **/ 14785 void 14786 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, 14787 struct hbq_dmabuf *dmabuf) 14788 { 14789 struct lpfc_hba *phba = vport->phba; 14790 struct fc_frame_header fc_hdr; 14791 uint32_t fctl; 14792 bool aborted; 14793 14794 /* Make a copy of fc_hdr before the dmabuf being released */ 14795 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); 14796 fctl = sli4_fctl_from_fc_hdr(&fc_hdr); 14797 14798 if (fctl & FC_FC_EX_CTX) { 14799 /* ABTS by responder to exchange, no cleanup needed */ 14800 aborted = true; 14801 } else { 14802 /* ABTS by initiator to exchange, need to do cleanup */ 14803 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf); 14804 if (aborted == false) 14805 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf); 14806 } 14807 lpfc_in_buf_free(phba, &dmabuf->dbuf); 14808 14809 /* Respond with BA_ACC or BA_RJT accordingly */ 14810 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted); 14811 } 14812 14813 /** 14814 * lpfc_seq_complete - Indicates if a sequence is complete 14815 * @dmabuf: pointer to a dmabuf that describes the FC sequence 14816 * 14817 * This function checks the sequence, starting with the frame described by 14818 * @dmabuf, to see if all the frames associated with this sequence are present. 14819 * the frames associated with this sequence are linked to the @dmabuf using the 14820 * dbuf list. This function looks for two major things. 1) That the first frame 14821 * has a sequence count of zero. 2) There is a frame with last frame of sequence 14822 * set. 3) That there are no holes in the sequence count. The function will 14823 * return 1 when the sequence is complete, otherwise it will return 0. 14824 **/ 14825 static int 14826 lpfc_seq_complete(struct hbq_dmabuf *dmabuf) 14827 { 14828 struct fc_frame_header *hdr; 14829 struct lpfc_dmabuf *d_buf; 14830 struct hbq_dmabuf *seq_dmabuf; 14831 uint32_t fctl; 14832 int seq_count = 0; 14833 14834 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 14835 /* make sure first fame of sequence has a sequence count of zero */ 14836 if (hdr->fh_seq_cnt != seq_count) 14837 return 0; 14838 fctl = (hdr->fh_f_ctl[0] << 16 | 14839 hdr->fh_f_ctl[1] << 8 | 14840 hdr->fh_f_ctl[2]); 14841 /* If last frame of sequence we can return success. */ 14842 if (fctl & FC_FC_END_SEQ) 14843 return 1; 14844 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) { 14845 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 14846 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 14847 /* If there is a hole in the sequence count then fail. */ 14848 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt)) 14849 return 0; 14850 fctl = (hdr->fh_f_ctl[0] << 16 | 14851 hdr->fh_f_ctl[1] << 8 | 14852 hdr->fh_f_ctl[2]); 14853 /* If last frame of sequence we can return success. */ 14854 if (fctl & FC_FC_END_SEQ) 14855 return 1; 14856 } 14857 return 0; 14858 } 14859 14860 /** 14861 * lpfc_prep_seq - Prep sequence for ULP processing 14862 * @vport: Pointer to the vport on which this sequence was received 14863 * @dmabuf: pointer to a dmabuf that describes the FC sequence 14864 * 14865 * This function takes a sequence, described by a list of frames, and creates 14866 * a list of iocbq structures to describe the sequence. 
This iocbq list will be 14867 * used to issue to the generic unsolicited sequence handler. This routine 14868 * returns a pointer to the first iocbq in the list. If the function is unable 14869 * to allocate an iocbq then it throw out the received frames that were not 14870 * able to be described and return a pointer to the first iocbq. If unable to 14871 * allocate any iocbqs (including the first) this function will return NULL. 14872 **/ 14873 static struct lpfc_iocbq * 14874 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) 14875 { 14876 struct hbq_dmabuf *hbq_buf; 14877 struct lpfc_dmabuf *d_buf, *n_buf; 14878 struct lpfc_iocbq *first_iocbq, *iocbq; 14879 struct fc_frame_header *fc_hdr; 14880 uint32_t sid; 14881 uint32_t len, tot_len; 14882 struct ulp_bde64 *pbde; 14883 14884 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 14885 /* remove from receive buffer list */ 14886 list_del_init(&seq_dmabuf->hbuf.list); 14887 lpfc_update_rcv_time_stamp(vport); 14888 /* get the Remote Port's SID */ 14889 sid = sli4_sid_from_fc_hdr(fc_hdr); 14890 tot_len = 0; 14891 /* Get an iocbq struct to fill in. */ 14892 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 14893 if (first_iocbq) { 14894 /* Initialize the first IOCB. */ 14895 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; 14896 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 14897 14898 /* Check FC Header to see what TYPE of frame we are rcv'ing */ 14899 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) { 14900 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX; 14901 first_iocbq->iocb.un.rcvels.parmRo = 14902 sli4_did_from_fc_hdr(fc_hdr); 14903 first_iocbq->iocb.ulpPU = PARM_NPIV_DID; 14904 } else 14905 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 14906 first_iocbq->iocb.ulpContext = NO_XRI; 14907 first_iocbq->iocb.unsli3.rcvsli3.ox_id = 14908 be16_to_cpu(fc_hdr->fh_ox_id); 14909 /* iocbq is prepped for internal consumption. Physical vpi. 
*/ 14910 first_iocbq->iocb.unsli3.rcvsli3.vpi = 14911 vport->phba->vpi_ids[vport->vpi]; 14912 /* put the first buffer into the first IOCBq */ 14913 tot_len = bf_get(lpfc_rcqe_length, 14914 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 14915 14916 first_iocbq->context2 = &seq_dmabuf->dbuf; 14917 first_iocbq->context3 = NULL; 14918 first_iocbq->iocb.ulpBdeCount = 1; 14919 if (tot_len > LPFC_DATA_BUF_SIZE) 14920 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 14921 LPFC_DATA_BUF_SIZE; 14922 else 14923 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len; 14924 14925 first_iocbq->iocb.un.rcvels.remoteID = sid; 14926 14927 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 14928 } 14929 iocbq = first_iocbq; 14930 /* 14931 * Each IOCBq can have two Buffers assigned, so go through the list 14932 * of buffers for this sequence and save two buffers in each IOCBq 14933 */ 14934 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { 14935 if (!iocbq) { 14936 lpfc_in_buf_free(vport->phba, d_buf); 14937 continue; 14938 } 14939 if (!iocbq->context3) { 14940 iocbq->context3 = d_buf; 14941 iocbq->iocb.ulpBdeCount++; 14942 /* We need to get the size out of the right CQE */ 14943 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 14944 len = bf_get(lpfc_rcqe_length, 14945 &hbq_buf->cq_event.cqe.rcqe_cmpl); 14946 pbde = (struct ulp_bde64 *) 14947 &iocbq->iocb.unsli3.sli3Words[4]; 14948 if (len > LPFC_DATA_BUF_SIZE) 14949 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; 14950 else 14951 pbde->tus.f.bdeSize = len; 14952 14953 iocbq->iocb.unsli3.rcvsli3.acc_len += len; 14954 tot_len += len; 14955 } else { 14956 iocbq = lpfc_sli_get_iocbq(vport->phba); 14957 if (!iocbq) { 14958 if (first_iocbq) { 14959 first_iocbq->iocb.ulpStatus = 14960 IOSTAT_FCP_RSP_ERROR; 14961 first_iocbq->iocb.un.ulpWord[4] = 14962 IOERR_NO_RESOURCES; 14963 } 14964 lpfc_in_buf_free(vport->phba, d_buf); 14965 continue; 14966 } 14967 /* We need to get the size out of the right CQE */ 14968 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 14969 len = bf_get(lpfc_rcqe_length, 14970 &hbq_buf->cq_event.cqe.rcqe_cmpl); 14971 iocbq->context2 = d_buf; 14972 iocbq->context3 = NULL; 14973 iocbq->iocb.ulpBdeCount = 1; 14974 if (len > LPFC_DATA_BUF_SIZE) 14975 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 14976 LPFC_DATA_BUF_SIZE; 14977 else 14978 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len; 14979 14980 tot_len += len; 14981 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 14982 14983 iocbq->iocb.un.rcvels.remoteID = sid; 14984 list_add_tail(&iocbq->list, &first_iocbq->list); 14985 } 14986 } 14987 return first_iocbq; 14988 } 14989 14990 static void 14991 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, 14992 struct hbq_dmabuf *seq_dmabuf) 14993 { 14994 struct fc_frame_header *fc_hdr; 14995 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb; 14996 struct lpfc_hba *phba = vport->phba; 14997 14998 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 14999 iocbq = lpfc_prep_seq(vport, seq_dmabuf); 15000 if (!iocbq) { 15001 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15002 "2707 Ring %d handler: Failed to allocate " 15003 "iocb Rctl x%x Type x%x received\n", 15004 LPFC_ELS_RING, 15005 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 15006 return; 15007 } 15008 if (!lpfc_complete_unsol_iocb(phba, 15009 &phba->sli.ring[LPFC_ELS_RING], 15010 iocbq, fc_hdr->fh_r_ctl, 15011 fc_hdr->fh_type)) 15012 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15013 "2540 Ring %d handler: unexpected Rctl " 15014 "x%x Type x%x received\n", 15015 LPFC_ELS_RING, 15016 fc_hdr->fh_r_ctl, 
fc_hdr->fh_type); 15017 15018 /* Free iocb created in lpfc_prep_seq */ 15019 list_for_each_entry_safe(curr_iocb, next_iocb, 15020 &iocbq->list, list) { 15021 list_del_init(&curr_iocb->list); 15022 lpfc_sli_release_iocbq(phba, curr_iocb); 15023 } 15024 lpfc_sli_release_iocbq(phba, iocbq); 15025 } 15026 15027 /** 15028 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware 15029 * @phba: Pointer to HBA context object. 15030 * 15031 * This function is called with no lock held. This function processes all 15032 * the received buffers and gives it to upper layers when a received buffer 15033 * indicates that it is the final frame in the sequence. The interrupt 15034 * service routine processes received buffers at interrupt contexts and adds 15035 * received dma buffers to the rb_pend_list queue and signals the worker thread. 15036 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the 15037 * appropriate receive function when the final frame in a sequence is received. 15038 **/ 15039 void 15040 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, 15041 struct hbq_dmabuf *dmabuf) 15042 { 15043 struct hbq_dmabuf *seq_dmabuf; 15044 struct fc_frame_header *fc_hdr; 15045 struct lpfc_vport *vport; 15046 uint32_t fcfi; 15047 uint32_t did; 15048 15049 /* Process each received buffer */ 15050 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 15051 /* check to see if this a valid type of frame */ 15052 if (lpfc_fc_frame_check(phba, fc_hdr)) { 15053 lpfc_in_buf_free(phba, &dmabuf->dbuf); 15054 return; 15055 } 15056 if ((bf_get(lpfc_cqe_code, 15057 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)) 15058 fcfi = bf_get(lpfc_rcqe_fcf_id_v1, 15059 &dmabuf->cq_event.cqe.rcqe_cmpl); 15060 else 15061 fcfi = bf_get(lpfc_rcqe_fcf_id, 15062 &dmabuf->cq_event.cqe.rcqe_cmpl); 15063 15064 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); 15065 if (!vport) { 15066 /* throw out the frame */ 15067 lpfc_in_buf_free(phba, &dmabuf->dbuf); 15068 return; 15069 } 15070 15071 /* d_id this frame is directed to */ 15072 did = sli4_did_from_fc_hdr(fc_hdr); 15073 15074 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */ 15075 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) && 15076 (did != Fabric_DID)) { 15077 /* 15078 * Throw out the frame if we are not pt2pt. 15079 * The pt2pt protocol allows for discovery frames 15080 * to be received without a registered VPI. 15081 */ 15082 if (!(vport->fc_flag & FC_PT2PT) || 15083 (phba->link_state == LPFC_HBA_READY)) { 15084 lpfc_in_buf_free(phba, &dmabuf->dbuf); 15085 return; 15086 } 15087 } 15088 15089 /* Handle the basic abort sequence (BA_ABTS) event */ 15090 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { 15091 lpfc_sli4_handle_unsol_abort(vport, dmabuf); 15092 return; 15093 } 15094 15095 /* Link this frame */ 15096 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); 15097 if (!seq_dmabuf) { 15098 /* unable to add frame to vport - throw it out */ 15099 lpfc_in_buf_free(phba, &dmabuf->dbuf); 15100 return; 15101 } 15102 /* If not last frame in sequence continue processing frames. */ 15103 if (!lpfc_seq_complete(seq_dmabuf)) 15104 return; 15105 15106 /* Send the complete sequence to the upper layer protocol */ 15107 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); 15108 } 15109 15110 /** 15111 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port 15112 * @phba: pointer to lpfc hba data structure. 
15113 * 15114 * This routine is invoked to post rpi header templates to the 15115 * HBA consistent with the SLI-4 interface spec. This routine 15116 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 15117 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 15118 * 15119 * This routine does not require any locks. It's usage is expected 15120 * to be driver load or reset recovery when the driver is 15121 * sequential. 15122 * 15123 * Return codes 15124 * 0 - successful 15125 * -EIO - The mailbox failed to complete successfully. 15126 * When this error occurs, the driver is not guaranteed 15127 * to have any rpi regions posted to the device and 15128 * must either attempt to repost the regions or take a 15129 * fatal error. 15130 **/ 15131 int 15132 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) 15133 { 15134 struct lpfc_rpi_hdr *rpi_page; 15135 uint32_t rc = 0; 15136 uint16_t lrpi = 0; 15137 15138 /* SLI4 ports that support extents do not require RPI headers. */ 15139 if (!phba->sli4_hba.rpi_hdrs_in_use) 15140 goto exit; 15141 if (phba->sli4_hba.extents_in_use) 15142 return -EIO; 15143 15144 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 15145 /* 15146 * Assign the rpi headers a physical rpi only if the driver 15147 * has not initialized those resources. A port reset only 15148 * needs the headers posted. 15149 */ 15150 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) != 15151 LPFC_RPI_RSRC_RDY) 15152 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 15153 15154 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); 15155 if (rc != MBX_SUCCESS) { 15156 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15157 "2008 Error %d posting all rpi " 15158 "headers\n", rc); 15159 rc = -EIO; 15160 break; 15161 } 15162 } 15163 15164 exit: 15165 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 15166 LPFC_RPI_RSRC_RDY); 15167 return rc; 15168 } 15169 15170 /** 15171 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port 15172 * @phba: pointer to lpfc hba data structure. 15173 * @rpi_page: pointer to the rpi memory region. 15174 * 15175 * This routine is invoked to post a single rpi header to the 15176 * HBA consistent with the SLI-4 interface spec. This memory region 15177 * maps up to 64 rpi context regions. 15178 * 15179 * Return codes 15180 * 0 - successful 15181 * -ENOMEM - No available memory 15182 * -EIO - The mailbox failed to complete successfully. 15183 **/ 15184 int 15185 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) 15186 { 15187 LPFC_MBOXQ_t *mboxq; 15188 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; 15189 uint32_t rc = 0; 15190 uint32_t shdr_status, shdr_add_status; 15191 union lpfc_sli4_cfg_shdr *shdr; 15192 15193 /* SLI4 ports that support extents do not require RPI headers. */ 15194 if (!phba->sli4_hba.rpi_hdrs_in_use) 15195 return rc; 15196 if (phba->sli4_hba.extents_in_use) 15197 return -EIO; 15198 15199 /* The port is notified of the header region via a mailbox command. */ 15200 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15201 if (!mboxq) { 15202 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15203 "2001 Unable to allocate memory for issuing " 15204 "SLI_CONFIG_SPECIAL mailbox command\n"); 15205 return -ENOMEM; 15206 } 15207 15208 /* Post all rpi memory regions to the port. 
*/
15209 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
15210 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
15211 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
15212 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
15213 sizeof(struct lpfc_sli4_cfg_mhdr),
15214 LPFC_SLI4_MBX_EMBED);
15215
15216
15217 /* Post the physical rpi to the port for this rpi header. */
15218 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
15219 rpi_page->start_rpi);
15220 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
15221 hdr_tmpl, rpi_page->page_count);
15222
15223 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
15224 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
15225 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
15226 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
15227 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15228 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15229 if (rc != MBX_TIMEOUT)
15230 mempool_free(mboxq, phba->mbox_mem_pool);
15231 if (shdr_status || shdr_add_status || rc) {
15232 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15233 "2514 POST_RPI_HDR mailbox failed with "
15234 "status x%x add_status x%x, mbx status x%x\n",
15235 shdr_status, shdr_add_status, rc);
15236 rc = -ENXIO;
15237 }
15238 return rc;
15239 }
15240
15241 /**
15242 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
15243 * @phba: pointer to lpfc hba data structure.
15244 *
15245 * This routine is invoked to allocate the next available rpi from the
15246 * driver's rpi bitmask and mark it as in use, consistent with the
15247 * SLI-4 interface spec. If rpi resources are running low, the routine
15248 * also attempts to post another rpi header page to the port.
15249 *
15250 * Returns
15251 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
15252 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
15253 **/
15254 int
15255 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
15256 {
15257 unsigned long rpi;
15258 uint16_t max_rpi, rpi_limit;
15259 uint16_t rpi_remaining, lrpi = 0;
15260 struct lpfc_rpi_hdr *rpi_hdr;
15261 unsigned long iflag;
15262
15263 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
15264 rpi_limit = phba->sli4_hba.next_rpi;
15265
15266 /*
15267 * Fetch the next logical rpi. Because this index is logical,
15268 * the driver starts at 0 each time.
15269 */
15270 spin_lock_irqsave(&phba->hbalock, iflag);
15271 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
15272 if (rpi >= rpi_limit)
15273 rpi = LPFC_RPI_ALLOC_ERROR;
15274 else {
15275 set_bit(rpi, phba->sli4_hba.rpi_bmask);
15276 phba->sli4_hba.max_cfg_param.rpi_used++;
15277 phba->sli4_hba.rpi_count++;
15278 }
15279
15280 /*
15281 * Don't try to allocate more rpi header regions if the device limit
15282 * has been exhausted.
15283 */
15284 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
15285 (phba->sli4_hba.rpi_count >= max_rpi)) {
15286 spin_unlock_irqrestore(&phba->hbalock, iflag);
15287 return rpi;
15288 }
15289
15290 /*
15291 * RPI header postings are not required for SLI4 ports capable of
15292 * extents.
15293 */
15294 if (!phba->sli4_hba.rpi_hdrs_in_use) {
15295 spin_unlock_irqrestore(&phba->hbalock, iflag);
15296 return rpi;
15297 }
15298
15299 /*
15300 * If the driver is running low on rpi resources, allocate another
15301 * page now. Note that the next_rpi value is used because
15302 * it represents how many are actually in use whereas max_rpi notes
15303 * the maximum supported by the device.
15304 */
15305 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
15306 spin_unlock_irqrestore(&phba->hbalock, iflag);
15307 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
15308 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
15309 if (!rpi_hdr) {
15310 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15311 "2002 Error Could not grow rpi "
15312 "count\n");
15313 } else {
15314 lrpi = rpi_hdr->start_rpi;
15315 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
15316 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
15317 }
15318 }
15319
15320 return rpi;
15321 }
15322
15323 /**
15324 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
15325 * @phba: pointer to lpfc hba data structure.
15326 *
15327 * This routine is invoked to release @rpi back to the pool of
15328 * available rpis maintained by the driver; the caller holds the hbalock.
15329 **/
15330 void
15331 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
15332 {
15333 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
15334 phba->sli4_hba.rpi_count--;
15335 phba->sli4_hba.max_cfg_param.rpi_used--;
15336 }
15337 }
15338
15339 /**
15340 * lpfc_sli4_free_rpi - Release an rpi for reuse.
15341 * @phba: pointer to lpfc hba data structure.
15342 *
15343 * This routine is invoked to release @rpi back to the pool of
15344 * available rpis maintained by the driver.
15345 **/
15346 void
15347 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
15348 {
15349 spin_lock_irq(&phba->hbalock);
15350 __lpfc_sli4_free_rpi(phba, rpi);
15351 spin_unlock_irq(&phba->hbalock);
15352 }
15353
15354 /**
15355 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
15356 * @phba: pointer to lpfc hba data structure.
15357 *
15358 * This routine is invoked to free the rpi bitmask and rpi id array
15359 * that the driver uses to track rpi allocation.
15360 **/
15361 void
15362 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
15363 {
15364 kfree(phba->sli4_hba.rpi_bmask);
15365 kfree(phba->sli4_hba.rpi_ids);
15366 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
15367 }
15368
15369 /**
15370 * lpfc_sli4_resume_rpi - Resume an rpi with the port
15371 * @ndlp: pointer to the node whose rpi is to be resumed.
15372 * @cmpl: completion handler for the RESUME_RPI mailbox (may be NULL).
15373 * @arg: completion handler context, saved in the mailbox context1 field.
15374 * This routine issues a RESUME_RPI mailbox command for the rpi registered to @ndlp.
15375 **/
15376 int
15377 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
15378 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
15379 {
15380 LPFC_MBOXQ_t *mboxq;
15381 struct lpfc_hba *phba = ndlp->phba;
15382 int rc;
15383
15384 /* The port is notified of the rpi resumption via a mailbox command. */
15385 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15386 if (!mboxq)
15387 return -ENOMEM;
15388
15389 /* Construct the RESUME_RPI mailbox command for this node's rpi. */
15390 lpfc_resume_rpi(mboxq, ndlp);
15391 if (cmpl) {
15392 mboxq->mbox_cmpl = cmpl;
15393 mboxq->context1 = arg;
15394 mboxq->context2 = ndlp;
15395 } else
15396 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15397 mboxq->vport = ndlp->vport;
15398 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15399 if (rc == MBX_NOT_FINISHED) {
15400 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15401 "2010 Resume RPI Mailbox failed "
15402 "status %d, mbxStatus x%x\n", rc,
15403 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
15404 mempool_free(mboxq, phba->mbox_mem_pool);
15405 return -EIO;
15406 }
15407 return 0;
15408 }
15409
15410 /**
15411 * lpfc_sli4_init_vpi - Initialize a vpi with the port
15412 * @vport: Pointer to the vport for which the vpi is being initialized
15413 *
15414 * This routine is invoked to activate a vpi with the port.
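 * The INIT_VPI mailbox is issued synchronously (via lpfc_sli_issue_mbox_wait()
 * in the body below), so the caller must be in a context that can sleep.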
15415 * 15416 * Returns: 15417 * 0 success 15418 * -Evalue otherwise 15419 **/ 15420 int 15421 lpfc_sli4_init_vpi(struct lpfc_vport *vport) 15422 { 15423 LPFC_MBOXQ_t *mboxq; 15424 int rc = 0; 15425 int retval = MBX_SUCCESS; 15426 uint32_t mbox_tmo; 15427 struct lpfc_hba *phba = vport->phba; 15428 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15429 if (!mboxq) 15430 return -ENOMEM; 15431 lpfc_init_vpi(phba, mboxq, vport->vpi); 15432 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 15433 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 15434 if (rc != MBX_SUCCESS) { 15435 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 15436 "2022 INIT VPI Mailbox failed " 15437 "status %d, mbxStatus x%x\n", rc, 15438 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 15439 retval = -EIO; 15440 } 15441 if (rc != MBX_TIMEOUT) 15442 mempool_free(mboxq, vport->phba->mbox_mem_pool); 15443 15444 return retval; 15445 } 15446 15447 /** 15448 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. 15449 * @phba: pointer to lpfc hba data structure. 15450 * @mboxq: Pointer to mailbox object. 15451 * 15452 * This routine is invoked to manually add a single FCF record. The caller 15453 * must pass a completely initialized FCF_Record. This routine takes 15454 * care of the nonembedded mailbox operations. 15455 **/ 15456 static void 15457 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 15458 { 15459 void *virt_addr; 15460 union lpfc_sli4_cfg_shdr *shdr; 15461 uint32_t shdr_status, shdr_add_status; 15462 15463 virt_addr = mboxq->sge_array->addr[0]; 15464 /* The IOCTL status is embedded in the mailbox subheader. */ 15465 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; 15466 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15467 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15468 15469 if ((shdr_status || shdr_add_status) && 15470 (shdr_status != STATUS_FCF_IN_USE)) 15471 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15472 "2558 ADD_FCF_RECORD mailbox failed with " 15473 "status x%x add_status x%x\n", 15474 shdr_status, shdr_add_status); 15475 15476 lpfc_sli4_mbox_cmd_free(phba, mboxq); 15477 } 15478 15479 /** 15480 * lpfc_sli4_add_fcf_record - Manually add an FCF Record. 15481 * @phba: pointer to lpfc hba data structure. 15482 * @fcf_record: pointer to the initialized fcf record to add. 15483 * 15484 * This routine is invoked to manually add a single FCF record. The caller 15485 * must pass a completely initialized FCF_Record. This routine takes 15486 * care of the nonembedded mailbox operations. 
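 *
 * Illustration only (a hedged sketch of typical driver-internal usage, not a
 * definitive call sequence; fcf_record and fcf_index come from the caller's
 * context): a default record is usually built first and then registered, e.g.
 *
 *   lpfc_sli4_build_dflt_fcf_record(phba, fcf_record, fcf_index);
 *   rc = lpfc_sli4_add_fcf_record(phba, fcf_record);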
15487 **/ 15488 int 15489 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) 15490 { 15491 int rc = 0; 15492 LPFC_MBOXQ_t *mboxq; 15493 uint8_t *bytep; 15494 void *virt_addr; 15495 dma_addr_t phys_addr; 15496 struct lpfc_mbx_sge sge; 15497 uint32_t alloc_len, req_len; 15498 uint32_t fcfindex; 15499 15500 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15501 if (!mboxq) { 15502 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15503 "2009 Failed to allocate mbox for ADD_FCF cmd\n"); 15504 return -ENOMEM; 15505 } 15506 15507 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + 15508 sizeof(uint32_t); 15509 15510 /* Allocate DMA memory and set up the non-embedded mailbox command */ 15511 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 15512 LPFC_MBOX_OPCODE_FCOE_ADD_FCF, 15513 req_len, LPFC_SLI4_MBX_NEMBED); 15514 if (alloc_len < req_len) { 15515 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15516 "2523 Allocated DMA memory size (x%x) is " 15517 "less than the requested DMA memory " 15518 "size (x%x)\n", alloc_len, req_len); 15519 lpfc_sli4_mbox_cmd_free(phba, mboxq); 15520 return -ENOMEM; 15521 } 15522 15523 /* 15524 * Get the first SGE entry from the non-embedded DMA memory. This 15525 * routine only uses a single SGE. 15526 */ 15527 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 15528 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); 15529 virt_addr = mboxq->sge_array->addr[0]; 15530 /* 15531 * Configure the FCF record for FCFI 0. This is the driver's 15532 * hardcoded default and gets used in nonFIP mode. 15533 */ 15534 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); 15535 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 15536 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); 15537 15538 /* 15539 * Copy the fcf_index and the FCF Record Data. The data starts after 15540 * the FCoE header plus word10. The data copy needs to be endian 15541 * correct. 15542 */ 15543 bytep += sizeof(uint32_t); 15544 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); 15545 mboxq->vport = phba->pport; 15546 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; 15547 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 15548 if (rc == MBX_NOT_FINISHED) { 15549 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15550 "2515 ADD_FCF_RECORD mailbox failed with " 15551 "status 0x%x\n", rc); 15552 lpfc_sli4_mbox_cmd_free(phba, mboxq); 15553 rc = -EIO; 15554 } else 15555 rc = 0; 15556 15557 return rc; 15558 } 15559 15560 /** 15561 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. 15562 * @phba: pointer to lpfc hba data structure. 15563 * @fcf_record: pointer to the fcf record to write the default data. 15564 * @fcf_index: FCF table entry index. 15565 * 15566 * This routine is invoked to build the driver's default FCF record. The 15567 * values used are hardcoded. This routine handles memory initialization. 
15568 *
15569 **/
15570 void
15571 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
15572 struct fcf_record *fcf_record,
15573 uint16_t fcf_index)
15574 {
15575 memset(fcf_record, 0, sizeof(struct fcf_record));
15576 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
15577 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
15578 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
15579 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
15580 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
15581 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
15582 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
15583 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
15584 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
15585 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
15586 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
15587 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
15588 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
15589 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
15590 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
15591 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
15592 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
15593 /* Set the VLAN bit map */
15594 if (phba->valid_vlan) {
15595 fcf_record->vlan_bitmap[phba->vlan_id / 8]
15596 = 1 << (phba->vlan_id % 8);
15597 }
15598 }
15599
15600 /**
15601 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
15602 * @phba: pointer to lpfc hba data structure.
15603 * @fcf_index: FCF table entry offset.
15604 *
15605 * This routine is invoked to scan the entire FCF table by reading FCF
15606 * records and processing them one at a time, starting from @fcf_index,
15607 * for initial FCF discovery or fast FCF failover rediscovery.
15608 *
15609 * Return 0 if the mailbox command is submitted successfully, non-zero
15610 * otherwise.
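 *
 * A minimal sketch of how a full-table scan is typically started (the
 * LPFC_FCOE_FCF_GET_FIRST index requests the first record; shown only
 * for illustration):
 *
 *   rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);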
15611 **/ 15612 int 15613 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 15614 { 15615 int rc = 0, error; 15616 LPFC_MBOXQ_t *mboxq; 15617 15618 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; 15619 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; 15620 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15621 if (!mboxq) { 15622 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15623 "2000 Failed to allocate mbox for " 15624 "READ_FCF cmd\n"); 15625 error = -ENOMEM; 15626 goto fail_fcf_scan; 15627 } 15628 /* Construct the read FCF record mailbox command */ 15629 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 15630 if (rc) { 15631 error = -EINVAL; 15632 goto fail_fcf_scan; 15633 } 15634 /* Issue the mailbox command asynchronously */ 15635 mboxq->vport = phba->pport; 15636 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 15637 15638 spin_lock_irq(&phba->hbalock); 15639 phba->hba_flag |= FCF_TS_INPROG; 15640 spin_unlock_irq(&phba->hbalock); 15641 15642 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 15643 if (rc == MBX_NOT_FINISHED) 15644 error = -EIO; 15645 else { 15646 /* Reset eligible FCF count for new scan */ 15647 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 15648 phba->fcf.eligible_fcf_cnt = 0; 15649 error = 0; 15650 } 15651 fail_fcf_scan: 15652 if (error) { 15653 if (mboxq) 15654 lpfc_sli4_mbox_cmd_free(phba, mboxq); 15655 /* FCF scan failed, clear FCF_TS_INPROG flag */ 15656 spin_lock_irq(&phba->hbalock); 15657 phba->hba_flag &= ~FCF_TS_INPROG; 15658 spin_unlock_irq(&phba->hbalock); 15659 } 15660 return error; 15661 } 15662 15663 /** 15664 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. 15665 * @phba: pointer to lpfc hba data structure. 15666 * @fcf_index: FCF table entry offset. 15667 * 15668 * This routine is invoked to read an FCF record indicated by @fcf_index 15669 * and to use it for FLOGI roundrobin FCF failover. 15670 * 15671 * Return 0 if the mailbox command is submitted successfully, none 0 15672 * otherwise. 15673 **/ 15674 int 15675 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 15676 { 15677 int rc = 0, error; 15678 LPFC_MBOXQ_t *mboxq; 15679 15680 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15681 if (!mboxq) { 15682 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 15683 "2763 Failed to allocate mbox for " 15684 "READ_FCF cmd\n"); 15685 error = -ENOMEM; 15686 goto fail_fcf_read; 15687 } 15688 /* Construct the read FCF record mailbox command */ 15689 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 15690 if (rc) { 15691 error = -EINVAL; 15692 goto fail_fcf_read; 15693 } 15694 /* Issue the mailbox command asynchronously */ 15695 mboxq->vport = phba->pport; 15696 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; 15697 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 15698 if (rc == MBX_NOT_FINISHED) 15699 error = -EIO; 15700 else 15701 error = 0; 15702 15703 fail_fcf_read: 15704 if (error && mboxq) 15705 lpfc_sli4_mbox_cmd_free(phba, mboxq); 15706 return error; 15707 } 15708 15709 /** 15710 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. 15711 * @phba: pointer to lpfc hba data structure. 15712 * @fcf_index: FCF table entry offset. 15713 * 15714 * This routine is invoked to read an FCF record indicated by @fcf_index to 15715 * determine whether it's eligible for FLOGI roundrobin failover list. 15716 * 15717 * Return 0 if the mailbox command is submitted successfully, none 0 15718 * otherwise. 
15719 **/
15720 int
15721 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
15722 {
15723 int rc = 0, error;
15724 LPFC_MBOXQ_t *mboxq;
15725
15726 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15727 if (!mboxq) {
15728 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
15729 "2758 Failed to allocate mbox for "
15730 "READ_FCF cmd\n");
15731 error = -ENOMEM;
15732 goto fail_fcf_read;
15733 }
15734 /* Construct the read FCF record mailbox command */
15735 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
15736 if (rc) {
15737 error = -EINVAL;
15738 goto fail_fcf_read;
15739 }
15740 /* Issue the mailbox command asynchronously */
15741 mboxq->vport = phba->pport;
15742 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
15743 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15744 if (rc == MBX_NOT_FINISHED)
15745 error = -EIO;
15746 else
15747 error = 0;
15748
15749 fail_fcf_read:
15750 if (error && mboxq)
15751 lpfc_sli4_mbox_cmd_free(phba, mboxq);
15752 return error;
15753 }
15754
15755 /**
15756 * lpfc_check_next_fcf_pri_level - Repopulate the rr_bmask from the next priority level
15757 * @phba: pointer to the lpfc_hba struct for this port.
15758 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
15759 * routine when the rr_bmask is empty. The FCF indices are put into the
15760 * rr_bmask based on their priority level, starting from the highest priority
15761 * and working down to the lowest. The most likely FCF candidate will be in the
15762 * highest priority group. When this routine is called it searches the fcf_pri
15763 * list for the next lowest priority group and repopulates the rr_bmask with
15764 * only those fcf indexes.
15765 * Returns:
15766 * 1=success 0=failure
15767 **/
15768 int
15769 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
15770 {
15771 uint16_t next_fcf_pri;
15772 uint16_t last_index;
15773 struct lpfc_fcf_pri *fcf_pri;
15774 int rc;
15775 int ret = 0;
15776
15777 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
15778 LPFC_SLI4_FCF_TBL_INDX_MAX);
15779 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15780 "3060 Last IDX %d\n", last_index);
15781
15782 /* Verify the priority list has 2 or more entries */
15783 spin_lock_irq(&phba->hbalock);
15784 if (list_empty(&phba->fcf.fcf_pri_list) ||
15785 list_is_singular(&phba->fcf.fcf_pri_list)) {
15786 spin_unlock_irq(&phba->hbalock);
15787 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15788 "3061 Last IDX %d\n", last_index);
15789 return 0; /* Empty rr list */
15790 }
15791 spin_unlock_irq(&phba->hbalock);
15792
15793 next_fcf_pri = 0;
15794 /*
15795 * Clear the rr_bmask and set all of the bits that are at this
15796 * priority.
15797 */
15798 memset(phba->fcf.fcf_rr_bmask, 0,
15799 sizeof(*phba->fcf.fcf_rr_bmask));
15800 spin_lock_irq(&phba->hbalock);
15801 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
15802 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
15803 continue;
15804 /*
15805 * The first entry that has not had a FLOGI failure
15806 * holds the highest priority.
15807 */
15808 if (!next_fcf_pri)
15809 next_fcf_pri = fcf_pri->fcf_rec.priority;
15810 spin_unlock_irq(&phba->hbalock);
15811 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
15812 rc = lpfc_sli4_fcf_rr_index_set(phba,
15813 fcf_pri->fcf_rec.fcf_index);
15814 if (rc)
15815 return 0;
15816 }
15817 spin_lock_irq(&phba->hbalock);
15818 }
15819 /*
15820 * If next_fcf_pri was not set above and the list is not empty then
15821 * we have failed FLOGI on all of the entries. So clear the FLOGI-failed
15822 * flags and start at the beginning.
15823 */ 15824 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) { 15825 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 15826 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED; 15827 /* 15828 * the 1st priority that has not FLOGI failed 15829 * will be the highest. 15830 */ 15831 if (!next_fcf_pri) 15832 next_fcf_pri = fcf_pri->fcf_rec.priority; 15833 spin_unlock_irq(&phba->hbalock); 15834 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 15835 rc = lpfc_sli4_fcf_rr_index_set(phba, 15836 fcf_pri->fcf_rec.fcf_index); 15837 if (rc) 15838 return 0; 15839 } 15840 spin_lock_irq(&phba->hbalock); 15841 } 15842 } else 15843 ret = 1; 15844 spin_unlock_irq(&phba->hbalock); 15845 15846 return ret; 15847 } 15848 /** 15849 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index 15850 * @phba: pointer to lpfc hba data structure. 15851 * 15852 * This routine is to get the next eligible FCF record index in a round 15853 * robin fashion. If the next eligible FCF record index equals to the 15854 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 15855 * shall be returned, otherwise, the next eligible FCF record's index 15856 * shall be returned. 15857 **/ 15858 uint16_t 15859 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) 15860 { 15861 uint16_t next_fcf_index; 15862 15863 initial_priority: 15864 /* Search start from next bit of currently registered FCF index */ 15865 next_fcf_index = phba->fcf.current_rec.fcf_indx; 15866 15867 next_priority: 15868 /* Determine the next fcf index to check */ 15869 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX; 15870 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 15871 LPFC_SLI4_FCF_TBL_INDX_MAX, 15872 next_fcf_index); 15873 15874 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 15875 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 15876 /* 15877 * If we have wrapped then we need to clear the bits that 15878 * have been tested so that we can detect when we should 15879 * change the priority level. 15880 */ 15881 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 15882 LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 15883 } 15884 15885 15886 /* Check roundrobin failover list empty condition */ 15887 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX || 15888 next_fcf_index == phba->fcf.current_rec.fcf_indx) { 15889 /* 15890 * If next fcf index is not found check if there are lower 15891 * Priority level fcf's in the fcf_priority list. 15892 * Set up the rr_bmask with all of the avaiable fcf bits 15893 * at that level and continue the selection process. 
15894 */ 15895 if (lpfc_check_next_fcf_pri_level(phba)) 15896 goto initial_priority; 15897 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 15898 "2844 No roundrobin failover FCF available\n"); 15899 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) 15900 return LPFC_FCOE_FCF_NEXT_NONE; 15901 else { 15902 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 15903 "3063 Only FCF available idx %d, flag %x\n", 15904 next_fcf_index, 15905 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag); 15906 return next_fcf_index; 15907 } 15908 } 15909 15910 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX && 15911 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag & 15912 LPFC_FCF_FLOGI_FAILED) 15913 goto next_priority; 15914 15915 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 15916 "2845 Get next roundrobin failover FCF (x%x)\n", 15917 next_fcf_index); 15918 15919 return next_fcf_index; 15920 } 15921 15922 /** 15923 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index 15924 * @phba: pointer to lpfc hba data structure. 15925 * 15926 * This routine sets the FCF record index in to the eligible bmask for 15927 * roundrobin failover search. It checks to make sure that the index 15928 * does not go beyond the range of the driver allocated bmask dimension 15929 * before setting the bit. 15930 * 15931 * Returns 0 if the index bit successfully set, otherwise, it returns 15932 * -EINVAL. 15933 **/ 15934 int 15935 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) 15936 { 15937 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 15938 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 15939 "2610 FCF (x%x) reached driver's book " 15940 "keeping dimension:x%x\n", 15941 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 15942 return -EINVAL; 15943 } 15944 /* Set the eligible FCF record index bmask */ 15945 set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 15946 15947 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 15948 "2790 Set FCF (x%x) to roundrobin FCF failover " 15949 "bmask\n", fcf_index); 15950 15951 return 0; 15952 } 15953 15954 /** 15955 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index 15956 * @phba: pointer to lpfc hba data structure. 15957 * 15958 * This routine clears the FCF record index from the eligible bmask for 15959 * roundrobin failover search. It checks to make sure that the index 15960 * does not go beyond the range of the driver allocated bmask dimension 15961 * before clearing the bit. 15962 **/ 15963 void 15964 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 15965 { 15966 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next; 15967 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 15968 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 15969 "2762 FCF (x%x) reached driver's book " 15970 "keeping dimension:x%x\n", 15971 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 15972 return; 15973 } 15974 /* Clear the eligible FCF record index bmask */ 15975 spin_lock_irq(&phba->hbalock); 15976 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list, 15977 list) { 15978 if (fcf_pri->fcf_rec.fcf_index == fcf_index) { 15979 list_del_init(&fcf_pri->list); 15980 break; 15981 } 15982 } 15983 spin_unlock_irq(&phba->hbalock); 15984 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 15985 15986 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 15987 "2791 Clear FCF (x%x) from roundrobin failover " 15988 "bmask\n", fcf_index); 15989 } 15990 15991 /** 15992 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table 15993 * @phba: pointer to lpfc hba data structure. 
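 * @mbox: pointer to the completed mailbox object.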
15994 * 15995 * This routine is the completion routine for the rediscover FCF table mailbox 15996 * command. If the mailbox command returned failure, it will try to stop the 15997 * FCF rediscover wait timer. 15998 **/ 15999 void 16000 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 16001 { 16002 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 16003 uint32_t shdr_status, shdr_add_status; 16004 16005 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 16006 16007 shdr_status = bf_get(lpfc_mbox_hdr_status, 16008 &redisc_fcf->header.cfg_shdr.response); 16009 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 16010 &redisc_fcf->header.cfg_shdr.response); 16011 if (shdr_status || shdr_add_status) { 16012 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 16013 "2746 Requesting for FCF rediscovery failed " 16014 "status x%x add_status x%x\n", 16015 shdr_status, shdr_add_status); 16016 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { 16017 spin_lock_irq(&phba->hbalock); 16018 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 16019 spin_unlock_irq(&phba->hbalock); 16020 /* 16021 * CVL event triggered FCF rediscover request failed, 16022 * last resort to re-try current registered FCF entry. 16023 */ 16024 lpfc_retry_pport_discovery(phba); 16025 } else { 16026 spin_lock_irq(&phba->hbalock); 16027 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 16028 spin_unlock_irq(&phba->hbalock); 16029 /* 16030 * DEAD FCF event triggered FCF rediscover request 16031 * failed, last resort to fail over as a link down 16032 * to FCF registration. 16033 */ 16034 lpfc_sli4_fcf_dead_failthrough(phba); 16035 } 16036 } else { 16037 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 16038 "2775 Start FCF rediscover quiescent timer\n"); 16039 /* 16040 * Start FCF rediscovery wait timer for pending FCF 16041 * before rescan FCF record table. 16042 */ 16043 lpfc_fcf_redisc_wait_start_timer(phba); 16044 } 16045 16046 mempool_free(mbox, phba->mbox_mem_pool); 16047 } 16048 16049 /** 16050 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port. 16051 * @phba: pointer to lpfc hba data structure. 16052 * 16053 * This routine is invoked to request for rediscovery of the entire FCF table 16054 * by the port. 
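 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - no available memory for the mailbox command
 *   -EIO - the mailbox could not be issued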
16055 **/ 16056 int 16057 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) 16058 { 16059 LPFC_MBOXQ_t *mbox; 16060 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 16061 int rc, length; 16062 16063 /* Cancel retry delay timers to all vports before FCF rediscover */ 16064 lpfc_cancel_all_vport_retry_delay_timer(phba); 16065 16066 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16067 if (!mbox) { 16068 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16069 "2745 Failed to allocate mbox for " 16070 "requesting FCF rediscover.\n"); 16071 return -ENOMEM; 16072 } 16073 16074 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) - 16075 sizeof(struct lpfc_sli4_cfg_mhdr)); 16076 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16077 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF, 16078 length, LPFC_SLI4_MBX_EMBED); 16079 16080 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 16081 /* Set count to 0 for invalidating the entire FCF database */ 16082 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0); 16083 16084 /* Issue the mailbox command asynchronously */ 16085 mbox->vport = phba->pport; 16086 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table; 16087 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 16088 16089 if (rc == MBX_NOT_FINISHED) { 16090 mempool_free(mbox, phba->mbox_mem_pool); 16091 return -EIO; 16092 } 16093 return 0; 16094 } 16095 16096 /** 16097 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event 16098 * @phba: pointer to lpfc hba data structure. 16099 * 16100 * This function is the failover routine as a last resort to the FCF DEAD 16101 * event when driver failed to perform fast FCF failover. 16102 **/ 16103 void 16104 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) 16105 { 16106 uint32_t link_state; 16107 16108 /* 16109 * Last resort as FCF DEAD event failover will treat this as 16110 * a link down, but save the link state because we don't want 16111 * it to be changed to Link Down unless it is already down. 16112 */ 16113 link_state = phba->link_state; 16114 lpfc_linkdown(phba); 16115 phba->link_state = link_state; 16116 16117 /* Unregister FCF if no devices connected to it */ 16118 lpfc_unregister_unused_fcf(phba); 16119 } 16120 16121 /** 16122 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data. 16123 * @phba: pointer to lpfc hba data structure. 16124 * @rgn23_data: pointer to configure region 23 data. 16125 * 16126 * This function gets SLI3 port configure region 23 data through memory dump 16127 * mailbox command. When it successfully retrieves data, the size of the data 16128 * will be returned, otherwise, 0 will be returned. 
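 *
 * Sketch of the calling pattern used later in this file (see
 * lpfc_sli_read_link_ste() below); shown only for illustration:
 *
 *   rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
 *   data_size = lpfc_sli_get_config_region23(phba, rgn23_data);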
16129 **/ 16130 static uint32_t 16131 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 16132 { 16133 LPFC_MBOXQ_t *pmb = NULL; 16134 MAILBOX_t *mb; 16135 uint32_t offset = 0; 16136 int rc; 16137 16138 if (!rgn23_data) 16139 return 0; 16140 16141 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16142 if (!pmb) { 16143 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16144 "2600 failed to allocate mailbox memory\n"); 16145 return 0; 16146 } 16147 mb = &pmb->u.mb; 16148 16149 do { 16150 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23); 16151 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 16152 16153 if (rc != MBX_SUCCESS) { 16154 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 16155 "2601 failed to read config " 16156 "region 23, rc 0x%x Status 0x%x\n", 16157 rc, mb->mbxStatus); 16158 mb->un.varDmp.word_cnt = 0; 16159 } 16160 /* 16161 * dump mem may return a zero when finished or we got a 16162 * mailbox error, either way we are done. 16163 */ 16164 if (mb->un.varDmp.word_cnt == 0) 16165 break; 16166 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset) 16167 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset; 16168 16169 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 16170 rgn23_data + offset, 16171 mb->un.varDmp.word_cnt); 16172 offset += mb->un.varDmp.word_cnt; 16173 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE); 16174 16175 mempool_free(pmb, phba->mbox_mem_pool); 16176 return offset; 16177 } 16178 16179 /** 16180 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data. 16181 * @phba: pointer to lpfc hba data structure. 16182 * @rgn23_data: pointer to configure region 23 data. 16183 * 16184 * This function gets SLI4 port configure region 23 data through memory dump 16185 * mailbox command. When it successfully retrieves data, the size of the data 16186 * will be returned, otherwise, 0 will be returned. 16187 **/ 16188 static uint32_t 16189 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 16190 { 16191 LPFC_MBOXQ_t *mboxq = NULL; 16192 struct lpfc_dmabuf *mp = NULL; 16193 struct lpfc_mqe *mqe; 16194 uint32_t data_length = 0; 16195 int rc; 16196 16197 if (!rgn23_data) 16198 return 0; 16199 16200 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16201 if (!mboxq) { 16202 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16203 "3105 failed to allocate mailbox memory\n"); 16204 return 0; 16205 } 16206 16207 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) 16208 goto out; 16209 mqe = &mboxq->u.mqe; 16210 mp = (struct lpfc_dmabuf *) mboxq->context1; 16211 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 16212 if (rc) 16213 goto out; 16214 data_length = mqe->un.mb_words[5]; 16215 if (data_length == 0) 16216 goto out; 16217 if (data_length > DMP_RGN23_SIZE) { 16218 data_length = 0; 16219 goto out; 16220 } 16221 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length); 16222 out: 16223 mempool_free(mboxq, phba->mbox_mem_pool); 16224 if (mp) { 16225 lpfc_mbuf_free(phba, mp->virt, mp->phys); 16226 kfree(mp); 16227 } 16228 return data_length; 16229 } 16230 16231 /** 16232 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. 16233 * @phba: pointer to lpfc hba data structure. 16234 * 16235 * This function read region 23 and parse TLV for port status to 16236 * decide if the user disaled the port. If the TLV indicates the 16237 * port is disabled, the hba_flag is set accordingly. 
16238 **/ 16239 void 16240 lpfc_sli_read_link_ste(struct lpfc_hba *phba) 16241 { 16242 uint8_t *rgn23_data = NULL; 16243 uint32_t if_type, data_size, sub_tlv_len, tlv_offset; 16244 uint32_t offset = 0; 16245 16246 /* Get adapter Region 23 data */ 16247 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL); 16248 if (!rgn23_data) 16249 goto out; 16250 16251 if (phba->sli_rev < LPFC_SLI_REV4) 16252 data_size = lpfc_sli_get_config_region23(phba, rgn23_data); 16253 else { 16254 if_type = bf_get(lpfc_sli_intf_if_type, 16255 &phba->sli4_hba.sli_intf); 16256 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) 16257 goto out; 16258 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data); 16259 } 16260 16261 if (!data_size) 16262 goto out; 16263 16264 /* Check the region signature first */ 16265 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) { 16266 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16267 "2619 Config region 23 has bad signature\n"); 16268 goto out; 16269 } 16270 offset += 4; 16271 16272 /* Check the data structure version */ 16273 if (rgn23_data[offset] != LPFC_REGION23_VERSION) { 16274 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16275 "2620 Config region 23 has bad version\n"); 16276 goto out; 16277 } 16278 offset += 4; 16279 16280 /* Parse TLV entries in the region */ 16281 while (offset < data_size) { 16282 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) 16283 break; 16284 /* 16285 * If the TLV is not driver specific TLV or driver id is 16286 * not linux driver id, skip the record. 16287 */ 16288 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) || 16289 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) || 16290 (rgn23_data[offset + 3] != 0)) { 16291 offset += rgn23_data[offset + 1] * 4 + 4; 16292 continue; 16293 } 16294 16295 /* Driver found a driver specific TLV in the config region */ 16296 sub_tlv_len = rgn23_data[offset + 1] * 4; 16297 offset += 4; 16298 tlv_offset = 0; 16299 16300 /* 16301 * Search for configured port state sub-TLV. 16302 */ 16303 while ((offset < data_size) && 16304 (tlv_offset < sub_tlv_len)) { 16305 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) { 16306 offset += 4; 16307 tlv_offset += 4; 16308 break; 16309 } 16310 if (rgn23_data[offset] != PORT_STE_TYPE) { 16311 offset += rgn23_data[offset + 1] * 4 + 4; 16312 tlv_offset += rgn23_data[offset + 1] * 4 + 4; 16313 continue; 16314 } 16315 16316 /* This HBA contains PORT_STE configured */ 16317 if (!rgn23_data[offset + 2]) 16318 phba->hba_flag |= LINK_DISABLED; 16319 16320 goto out; 16321 } 16322 } 16323 16324 out: 16325 kfree(rgn23_data); 16326 return; 16327 } 16328 16329 /** 16330 * lpfc_wr_object - write an object to the firmware 16331 * @phba: HBA structure that indicates port to create a queue on. 16332 * @dmabuf_list: list of dmabufs to write to the port. 16333 * @size: the total byte value of the objects to write to the port. 16334 * @offset: the current offset to be used to start the transfer. 16335 * 16336 * This routine will create a wr_object mailbox command to send to the port. 16337 * the mailbox command will be constructed using the dma buffers described in 16338 * @dmabuf_list to create a list of BDEs. This routine will fill in as many 16339 * BDEs that the imbedded mailbox can support. The @offset variable will be 16340 * used to indicate the starting offset of the transfer and will also return 16341 * the offset after the write object mailbox has completed. @size is used to 16342 * determine the end of the object and whether the eof bit should be set. 
16343 *
16344 * Return 0 if successful and offset will contain the new offset to use
16345 * for the next write.
16346 * Return negative value for error cases.
16347 **/
16348 int
16349 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
16350 uint32_t size, uint32_t *offset)
16351 {
16352 struct lpfc_mbx_wr_object *wr_object;
16353 LPFC_MBOXQ_t *mbox;
16354 int rc = 0, i = 0;
16355 uint32_t shdr_status, shdr_add_status;
16356 uint32_t mbox_tmo;
16357 union lpfc_sli4_cfg_shdr *shdr;
16358 struct lpfc_dmabuf *dmabuf;
16359 uint32_t written = 0;
16360
16361 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16362 if (!mbox)
16363 return -ENOMEM;
16364
16365 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16366 LPFC_MBOX_OPCODE_WRITE_OBJECT,
16367 sizeof(struct lpfc_mbx_wr_object) -
16368 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
16369
16370 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
16371 wr_object->u.request.write_offset = *offset;
16372 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
16373 wr_object->u.request.object_name[0] =
16374 cpu_to_le32(wr_object->u.request.object_name[0]);
16375 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
16376 list_for_each_entry(dmabuf, dmabuf_list, list) {
16377 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
16378 break;
16379 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
16380 wr_object->u.request.bde[i].addrHigh =
16381 putPaddrHigh(dmabuf->phys);
16382 if (written + SLI4_PAGE_SIZE >= size) {
16383 wr_object->u.request.bde[i].tus.f.bdeSize =
16384 (size - written);
16385 written += (size - written);
16386 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
16387 } else {
16388 wr_object->u.request.bde[i].tus.f.bdeSize =
16389 SLI4_PAGE_SIZE;
16390 written += SLI4_PAGE_SIZE;
16391 }
16392 i++;
16393 }
16394 wr_object->u.request.bde_count = i;
16395 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
16396 if (!phba->sli4_hba.intr_enable)
16397 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16398 else {
16399 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16400 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16401 }
16402 /* The IOCTL status is embedded in the mailbox subheader. */
16403 shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
16404 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16405 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16406 if (rc != MBX_TIMEOUT)
16407 mempool_free(mbox, phba->mbox_mem_pool);
16408 if (shdr_status || shdr_add_status || rc) {
16409 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16410 "3025 Write Object mailbox failed with "
16411 "status x%x add_status x%x, mbx status x%x\n",
16412 shdr_status, shdr_add_status, rc);
16413 rc = -ENXIO;
16414 } else
16415 *offset += wr_object->u.response.actual_write_length;
16416 return rc;
16417 }
16418
16419 /**
16420 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
16421 * @vport: pointer to vport data structure.
16422 *
16423 * This function iterates through the mailboxq and cleans up all REG_LOGIN
16424 * and REG_VPI mailbox commands associated with the vport. This function
16425 * is called when the driver wants to restart discovery of the vport due to
16426 * a Clear Virtual Link event.
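 *
 * Hypothetical caller sketch (for illustration only; the exact recovery
 * sequence lives in the discovery code):
 *
 *   lpfc_cleanup_pending_mbox(vport);
 *
 * after which discovery mailbox commands can be re-issued for the vport.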
16427 **/ 16428 void 16429 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) 16430 { 16431 struct lpfc_hba *phba = vport->phba; 16432 LPFC_MBOXQ_t *mb, *nextmb; 16433 struct lpfc_dmabuf *mp; 16434 struct lpfc_nodelist *ndlp; 16435 struct lpfc_nodelist *act_mbx_ndlp = NULL; 16436 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 16437 LIST_HEAD(mbox_cmd_list); 16438 uint8_t restart_loop; 16439 16440 /* Clean up internally queued mailbox commands with the vport */ 16441 spin_lock_irq(&phba->hbalock); 16442 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 16443 if (mb->vport != vport) 16444 continue; 16445 16446 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 16447 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 16448 continue; 16449 16450 list_del(&mb->list); 16451 list_add_tail(&mb->list, &mbox_cmd_list); 16452 } 16453 /* Clean up active mailbox command with the vport */ 16454 mb = phba->sli.mbox_active; 16455 if (mb && (mb->vport == vport)) { 16456 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || 16457 (mb->u.mb.mbxCommand == MBX_REG_VPI)) 16458 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16459 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 16460 act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2; 16461 /* Put reference count for delayed processing */ 16462 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp); 16463 /* Unregister the RPI when mailbox complete */ 16464 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 16465 } 16466 } 16467 /* Cleanup any mailbox completions which are not yet processed */ 16468 do { 16469 restart_loop = 0; 16470 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { 16471 /* 16472 * If this mailox is already processed or it is 16473 * for another vport ignore it. 16474 */ 16475 if ((mb->vport != vport) || 16476 (mb->mbox_flag & LPFC_MBX_IMED_UNREG)) 16477 continue; 16478 16479 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 16480 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 16481 continue; 16482 16483 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16484 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 16485 ndlp = (struct lpfc_nodelist *)mb->context2; 16486 /* Unregister the RPI when mailbox complete */ 16487 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 16488 restart_loop = 1; 16489 spin_unlock_irq(&phba->hbalock); 16490 spin_lock(shost->host_lock); 16491 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 16492 spin_unlock(shost->host_lock); 16493 spin_lock_irq(&phba->hbalock); 16494 break; 16495 } 16496 } 16497 } while (restart_loop); 16498 16499 spin_unlock_irq(&phba->hbalock); 16500 16501 /* Release the cleaned-up mailbox commands */ 16502 while (!list_empty(&mbox_cmd_list)) { 16503 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list); 16504 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 16505 mp = (struct lpfc_dmabuf *) (mb->context1); 16506 if (mp) { 16507 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 16508 kfree(mp); 16509 } 16510 ndlp = (struct lpfc_nodelist *) mb->context2; 16511 mb->context2 = NULL; 16512 if (ndlp) { 16513 spin_lock(shost->host_lock); 16514 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 16515 spin_unlock(shost->host_lock); 16516 lpfc_nlp_put(ndlp); 16517 } 16518 } 16519 mempool_free(mb, phba->mbox_mem_pool); 16520 } 16521 16522 /* Release the ndlp with the cleaned-up active mailbox command */ 16523 if (act_mbx_ndlp) { 16524 spin_lock(shost->host_lock); 16525 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 16526 spin_unlock(shost->host_lock); 16527 lpfc_nlp_put(act_mbx_ndlp); 16528 } 16529 } 16530 16531 /** 16532 * lpfc_drain_txq - Drain the txq 16533 * @phba: Pointer to HBA context object. 
16534 *
16535 * This function attempts to submit IOCBs on the txq
16536 * to the adapter. For SLI4 adapters, the txq contains
16537 * ELS IOCBs that have been deferred because there
16538 * are no SGLs available. This congestion can occur with large
16539 * vport counts during node discovery.
16540 **/
16541
16542 uint32_t
16543 lpfc_drain_txq(struct lpfc_hba *phba)
16544 {
16545 LIST_HEAD(completions);
16546 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
16547 struct lpfc_iocbq *piocbq = NULL;
16548 unsigned long iflags = 0;
16549 char *fail_msg = NULL;
16550 struct lpfc_sglq *sglq;
16551 union lpfc_wqe wqe;
16552 int txq_cnt = 0;
16553
16554 spin_lock_irqsave(&pring->ring_lock, iflags);
16555 list_for_each_entry(piocbq, &pring->txq, list) {
16556 txq_cnt++;
16557 }
16558
16559 if (txq_cnt > pring->txq_max)
16560 pring->txq_max = txq_cnt;
16561
16562 spin_unlock_irqrestore(&pring->ring_lock, iflags);
16563
16564 while (!list_empty(&pring->txq)) {
16565 spin_lock_irqsave(&pring->ring_lock, iflags);
16566
16567 piocbq = lpfc_sli_ringtx_get(phba, pring);
16568 if (!piocbq) {
16569 spin_unlock_irqrestore(&pring->ring_lock, iflags);
16570 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16571 "2823 txq empty and txq_cnt is %d\n ",
16572 txq_cnt);
16573 break;
16574 }
16575 sglq = __lpfc_sli_get_sglq(phba, piocbq);
16576 if (!sglq) {
16577 __lpfc_sli_ringtx_put(phba, pring, piocbq);
16578 spin_unlock_irqrestore(&pring->ring_lock, iflags);
16579 break;
16580 }
16581 txq_cnt--;
16582
16583 /* The xri and iocb resources are secured;
16584 * attempt to issue the request.
16585 */
16586 piocbq->sli4_lxritag = sglq->sli4_lxritag;
16587 piocbq->sli4_xritag = sglq->sli4_xritag;
16588 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
16589 fail_msg = "to convert bpl to sgl";
16590 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
16591 fail_msg = "to convert iocb to wqe";
16592 else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
16593 fail_msg = " - Wq is full";
16594 else
16595 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
16596
16597 if (fail_msg) {
16598 /* Failed means we can't issue and need to cancel */
16599 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16600 "2822 IOCB failed %s iotag 0x%x "
16601 "xri 0x%x\n",
16602 fail_msg,
16603 piocbq->iotag, piocbq->sli4_xritag);
16604 list_add_tail(&piocbq->list, &completions);
/* Clear fail_msg so a failure here is not carried over to later entries */
fail_msg = NULL;
16605 }
16606 spin_unlock_irqrestore(&pring->ring_lock, iflags);
16607 }
16608
16609 /* Cancel all the IOCBs that cannot be issued */
16610 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
16611 IOERR_SLI_ABORTED);
16612
16613 return txq_cnt;
16614 }
16615
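/*
 * Illustration only: a hypothetical caller would drain the deferred ELS
 * txq once SGL/XRI resources have been returned to the pool, e.g.
 *
 *   unsubmitted = lpfc_drain_txq(phba);
 *
 * where the return value reflects the txq entries that could not be
 * submitted on this pass.
 */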