1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 22 #include <linux/blkdev.h> 23 #include <linux/pci.h> 24 #include <linux/interrupt.h> 25 #include <linux/delay.h> 26 #include <linux/slab.h> 27 28 #include <scsi/scsi.h> 29 #include <scsi/scsi_cmnd.h> 30 #include <scsi/scsi_device.h> 31 #include <scsi/scsi_host.h> 32 #include <scsi/scsi_transport_fc.h> 33 #include <scsi/fc/fc_fs.h> 34 #include <linux/aer.h> 35 36 #include "lpfc_hw4.h" 37 #include "lpfc_hw.h" 38 #include "lpfc_sli.h" 39 #include "lpfc_sli4.h" 40 #include "lpfc_nl.h" 41 #include "lpfc_disc.h" 42 #include "lpfc_scsi.h" 43 #include "lpfc.h" 44 #include "lpfc_crtn.h" 45 #include "lpfc_logmsg.h" 46 #include "lpfc_compat.h" 47 #include "lpfc_debugfs.h" 48 #include "lpfc_vport.h" 49 50 /* There are only four IOCB completion types. */ 51 typedef enum _lpfc_iocb_type { 52 LPFC_UNKNOWN_IOCB, 53 LPFC_UNSOL_IOCB, 54 LPFC_SOL_IOCB, 55 LPFC_ABORT_IOCB 56 } lpfc_iocb_type; 57 58 59 /* Provide function prototypes local to this module. */ 60 static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *, 61 uint32_t); 62 static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *, 63 uint8_t *, uint32_t *); 64 static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *, 65 struct lpfc_iocbq *); 66 static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, 67 struct hbq_dmabuf *); 68 static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *, 69 struct lpfc_cqe *); 70 static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *, 71 int); 72 static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *, 73 uint32_t); 74 static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba); 75 static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba); 76 77 static IOCB_t * 78 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) 79 { 80 return &iocbq->iocb; 81 } 82 83 /** 84 * lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue 85 * @q: The Work Queue to operate on. 86 * @wqe: The work Queue Entry to put on the Work queue. 87 * 88 * This routine will copy the contents of @wqe to the next available entry on 89 * the @q. This function will then ring the Work Queue Doorbell to signal the 90 * HBA to start processing the Work Queue Entry. This function returns 0 if 91 * successful. If no entries are available on @q then this function will return 92 * -ENOMEM. 
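 *
 * A minimal caller sketch (illustrative only; the queue chosen and the WQE
 * construction step are placeholders, not a specific driver path):
 *
 *	union lpfc_wqe wqe;
 *
 *	(fill in &wqe for the command being issued)
 *	spin_lock_irqsave(&phba->hbalock, flags);
 *	rc = lpfc_sli4_wq_put(some_wq, &wqe);
 *	spin_unlock_irqrestore(&phba->hbalock, flags);
 *	(a non-zero rc means the WQE was not posted)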
93 * The caller is expected to hold the hbalock when calling this routine. 94 **/ 95 static uint32_t 96 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) 97 { 98 union lpfc_wqe *temp_wqe; 99 struct lpfc_register doorbell; 100 uint32_t host_index; 101 uint32_t idx; 102 103 /* sanity check on queue memory */ 104 if (unlikely(!q)) 105 return -ENOMEM; 106 temp_wqe = q->qe[q->host_index].wqe; 107 108 /* If the host has not yet processed the next entry then we are done */ 109 idx = ((q->host_index + 1) % q->entry_count); 110 if (idx == q->hba_index) { 111 q->WQ_overflow++; 112 return -ENOMEM; 113 } 114 q->WQ_posted++; 115 /* set consumption flag every once in a while */ 116 if (!((q->host_index + 1) % q->entry_repost)) 117 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); 118 if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED) 119 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id); 120 lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size); 121 122 /* Update the host index before invoking device */ 123 host_index = q->host_index; 124 125 q->host_index = idx; 126 127 /* Ring Doorbell */ 128 doorbell.word0 = 0; 129 if (q->db_format == LPFC_DB_LIST_FORMAT) { 130 bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1); 131 bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index); 132 bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id); 133 } else if (q->db_format == LPFC_DB_RING_FORMAT) { 134 bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1); 135 bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id); 136 } else { 137 return -EINVAL; 138 } 139 writel(doorbell.word0, q->db_regaddr); 140 141 return 0; 142 } 143 144 /** 145 * lpfc_sli4_wq_release - Updates internal hba index for WQ 146 * @q: The Work Queue to operate on. 147 * @index: The index to advance the hba index to. 148 * 149 * This routine will update the HBA index of a queue to reflect consumption of 150 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed 151 * an entry the host calls this function to update the queue's internal 152 * pointers. This routine returns the number of entries that were consumed by 153 * the HBA. 154 **/ 155 static uint32_t 156 lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index) 157 { 158 uint32_t released = 0; 159 160 /* sanity check on queue memory */ 161 if (unlikely(!q)) 162 return 0; 163 164 if (q->hba_index == index) 165 return 0; 166 do { 167 q->hba_index = ((q->hba_index + 1) % q->entry_count); 168 released++; 169 } while (q->hba_index != index); 170 return released; 171 } 172 173 /** 174 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on an Mailbox Queue 175 * @q: The Mailbox Queue to operate on. 176 * @wqe: The Mailbox Queue Entry to put on the Work queue. 177 * 178 * This routine will copy the contents of @mqe to the next available entry on 179 * the @q. This function will then ring the Work Queue Doorbell to signal the 180 * HBA to start processing the Work Queue Entry. This function returns 0 if 181 * successful. If no entries are available on @q then this function will return 182 * -ENOMEM. 183 * The caller is expected to hold the hbalock when calling this routine. 
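 *
 * Informally, the "queue full" test used here (and in lpfc_sli4_wq_put()
 * above) is plain circular-buffer arithmetic on the host and hba indexes.
 * As a worked example with a hypothetical 4-entry queue, host_index == 2
 * and hba_index == 3 means the queue is full, because advancing the host
 * index would catch up with the hardware:
 *
 *	((host_index + 1) % entry_count) == hba_index
 *	((2 + 1) % 4) == 3	-> no free slot, return -ENOMEM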
184 **/ 185 static uint32_t 186 lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe) 187 { 188 struct lpfc_mqe *temp_mqe; 189 struct lpfc_register doorbell; 190 uint32_t host_index; 191 192 /* sanity check on queue memory */ 193 if (unlikely(!q)) 194 return -ENOMEM; 195 temp_mqe = q->qe[q->host_index].mqe; 196 197 /* If the host has not yet processed the next entry then we are done */ 198 if (((q->host_index + 1) % q->entry_count) == q->hba_index) 199 return -ENOMEM; 200 lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size); 201 /* Save off the mailbox pointer for completion */ 202 q->phba->mbox = (MAILBOX_t *)temp_mqe; 203 204 /* Update the host index before invoking device */ 205 host_index = q->host_index; 206 q->host_index = ((q->host_index + 1) % q->entry_count); 207 208 /* Ring Doorbell */ 209 doorbell.word0 = 0; 210 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1); 211 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id); 212 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr); 213 return 0; 214 } 215 216 /** 217 * lpfc_sli4_mq_release - Updates internal hba index for MQ 218 * @q: The Mailbox Queue to operate on. 219 * 220 * This routine will update the HBA index of a queue to reflect consumption of 221 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed 222 * an entry the host calls this function to update the queue's internal 223 * pointers. This routine returns the number of entries that were consumed by 224 * the HBA. 225 **/ 226 static uint32_t 227 lpfc_sli4_mq_release(struct lpfc_queue *q) 228 { 229 /* sanity check on queue memory */ 230 if (unlikely(!q)) 231 return 0; 232 233 /* Clear the mailbox pointer for completion */ 234 q->phba->mbox = NULL; 235 q->hba_index = ((q->hba_index + 1) % q->entry_count); 236 return 1; 237 } 238 239 /** 240 * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ 241 * @q: The Event Queue to get the first valid EQE from 242 * 243 * This routine will get the first valid Event Queue Entry from @q, update 244 * the queue's internal hba index, and return the EQE. If no valid EQEs are in 245 * the Queue (no more work to do), or the Queue is full of EQEs that have been 246 * processed, but not popped back to the HBA then this routine will return NULL. 
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (!bf_get_le32(lpfc_eqe_valid, eqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	q->hba_index = idx;
	return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts on.
 *
 **/
static inline void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
		(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
}

/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each event queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
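 *
 * As an illustration only (a sketch, not a verbatim copy of the driver's
 * interrupt handling, and "eq" stands for an already created event queue),
 * a typical consumer pairs lpfc_sli4_eq_get() with this release routine and
 * re-arms the EQ once all entries have been handled:
 *
 *	struct lpfc_eqe *eqe;
 *
 *	while ((eqe = lpfc_sli4_eq_get(eq))) {
 *		(handle the event described by eqe)
 *	}
 *	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);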
304 **/ 305 uint32_t 306 lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm) 307 { 308 uint32_t released = 0; 309 struct lpfc_eqe *temp_eqe; 310 struct lpfc_register doorbell; 311 312 /* sanity check on queue memory */ 313 if (unlikely(!q)) 314 return 0; 315 316 /* while there are valid entries */ 317 while (q->hba_index != q->host_index) { 318 temp_eqe = q->qe[q->host_index].eqe; 319 bf_set_le32(lpfc_eqe_valid, temp_eqe, 0); 320 released++; 321 q->host_index = ((q->host_index + 1) % q->entry_count); 322 } 323 if (unlikely(released == 0 && !arm)) 324 return 0; 325 326 /* ring doorbell for number popped */ 327 doorbell.word0 = 0; 328 if (arm) { 329 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); 330 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); 331 } 332 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released); 333 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); 334 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell, 335 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); 336 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); 337 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr); 338 /* PCI read to flush PCI pipeline on re-arming for INTx mode */ 339 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) 340 readl(q->phba->sli4_hba.EQCQDBregaddr); 341 return released; 342 } 343 344 /** 345 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ 346 * @q: The Completion Queue to get the first valid CQE from 347 * 348 * This routine will get the first valid Completion Queue Entry from @q, update 349 * the queue's internal hba index, and return the CQE. If no valid CQEs are in 350 * the Queue (no more work to do), or the Queue is full of CQEs that have been 351 * processed, but not popped back to the HBA then this routine will return NULL. 352 **/ 353 static struct lpfc_cqe * 354 lpfc_sli4_cq_get(struct lpfc_queue *q) 355 { 356 struct lpfc_cqe *cqe; 357 uint32_t idx; 358 359 /* sanity check on queue memory */ 360 if (unlikely(!q)) 361 return NULL; 362 363 /* If the next CQE is not valid then we are done */ 364 if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe)) 365 return NULL; 366 /* If the host has not yet processed the next entry then we are done */ 367 idx = ((q->hba_index + 1) % q->entry_count); 368 if (idx == q->host_index) 369 return NULL; 370 371 cqe = q->qe[q->hba_index].cqe; 372 q->hba_index = idx; 373 return cqe; 374 } 375 376 /** 377 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ 378 * @q: The Completion Queue that the host has completed processing for. 379 * @arm: Indicates whether the host wants to arms this CQ. 380 * 381 * This routine will mark all Completion queue entries on @q, from the last 382 * known completed entry to the last entry that was processed, as completed 383 * by clearing the valid bit for each completion queue entry. Then it will 384 * notify the HBA, by ringing the doorbell, that the CQEs have been processed. 385 * The internal host index in the @q will be updated by this routine to indicate 386 * that the host has finished processing the entries. The @arm parameter 387 * indicates that the queue should be rearmed when ringing the doorbell. 388 * 389 * This function will return the number of CQEs that were released. 
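 *
 * As a rough sketch (not a quote of any one caller; "cq" stands for an
 * already created completion queue), callers typically drain the CQ with
 * lpfc_sli4_cq_get() and then release/re-arm it in one shot:
 *
 *	struct lpfc_cqe *cqe;
 *
 *	while ((cqe = lpfc_sli4_cq_get(cq))) {
 *		(process the completion described by cqe)
 *	}
 *	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);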
390 **/ 391 uint32_t 392 lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm) 393 { 394 uint32_t released = 0; 395 struct lpfc_cqe *temp_qe; 396 struct lpfc_register doorbell; 397 398 /* sanity check on queue memory */ 399 if (unlikely(!q)) 400 return 0; 401 /* while there are valid entries */ 402 while (q->hba_index != q->host_index) { 403 temp_qe = q->qe[q->host_index].cqe; 404 bf_set_le32(lpfc_cqe_valid, temp_qe, 0); 405 released++; 406 q->host_index = ((q->host_index + 1) % q->entry_count); 407 } 408 if (unlikely(released == 0 && !arm)) 409 return 0; 410 411 /* ring doorbell for number popped */ 412 doorbell.word0 = 0; 413 if (arm) 414 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); 415 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released); 416 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION); 417 bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell, 418 (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT)); 419 bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id); 420 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr); 421 return released; 422 } 423 424 /** 425 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue 426 * @q: The Header Receive Queue to operate on. 427 * @wqe: The Receive Queue Entry to put on the Receive queue. 428 * 429 * This routine will copy the contents of @wqe to the next available entry on 430 * the @q. This function will then ring the Receive Queue Doorbell to signal the 431 * HBA to start processing the Receive Queue Entry. This function returns the 432 * index that the rqe was copied to if successful. If no entries are available 433 * on @q then this function will return -ENOMEM. 434 * The caller is expected to hold the hbalock when calling this routine. 435 **/ 436 static int 437 lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, 438 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe) 439 { 440 struct lpfc_rqe *temp_hrqe; 441 struct lpfc_rqe *temp_drqe; 442 struct lpfc_register doorbell; 443 int put_index; 444 445 /* sanity check on queue memory */ 446 if (unlikely(!hq) || unlikely(!dq)) 447 return -ENOMEM; 448 put_index = hq->host_index; 449 temp_hrqe = hq->qe[hq->host_index].rqe; 450 temp_drqe = dq->qe[dq->host_index].rqe; 451 452 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ) 453 return -EINVAL; 454 if (hq->host_index != dq->host_index) 455 return -EINVAL; 456 /* If the host has not yet processed the next entry then we are done */ 457 if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index) 458 return -EBUSY; 459 lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size); 460 lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size); 461 462 /* Update the host index to point to the next slot */ 463 hq->host_index = ((hq->host_index + 1) % hq->entry_count); 464 dq->host_index = ((dq->host_index + 1) % dq->entry_count); 465 466 /* Ring The Header Receive Queue Doorbell */ 467 if (!(hq->host_index % hq->entry_repost)) { 468 doorbell.word0 = 0; 469 if (hq->db_format == LPFC_DB_RING_FORMAT) { 470 bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell, 471 hq->entry_repost); 472 bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id); 473 } else if (hq->db_format == LPFC_DB_LIST_FORMAT) { 474 bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell, 475 hq->entry_repost); 476 bf_set(lpfc_rq_db_list_fm_index, &doorbell, 477 hq->host_index); 478 bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id); 479 } else { 480 return -EINVAL; 481 } 482 writel(doorbell.word0, hq->db_regaddr); 483 } 484 return put_index; 485 } 486 487 /** 488 * 
lpfc_sli4_rq_release - Updates internal hba index for RQ 489 * @q: The Header Receive Queue to operate on. 490 * 491 * This routine will update the HBA index of a queue to reflect consumption of 492 * one Receive Queue Entry by the HBA. When the HBA indicates that it has 493 * consumed an entry the host calls this function to update the queue's 494 * internal pointers. This routine returns the number of entries that were 495 * consumed by the HBA. 496 **/ 497 static uint32_t 498 lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq) 499 { 500 /* sanity check on queue memory */ 501 if (unlikely(!hq) || unlikely(!dq)) 502 return 0; 503 504 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ)) 505 return 0; 506 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count); 507 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count); 508 return 1; 509 } 510 511 /** 512 * lpfc_cmd_iocb - Get next command iocb entry in the ring 513 * @phba: Pointer to HBA context object. 514 * @pring: Pointer to driver SLI ring object. 515 * 516 * This function returns pointer to next command iocb entry 517 * in the command ring. The caller must hold hbalock to prevent 518 * other threads consume the next command iocb. 519 * SLI-2/SLI-3 provide different sized iocbs. 520 **/ 521 static inline IOCB_t * 522 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 523 { 524 return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) + 525 pring->sli.sli3.cmdidx * phba->iocb_cmd_size); 526 } 527 528 /** 529 * lpfc_resp_iocb - Get next response iocb entry in the ring 530 * @phba: Pointer to HBA context object. 531 * @pring: Pointer to driver SLI ring object. 532 * 533 * This function returns pointer to next response iocb entry 534 * in the response ring. The caller must hold hbalock to make sure 535 * that no other thread consume the next response iocb. 536 * SLI-2/SLI-3 provide different sized iocbs. 537 **/ 538 static inline IOCB_t * 539 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 540 { 541 return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) + 542 pring->sli.sli3.rspidx * phba->iocb_rsp_size); 543 } 544 545 /** 546 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool 547 * @phba: Pointer to HBA context object. 548 * 549 * This function is called with hbalock held. This function 550 * allocates a new driver iocb object from the iocb pool. If the 551 * allocation is successful, it returns pointer to the newly 552 * allocated iocb object else it returns NULL. 553 **/ 554 struct lpfc_iocbq * 555 __lpfc_sli_get_iocbq(struct lpfc_hba *phba) 556 { 557 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list; 558 struct lpfc_iocbq * iocbq = NULL; 559 560 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list); 561 if (iocbq) 562 phba->iocb_cnt++; 563 if (phba->iocb_cnt > phba->iocb_max) 564 phba->iocb_max = phba->iocb_cnt; 565 return iocbq; 566 } 567 568 /** 569 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI. 570 * @phba: Pointer to HBA context object. 571 * @xritag: XRI value. 572 * 573 * This function clears the sglq pointer from the array of acive 574 * sglq's. The xritag that is passed in is used to index into the 575 * array. Before the xritag can be used it needs to be adjusted 576 * by subtracting the xribase. 577 * 578 * Returns sglq ponter = success, NULL = Failure. 
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap);
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if an RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function checks whether stop_time (ratov from setting the rrq
 * active) has been reached. If it has and the send_rrq flag is set,
 * it will call lpfc_send_rrq; if the send_rrq flag is not set, it will
 * just call the routine to clear the rrq and free the rrq resource.
 * The hbalock is taken and released internally.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
659 * 660 **/ 661 void 662 lpfc_handle_rrq_active(struct lpfc_hba *phba) 663 { 664 struct lpfc_node_rrq *rrq; 665 struct lpfc_node_rrq *nextrrq; 666 unsigned long next_time; 667 unsigned long iflags; 668 LIST_HEAD(send_rrq); 669 670 spin_lock_irqsave(&phba->hbalock, iflags); 671 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 672 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); 673 list_for_each_entry_safe(rrq, nextrrq, 674 &phba->active_rrq_list, list) { 675 if (time_after(jiffies, rrq->rrq_stop_time)) 676 list_move(&rrq->list, &send_rrq); 677 else if (time_before(rrq->rrq_stop_time, next_time)) 678 next_time = rrq->rrq_stop_time; 679 } 680 spin_unlock_irqrestore(&phba->hbalock, iflags); 681 if ((!list_empty(&phba->active_rrq_list)) && 682 (!(phba->pport->load_flag & FC_UNLOADING))) 683 mod_timer(&phba->rrq_tmr, next_time); 684 list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) { 685 list_del(&rrq->list); 686 if (!rrq->send_rrq) 687 /* this call will free the rrq */ 688 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 689 else if (lpfc_send_rrq(phba, rrq)) { 690 /* if we send the rrq then the completion handler 691 * will clear the bit in the xribitmap. 692 */ 693 lpfc_clr_rrq_active(phba, rrq->xritag, 694 rrq); 695 } 696 } 697 } 698 699 /** 700 * lpfc_get_active_rrq - Get the active RRQ for this exchange. 701 * @vport: Pointer to vport context object. 702 * @xri: The xri used in the exchange. 703 * @did: The targets DID for this exchange. 704 * 705 * returns NULL = rrq not found in the phba->active_rrq_list. 706 * rrq = rrq for this xri and target. 707 **/ 708 struct lpfc_node_rrq * 709 lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did) 710 { 711 struct lpfc_hba *phba = vport->phba; 712 struct lpfc_node_rrq *rrq; 713 struct lpfc_node_rrq *nextrrq; 714 unsigned long iflags; 715 716 if (phba->sli_rev != LPFC_SLI_REV4) 717 return NULL; 718 spin_lock_irqsave(&phba->hbalock, iflags); 719 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { 720 if (rrq->vport == vport && rrq->xritag == xri && 721 rrq->nlp_DID == did){ 722 list_del(&rrq->list); 723 spin_unlock_irqrestore(&phba->hbalock, iflags); 724 return rrq; 725 } 726 } 727 spin_unlock_irqrestore(&phba->hbalock, iflags); 728 return NULL; 729 } 730 731 /** 732 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport. 733 * @vport: Pointer to vport context object. 734 * @ndlp: Pointer to the lpfc_node_list structure. 735 * If ndlp is NULL Remove all active RRQs for this vport from the 736 * phba->active_rrq_list and clear the rrq. 737 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp. 
738 **/ 739 void 740 lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 741 742 { 743 struct lpfc_hba *phba = vport->phba; 744 struct lpfc_node_rrq *rrq; 745 struct lpfc_node_rrq *nextrrq; 746 unsigned long iflags; 747 LIST_HEAD(rrq_list); 748 749 if (phba->sli_rev != LPFC_SLI_REV4) 750 return; 751 if (!ndlp) { 752 lpfc_sli4_vport_delete_els_xri_aborted(vport); 753 lpfc_sli4_vport_delete_fcp_xri_aborted(vport); 754 } 755 spin_lock_irqsave(&phba->hbalock, iflags); 756 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) 757 if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp)) 758 list_move(&rrq->list, &rrq_list); 759 spin_unlock_irqrestore(&phba->hbalock, iflags); 760 761 list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) { 762 list_del(&rrq->list); 763 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 764 } 765 } 766 767 /** 768 * lpfc_cleanup_wt_rrqs - Remove all rrq's from the active list. 769 * @phba: Pointer to HBA context object. 770 * 771 * Remove all rrqs from the phba->active_rrq_list and free them by 772 * calling __lpfc_clr_active_rrq 773 * 774 **/ 775 void 776 lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba) 777 { 778 struct lpfc_node_rrq *rrq; 779 struct lpfc_node_rrq *nextrrq; 780 unsigned long next_time; 781 unsigned long iflags; 782 LIST_HEAD(rrq_list); 783 784 if (phba->sli_rev != LPFC_SLI_REV4) 785 return; 786 spin_lock_irqsave(&phba->hbalock, iflags); 787 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 788 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)); 789 list_splice_init(&phba->active_rrq_list, &rrq_list); 790 spin_unlock_irqrestore(&phba->hbalock, iflags); 791 792 list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) { 793 list_del(&rrq->list); 794 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 795 } 796 if ((!list_empty(&phba->active_rrq_list)) && 797 (!(phba->pport->load_flag & FC_UNLOADING))) 798 799 mod_timer(&phba->rrq_tmr, next_time); 800 } 801 802 803 /** 804 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap. 805 * @phba: Pointer to HBA context object. 806 * @ndlp: Targets nodelist pointer for this exchange. 807 * @xritag the xri in the bitmap to test. 808 * 809 * This function is called with hbalock held. This function 810 * returns 0 = rrq not active for this xri 811 * 1 = rrq is valid for this xri. 812 **/ 813 int 814 lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 815 uint16_t xritag) 816 { 817 if (!ndlp) 818 return 0; 819 if (!ndlp->active_rrqs_xri_bitmap) 820 return 0; 821 if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap)) 822 return 1; 823 else 824 return 0; 825 } 826 827 /** 828 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap. 829 * @phba: Pointer to HBA context object. 830 * @ndlp: nodelist pointer for this target. 831 * @xritag: xri used in this exchange. 832 * @rxid: Remote Exchange ID. 833 * @send_rrq: Flag used to determine if we should send rrq els cmd. 834 * 835 * This function takes the hbalock. 836 * The active bit is always set in the active rrq xri_bitmap even 837 * if there is no slot avaiable for the other rrq information. 838 * 839 * returns 0 rrq actived for this xri 840 * < 0 No memory or invalid ndlp. 
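 *
 * Sketch of the intended lifecycle (the names below are routines in this
 * file; the trigger shown is only an example, not the complete set of
 * callers): an aborted exchange marks its xri busy, the sgl allocator then
 * avoids that xri, and the rrq timer clears it after RATOV:
 *
 *	lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq);
 *		-> bit set in ndlp->active_rrqs_xri_bitmap
 *	__lpfc_sli_get_sglq() skips any sglq whose xri tests positive in
 *		lpfc_test_rrq_active()
 *	lpfc_handle_rrq_active() later sends the RRQ (if requested) and calls
 *		lpfc_clr_rrq_active() to free the rrq and clear the bit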
841 **/ 842 int 843 lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 844 uint16_t xritag, uint16_t rxid, uint16_t send_rrq) 845 { 846 unsigned long iflags; 847 struct lpfc_node_rrq *rrq; 848 int empty; 849 850 if (!ndlp) 851 return -EINVAL; 852 853 if (!phba->cfg_enable_rrq) 854 return -EINVAL; 855 856 spin_lock_irqsave(&phba->hbalock, iflags); 857 if (phba->pport->load_flag & FC_UNLOADING) { 858 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 859 goto out; 860 } 861 862 /* 863 * set the active bit even if there is no mem available. 864 */ 865 if (NLP_CHK_FREE_REQ(ndlp)) 866 goto out; 867 868 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING)) 869 goto out; 870 871 if (!ndlp->active_rrqs_xri_bitmap) 872 goto out; 873 874 if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap)) 875 goto out; 876 877 spin_unlock_irqrestore(&phba->hbalock, iflags); 878 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); 879 if (!rrq) { 880 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 881 "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x" 882 " DID:0x%x Send:%d\n", 883 xritag, rxid, ndlp->nlp_DID, send_rrq); 884 return -EINVAL; 885 } 886 if (phba->cfg_enable_rrq == 1) 887 rrq->send_rrq = send_rrq; 888 else 889 rrq->send_rrq = 0; 890 rrq->xritag = xritag; 891 rrq->rrq_stop_time = jiffies + 892 msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); 893 rrq->ndlp = ndlp; 894 rrq->nlp_DID = ndlp->nlp_DID; 895 rrq->vport = ndlp->vport; 896 rrq->rxid = rxid; 897 spin_lock_irqsave(&phba->hbalock, iflags); 898 empty = list_empty(&phba->active_rrq_list); 899 list_add_tail(&rrq->list, &phba->active_rrq_list); 900 phba->hba_flag |= HBA_RRQ_ACTIVE; 901 if (empty) 902 lpfc_worker_wake_up(phba); 903 spin_unlock_irqrestore(&phba->hbalock, iflags); 904 return 0; 905 out: 906 spin_unlock_irqrestore(&phba->hbalock, iflags); 907 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 908 "2921 Can't set rrq active xri:0x%x rxid:0x%x" 909 " DID:0x%x Send:%d\n", 910 xritag, rxid, ndlp->nlp_DID, send_rrq); 911 return -EINVAL; 912 } 913 914 /** 915 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool 916 * @phba: Pointer to HBA context object. 917 * @piocb: Pointer to the iocbq. 918 * 919 * This function is called with hbalock held. This function 920 * gets a new driver sglq object from the sglq list. If the 921 * list is not empty then it is successful, it returns pointer to the newly 922 * allocated sglq object else it returns NULL. 923 **/ 924 static struct lpfc_sglq * 925 __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) 926 { 927 struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list; 928 struct lpfc_sglq *sglq = NULL; 929 struct lpfc_sglq *start_sglq = NULL; 930 struct lpfc_scsi_buf *lpfc_cmd; 931 struct lpfc_nodelist *ndlp; 932 int found = 0; 933 934 if (piocbq->iocb_flag & LPFC_IO_FCP) { 935 lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1; 936 ndlp = lpfc_cmd->rdata->pnode; 937 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && 938 !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) 939 ndlp = piocbq->context_un.ndlp; 940 else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) 941 ndlp = piocbq->context_un.ndlp; 942 else 943 ndlp = piocbq->context1; 944 945 list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list); 946 start_sglq = sglq; 947 while (!found) { 948 if (!sglq) 949 return NULL; 950 if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag)) { 951 /* This xri has an rrq outstanding for this DID. 952 * put it back in the list and get another xri. 
953 */ 954 list_add_tail(&sglq->list, lpfc_sgl_list); 955 sglq = NULL; 956 list_remove_head(lpfc_sgl_list, sglq, 957 struct lpfc_sglq, list); 958 if (sglq == start_sglq) { 959 sglq = NULL; 960 break; 961 } else 962 continue; 963 } 964 sglq->ndlp = ndlp; 965 found = 1; 966 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq; 967 sglq->state = SGL_ALLOCATED; 968 } 969 return sglq; 970 } 971 972 /** 973 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool 974 * @phba: Pointer to HBA context object. 975 * 976 * This function is called with no lock held. This function 977 * allocates a new driver iocb object from the iocb pool. If the 978 * allocation is successful, it returns pointer to the newly 979 * allocated iocb object else it returns NULL. 980 **/ 981 struct lpfc_iocbq * 982 lpfc_sli_get_iocbq(struct lpfc_hba *phba) 983 { 984 struct lpfc_iocbq * iocbq = NULL; 985 unsigned long iflags; 986 987 spin_lock_irqsave(&phba->hbalock, iflags); 988 iocbq = __lpfc_sli_get_iocbq(phba); 989 spin_unlock_irqrestore(&phba->hbalock, iflags); 990 return iocbq; 991 } 992 993 /** 994 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool 995 * @phba: Pointer to HBA context object. 996 * @iocbq: Pointer to driver iocb object. 997 * 998 * This function is called with hbalock held to release driver 999 * iocb object to the iocb pool. The iotag in the iocb object 1000 * does not change for each use of the iocb object. This function 1001 * clears all other fields of the iocb object when it is freed. 1002 * The sqlq structure that holds the xritag and phys and virtual 1003 * mappings for the scatter gather list is retrieved from the 1004 * active array of sglq. The get of the sglq pointer also clears 1005 * the entry in the array. If the status of the IO indiactes that 1006 * this IO was aborted then the sglq entry it put on the 1007 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the 1008 * IO has good status or fails for any other reason then the sglq 1009 * entry is added to the free list (lpfc_sgl_list). 1010 **/ 1011 static void 1012 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1013 { 1014 struct lpfc_sglq *sglq; 1015 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 1016 unsigned long iflag = 0; 1017 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 1018 1019 if (iocbq->sli4_xritag == NO_XRI) 1020 sglq = NULL; 1021 else 1022 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag); 1023 1024 1025 if (sglq) { 1026 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && 1027 (sglq->state != SGL_XRI_ABORTED)) { 1028 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, 1029 iflag); 1030 list_add(&sglq->list, 1031 &phba->sli4_hba.lpfc_abts_els_sgl_list); 1032 spin_unlock_irqrestore( 1033 &phba->sli4_hba.abts_sgl_list_lock, iflag); 1034 } else { 1035 sglq->state = SGL_FREED; 1036 sglq->ndlp = NULL; 1037 list_add_tail(&sglq->list, 1038 &phba->sli4_hba.lpfc_sgl_list); 1039 1040 /* Check if TXQ queue needs to be serviced */ 1041 if (!list_empty(&pring->txq)) 1042 lpfc_worker_wake_up(phba); 1043 } 1044 } 1045 1046 1047 /* 1048 * Clean all volatile data fields, preserve iotag and node struct. 
1049 */ 1050 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 1051 iocbq->sli4_lxritag = NO_XRI; 1052 iocbq->sli4_xritag = NO_XRI; 1053 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 1054 } 1055 1056 1057 /** 1058 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool 1059 * @phba: Pointer to HBA context object. 1060 * @iocbq: Pointer to driver iocb object. 1061 * 1062 * This function is called with hbalock held to release driver 1063 * iocb object to the iocb pool. The iotag in the iocb object 1064 * does not change for each use of the iocb object. This function 1065 * clears all other fields of the iocb object when it is freed. 1066 **/ 1067 static void 1068 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1069 { 1070 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 1071 1072 1073 /* 1074 * Clean all volatile data fields, preserve iotag and node struct. 1075 */ 1076 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 1077 iocbq->sli4_xritag = NO_XRI; 1078 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 1079 } 1080 1081 /** 1082 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool 1083 * @phba: Pointer to HBA context object. 1084 * @iocbq: Pointer to driver iocb object. 1085 * 1086 * This function is called with hbalock held to release driver 1087 * iocb object to the iocb pool. The iotag in the iocb object 1088 * does not change for each use of the iocb object. This function 1089 * clears all other fields of the iocb object when it is freed. 1090 **/ 1091 static void 1092 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1093 { 1094 phba->__lpfc_sli_release_iocbq(phba, iocbq); 1095 phba->iocb_cnt--; 1096 } 1097 1098 /** 1099 * lpfc_sli_release_iocbq - Release iocb to the iocb pool 1100 * @phba: Pointer to HBA context object. 1101 * @iocbq: Pointer to driver iocb object. 1102 * 1103 * This function is called with no lock held to release the iocb to 1104 * iocb pool. 1105 **/ 1106 void 1107 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1108 { 1109 unsigned long iflags; 1110 1111 /* 1112 * Clean all volatile data fields, preserve iotag and node struct. 1113 */ 1114 spin_lock_irqsave(&phba->hbalock, iflags); 1115 __lpfc_sli_release_iocbq(phba, iocbq); 1116 spin_unlock_irqrestore(&phba->hbalock, iflags); 1117 } 1118 1119 /** 1120 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list. 1121 * @phba: Pointer to HBA context object. 1122 * @iocblist: List of IOCBs. 1123 * @ulpstatus: ULP status in IOCB command field. 1124 * @ulpWord4: ULP word-4 in IOCB command field. 1125 * 1126 * This function is called with a list of IOCBs to cancel. It cancels the IOCB 1127 * on the list by invoking the complete callback function associated with the 1128 * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond 1129 * fields. 1130 **/ 1131 void 1132 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist, 1133 uint32_t ulpstatus, uint32_t ulpWord4) 1134 { 1135 struct lpfc_iocbq *piocb; 1136 1137 while (!list_empty(iocblist)) { 1138 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list); 1139 if (!piocb->iocb_cmpl) 1140 lpfc_sli_release_iocbq(phba, piocb); 1141 else { 1142 piocb->iocb.ulpStatus = ulpstatus; 1143 piocb->iocb.un.ulpWord[4] = ulpWord4; 1144 (piocb->iocb_cmpl) (phba, piocb, piocb); 1145 } 1146 } 1147 return; 1148 } 1149 1150 /** 1151 * lpfc_sli_iocb_cmd_type - Get the iocb type 1152 * @iocb_cmnd: iocb command code. 
1153 * 1154 * This function is called by ring event handler function to get the iocb type. 1155 * This function translates the iocb command to an iocb command type used to 1156 * decide the final disposition of each completed IOCB. 1157 * The function returns 1158 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb 1159 * LPFC_SOL_IOCB if it is a solicited iocb completion 1160 * LPFC_ABORT_IOCB if it is an abort iocb 1161 * LPFC_UNSOL_IOCB if it is an unsolicited iocb 1162 * 1163 * The caller is not required to hold any lock. 1164 **/ 1165 static lpfc_iocb_type 1166 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) 1167 { 1168 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB; 1169 1170 if (iocb_cmnd > CMD_MAX_IOCB_CMD) 1171 return 0; 1172 1173 switch (iocb_cmnd) { 1174 case CMD_XMIT_SEQUENCE_CR: 1175 case CMD_XMIT_SEQUENCE_CX: 1176 case CMD_XMIT_BCAST_CN: 1177 case CMD_XMIT_BCAST_CX: 1178 case CMD_ELS_REQUEST_CR: 1179 case CMD_ELS_REQUEST_CX: 1180 case CMD_CREATE_XRI_CR: 1181 case CMD_CREATE_XRI_CX: 1182 case CMD_GET_RPI_CN: 1183 case CMD_XMIT_ELS_RSP_CX: 1184 case CMD_GET_RPI_CR: 1185 case CMD_FCP_IWRITE_CR: 1186 case CMD_FCP_IWRITE_CX: 1187 case CMD_FCP_IREAD_CR: 1188 case CMD_FCP_IREAD_CX: 1189 case CMD_FCP_ICMND_CR: 1190 case CMD_FCP_ICMND_CX: 1191 case CMD_FCP_TSEND_CX: 1192 case CMD_FCP_TRSP_CX: 1193 case CMD_FCP_TRECEIVE_CX: 1194 case CMD_FCP_AUTO_TRSP_CX: 1195 case CMD_ADAPTER_MSG: 1196 case CMD_ADAPTER_DUMP: 1197 case CMD_XMIT_SEQUENCE64_CR: 1198 case CMD_XMIT_SEQUENCE64_CX: 1199 case CMD_XMIT_BCAST64_CN: 1200 case CMD_XMIT_BCAST64_CX: 1201 case CMD_ELS_REQUEST64_CR: 1202 case CMD_ELS_REQUEST64_CX: 1203 case CMD_FCP_IWRITE64_CR: 1204 case CMD_FCP_IWRITE64_CX: 1205 case CMD_FCP_IREAD64_CR: 1206 case CMD_FCP_IREAD64_CX: 1207 case CMD_FCP_ICMND64_CR: 1208 case CMD_FCP_ICMND64_CX: 1209 case CMD_FCP_TSEND64_CX: 1210 case CMD_FCP_TRSP64_CX: 1211 case CMD_FCP_TRECEIVE64_CX: 1212 case CMD_GEN_REQUEST64_CR: 1213 case CMD_GEN_REQUEST64_CX: 1214 case CMD_XMIT_ELS_RSP64_CX: 1215 case DSSCMD_IWRITE64_CR: 1216 case DSSCMD_IWRITE64_CX: 1217 case DSSCMD_IREAD64_CR: 1218 case DSSCMD_IREAD64_CX: 1219 type = LPFC_SOL_IOCB; 1220 break; 1221 case CMD_ABORT_XRI_CN: 1222 case CMD_ABORT_XRI_CX: 1223 case CMD_CLOSE_XRI_CN: 1224 case CMD_CLOSE_XRI_CX: 1225 case CMD_XRI_ABORTED_CX: 1226 case CMD_ABORT_MXRI64_CN: 1227 case CMD_XMIT_BLS_RSP64_CX: 1228 type = LPFC_ABORT_IOCB; 1229 break; 1230 case CMD_RCV_SEQUENCE_CX: 1231 case CMD_RCV_ELS_REQ_CX: 1232 case CMD_RCV_SEQUENCE64_CX: 1233 case CMD_RCV_ELS_REQ64_CX: 1234 case CMD_ASYNC_STATUS: 1235 case CMD_IOCB_RCV_SEQ64_CX: 1236 case CMD_IOCB_RCV_ELS64_CX: 1237 case CMD_IOCB_RCV_CONT64_CX: 1238 case CMD_IOCB_RET_XRI64_CX: 1239 type = LPFC_UNSOL_IOCB; 1240 break; 1241 case CMD_IOCB_XMIT_MSEQ64_CR: 1242 case CMD_IOCB_XMIT_MSEQ64_CX: 1243 case CMD_IOCB_RCV_SEQ_LIST64_CX: 1244 case CMD_IOCB_RCV_ELS_LIST64_CX: 1245 case CMD_IOCB_CLOSE_EXTENDED_CN: 1246 case CMD_IOCB_ABORT_EXTENDED_CN: 1247 case CMD_IOCB_RET_HBQE64_CN: 1248 case CMD_IOCB_FCP_IBIDIR64_CR: 1249 case CMD_IOCB_FCP_IBIDIR64_CX: 1250 case CMD_IOCB_FCP_ITASKMGT64_CX: 1251 case CMD_IOCB_LOGENTRY_CN: 1252 case CMD_IOCB_LOGENTRY_ASYNC_CN: 1253 printk("%s - Unhandled SLI-3 Command x%x\n", 1254 __func__, iocb_cmnd); 1255 type = LPFC_UNKNOWN_IOCB; 1256 break; 1257 default: 1258 type = LPFC_UNKNOWN_IOCB; 1259 break; 1260 } 1261 1262 return type; 1263 } 1264 1265 /** 1266 * lpfc_sli_ring_map - Issue config_ring mbox for all rings 1267 * @phba: Pointer to HBA context object. 
1268 * 1269 * This function is called from SLI initialization code 1270 * to configure every ring of the HBA's SLI interface. The 1271 * caller is not required to hold any lock. This function issues 1272 * a config_ring mailbox command for each ring. 1273 * This function returns zero if successful else returns a negative 1274 * error code. 1275 **/ 1276 static int 1277 lpfc_sli_ring_map(struct lpfc_hba *phba) 1278 { 1279 struct lpfc_sli *psli = &phba->sli; 1280 LPFC_MBOXQ_t *pmb; 1281 MAILBOX_t *pmbox; 1282 int i, rc, ret = 0; 1283 1284 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1285 if (!pmb) 1286 return -ENOMEM; 1287 pmbox = &pmb->u.mb; 1288 phba->link_state = LPFC_INIT_MBX_CMDS; 1289 for (i = 0; i < psli->num_rings; i++) { 1290 lpfc_config_ring(phba, i, pmb); 1291 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 1292 if (rc != MBX_SUCCESS) { 1293 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1294 "0446 Adapter failed to init (%d), " 1295 "mbxCmd x%x CFG_RING, mbxStatus x%x, " 1296 "ring %d\n", 1297 rc, pmbox->mbxCommand, 1298 pmbox->mbxStatus, i); 1299 phba->link_state = LPFC_HBA_ERROR; 1300 ret = -ENXIO; 1301 break; 1302 } 1303 } 1304 mempool_free(pmb, phba->mbox_mem_pool); 1305 return ret; 1306 } 1307 1308 /** 1309 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq 1310 * @phba: Pointer to HBA context object. 1311 * @pring: Pointer to driver SLI ring object. 1312 * @piocb: Pointer to the driver iocb object. 1313 * 1314 * This function is called with hbalock held. The function adds the 1315 * new iocb to txcmplq of the given ring. This function always returns 1316 * 0. If this function is called for ELS ring, this function checks if 1317 * there is a vport associated with the ELS command. This function also 1318 * starts els_tmofunc timer if this is an ELS command. 1319 **/ 1320 static int 1321 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1322 struct lpfc_iocbq *piocb) 1323 { 1324 list_add_tail(&piocb->list, &pring->txcmplq); 1325 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ; 1326 1327 if ((unlikely(pring->ringno == LPFC_ELS_RING)) && 1328 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 1329 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) && 1330 (!(piocb->vport->load_flag & FC_UNLOADING))) { 1331 if (!piocb->vport) 1332 BUG(); 1333 else 1334 mod_timer(&piocb->vport->els_tmofunc, 1335 jiffies + 1336 msecs_to_jiffies(1000 * (phba->fc_ratov << 1))); 1337 } 1338 1339 1340 return 0; 1341 } 1342 1343 /** 1344 * lpfc_sli_ringtx_get - Get first element of the txq 1345 * @phba: Pointer to HBA context object. 1346 * @pring: Pointer to driver SLI ring object. 1347 * 1348 * This function is called with hbalock held to get next 1349 * iocb in txq of the given ring. If there is any iocb in 1350 * the txq, the function returns first iocb in the list after 1351 * removing the iocb from the list, else it returns NULL. 1352 **/ 1353 struct lpfc_iocbq * 1354 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1355 { 1356 struct lpfc_iocbq *cmd_iocb; 1357 1358 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); 1359 return cmd_iocb; 1360 } 1361 1362 /** 1363 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring 1364 * @phba: Pointer to HBA context object. 1365 * @pring: Pointer to driver SLI ring object. 1366 * 1367 * This function is called with hbalock held and the caller must post the 1368 * iocb without releasing the lock. 
If the caller releases the lock, 1369 * iocb slot returned by the function is not guaranteed to be available. 1370 * The function returns pointer to the next available iocb slot if there 1371 * is available slot in the ring, else it returns NULL. 1372 * If the get index of the ring is ahead of the put index, the function 1373 * will post an error attention event to the worker thread to take the 1374 * HBA to offline state. 1375 **/ 1376 static IOCB_t * 1377 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1378 { 1379 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 1380 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb; 1381 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) && 1382 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx)) 1383 pring->sli.sli3.next_cmdidx = 0; 1384 1385 if (unlikely(pring->sli.sli3.local_getidx == 1386 pring->sli.sli3.next_cmdidx)) { 1387 1388 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 1389 1390 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) { 1391 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1392 "0315 Ring %d issue: portCmdGet %d " 1393 "is bigger than cmd ring %d\n", 1394 pring->ringno, 1395 pring->sli.sli3.local_getidx, 1396 max_cmd_idx); 1397 1398 phba->link_state = LPFC_HBA_ERROR; 1399 /* 1400 * All error attention handlers are posted to 1401 * worker thread 1402 */ 1403 phba->work_ha |= HA_ERATT; 1404 phba->work_hs = HS_FFER3; 1405 1406 lpfc_worker_wake_up(phba); 1407 1408 return NULL; 1409 } 1410 1411 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx) 1412 return NULL; 1413 } 1414 1415 return lpfc_cmd_iocb(phba, pring); 1416 } 1417 1418 /** 1419 * lpfc_sli_next_iotag - Get an iotag for the iocb 1420 * @phba: Pointer to HBA context object. 1421 * @iocbq: Pointer to driver iocb object. 1422 * 1423 * This function gets an iotag for the iocb. If there is no unused iotag and 1424 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup 1425 * array and assigns a new iotag. 1426 * The function returns the allocated iotag if successful, else returns zero. 1427 * Zero is not a valid iotag. 1428 * The caller is not required to hold any lock. 
1429 **/ 1430 uint16_t 1431 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1432 { 1433 struct lpfc_iocbq **new_arr; 1434 struct lpfc_iocbq **old_arr; 1435 size_t new_len; 1436 struct lpfc_sli *psli = &phba->sli; 1437 uint16_t iotag; 1438 1439 spin_lock_irq(&phba->hbalock); 1440 iotag = psli->last_iotag; 1441 if(++iotag < psli->iocbq_lookup_len) { 1442 psli->last_iotag = iotag; 1443 psli->iocbq_lookup[iotag] = iocbq; 1444 spin_unlock_irq(&phba->hbalock); 1445 iocbq->iotag = iotag; 1446 return iotag; 1447 } else if (psli->iocbq_lookup_len < (0xffff 1448 - LPFC_IOCBQ_LOOKUP_INCREMENT)) { 1449 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; 1450 spin_unlock_irq(&phba->hbalock); 1451 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *), 1452 GFP_KERNEL); 1453 if (new_arr) { 1454 spin_lock_irq(&phba->hbalock); 1455 old_arr = psli->iocbq_lookup; 1456 if (new_len <= psli->iocbq_lookup_len) { 1457 /* highly unprobable case */ 1458 kfree(new_arr); 1459 iotag = psli->last_iotag; 1460 if(++iotag < psli->iocbq_lookup_len) { 1461 psli->last_iotag = iotag; 1462 psli->iocbq_lookup[iotag] = iocbq; 1463 spin_unlock_irq(&phba->hbalock); 1464 iocbq->iotag = iotag; 1465 return iotag; 1466 } 1467 spin_unlock_irq(&phba->hbalock); 1468 return 0; 1469 } 1470 if (psli->iocbq_lookup) 1471 memcpy(new_arr, old_arr, 1472 ((psli->last_iotag + 1) * 1473 sizeof (struct lpfc_iocbq *))); 1474 psli->iocbq_lookup = new_arr; 1475 psli->iocbq_lookup_len = new_len; 1476 psli->last_iotag = iotag; 1477 psli->iocbq_lookup[iotag] = iocbq; 1478 spin_unlock_irq(&phba->hbalock); 1479 iocbq->iotag = iotag; 1480 kfree(old_arr); 1481 return iotag; 1482 } 1483 } else 1484 spin_unlock_irq(&phba->hbalock); 1485 1486 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1487 "0318 Failed to allocate IOTAG.last IOTAG is %d\n", 1488 psli->last_iotag); 1489 1490 return 0; 1491 } 1492 1493 /** 1494 * lpfc_sli_submit_iocb - Submit an iocb to the firmware 1495 * @phba: Pointer to HBA context object. 1496 * @pring: Pointer to driver SLI ring object. 1497 * @iocb: Pointer to iocb slot in the ring. 1498 * @nextiocb: Pointer to driver iocb object which need to be 1499 * posted to firmware. 1500 * 1501 * This function is called with hbalock held to post a new iocb to 1502 * the firmware. This function copies the new iocb to ring iocb slot and 1503 * updates the ring pointers. It adds the new iocb to txcmplq if there is 1504 * a completion call back for this iocb else the function will free the 1505 * iocb object. 1506 **/ 1507 static void 1508 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1509 IOCB_t *iocb, struct lpfc_iocbq *nextiocb) 1510 { 1511 /* 1512 * Set up an iotag 1513 */ 1514 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0; 1515 1516 1517 if (pring->ringno == LPFC_ELS_RING) { 1518 lpfc_debugfs_slow_ring_trc(phba, 1519 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x", 1520 *(((uint32_t *) &nextiocb->iocb) + 4), 1521 *(((uint32_t *) &nextiocb->iocb) + 6), 1522 *(((uint32_t *) &nextiocb->iocb) + 7)); 1523 } 1524 1525 /* 1526 * Issue iocb command to adapter 1527 */ 1528 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size); 1529 wmb(); 1530 pring->stats.iocb_cmd++; 1531 1532 /* 1533 * If there is no completion routine to call, we can release the 1534 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF, 1535 * that have no rsp ring completion, iocb_cmpl MUST be NULL. 
1536 */ 1537 if (nextiocb->iocb_cmpl) 1538 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 1539 else 1540 __lpfc_sli_release_iocbq(phba, nextiocb); 1541 1542 /* 1543 * Let the HBA know what IOCB slot will be the next one the 1544 * driver will put a command into. 1545 */ 1546 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx; 1547 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); 1548 } 1549 1550 /** 1551 * lpfc_sli_update_full_ring - Update the chip attention register 1552 * @phba: Pointer to HBA context object. 1553 * @pring: Pointer to driver SLI ring object. 1554 * 1555 * The caller is not required to hold any lock for calling this function. 1556 * This function updates the chip attention bits for the ring to inform firmware 1557 * that there are pending work to be done for this ring and requests an 1558 * interrupt when there is space available in the ring. This function is 1559 * called when the driver is unable to post more iocbs to the ring due 1560 * to unavailability of space in the ring. 1561 **/ 1562 static void 1563 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1564 { 1565 int ringno = pring->ringno; 1566 1567 pring->flag |= LPFC_CALL_RING_AVAILABLE; 1568 1569 wmb(); 1570 1571 /* 1572 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register. 1573 * The HBA will tell us when an IOCB entry is available. 1574 */ 1575 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr); 1576 readl(phba->CAregaddr); /* flush */ 1577 1578 pring->stats.iocb_cmd_full++; 1579 } 1580 1581 /** 1582 * lpfc_sli_update_ring - Update chip attention register 1583 * @phba: Pointer to HBA context object. 1584 * @pring: Pointer to driver SLI ring object. 1585 * 1586 * This function updates the chip attention register bit for the 1587 * given ring to inform HBA that there is more work to be done 1588 * in this ring. The caller is not required to hold any lock. 1589 **/ 1590 static void 1591 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1592 { 1593 int ringno = pring->ringno; 1594 1595 /* 1596 * Tell the HBA that there is work to do in this ring. 1597 */ 1598 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) { 1599 wmb(); 1600 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); 1601 readl(phba->CAregaddr); /* flush */ 1602 } 1603 } 1604 1605 /** 1606 * lpfc_sli_resume_iocb - Process iocbs in the txq 1607 * @phba: Pointer to HBA context object. 1608 * @pring: Pointer to driver SLI ring object. 1609 * 1610 * This function is called with hbalock held to post pending iocbs 1611 * in the txq to the firmware. This function is called when driver 1612 * detects space available in the ring. 1613 **/ 1614 static void 1615 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1616 { 1617 IOCB_t *iocb; 1618 struct lpfc_iocbq *nextiocb; 1619 1620 /* 1621 * Check to see if: 1622 * (a) there is anything on the txq to send 1623 * (b) link is up 1624 * (c) link attention events can be processed (fcp ring only) 1625 * (d) IOCB processing is not blocked by the outstanding mbox command. 
1626 */ 1627 1628 if (lpfc_is_link_up(phba) && 1629 (!list_empty(&pring->txq)) && 1630 (pring->ringno != phba->sli.fcp_ring || 1631 phba->sli.sli_flag & LPFC_PROCESS_LA)) { 1632 1633 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 1634 (nextiocb = lpfc_sli_ringtx_get(phba, pring))) 1635 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 1636 1637 if (iocb) 1638 lpfc_sli_update_ring(phba, pring); 1639 else 1640 lpfc_sli_update_full_ring(phba, pring); 1641 } 1642 1643 return; 1644 } 1645 1646 /** 1647 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ 1648 * @phba: Pointer to HBA context object. 1649 * @hbqno: HBQ number. 1650 * 1651 * This function is called with hbalock held to get the next 1652 * available slot for the given HBQ. If there is free slot 1653 * available for the HBQ it will return pointer to the next available 1654 * HBQ entry else it will return NULL. 1655 **/ 1656 static struct lpfc_hbq_entry * 1657 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) 1658 { 1659 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1660 1661 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && 1662 ++hbqp->next_hbqPutIdx >= hbqp->entry_count) 1663 hbqp->next_hbqPutIdx = 0; 1664 1665 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { 1666 uint32_t raw_index = phba->hbq_get[hbqno]; 1667 uint32_t getidx = le32_to_cpu(raw_index); 1668 1669 hbqp->local_hbqGetIdx = getidx; 1670 1671 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { 1672 lpfc_printf_log(phba, KERN_ERR, 1673 LOG_SLI | LOG_VPORT, 1674 "1802 HBQ %d: local_hbqGetIdx " 1675 "%u is > than hbqp->entry_count %u\n", 1676 hbqno, hbqp->local_hbqGetIdx, 1677 hbqp->entry_count); 1678 1679 phba->link_state = LPFC_HBA_ERROR; 1680 return NULL; 1681 } 1682 1683 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) 1684 return NULL; 1685 } 1686 1687 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt + 1688 hbqp->hbqPutIdx; 1689 } 1690 1691 /** 1692 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers 1693 * @phba: Pointer to HBA context object. 1694 * 1695 * This function is called with no lock held to free all the 1696 * hbq buffers while uninitializing the SLI interface. It also 1697 * frees the HBQ buffers returned by the firmware but not yet 1698 * processed by the upper layers. 
1699 **/ 1700 void 1701 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) 1702 { 1703 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 1704 struct hbq_dmabuf *hbq_buf; 1705 unsigned long flags; 1706 int i, hbq_count; 1707 uint32_t hbqno; 1708 1709 hbq_count = lpfc_sli_hbq_count(); 1710 /* Return all memory used by all HBQs */ 1711 spin_lock_irqsave(&phba->hbalock, flags); 1712 for (i = 0; i < hbq_count; ++i) { 1713 list_for_each_entry_safe(dmabuf, next_dmabuf, 1714 &phba->hbqs[i].hbq_buffer_list, list) { 1715 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1716 list_del(&hbq_buf->dbuf.list); 1717 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); 1718 } 1719 phba->hbqs[i].buffer_count = 0; 1720 } 1721 /* Return all HBQ buffer that are in-fly */ 1722 list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list, 1723 list) { 1724 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1725 list_del(&hbq_buf->dbuf.list); 1726 if (hbq_buf->tag == -1) { 1727 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) 1728 (phba, hbq_buf); 1729 } else { 1730 hbqno = hbq_buf->tag >> 16; 1731 if (hbqno >= LPFC_MAX_HBQS) 1732 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) 1733 (phba, hbq_buf); 1734 else 1735 (phba->hbqs[hbqno].hbq_free_buffer)(phba, 1736 hbq_buf); 1737 } 1738 } 1739 1740 /* Mark the HBQs not in use */ 1741 phba->hbq_in_use = 0; 1742 spin_unlock_irqrestore(&phba->hbalock, flags); 1743 } 1744 1745 /** 1746 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware 1747 * @phba: Pointer to HBA context object. 1748 * @hbqno: HBQ number. 1749 * @hbq_buf: Pointer to HBQ buffer. 1750 * 1751 * This function is called with the hbalock held to post a 1752 * hbq buffer to the firmware. If the function finds an empty 1753 * slot in the HBQ, it will post the buffer. The function will return 1754 * pointer to the hbq entry if it successfully post the buffer 1755 * else it will return NULL. 1756 **/ 1757 static int 1758 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 1759 struct hbq_dmabuf *hbq_buf) 1760 { 1761 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf); 1762 } 1763 1764 /** 1765 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware 1766 * @phba: Pointer to HBA context object. 1767 * @hbqno: HBQ number. 1768 * @hbq_buf: Pointer to HBQ buffer. 1769 * 1770 * This function is called with the hbalock held to post a hbq buffer to the 1771 * firmware. If the function finds an empty slot in the HBQ, it will post the 1772 * buffer and place it on the hbq_buffer_list. The function will return zero if 1773 * it successfully post the buffer else it will return an error. 
1774 **/ 1775 static int 1776 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, 1777 struct hbq_dmabuf *hbq_buf) 1778 { 1779 struct lpfc_hbq_entry *hbqe; 1780 dma_addr_t physaddr = hbq_buf->dbuf.phys; 1781 1782 /* Get next HBQ entry slot to use */ 1783 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); 1784 if (hbqe) { 1785 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1786 1787 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 1788 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr)); 1789 hbqe->bde.tus.f.bdeSize = hbq_buf->size; 1790 hbqe->bde.tus.f.bdeFlags = 0; 1791 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w); 1792 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag); 1793 /* Sync SLIM */ 1794 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx; 1795 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno); 1796 /* flush */ 1797 readl(phba->hbq_put + hbqno); 1798 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); 1799 return 0; 1800 } else 1801 return -ENOMEM; 1802 } 1803 1804 /** 1805 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware 1806 * @phba: Pointer to HBA context object. 1807 * @hbqno: HBQ number. 1808 * @hbq_buf: Pointer to HBQ buffer. 1809 * 1810 * This function is called with the hbalock held to post an RQE to the SLI4 1811 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to 1812 * the hbq_buffer_list and return zero, otherwise it will return an error. 1813 **/ 1814 static int 1815 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, 1816 struct hbq_dmabuf *hbq_buf) 1817 { 1818 int rc; 1819 struct lpfc_rqe hrqe; 1820 struct lpfc_rqe drqe; 1821 1822 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); 1823 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); 1824 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); 1825 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys); 1826 rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 1827 &hrqe, &drqe); 1828 if (rc < 0) 1829 return rc; 1830 hbq_buf->tag = rc; 1831 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list); 1832 return 0; 1833 } 1834 1835 /* HBQ for ELS and CT traffic. */ 1836 static struct lpfc_hbq_init lpfc_els_hbq = { 1837 .rn = 1, 1838 .entry_count = 256, 1839 .mask_count = 0, 1840 .profile = 0, 1841 .ring_mask = (1 << LPFC_ELS_RING), 1842 .buffer_count = 0, 1843 .init_count = 40, 1844 .add_count = 40, 1845 }; 1846 1847 /* HBQ for the extra ring if needed */ 1848 static struct lpfc_hbq_init lpfc_extra_hbq = { 1849 .rn = 1, 1850 .entry_count = 200, 1851 .mask_count = 0, 1852 .profile = 0, 1853 .ring_mask = (1 << LPFC_EXTRA_RING), 1854 .buffer_count = 0, 1855 .init_count = 0, 1856 .add_count = 5, 1857 }; 1858 1859 /* Array of HBQs */ 1860 struct lpfc_hbq_init *lpfc_hbq_defs[] = { 1861 &lpfc_els_hbq, 1862 &lpfc_extra_hbq, 1863 }; 1864 1865 /** 1866 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ 1867 * @phba: Pointer to HBA context object. 1868 * @hbqno: HBQ number. 1869 * @count: Number of HBQ buffers to be posted. 1870 * 1871 * This function is called with no lock held to post more hbq buffers to the 1872 * given HBQ. The function returns the number of HBQ buffers successfully 1873 * posted. 
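 *
 * Buffers handed to the firmware here are tagged with the HBQ number in
 * the upper 16 bits and the current per-HBQ buffer count in the lower
 * 16 bits, so the originating HBQ can be recovered later (for example
 * by lpfc_sli_hbqbuf_find). An illustrative sketch of the encoding:
 *
 *	tag   = (hbqno << 16) | phba->hbqs[hbqno].buffer_count;
 *	hbqno = tag >> 16;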
1874 **/ 1875 static int 1876 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) 1877 { 1878 uint32_t i, posted = 0; 1879 unsigned long flags; 1880 struct hbq_dmabuf *hbq_buffer; 1881 LIST_HEAD(hbq_buf_list); 1882 if (!phba->hbqs[hbqno].hbq_alloc_buffer) 1883 return 0; 1884 1885 if ((phba->hbqs[hbqno].buffer_count + count) > 1886 lpfc_hbq_defs[hbqno]->entry_count) 1887 count = lpfc_hbq_defs[hbqno]->entry_count - 1888 phba->hbqs[hbqno].buffer_count; 1889 if (!count) 1890 return 0; 1891 /* Allocate HBQ entries */ 1892 for (i = 0; i < count; i++) { 1893 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 1894 if (!hbq_buffer) 1895 break; 1896 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list); 1897 } 1898 /* Check whether HBQ is still in use */ 1899 spin_lock_irqsave(&phba->hbalock, flags); 1900 if (!phba->hbq_in_use) 1901 goto err; 1902 while (!list_empty(&hbq_buf_list)) { 1903 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 1904 dbuf.list); 1905 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 1906 (hbqno << 16)); 1907 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1908 phba->hbqs[hbqno].buffer_count++; 1909 posted++; 1910 } else 1911 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1912 } 1913 spin_unlock_irqrestore(&phba->hbalock, flags); 1914 return posted; 1915 err: 1916 spin_unlock_irqrestore(&phba->hbalock, flags); 1917 while (!list_empty(&hbq_buf_list)) { 1918 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 1919 dbuf.list); 1920 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1921 } 1922 return 0; 1923 } 1924 1925 /** 1926 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware 1927 * @phba: Pointer to HBA context object. 1928 * @qno: HBQ number. 1929 * 1930 * This function posts more buffers to the HBQ. This function 1931 * is called with no lock held. The function returns the number of HBQ entries 1932 * successfully allocated. 1933 **/ 1934 int 1935 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 1936 { 1937 if (phba->sli_rev == LPFC_SLI_REV4) 1938 return 0; 1939 else 1940 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1941 lpfc_hbq_defs[qno]->add_count); 1942 } 1943 1944 /** 1945 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ 1946 * @phba: Pointer to HBA context object. 1947 * @qno: HBQ queue number. 1948 * 1949 * This function is called from SLI initialization code path with 1950 * no lock held to post initial HBQ buffers to firmware. The 1951 * function returns the number of HBQ entries successfully allocated. 1952 **/ 1953 static int 1954 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) 1955 { 1956 if (phba->sli_rev == LPFC_SLI_REV4) 1957 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1958 lpfc_hbq_defs[qno]->entry_count); 1959 else 1960 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1961 lpfc_hbq_defs[qno]->init_count); 1962 } 1963 1964 /** 1965 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list 1966 * @phba: Pointer to HBA context object. 1967 * @hbqno: HBQ number. 1968 * 1969 * This function removes the first hbq buffer on an hbq list and returns a 1970 * pointer to that buffer. If it finds no buffers on the list it returns NULL. 
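 *
 * Note that the buffer list to operate on is passed in directly as
 * @rb_list rather than being derived from an HBA context and HBQ
 * number.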
1971 **/ 1972 static struct hbq_dmabuf * 1973 lpfc_sli_hbqbuf_get(struct list_head *rb_list) 1974 { 1975 struct lpfc_dmabuf *d_buf; 1976 1977 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list); 1978 if (!d_buf) 1979 return NULL; 1980 return container_of(d_buf, struct hbq_dmabuf, dbuf); 1981 } 1982 1983 /** 1984 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag 1985 * @phba: Pointer to HBA context object. 1986 * @tag: Tag of the hbq buffer. 1987 * 1988 * This function is called with hbalock held. This function searches 1989 * for the hbq buffer associated with the given tag in the hbq buffer 1990 * list. If it finds the hbq buffer, it returns the hbq_buffer other wise 1991 * it returns NULL. 1992 **/ 1993 static struct hbq_dmabuf * 1994 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) 1995 { 1996 struct lpfc_dmabuf *d_buf; 1997 struct hbq_dmabuf *hbq_buf; 1998 uint32_t hbqno; 1999 2000 hbqno = tag >> 16; 2001 if (hbqno >= LPFC_MAX_HBQS) 2002 return NULL; 2003 2004 spin_lock_irq(&phba->hbalock); 2005 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { 2006 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 2007 if (hbq_buf->tag == tag) { 2008 spin_unlock_irq(&phba->hbalock); 2009 return hbq_buf; 2010 } 2011 } 2012 spin_unlock_irq(&phba->hbalock); 2013 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 2014 "1803 Bad hbq tag. Data: x%x x%x\n", 2015 tag, phba->hbqs[tag >> 16].buffer_count); 2016 return NULL; 2017 } 2018 2019 /** 2020 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware 2021 * @phba: Pointer to HBA context object. 2022 * @hbq_buffer: Pointer to HBQ buffer. 2023 * 2024 * This function is called with hbalock. This function gives back 2025 * the hbq buffer to firmware. If the HBQ does not have space to 2026 * post the buffer, it will free the buffer. 2027 **/ 2028 void 2029 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) 2030 { 2031 uint32_t hbqno; 2032 2033 if (hbq_buffer) { 2034 hbqno = hbq_buffer->tag >> 16; 2035 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) 2036 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2037 } 2038 } 2039 2040 /** 2041 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox 2042 * @mbxCommand: mailbox command code. 2043 * 2044 * This function is called by the mailbox event handler function to verify 2045 * that the completed mailbox command is a legitimate mailbox command. If the 2046 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN 2047 * and the mailbox event handler will take the HBA offline. 
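 *
 * A minimal illustrative use, mirroring the check made by the mailbox
 * event handler below:
 *
 *	if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == MBX_SHUTDOWN) {
 *		phba->link_state = LPFC_HBA_ERROR;
 *		phba->work_hs = HS_FFER3;
 *		lpfc_handle_eratt(phba);
 *	}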
2048 **/ 2049 static int 2050 lpfc_sli_chk_mbx_command(uint8_t mbxCommand) 2051 { 2052 uint8_t ret; 2053 2054 switch (mbxCommand) { 2055 case MBX_LOAD_SM: 2056 case MBX_READ_NV: 2057 case MBX_WRITE_NV: 2058 case MBX_WRITE_VPARMS: 2059 case MBX_RUN_BIU_DIAG: 2060 case MBX_INIT_LINK: 2061 case MBX_DOWN_LINK: 2062 case MBX_CONFIG_LINK: 2063 case MBX_CONFIG_RING: 2064 case MBX_RESET_RING: 2065 case MBX_READ_CONFIG: 2066 case MBX_READ_RCONFIG: 2067 case MBX_READ_SPARM: 2068 case MBX_READ_STATUS: 2069 case MBX_READ_RPI: 2070 case MBX_READ_XRI: 2071 case MBX_READ_REV: 2072 case MBX_READ_LNK_STAT: 2073 case MBX_REG_LOGIN: 2074 case MBX_UNREG_LOGIN: 2075 case MBX_CLEAR_LA: 2076 case MBX_DUMP_MEMORY: 2077 case MBX_DUMP_CONTEXT: 2078 case MBX_RUN_DIAGS: 2079 case MBX_RESTART: 2080 case MBX_UPDATE_CFG: 2081 case MBX_DOWN_LOAD: 2082 case MBX_DEL_LD_ENTRY: 2083 case MBX_RUN_PROGRAM: 2084 case MBX_SET_MASK: 2085 case MBX_SET_VARIABLE: 2086 case MBX_UNREG_D_ID: 2087 case MBX_KILL_BOARD: 2088 case MBX_CONFIG_FARP: 2089 case MBX_BEACON: 2090 case MBX_LOAD_AREA: 2091 case MBX_RUN_BIU_DIAG64: 2092 case MBX_CONFIG_PORT: 2093 case MBX_READ_SPARM64: 2094 case MBX_READ_RPI64: 2095 case MBX_REG_LOGIN64: 2096 case MBX_READ_TOPOLOGY: 2097 case MBX_WRITE_WWN: 2098 case MBX_SET_DEBUG: 2099 case MBX_LOAD_EXP_ROM: 2100 case MBX_ASYNCEVT_ENABLE: 2101 case MBX_REG_VPI: 2102 case MBX_UNREG_VPI: 2103 case MBX_HEARTBEAT: 2104 case MBX_PORT_CAPABILITIES: 2105 case MBX_PORT_IOV_CONTROL: 2106 case MBX_SLI4_CONFIG: 2107 case MBX_SLI4_REQ_FTRS: 2108 case MBX_REG_FCFI: 2109 case MBX_UNREG_FCFI: 2110 case MBX_REG_VFI: 2111 case MBX_UNREG_VFI: 2112 case MBX_INIT_VPI: 2113 case MBX_INIT_VFI: 2114 case MBX_RESUME_RPI: 2115 case MBX_READ_EVENT_LOG_STATUS: 2116 case MBX_READ_EVENT_LOG: 2117 case MBX_SECURITY_MGMT: 2118 case MBX_AUTH_PORT: 2119 case MBX_ACCESS_VDATA: 2120 ret = mbxCommand; 2121 break; 2122 default: 2123 ret = MBX_SHUTDOWN; 2124 break; 2125 } 2126 return ret; 2127 } 2128 2129 /** 2130 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler 2131 * @phba: Pointer to HBA context object. 2132 * @pmboxq: Pointer to mailbox command. 2133 * 2134 * This is completion handler function for mailbox commands issued from 2135 * lpfc_sli_issue_mbox_wait function. This function is called by the 2136 * mailbox event handler function with no lock held. This function 2137 * will wake up thread waiting on the wait queue pointed by context1 2138 * of the mailbox. 2139 **/ 2140 void 2141 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 2142 { 2143 wait_queue_head_t *pdone_q; 2144 unsigned long drvr_flag; 2145 2146 /* 2147 * If pdone_q is empty, the driver thread gave up waiting and 2148 * continued running. 2149 */ 2150 pmboxq->mbox_flag |= LPFC_MBX_WAKE; 2151 spin_lock_irqsave(&phba->hbalock, drvr_flag); 2152 pdone_q = (wait_queue_head_t *) pmboxq->context1; 2153 if (pdone_q) 2154 wake_up_interruptible(pdone_q); 2155 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2156 return; 2157 } 2158 2159 2160 /** 2161 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler 2162 * @phba: Pointer to HBA context object. 2163 * @pmb: Pointer to mailbox object. 2164 * 2165 * This function is the default mailbox completion handler. It 2166 * frees the memory resources associated with the completed mailbox 2167 * command. If the completed command is a REG_LOGIN mailbox command, 2168 * this function will issue a UREG_LOGIN to re-claim the RPI. 
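 *
 * The RPI is reclaimed by reusing the completed mailbox: the command is
 * rewritten with lpfc_unreg_login(), given this routine again as its
 * completion handler, and reissued with MBX_NOWAIT.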
2169 **/ 2170 void 2171 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2172 { 2173 struct lpfc_vport *vport = pmb->vport; 2174 struct lpfc_dmabuf *mp; 2175 struct lpfc_nodelist *ndlp; 2176 struct Scsi_Host *shost; 2177 uint16_t rpi, vpi; 2178 int rc; 2179 2180 mp = (struct lpfc_dmabuf *) (pmb->context1); 2181 2182 if (mp) { 2183 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2184 kfree(mp); 2185 } 2186 2187 /* 2188 * If a REG_LOGIN succeeded after node is destroyed or node 2189 * is in re-discovery driver need to cleanup the RPI. 2190 */ 2191 if (!(phba->pport->load_flag & FC_UNLOADING) && 2192 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 && 2193 !pmb->u.mb.mbxStatus) { 2194 rpi = pmb->u.mb.un.varWords[0]; 2195 vpi = pmb->u.mb.un.varRegLogin.vpi; 2196 lpfc_unreg_login(phba, vpi, rpi, pmb); 2197 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2198 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2199 if (rc != MBX_NOT_FINISHED) 2200 return; 2201 } 2202 2203 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && 2204 !(phba->pport->load_flag & FC_UNLOADING) && 2205 !pmb->u.mb.mbxStatus) { 2206 shost = lpfc_shost_from_vport(vport); 2207 spin_lock_irq(shost->host_lock); 2208 vport->vpi_state |= LPFC_VPI_REGISTERED; 2209 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 2210 spin_unlock_irq(shost->host_lock); 2211 } 2212 2213 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 2214 ndlp = (struct lpfc_nodelist *)pmb->context2; 2215 lpfc_nlp_put(ndlp); 2216 pmb->context2 = NULL; 2217 } 2218 2219 /* Check security permission status on INIT_LINK mailbox command */ 2220 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) && 2221 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) 2222 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2223 "2860 SLI authentication is required " 2224 "for INIT_LINK but has not done yet\n"); 2225 2226 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) 2227 lpfc_sli4_mbox_cmd_free(phba, pmb); 2228 else 2229 mempool_free(pmb, phba->mbox_mem_pool); 2230 } 2231 2232 /** 2233 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware 2234 * @phba: Pointer to HBA context object. 2235 * 2236 * This function is called with no lock held. This function processes all 2237 * the completed mailbox commands and gives it to upper layers. The interrupt 2238 * service routine processes mailbox completion interrupt and adds completed 2239 * mailbox commands to the mboxq_cmpl queue and signals the worker thread. 2240 * Worker thread call lpfc_sli_handle_mb_event, which will return the 2241 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This 2242 * function returns the mailbox commands to the upper layer by calling the 2243 * completion handler function of each mailbox. 
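 *
 * This function always returns zero.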
2244 **/ 2245 int 2246 lpfc_sli_handle_mb_event(struct lpfc_hba *phba) 2247 { 2248 MAILBOX_t *pmbox; 2249 LPFC_MBOXQ_t *pmb; 2250 int rc; 2251 LIST_HEAD(cmplq); 2252 2253 phba->sli.slistat.mbox_event++; 2254 2255 /* Get all completed mailboxe buffers into the cmplq */ 2256 spin_lock_irq(&phba->hbalock); 2257 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq); 2258 spin_unlock_irq(&phba->hbalock); 2259 2260 /* Get a Mailbox buffer to setup mailbox commands for callback */ 2261 do { 2262 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list); 2263 if (pmb == NULL) 2264 break; 2265 2266 pmbox = &pmb->u.mb; 2267 2268 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 2269 if (pmb->vport) { 2270 lpfc_debugfs_disc_trc(pmb->vport, 2271 LPFC_DISC_TRC_MBOX_VPORT, 2272 "MBOX cmpl vport: cmd:x%x mb:x%x x%x", 2273 (uint32_t)pmbox->mbxCommand, 2274 pmbox->un.varWords[0], 2275 pmbox->un.varWords[1]); 2276 } 2277 else { 2278 lpfc_debugfs_disc_trc(phba->pport, 2279 LPFC_DISC_TRC_MBOX, 2280 "MBOX cmpl: cmd:x%x mb:x%x x%x", 2281 (uint32_t)pmbox->mbxCommand, 2282 pmbox->un.varWords[0], 2283 pmbox->un.varWords[1]); 2284 } 2285 } 2286 2287 /* 2288 * It is a fatal error if unknown mbox command completion. 2289 */ 2290 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == 2291 MBX_SHUTDOWN) { 2292 /* Unknown mailbox command compl */ 2293 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2294 "(%d):0323 Unknown Mailbox command " 2295 "x%x (x%x/x%x) Cmpl\n", 2296 pmb->vport ? pmb->vport->vpi : 0, 2297 pmbox->mbxCommand, 2298 lpfc_sli_config_mbox_subsys_get(phba, 2299 pmb), 2300 lpfc_sli_config_mbox_opcode_get(phba, 2301 pmb)); 2302 phba->link_state = LPFC_HBA_ERROR; 2303 phba->work_hs = HS_FFER3; 2304 lpfc_handle_eratt(phba); 2305 continue; 2306 } 2307 2308 if (pmbox->mbxStatus) { 2309 phba->sli.slistat.mbox_stat_err++; 2310 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 2311 /* Mbox cmd cmpl error - RETRYing */ 2312 lpfc_printf_log(phba, KERN_INFO, 2313 LOG_MBOX | LOG_SLI, 2314 "(%d):0305 Mbox cmd cmpl " 2315 "error - RETRYing Data: x%x " 2316 "(x%x/x%x) x%x x%x x%x\n", 2317 pmb->vport ? pmb->vport->vpi : 0, 2318 pmbox->mbxCommand, 2319 lpfc_sli_config_mbox_subsys_get(phba, 2320 pmb), 2321 lpfc_sli_config_mbox_opcode_get(phba, 2322 pmb), 2323 pmbox->mbxStatus, 2324 pmbox->un.varWords[0], 2325 pmb->vport->port_state); 2326 pmbox->mbxStatus = 0; 2327 pmbox->mbxOwner = OWN_HOST; 2328 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2329 if (rc != MBX_NOT_FINISHED) 2330 continue; 2331 } 2332 } 2333 2334 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 2335 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2336 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " 2337 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 2338 "x%x x%x x%x\n", 2339 pmb->vport ? pmb->vport->vpi : 0, 2340 pmbox->mbxCommand, 2341 lpfc_sli_config_mbox_subsys_get(phba, pmb), 2342 lpfc_sli_config_mbox_opcode_get(phba, pmb), 2343 pmb->mbox_cmpl, 2344 *((uint32_t *) pmbox), 2345 pmbox->un.varWords[0], 2346 pmbox->un.varWords[1], 2347 pmbox->un.varWords[2], 2348 pmbox->un.varWords[3], 2349 pmbox->un.varWords[4], 2350 pmbox->un.varWords[5], 2351 pmbox->un.varWords[6], 2352 pmbox->un.varWords[7], 2353 pmbox->un.varWords[8], 2354 pmbox->un.varWords[9], 2355 pmbox->un.varWords[10]); 2356 2357 if (pmb->mbox_cmpl) 2358 pmb->mbox_cmpl(phba,pmb); 2359 } while (1); 2360 return 0; 2361 } 2362 2363 /** 2364 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag 2365 * @phba: Pointer to HBA context object. 2366 * @pring: Pointer to driver SLI ring object. 2367 * @tag: buffer tag. 
2368 * 2369 * This function is called with no lock held. When QUE_BUFTAG_BIT bit 2370 * is set in the tag the buffer is posted for a particular exchange, 2371 * the function will return the buffer without replacing the buffer. 2372 * If the buffer is for unsolicited ELS or CT traffic, this function 2373 * returns the buffer and also posts another buffer to the firmware. 2374 **/ 2375 static struct lpfc_dmabuf * 2376 lpfc_sli_get_buff(struct lpfc_hba *phba, 2377 struct lpfc_sli_ring *pring, 2378 uint32_t tag) 2379 { 2380 struct hbq_dmabuf *hbq_entry; 2381 2382 if (tag & QUE_BUFTAG_BIT) 2383 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); 2384 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); 2385 if (!hbq_entry) 2386 return NULL; 2387 return &hbq_entry->dbuf; 2388 } 2389 2390 /** 2391 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence 2392 * @phba: Pointer to HBA context object. 2393 * @pring: Pointer to driver SLI ring object. 2394 * @saveq: Pointer to the iocbq struct representing the sequence starting frame. 2395 * @fch_r_ctl: the r_ctl for the first frame of the sequence. 2396 * @fch_type: the type for the first frame of the sequence. 2397 * 2398 * This function is called with no lock held. This function uses the r_ctl and 2399 * type of the received sequence to find the correct callback function to call 2400 * to process the sequence. 2401 **/ 2402 static int 2403 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2404 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, 2405 uint32_t fch_type) 2406 { 2407 int i; 2408 2409 /* unSolicited Responses */ 2410 if (pring->prt[0].profile) { 2411 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 2412 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, 2413 saveq); 2414 return 1; 2415 } 2416 /* We must search, based on rctl / type 2417 for the right routine */ 2418 for (i = 0; i < pring->num_mask; i++) { 2419 if ((pring->prt[i].rctl == fch_r_ctl) && 2420 (pring->prt[i].type == fch_type)) { 2421 if (pring->prt[i].lpfc_sli_rcv_unsol_event) 2422 (pring->prt[i].lpfc_sli_rcv_unsol_event) 2423 (phba, pring, saveq); 2424 return 1; 2425 } 2426 } 2427 return 0; 2428 } 2429 2430 /** 2431 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 2432 * @phba: Pointer to HBA context object. 2433 * @pring: Pointer to driver SLI ring object. 2434 * @saveq: Pointer to the unsolicited iocb. 2435 * 2436 * This function is called with no lock held by the ring event handler 2437 * when there is an unsolicited iocb posted to the response ring by the 2438 * firmware. This function gets the buffer associated with the iocbs 2439 * and calls the event handler for the ring. This function handles both 2440 * qring buffers and hbq buffers. 2441 * When the function returns 1 the caller can free the iocb object otherwise 2442 * upper layer functions will free the iocb objects. 
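 *
 * If the iocb is a CMD_IOCB_RET_XRI64_CX completion on an HBQ-enabled
 * port, the buffers it references are released with lpfc_in_buf_free()
 * and the function returns 1.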
2443 **/ 2444 static int 2445 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2446 struct lpfc_iocbq *saveq) 2447 { 2448 IOCB_t * irsp; 2449 WORD5 * w5p; 2450 uint32_t Rctl, Type; 2451 uint32_t match; 2452 struct lpfc_iocbq *iocbq; 2453 struct lpfc_dmabuf *dmzbuf; 2454 2455 match = 0; 2456 irsp = &(saveq->iocb); 2457 2458 if (irsp->ulpCommand == CMD_ASYNC_STATUS) { 2459 if (pring->lpfc_sli_rcv_async_status) 2460 pring->lpfc_sli_rcv_async_status(phba, pring, saveq); 2461 else 2462 lpfc_printf_log(phba, 2463 KERN_WARNING, 2464 LOG_SLI, 2465 "0316 Ring %d handler: unexpected " 2466 "ASYNC_STATUS iocb received evt_code " 2467 "0x%x\n", 2468 pring->ringno, 2469 irsp->un.asyncstat.evt_code); 2470 return 1; 2471 } 2472 2473 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && 2474 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { 2475 if (irsp->ulpBdeCount > 0) { 2476 dmzbuf = lpfc_sli_get_buff(phba, pring, 2477 irsp->un.ulpWord[3]); 2478 lpfc_in_buf_free(phba, dmzbuf); 2479 } 2480 2481 if (irsp->ulpBdeCount > 1) { 2482 dmzbuf = lpfc_sli_get_buff(phba, pring, 2483 irsp->unsli3.sli3Words[3]); 2484 lpfc_in_buf_free(phba, dmzbuf); 2485 } 2486 2487 if (irsp->ulpBdeCount > 2) { 2488 dmzbuf = lpfc_sli_get_buff(phba, pring, 2489 irsp->unsli3.sli3Words[7]); 2490 lpfc_in_buf_free(phba, dmzbuf); 2491 } 2492 2493 return 1; 2494 } 2495 2496 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2497 if (irsp->ulpBdeCount != 0) { 2498 saveq->context2 = lpfc_sli_get_buff(phba, pring, 2499 irsp->un.ulpWord[3]); 2500 if (!saveq->context2) 2501 lpfc_printf_log(phba, 2502 KERN_ERR, 2503 LOG_SLI, 2504 "0341 Ring %d Cannot find buffer for " 2505 "an unsolicited iocb. tag 0x%x\n", 2506 pring->ringno, 2507 irsp->un.ulpWord[3]); 2508 } 2509 if (irsp->ulpBdeCount == 2) { 2510 saveq->context3 = lpfc_sli_get_buff(phba, pring, 2511 irsp->unsli3.sli3Words[7]); 2512 if (!saveq->context3) 2513 lpfc_printf_log(phba, 2514 KERN_ERR, 2515 LOG_SLI, 2516 "0342 Ring %d Cannot find buffer for an" 2517 " unsolicited iocb. tag 0x%x\n", 2518 pring->ringno, 2519 irsp->unsli3.sli3Words[7]); 2520 } 2521 list_for_each_entry(iocbq, &saveq->list, list) { 2522 irsp = &(iocbq->iocb); 2523 if (irsp->ulpBdeCount != 0) { 2524 iocbq->context2 = lpfc_sli_get_buff(phba, pring, 2525 irsp->un.ulpWord[3]); 2526 if (!iocbq->context2) 2527 lpfc_printf_log(phba, 2528 KERN_ERR, 2529 LOG_SLI, 2530 "0343 Ring %d Cannot find " 2531 "buffer for an unsolicited iocb" 2532 ". tag 0x%x\n", pring->ringno, 2533 irsp->un.ulpWord[3]); 2534 } 2535 if (irsp->ulpBdeCount == 2) { 2536 iocbq->context3 = lpfc_sli_get_buff(phba, pring, 2537 irsp->unsli3.sli3Words[7]); 2538 if (!iocbq->context3) 2539 lpfc_printf_log(phba, 2540 KERN_ERR, 2541 LOG_SLI, 2542 "0344 Ring %d Cannot find " 2543 "buffer for an unsolicited " 2544 "iocb. 
tag 0x%x\n", 2545 pring->ringno, 2546 irsp->unsli3.sli3Words[7]); 2547 } 2548 } 2549 } 2550 if (irsp->ulpBdeCount != 0 && 2551 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || 2552 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { 2553 int found = 0; 2554 2555 /* search continue save q for same XRI */ 2556 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { 2557 if (iocbq->iocb.unsli3.rcvsli3.ox_id == 2558 saveq->iocb.unsli3.rcvsli3.ox_id) { 2559 list_add_tail(&saveq->list, &iocbq->list); 2560 found = 1; 2561 break; 2562 } 2563 } 2564 if (!found) 2565 list_add_tail(&saveq->clist, 2566 &pring->iocb_continue_saveq); 2567 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { 2568 list_del_init(&iocbq->clist); 2569 saveq = iocbq; 2570 irsp = &(saveq->iocb); 2571 } else 2572 return 0; 2573 } 2574 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || 2575 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || 2576 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { 2577 Rctl = FC_RCTL_ELS_REQ; 2578 Type = FC_TYPE_ELS; 2579 } else { 2580 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); 2581 Rctl = w5p->hcsw.Rctl; 2582 Type = w5p->hcsw.Type; 2583 2584 /* Firmware Workaround */ 2585 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 2586 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 2587 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 2588 Rctl = FC_RCTL_ELS_REQ; 2589 Type = FC_TYPE_ELS; 2590 w5p->hcsw.Rctl = Rctl; 2591 w5p->hcsw.Type = Type; 2592 } 2593 } 2594 2595 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) 2596 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2597 "0313 Ring %d handler: unexpected Rctl x%x " 2598 "Type x%x received\n", 2599 pring->ringno, Rctl, Type); 2600 2601 return 1; 2602 } 2603 2604 /** 2605 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb 2606 * @phba: Pointer to HBA context object. 2607 * @pring: Pointer to driver SLI ring object. 2608 * @prspiocb: Pointer to response iocb object. 2609 * 2610 * This function looks up the iocb_lookup table to get the command iocb 2611 * corresponding to the given response iocb using the iotag of the 2612 * response iocb. This function is called with the hbalock held. 2613 * This function returns the command iocb object if it finds the command 2614 * iocb else returns NULL. 2615 **/ 2616 static struct lpfc_iocbq * 2617 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, 2618 struct lpfc_sli_ring *pring, 2619 struct lpfc_iocbq *prspiocb) 2620 { 2621 struct lpfc_iocbq *cmd_iocb = NULL; 2622 uint16_t iotag; 2623 2624 iotag = prspiocb->iocb.ulpIoTag; 2625 2626 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2627 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2628 list_del_init(&cmd_iocb->list); 2629 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 2630 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 2631 } 2632 return cmd_iocb; 2633 } 2634 2635 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2636 "0317 iotag x%x is out off " 2637 "range: max iotag x%x wd0 x%x\n", 2638 iotag, phba->sli.last_iotag, 2639 *(((uint32_t *) &prspiocb->iocb) + 7)); 2640 return NULL; 2641 } 2642 2643 /** 2644 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag 2645 * @phba: Pointer to HBA context object. 2646 * @pring: Pointer to driver SLI ring object. 2647 * @iotag: IOCB tag. 2648 * 2649 * This function looks up the iocb_lookup table to get the command iocb 2650 * corresponding to the given iotag. This function is called with the 2651 * hbalock held. 2652 * This function returns the command iocb object if it finds the command 2653 * iocb else returns NULL. 
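 *
 * Only a command iocb that is still marked LPFC_IO_ON_TXCMPLQ is
 * returned; the entry is removed from the txcmplq and the flag is
 * cleared before it is handed back to the caller.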
2654 **/ 2655 static struct lpfc_iocbq * 2656 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, 2657 struct lpfc_sli_ring *pring, uint16_t iotag) 2658 { 2659 struct lpfc_iocbq *cmd_iocb; 2660 2661 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2662 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2663 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 2664 /* remove from txcmpl queue list */ 2665 list_del_init(&cmd_iocb->list); 2666 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 2667 return cmd_iocb; 2668 } 2669 } 2670 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2671 "0372 iotag x%x is out off range: max iotag (x%x)\n", 2672 iotag, phba->sli.last_iotag); 2673 return NULL; 2674 } 2675 2676 /** 2677 * lpfc_sli_process_sol_iocb - process solicited iocb completion 2678 * @phba: Pointer to HBA context object. 2679 * @pring: Pointer to driver SLI ring object. 2680 * @saveq: Pointer to the response iocb to be processed. 2681 * 2682 * This function is called by the ring event handler for non-fcp 2683 * rings when there is a new response iocb in the response ring. 2684 * The caller is not required to hold any locks. This function 2685 * gets the command iocb associated with the response iocb and 2686 * calls the completion handler for the command iocb. If there 2687 * is no completion handler, the function will free the resources 2688 * associated with command iocb. If the response iocb is for 2689 * an already aborted command iocb, the status of the completion 2690 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. 2691 * This function always returns 1. 2692 **/ 2693 static int 2694 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2695 struct lpfc_iocbq *saveq) 2696 { 2697 struct lpfc_iocbq *cmdiocbp; 2698 int rc = 1; 2699 unsigned long iflag; 2700 2701 /* Based on the iotag field, get the cmd IOCB from the txcmplq */ 2702 spin_lock_irqsave(&phba->hbalock, iflag); 2703 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 2704 spin_unlock_irqrestore(&phba->hbalock, iflag); 2705 2706 if (cmdiocbp) { 2707 if (cmdiocbp->iocb_cmpl) { 2708 /* 2709 * If an ELS command failed send an event to mgmt 2710 * application. 2711 */ 2712 if (saveq->iocb.ulpStatus && 2713 (pring->ringno == LPFC_ELS_RING) && 2714 (cmdiocbp->iocb.ulpCommand == 2715 CMD_ELS_REQUEST64_CR)) 2716 lpfc_send_els_failure_event(phba, 2717 cmdiocbp, saveq); 2718 2719 /* 2720 * Post all ELS completions to the worker thread. 2721 * All other are passed to the completion callback. 2722 */ 2723 if (pring->ringno == LPFC_ELS_RING) { 2724 if ((phba->sli_rev < LPFC_SLI_REV4) && 2725 (cmdiocbp->iocb_flag & 2726 LPFC_DRIVER_ABORTED)) { 2727 spin_lock_irqsave(&phba->hbalock, 2728 iflag); 2729 cmdiocbp->iocb_flag &= 2730 ~LPFC_DRIVER_ABORTED; 2731 spin_unlock_irqrestore(&phba->hbalock, 2732 iflag); 2733 saveq->iocb.ulpStatus = 2734 IOSTAT_LOCAL_REJECT; 2735 saveq->iocb.un.ulpWord[4] = 2736 IOERR_SLI_ABORTED; 2737 2738 /* Firmware could still be in progress 2739 * of DMAing payload, so don't free data 2740 * buffer till after a hbeat. 2741 */ 2742 spin_lock_irqsave(&phba->hbalock, 2743 iflag); 2744 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 2745 spin_unlock_irqrestore(&phba->hbalock, 2746 iflag); 2747 } 2748 if (phba->sli_rev == LPFC_SLI_REV4) { 2749 if (saveq->iocb_flag & 2750 LPFC_EXCHANGE_BUSY) { 2751 /* Set cmdiocb flag for the 2752 * exchange busy so sgl (xri) 2753 * will not be released until 2754 * the abort xri is received 2755 * from hba. 
2756 */ 2757 spin_lock_irqsave( 2758 &phba->hbalock, iflag); 2759 cmdiocbp->iocb_flag |= 2760 LPFC_EXCHANGE_BUSY; 2761 spin_unlock_irqrestore( 2762 &phba->hbalock, iflag); 2763 } 2764 if (cmdiocbp->iocb_flag & 2765 LPFC_DRIVER_ABORTED) { 2766 /* 2767 * Clear LPFC_DRIVER_ABORTED 2768 * bit in case it was driver 2769 * initiated abort. 2770 */ 2771 spin_lock_irqsave( 2772 &phba->hbalock, iflag); 2773 cmdiocbp->iocb_flag &= 2774 ~LPFC_DRIVER_ABORTED; 2775 spin_unlock_irqrestore( 2776 &phba->hbalock, iflag); 2777 cmdiocbp->iocb.ulpStatus = 2778 IOSTAT_LOCAL_REJECT; 2779 cmdiocbp->iocb.un.ulpWord[4] = 2780 IOERR_ABORT_REQUESTED; 2781 /* 2782 * For SLI4, irsiocb contains 2783 * NO_XRI in sli_xritag, it 2784 * shall not affect releasing 2785 * sgl (xri) process. 2786 */ 2787 saveq->iocb.ulpStatus = 2788 IOSTAT_LOCAL_REJECT; 2789 saveq->iocb.un.ulpWord[4] = 2790 IOERR_SLI_ABORTED; 2791 spin_lock_irqsave( 2792 &phba->hbalock, iflag); 2793 saveq->iocb_flag |= 2794 LPFC_DELAY_MEM_FREE; 2795 spin_unlock_irqrestore( 2796 &phba->hbalock, iflag); 2797 } 2798 } 2799 } 2800 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 2801 } else 2802 lpfc_sli_release_iocbq(phba, cmdiocbp); 2803 } else { 2804 /* 2805 * Unknown initiating command based on the response iotag. 2806 * This could be the case on the ELS ring because of 2807 * lpfc_els_abort(). 2808 */ 2809 if (pring->ringno != LPFC_ELS_RING) { 2810 /* 2811 * Ring <ringno> handler: unexpected completion IoTag 2812 * <IoTag> 2813 */ 2814 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2815 "0322 Ring %d handler: " 2816 "unexpected completion IoTag x%x " 2817 "Data: x%x x%x x%x x%x\n", 2818 pring->ringno, 2819 saveq->iocb.ulpIoTag, 2820 saveq->iocb.ulpStatus, 2821 saveq->iocb.un.ulpWord[4], 2822 saveq->iocb.ulpCommand, 2823 saveq->iocb.ulpContext); 2824 } 2825 } 2826 2827 return rc; 2828 } 2829 2830 /** 2831 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler 2832 * @phba: Pointer to HBA context object. 2833 * @pring: Pointer to driver SLI ring object. 2834 * 2835 * This function is called from the iocb ring event handlers when 2836 * put pointer is ahead of the get pointer for a ring. This function signal 2837 * an error attention condition to the worker thread and the worker 2838 * thread will transition the HBA to offline state. 2839 **/ 2840 static void 2841 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 2842 { 2843 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 2844 /* 2845 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 2846 * rsp ring <portRspMax> 2847 */ 2848 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2849 "0312 Ring %d handler: portRspPut %d " 2850 "is bigger than rsp ring %d\n", 2851 pring->ringno, le32_to_cpu(pgp->rspPutInx), 2852 pring->sli.sli3.numRiocb); 2853 2854 phba->link_state = LPFC_HBA_ERROR; 2855 2856 /* 2857 * All error attention handlers are posted to 2858 * worker thread 2859 */ 2860 phba->work_ha |= HA_ERATT; 2861 phba->work_hs = HS_FFER3; 2862 2863 lpfc_worker_wake_up(phba); 2864 2865 return; 2866 } 2867 2868 /** 2869 * lpfc_poll_eratt - Error attention polling timer timeout handler 2870 * @ptr: Pointer to address of HBA context object. 2871 * 2872 * This function is invoked by the Error Attention polling timer when the 2873 * timer times out. It will check the SLI Error Attention register for 2874 * possible attention events. If so, it will post an Error Attention event 2875 * and wake up worker thread to process it. 
Otherwise, it will set up the 2876 * Error Attention polling timer for the next poll. 2877 **/ 2878 void lpfc_poll_eratt(unsigned long ptr) 2879 { 2880 struct lpfc_hba *phba; 2881 uint32_t eratt = 0, rem; 2882 uint64_t sli_intr, cnt; 2883 2884 phba = (struct lpfc_hba *)ptr; 2885 2886 /* Here we will also keep track of interrupts per sec of the hba */ 2887 sli_intr = phba->sli.slistat.sli_intr; 2888 2889 if (phba->sli.slistat.sli_prev_intr > sli_intr) 2890 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) + 2891 sli_intr); 2892 else 2893 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr); 2894 2895 /* 64-bit integer division not supporte on 32-bit x86 - use do_div */ 2896 rem = do_div(cnt, LPFC_ERATT_POLL_INTERVAL); 2897 phba->sli.slistat.sli_ips = cnt; 2898 2899 phba->sli.slistat.sli_prev_intr = sli_intr; 2900 2901 /* Check chip HA register for error event */ 2902 eratt = lpfc_sli_check_eratt(phba); 2903 2904 if (eratt) 2905 /* Tell the worker thread there is work to do */ 2906 lpfc_worker_wake_up(phba); 2907 else 2908 /* Restart the timer for next eratt poll */ 2909 mod_timer(&phba->eratt_poll, 2910 jiffies + 2911 msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); 2912 return; 2913 } 2914 2915 2916 /** 2917 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring 2918 * @phba: Pointer to HBA context object. 2919 * @pring: Pointer to driver SLI ring object. 2920 * @mask: Host attention register mask for this ring. 2921 * 2922 * This function is called from the interrupt context when there is a ring 2923 * event for the fcp ring. The caller does not hold any lock. 2924 * The function processes each response iocb in the response ring until it 2925 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with 2926 * LE bit set. The function will call the completion handler of the command iocb 2927 * if the response iocb indicates a completion for a command iocb or it is 2928 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 2929 * function if this is an unsolicited iocb. 2930 * This routine presumes LPFC_FCP_RING handling and doesn't bother 2931 * to check it explicitly. 2932 */ 2933 int 2934 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 2935 struct lpfc_sli_ring *pring, uint32_t mask) 2936 { 2937 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 2938 IOCB_t *irsp = NULL; 2939 IOCB_t *entry = NULL; 2940 struct lpfc_iocbq *cmdiocbq = NULL; 2941 struct lpfc_iocbq rspiocbq; 2942 uint32_t status; 2943 uint32_t portRspPut, portRspMax; 2944 int rc = 1; 2945 lpfc_iocb_type type; 2946 unsigned long iflag; 2947 uint32_t rsp_cmpl = 0; 2948 2949 spin_lock_irqsave(&phba->hbalock, iflag); 2950 pring->stats.iocb_event++; 2951 2952 /* 2953 * The next available response entry should never exceed the maximum 2954 * entries. If it does, treat it as an adapter hardware error. 2955 */ 2956 portRspMax = pring->sli.sli3.numRiocb; 2957 portRspPut = le32_to_cpu(pgp->rspPutInx); 2958 if (unlikely(portRspPut >= portRspMax)) { 2959 lpfc_sli_rsp_pointers_error(phba, pring); 2960 spin_unlock_irqrestore(&phba->hbalock, iflag); 2961 return 1; 2962 } 2963 if (phba->fcp_ring_in_use) { 2964 spin_unlock_irqrestore(&phba->hbalock, iflag); 2965 return 1; 2966 } else 2967 phba->fcp_ring_in_use = 1; 2968 2969 rmb(); 2970 while (pring->sli.sli3.rspidx != portRspPut) { 2971 /* 2972 * Fetch an entry off the ring and copy it into a local data 2973 * structure. The copy involves a byte-swap since the 2974 * network byte order and pci byte orders are different. 
2975 */ 2976 entry = lpfc_resp_iocb(phba, pring); 2977 phba->last_completion_time = jiffies; 2978 2979 if (++pring->sli.sli3.rspidx >= portRspMax) 2980 pring->sli.sli3.rspidx = 0; 2981 2982 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 2983 (uint32_t *) &rspiocbq.iocb, 2984 phba->iocb_rsp_size); 2985 INIT_LIST_HEAD(&(rspiocbq.list)); 2986 irsp = &rspiocbq.iocb; 2987 2988 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 2989 pring->stats.iocb_rsp++; 2990 rsp_cmpl++; 2991 2992 if (unlikely(irsp->ulpStatus)) { 2993 /* 2994 * If resource errors reported from HBA, reduce 2995 * queuedepths of the SCSI device. 2996 */ 2997 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 2998 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 2999 IOERR_NO_RESOURCES)) { 3000 spin_unlock_irqrestore(&phba->hbalock, iflag); 3001 phba->lpfc_rampdown_queue_depth(phba); 3002 spin_lock_irqsave(&phba->hbalock, iflag); 3003 } 3004 3005 /* Rsp ring <ringno> error: IOCB */ 3006 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3007 "0336 Rsp Ring %d error: IOCB Data: " 3008 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 3009 pring->ringno, 3010 irsp->un.ulpWord[0], 3011 irsp->un.ulpWord[1], 3012 irsp->un.ulpWord[2], 3013 irsp->un.ulpWord[3], 3014 irsp->un.ulpWord[4], 3015 irsp->un.ulpWord[5], 3016 *(uint32_t *)&irsp->un1, 3017 *((uint32_t *)&irsp->un1 + 1)); 3018 } 3019 3020 switch (type) { 3021 case LPFC_ABORT_IOCB: 3022 case LPFC_SOL_IOCB: 3023 /* 3024 * Idle exchange closed via ABTS from port. No iocb 3025 * resources need to be recovered. 3026 */ 3027 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 3028 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3029 "0333 IOCB cmd 0x%x" 3030 " processed. Skipping" 3031 " completion\n", 3032 irsp->ulpCommand); 3033 break; 3034 } 3035 3036 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 3037 &rspiocbq); 3038 if (unlikely(!cmdiocbq)) 3039 break; 3040 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) 3041 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 3042 if (cmdiocbq->iocb_cmpl) { 3043 spin_unlock_irqrestore(&phba->hbalock, iflag); 3044 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 3045 &rspiocbq); 3046 spin_lock_irqsave(&phba->hbalock, iflag); 3047 } 3048 break; 3049 case LPFC_UNSOL_IOCB: 3050 spin_unlock_irqrestore(&phba->hbalock, iflag); 3051 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 3052 spin_lock_irqsave(&phba->hbalock, iflag); 3053 break; 3054 default: 3055 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3056 char adaptermsg[LPFC_MAX_ADPTMSG]; 3057 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3058 memcpy(&adaptermsg[0], (uint8_t *) irsp, 3059 MAX_MSG_DATA); 3060 dev_warn(&((phba->pcidev)->dev), 3061 "lpfc%d: %s\n", 3062 phba->brd_no, adaptermsg); 3063 } else { 3064 /* Unknown IOCB command */ 3065 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3066 "0334 Unknown IOCB command " 3067 "Data: x%x, x%x x%x x%x x%x\n", 3068 type, irsp->ulpCommand, 3069 irsp->ulpStatus, 3070 irsp->ulpIoTag, 3071 irsp->ulpContext); 3072 } 3073 break; 3074 } 3075 3076 /* 3077 * The response IOCB has been processed. Update the ring 3078 * pointer in SLIM. If the port response put pointer has not 3079 * been updated, sync the pgp->rspPutInx and fetch the new port 3080 * response put pointer. 
3081 */ 3082 writel(pring->sli.sli3.rspidx, 3083 &phba->host_gp[pring->ringno].rspGetInx); 3084 3085 if (pring->sli.sli3.rspidx == portRspPut) 3086 portRspPut = le32_to_cpu(pgp->rspPutInx); 3087 } 3088 3089 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) { 3090 pring->stats.iocb_rsp_full++; 3091 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3092 writel(status, phba->CAregaddr); 3093 readl(phba->CAregaddr); 3094 } 3095 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3096 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3097 pring->stats.iocb_cmd_empty++; 3098 3099 /* Force update of the local copy of cmdGetInx */ 3100 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 3101 lpfc_sli_resume_iocb(phba, pring); 3102 3103 if ((pring->lpfc_sli_cmd_available)) 3104 (pring->lpfc_sli_cmd_available) (phba, pring); 3105 3106 } 3107 3108 phba->fcp_ring_in_use = 0; 3109 spin_unlock_irqrestore(&phba->hbalock, iflag); 3110 return rc; 3111 } 3112 3113 /** 3114 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb 3115 * @phba: Pointer to HBA context object. 3116 * @pring: Pointer to driver SLI ring object. 3117 * @rspiocbp: Pointer to driver response IOCB object. 3118 * 3119 * This function is called from the worker thread when there is a slow-path 3120 * response IOCB to process. This function chains all the response iocbs until 3121 * seeing the iocb with the LE bit set. The function will call 3122 * lpfc_sli_process_sol_iocb function if the response iocb indicates a 3123 * completion of a command iocb. The function will call the 3124 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. 3125 * The function frees the resources or calls the completion handler if this 3126 * iocb is an abort completion. The function returns NULL when the response 3127 * iocb has the LE bit set and all the chained iocbs are processed, otherwise 3128 * this function shall chain the iocb on to the iocb_continueq and return the 3129 * response iocb passed in. 3130 **/ 3131 static struct lpfc_iocbq * 3132 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3133 struct lpfc_iocbq *rspiocbp) 3134 { 3135 struct lpfc_iocbq *saveq; 3136 struct lpfc_iocbq *cmdiocbp; 3137 struct lpfc_iocbq *next_iocb; 3138 IOCB_t *irsp = NULL; 3139 uint32_t free_saveq; 3140 uint8_t iocb_cmd_type; 3141 lpfc_iocb_type type; 3142 unsigned long iflag; 3143 int rc; 3144 3145 spin_lock_irqsave(&phba->hbalock, iflag); 3146 /* First add the response iocb to the countinueq list */ 3147 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 3148 pring->iocb_continueq_cnt++; 3149 3150 /* Now, determine whether the list is completed for processing */ 3151 irsp = &rspiocbp->iocb; 3152 if (irsp->ulpLe) { 3153 /* 3154 * By default, the driver expects to free all resources 3155 * associated with this iocb completion. 3156 */ 3157 free_saveq = 1; 3158 saveq = list_get_first(&pring->iocb_continueq, 3159 struct lpfc_iocbq, list); 3160 irsp = &(saveq->iocb); 3161 list_del_init(&pring->iocb_continueq); 3162 pring->iocb_continueq_cnt = 0; 3163 3164 pring->stats.iocb_rsp++; 3165 3166 /* 3167 * If resource errors reported from HBA, reduce 3168 * queuedepths of the SCSI device. 
3169 */ 3170 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3171 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3172 IOERR_NO_RESOURCES)) { 3173 spin_unlock_irqrestore(&phba->hbalock, iflag); 3174 phba->lpfc_rampdown_queue_depth(phba); 3175 spin_lock_irqsave(&phba->hbalock, iflag); 3176 } 3177 3178 if (irsp->ulpStatus) { 3179 /* Rsp ring <ringno> error: IOCB */ 3180 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3181 "0328 Rsp Ring %d error: " 3182 "IOCB Data: " 3183 "x%x x%x x%x x%x " 3184 "x%x x%x x%x x%x " 3185 "x%x x%x x%x x%x " 3186 "x%x x%x x%x x%x\n", 3187 pring->ringno, 3188 irsp->un.ulpWord[0], 3189 irsp->un.ulpWord[1], 3190 irsp->un.ulpWord[2], 3191 irsp->un.ulpWord[3], 3192 irsp->un.ulpWord[4], 3193 irsp->un.ulpWord[5], 3194 *(((uint32_t *) irsp) + 6), 3195 *(((uint32_t *) irsp) + 7), 3196 *(((uint32_t *) irsp) + 8), 3197 *(((uint32_t *) irsp) + 9), 3198 *(((uint32_t *) irsp) + 10), 3199 *(((uint32_t *) irsp) + 11), 3200 *(((uint32_t *) irsp) + 12), 3201 *(((uint32_t *) irsp) + 13), 3202 *(((uint32_t *) irsp) + 14), 3203 *(((uint32_t *) irsp) + 15)); 3204 } 3205 3206 /* 3207 * Fetch the IOCB command type and call the correct completion 3208 * routine. Solicited and Unsolicited IOCBs on the ELS ring 3209 * get freed back to the lpfc_iocb_list by the discovery 3210 * kernel thread. 3211 */ 3212 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 3213 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 3214 switch (type) { 3215 case LPFC_SOL_IOCB: 3216 spin_unlock_irqrestore(&phba->hbalock, iflag); 3217 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); 3218 spin_lock_irqsave(&phba->hbalock, iflag); 3219 break; 3220 3221 case LPFC_UNSOL_IOCB: 3222 spin_unlock_irqrestore(&phba->hbalock, iflag); 3223 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); 3224 spin_lock_irqsave(&phba->hbalock, iflag); 3225 if (!rc) 3226 free_saveq = 0; 3227 break; 3228 3229 case LPFC_ABORT_IOCB: 3230 cmdiocbp = NULL; 3231 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) 3232 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, 3233 saveq); 3234 if (cmdiocbp) { 3235 /* Call the specified completion routine */ 3236 if (cmdiocbp->iocb_cmpl) { 3237 spin_unlock_irqrestore(&phba->hbalock, 3238 iflag); 3239 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, 3240 saveq); 3241 spin_lock_irqsave(&phba->hbalock, 3242 iflag); 3243 } else 3244 __lpfc_sli_release_iocbq(phba, 3245 cmdiocbp); 3246 } 3247 break; 3248 3249 case LPFC_UNKNOWN_IOCB: 3250 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3251 char adaptermsg[LPFC_MAX_ADPTMSG]; 3252 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3253 memcpy(&adaptermsg[0], (uint8_t *)irsp, 3254 MAX_MSG_DATA); 3255 dev_warn(&((phba->pcidev)->dev), 3256 "lpfc%d: %s\n", 3257 phba->brd_no, adaptermsg); 3258 } else { 3259 /* Unknown IOCB command */ 3260 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3261 "0335 Unknown IOCB " 3262 "command Data: x%x " 3263 "x%x x%x x%x\n", 3264 irsp->ulpCommand, 3265 irsp->ulpStatus, 3266 irsp->ulpIoTag, 3267 irsp->ulpContext); 3268 } 3269 break; 3270 } 3271 3272 if (free_saveq) { 3273 list_for_each_entry_safe(rspiocbp, next_iocb, 3274 &saveq->list, list) { 3275 list_del_init(&rspiocbp->list); 3276 __lpfc_sli_release_iocbq(phba, rspiocbp); 3277 } 3278 __lpfc_sli_release_iocbq(phba, saveq); 3279 } 3280 rspiocbp = NULL; 3281 } 3282 spin_unlock_irqrestore(&phba->hbalock, iflag); 3283 return rspiocbp; 3284 } 3285 3286 /** 3287 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs 3288 * @phba: Pointer to HBA context object. 3289 * @pring: Pointer to driver SLI ring object. 
3290 * @mask: Host attention register mask for this ring. 3291 * 3292 * This routine wraps the actual slow_ring event process routine from the 3293 * API jump table function pointer from the lpfc_hba struct. 3294 **/ 3295 void 3296 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 3297 struct lpfc_sli_ring *pring, uint32_t mask) 3298 { 3299 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); 3300 } 3301 3302 /** 3303 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings 3304 * @phba: Pointer to HBA context object. 3305 * @pring: Pointer to driver SLI ring object. 3306 * @mask: Host attention register mask for this ring. 3307 * 3308 * This function is called from the worker thread when there is a ring event 3309 * for non-fcp rings. The caller does not hold any lock. The function will 3310 * remove each response iocb in the response ring and calls the handle 3311 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 3312 **/ 3313 static void 3314 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, 3315 struct lpfc_sli_ring *pring, uint32_t mask) 3316 { 3317 struct lpfc_pgp *pgp; 3318 IOCB_t *entry; 3319 IOCB_t *irsp = NULL; 3320 struct lpfc_iocbq *rspiocbp = NULL; 3321 uint32_t portRspPut, portRspMax; 3322 unsigned long iflag; 3323 uint32_t status; 3324 3325 pgp = &phba->port_gp[pring->ringno]; 3326 spin_lock_irqsave(&phba->hbalock, iflag); 3327 pring->stats.iocb_event++; 3328 3329 /* 3330 * The next available response entry should never exceed the maximum 3331 * entries. If it does, treat it as an adapter hardware error. 3332 */ 3333 portRspMax = pring->sli.sli3.numRiocb; 3334 portRspPut = le32_to_cpu(pgp->rspPutInx); 3335 if (portRspPut >= portRspMax) { 3336 /* 3337 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3338 * rsp ring <portRspMax> 3339 */ 3340 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3341 "0303 Ring %d handler: portRspPut %d " 3342 "is bigger than rsp ring %d\n", 3343 pring->ringno, portRspPut, portRspMax); 3344 3345 phba->link_state = LPFC_HBA_ERROR; 3346 spin_unlock_irqrestore(&phba->hbalock, iflag); 3347 3348 phba->work_hs = HS_FFER3; 3349 lpfc_handle_eratt(phba); 3350 3351 return; 3352 } 3353 3354 rmb(); 3355 while (pring->sli.sli3.rspidx != portRspPut) { 3356 /* 3357 * Build a completion list and call the appropriate handler. 3358 * The process is to get the next available response iocb, get 3359 * a free iocb from the list, copy the response data into the 3360 * free iocb, insert to the continuation list, and update the 3361 * next response index to slim. This process makes response 3362 * iocb's in the ring available to DMA as fast as possible but 3363 * pays a penalty for a copy operation. Since the iocb is 3364 * only 32 bytes, this penalty is considered small relative to 3365 * the PCI reads for register values and a slim write. When 3366 * the ulpLe field is set, the entire Command has been 3367 * received. 3368 */ 3369 entry = lpfc_resp_iocb(phba, pring); 3370 3371 phba->last_completion_time = jiffies; 3372 rspiocbp = __lpfc_sli_get_iocbq(phba); 3373 if (rspiocbp == NULL) { 3374 printk(KERN_ERR "%s: out of buffers! 
Failing " 3375 "completion.\n", __func__); 3376 break; 3377 } 3378 3379 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, 3380 phba->iocb_rsp_size); 3381 irsp = &rspiocbp->iocb; 3382 3383 if (++pring->sli.sli3.rspidx >= portRspMax) 3384 pring->sli.sli3.rspidx = 0; 3385 3386 if (pring->ringno == LPFC_ELS_RING) { 3387 lpfc_debugfs_slow_ring_trc(phba, 3388 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x", 3389 *(((uint32_t *) irsp) + 4), 3390 *(((uint32_t *) irsp) + 6), 3391 *(((uint32_t *) irsp) + 7)); 3392 } 3393 3394 writel(pring->sli.sli3.rspidx, 3395 &phba->host_gp[pring->ringno].rspGetInx); 3396 3397 spin_unlock_irqrestore(&phba->hbalock, iflag); 3398 /* Handle the response IOCB */ 3399 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); 3400 spin_lock_irqsave(&phba->hbalock, iflag); 3401 3402 /* 3403 * If the port response put pointer has not been updated, sync 3404 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port 3405 * response put pointer. 3406 */ 3407 if (pring->sli.sli3.rspidx == portRspPut) { 3408 portRspPut = le32_to_cpu(pgp->rspPutInx); 3409 } 3410 } /* while (pring->sli.sli3.rspidx != portRspPut) */ 3411 3412 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { 3413 /* At least one response entry has been freed */ 3414 pring->stats.iocb_rsp_full++; 3415 /* SET RxRE_RSP in Chip Att register */ 3416 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3417 writel(status, phba->CAregaddr); 3418 readl(phba->CAregaddr); /* flush */ 3419 } 3420 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3421 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3422 pring->stats.iocb_cmd_empty++; 3423 3424 /* Force update of the local copy of cmdGetInx */ 3425 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 3426 lpfc_sli_resume_iocb(phba, pring); 3427 3428 if ((pring->lpfc_sli_cmd_available)) 3429 (pring->lpfc_sli_cmd_available) (phba, pring); 3430 3431 } 3432 3433 spin_unlock_irqrestore(&phba->hbalock, iflag); 3434 return; 3435 } 3436 3437 /** 3438 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events 3439 * @phba: Pointer to HBA context object. 3440 * @pring: Pointer to driver SLI ring object. 3441 * @mask: Host attention register mask for this ring. 3442 * 3443 * This function is called from the worker thread when there is a pending 3444 * ELS response iocb on the driver internal slow-path response iocb worker 3445 * queue. The caller does not hold any lock. The function will remove each 3446 * response iocb from the response worker queue and calls the handle 3447 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 
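 *
 * Completion queue events of type CQE_CODE_COMPL_WQE are translated to
 * response iocbs with lpfc_sli4_els_wcqe_to_rspiocbq() before being
 * handed to lpfc_sli_sp_handle_rspiocb(); receive events are passed to
 * lpfc_sli4_handle_received_buffer().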
3448 **/ 3449 static void 3450 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, 3451 struct lpfc_sli_ring *pring, uint32_t mask) 3452 { 3453 struct lpfc_iocbq *irspiocbq; 3454 struct hbq_dmabuf *dmabuf; 3455 struct lpfc_cq_event *cq_event; 3456 unsigned long iflag; 3457 3458 spin_lock_irqsave(&phba->hbalock, iflag); 3459 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 3460 spin_unlock_irqrestore(&phba->hbalock, iflag); 3461 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 3462 /* Get the response iocb from the head of work queue */ 3463 spin_lock_irqsave(&phba->hbalock, iflag); 3464 list_remove_head(&phba->sli4_hba.sp_queue_event, 3465 cq_event, struct lpfc_cq_event, list); 3466 spin_unlock_irqrestore(&phba->hbalock, iflag); 3467 3468 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 3469 case CQE_CODE_COMPL_WQE: 3470 irspiocbq = container_of(cq_event, struct lpfc_iocbq, 3471 cq_event); 3472 /* Translate ELS WCQE to response IOCBQ */ 3473 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, 3474 irspiocbq); 3475 if (irspiocbq) 3476 lpfc_sli_sp_handle_rspiocb(phba, pring, 3477 irspiocbq); 3478 break; 3479 case CQE_CODE_RECEIVE: 3480 case CQE_CODE_RECEIVE_V1: 3481 dmabuf = container_of(cq_event, struct hbq_dmabuf, 3482 cq_event); 3483 lpfc_sli4_handle_received_buffer(phba, dmabuf); 3484 break; 3485 default: 3486 break; 3487 } 3488 } 3489 } 3490 3491 /** 3492 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring 3493 * @phba: Pointer to HBA context object. 3494 * @pring: Pointer to driver SLI ring object. 3495 * 3496 * This function aborts all iocbs in the given ring and frees all the iocb 3497 * objects in txq. This function issues an abort iocb for all the iocb commands 3498 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3499 * the return of this function. The caller is not required to hold any locks. 3500 **/ 3501 void 3502 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3503 { 3504 LIST_HEAD(completions); 3505 struct lpfc_iocbq *iocb, *next_iocb; 3506 3507 if (pring->ringno == LPFC_ELS_RING) { 3508 lpfc_fabric_abort_hba(phba); 3509 } 3510 3511 /* Error everything on txq and txcmplq 3512 * First do the txq. 3513 */ 3514 spin_lock_irq(&phba->hbalock); 3515 list_splice_init(&pring->txq, &completions); 3516 3517 /* Next issue ABTS for everything on the txcmplq */ 3518 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3519 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3520 3521 spin_unlock_irq(&phba->hbalock); 3522 3523 /* Cancel all the IOCBs from the completions list */ 3524 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 3525 IOERR_SLI_ABORTED); 3526 } 3527 3528 /** 3529 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring 3530 * @phba: Pointer to HBA context object. 3531 * 3532 * This function flushes all iocbs in the fcp ring and frees all the iocb 3533 * objects in txq and txcmplq. This function will not issue abort iocbs 3534 * for all the iocb commands in txcmplq, they will just be returned with 3535 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI 3536 * slot has been permanently disabled. 
3537 **/ 3538 void 3539 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) 3540 { 3541 LIST_HEAD(txq); 3542 LIST_HEAD(txcmplq); 3543 struct lpfc_sli *psli = &phba->sli; 3544 struct lpfc_sli_ring *pring; 3545 3546 /* Currently, only one fcp ring */ 3547 pring = &psli->ring[psli->fcp_ring]; 3548 3549 spin_lock_irq(&phba->hbalock); 3550 /* Retrieve everything on txq */ 3551 list_splice_init(&pring->txq, &txq); 3552 3553 /* Retrieve everything on the txcmplq */ 3554 list_splice_init(&pring->txcmplq, &txcmplq); 3555 3556 /* Indicate the I/O queues are flushed */ 3557 phba->hba_flag |= HBA_FCP_IOQ_FLUSH; 3558 spin_unlock_irq(&phba->hbalock); 3559 3560 /* Flush the txq */ 3561 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, 3562 IOERR_SLI_DOWN); 3563 3564 /* Flush the txcmpq */ 3565 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, 3566 IOERR_SLI_DOWN); 3567 } 3568 3569 /** 3570 * lpfc_sli_brdready_s3 - Check for sli3 host ready status 3571 * @phba: Pointer to HBA context object. 3572 * @mask: Bit mask to be checked. 3573 * 3574 * This function reads the host status register and compares 3575 * with the provided bit mask to check if HBA completed 3576 * the restart. This function will wait in a loop for the 3577 * HBA to complete restart. If the HBA does not restart within 3578 * 15 iterations, the function will reset the HBA again. The 3579 * function returns 1 when the HBA fails to restart, otherwise it returns 3580 * zero. 3581 **/ 3582 static int 3583 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) 3584 { 3585 uint32_t status; 3586 int i = 0; 3587 int retval = 0; 3588 3589 /* Read the HBA Host Status Register */ 3590 if (lpfc_readl(phba->HSregaddr, &status)) 3591 return 1; 3592 3593 /* 3594 * Check status register every 10ms for 5 retries, then every 3595 * 500ms for 5, then every 2.5 sec for 5, then reset the board and 3596 * check every 2.5 sec for 5 more. 3597 * Break out of the loop if errors occurred during init. 3598 */ 3599 while (((status & mask) != mask) && 3600 !(status & HS_FFERM) && 3601 i++ < 20) { 3602 3603 if (i <= 5) 3604 msleep(10); 3605 else if (i <= 10) 3606 msleep(500); 3607 else 3608 msleep(2500); 3609 3610 if (i == 15) { 3611 /* Do post */ 3612 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3613 lpfc_sli_brdrestart(phba); 3614 } 3615 /* Read the HBA Host Status Register */ 3616 if (lpfc_readl(phba->HSregaddr, &status)) { 3617 retval = 1; 3618 break; 3619 } 3620 } 3621 3622 /* Check to see if any errors occurred during init */ 3623 if ((status & HS_FFERM) || (i >= 20)) { 3624 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3625 "2751 Adapter failed to restart, " 3626 "status reg x%x, FW Data: A8 x%x AC x%x\n", 3627 status, 3628 readl(phba->MBslimaddr + 0xa8), 3629 readl(phba->MBslimaddr + 0xac)); 3630 phba->link_state = LPFC_HBA_ERROR; 3631 retval = 1; 3632 } 3633 3634 return retval; 3635 } 3636 3637 /** 3638 * lpfc_sli_brdready_s4 - Check for sli4 host ready status 3639 * @phba: Pointer to HBA context object. 3640 * @mask: Bit mask to be checked. 3641 * 3642 * This function checks the host status register to determine if the HBA is 3643 * ready. This function will wait in a loop for the HBA to be ready. 3644 * If the HBA is not ready, the function will reset the HBA PCI 3645 * function again. The function returns 1 when the HBA fails to become ready, 3646 * otherwise it returns zero.
3647 **/ 3648 static int 3649 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask) 3650 { 3651 uint32_t status; 3652 int retval = 0; 3653 3654 /* Read the HBA Host Status Register */ 3655 status = lpfc_sli4_post_status_check(phba); 3656 3657 if (status) { 3658 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3659 lpfc_sli_brdrestart(phba); 3660 status = lpfc_sli4_post_status_check(phba); 3661 } 3662 3663 /* Check to see if any errors occurred during init */ 3664 if (status) { 3665 phba->link_state = LPFC_HBA_ERROR; 3666 retval = 1; 3667 } else 3668 phba->sli4_hba.intr_enable = 0; 3669 3670 return retval; 3671 } 3672 3673 /** 3674 * lpfc_sli_brdready - Wrapper func for checking the hba readiness 3675 * @phba: Pointer to HBA context object. 3676 * @mask: Bit mask to be checked. 3677 * 3678 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine 3679 * from the API jump table function pointer from the lpfc_hba struct. 3680 **/ 3681 int 3682 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) 3683 { 3684 return phba->lpfc_sli_brdready(phba, mask); 3685 } 3686 3687 #define BARRIER_TEST_PATTERN (0xdeadbeef) 3688 3689 /** 3690 * lpfc_reset_barrier - Make HBA ready for HBA reset 3691 * @phba: Pointer to HBA context object. 3692 * 3693 * This function is called with hbalock held before resetting an HBA, and 3694 * requests the HBA to quiesce DMAs before the reset. 3695 **/ 3696 void lpfc_reset_barrier(struct lpfc_hba *phba) 3697 { 3698 uint32_t __iomem *resp_buf; 3699 uint32_t __iomem *mbox_buf; 3700 volatile uint32_t mbox; 3701 uint32_t hc_copy, ha_copy, resp_data; 3702 int i; 3703 uint8_t hdrtype; 3704 3705 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); 3706 if (hdrtype != 0x80 || 3707 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID && 3708 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID)) 3709 return; 3710 3711 /* 3712 * Tell the other part of the chip to suspend temporarily all 3713 * its DMA activity.
3714 */ 3715 resp_buf = phba->MBslimaddr; 3716 3717 /* Disable the error attention */ 3718 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 3719 return; 3720 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 3721 readl(phba->HCregaddr); /* flush */ 3722 phba->link_flag |= LS_IGNORE_ERATT; 3723 3724 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3725 return; 3726 if (ha_copy & HA_ERATT) { 3727 /* Clear Chip error bit */ 3728 writel(HA_ERATT, phba->HAregaddr); 3729 phba->pport->stopped = 1; 3730 } 3731 3732 mbox = 0; 3733 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 3734 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 3735 3736 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 3737 mbox_buf = phba->MBslimaddr; 3738 writel(mbox, mbox_buf); 3739 3740 for (i = 0; i < 50; i++) { 3741 if (lpfc_readl((resp_buf + 1), &resp_data)) 3742 return; 3743 if (resp_data != ~(BARRIER_TEST_PATTERN)) 3744 mdelay(1); 3745 else 3746 break; 3747 } 3748 resp_data = 0; 3749 if (lpfc_readl((resp_buf + 1), &resp_data)) 3750 return; 3751 if (resp_data != ~(BARRIER_TEST_PATTERN)) { 3752 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || 3753 phba->pport->stopped) 3754 goto restore_hc; 3755 else 3756 goto clear_errat; 3757 } 3758 3759 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 3760 resp_data = 0; 3761 for (i = 0; i < 500; i++) { 3762 if (lpfc_readl(resp_buf, &resp_data)) 3763 return; 3764 if (resp_data != mbox) 3765 mdelay(1); 3766 else 3767 break; 3768 } 3769 3770 clear_errat: 3771 3772 while (++i < 500) { 3773 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3774 return; 3775 if (!(ha_copy & HA_ERATT)) 3776 mdelay(1); 3777 else 3778 break; 3779 } 3780 3781 if (readl(phba->HAregaddr) & HA_ERATT) { 3782 writel(HA_ERATT, phba->HAregaddr); 3783 phba->pport->stopped = 1; 3784 } 3785 3786 restore_hc: 3787 phba->link_flag &= ~LS_IGNORE_ERATT; 3788 writel(hc_copy, phba->HCregaddr); 3789 readl(phba->HCregaddr); /* flush */ 3790 } 3791 3792 /** 3793 * lpfc_sli_brdkill - Issue a kill_board mailbox command 3794 * @phba: Pointer to HBA context object. 3795 * 3796 * This function issues a kill_board mailbox command and waits for 3797 * the error attention interrupt. This function is called for stopping 3798 * the firmware processing. The caller is not required to hold any 3799 * locks. This function calls lpfc_hba_down_post function to free 3800 * any pending commands after the kill. The function will return 1 when it 3801 * fails to kill the board else will return 0. 
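 *
 * A hedged usage sketch (illustrative only, not a caller taken from this
 * file); a nonzero return means the kill did not complete:
 *
 *     rc = lpfc_sli_brdkill(phba);
 *     if (rc)
 *         return rc;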
3802 **/ 3803 int 3804 lpfc_sli_brdkill(struct lpfc_hba *phba) 3805 { 3806 struct lpfc_sli *psli; 3807 LPFC_MBOXQ_t *pmb; 3808 uint32_t status; 3809 uint32_t ha_copy; 3810 int retval; 3811 int i = 0; 3812 3813 psli = &phba->sli; 3814 3815 /* Kill HBA */ 3816 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3817 "0329 Kill HBA Data: x%x x%x\n", 3818 phba->pport->port_state, psli->sli_flag); 3819 3820 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3821 if (!pmb) 3822 return 1; 3823 3824 /* Disable the error attention */ 3825 spin_lock_irq(&phba->hbalock); 3826 if (lpfc_readl(phba->HCregaddr, &status)) { 3827 spin_unlock_irq(&phba->hbalock); 3828 mempool_free(pmb, phba->mbox_mem_pool); 3829 return 1; 3830 } 3831 status &= ~HC_ERINT_ENA; 3832 writel(status, phba->HCregaddr); 3833 readl(phba->HCregaddr); /* flush */ 3834 phba->link_flag |= LS_IGNORE_ERATT; 3835 spin_unlock_irq(&phba->hbalock); 3836 3837 lpfc_kill_board(phba, pmb); 3838 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3839 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 3840 3841 if (retval != MBX_SUCCESS) { 3842 if (retval != MBX_BUSY) 3843 mempool_free(pmb, phba->mbox_mem_pool); 3844 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3845 "2752 KILL_BOARD command failed retval %d\n", 3846 retval); 3847 spin_lock_irq(&phba->hbalock); 3848 phba->link_flag &= ~LS_IGNORE_ERATT; 3849 spin_unlock_irq(&phba->hbalock); 3850 return 1; 3851 } 3852 3853 spin_lock_irq(&phba->hbalock); 3854 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 3855 spin_unlock_irq(&phba->hbalock); 3856 3857 mempool_free(pmb, phba->mbox_mem_pool); 3858 3859 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 3860 * attention every 100ms for 3 seconds. If we don't get ERATT after 3861 * 3 seconds we still set HBA_ERROR state because the status of the 3862 * board is now undefined. 3863 */ 3864 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3865 return 1; 3866 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 3867 mdelay(100); 3868 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3869 return 1; 3870 } 3871 3872 del_timer_sync(&psli->mbox_tmo); 3873 if (ha_copy & HA_ERATT) { 3874 writel(HA_ERATT, phba->HAregaddr); 3875 phba->pport->stopped = 1; 3876 } 3877 spin_lock_irq(&phba->hbalock); 3878 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3879 psli->mbox_active = NULL; 3880 phba->link_flag &= ~LS_IGNORE_ERATT; 3881 spin_unlock_irq(&phba->hbalock); 3882 3883 lpfc_hba_down_post(phba); 3884 phba->link_state = LPFC_HBA_ERROR; 3885 3886 return ha_copy & HA_ERATT ? 0 : 1; 3887 } 3888 3889 /** 3890 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA 3891 * @phba: Pointer to HBA context object. 3892 * 3893 * This function resets the HBA by writing HC_INITFF to the control 3894 * register. After the HBA resets, this function resets all the iocb ring 3895 * indices. This function disables PCI layer parity checking during 3896 * the reset. 3897 * This function returns 0 always. 3898 * The caller is not required to hold any locks. 
3899 **/ 3900 int 3901 lpfc_sli_brdreset(struct lpfc_hba *phba) 3902 { 3903 struct lpfc_sli *psli; 3904 struct lpfc_sli_ring *pring; 3905 uint16_t cfg_value; 3906 int i; 3907 3908 psli = &phba->sli; 3909 3910 /* Reset HBA */ 3911 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3912 "0325 Reset HBA Data: x%x x%x\n", 3913 phba->pport->port_state, psli->sli_flag); 3914 3915 /* perform board reset */ 3916 phba->fc_eventTag = 0; 3917 phba->link_events = 0; 3918 phba->pport->fc_myDID = 0; 3919 phba->pport->fc_prevDID = 0; 3920 3921 /* Turn off parity checking and serr during the physical reset */ 3922 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 3923 pci_write_config_word(phba->pcidev, PCI_COMMAND, 3924 (cfg_value & 3925 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3926 3927 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA); 3928 3929 /* Now toggle INITFF bit in the Host Control Register */ 3930 writel(HC_INITFF, phba->HCregaddr); 3931 mdelay(1); 3932 readl(phba->HCregaddr); /* flush */ 3933 writel(0, phba->HCregaddr); 3934 readl(phba->HCregaddr); /* flush */ 3935 3936 /* Restore PCI cmd register */ 3937 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 3938 3939 /* Initialize relevant SLI info */ 3940 for (i = 0; i < psli->num_rings; i++) { 3941 pring = &psli->ring[i]; 3942 pring->flag = 0; 3943 pring->sli.sli3.rspidx = 0; 3944 pring->sli.sli3.next_cmdidx = 0; 3945 pring->sli.sli3.local_getidx = 0; 3946 pring->sli.sli3.cmdidx = 0; 3947 pring->missbufcnt = 0; 3948 } 3949 3950 phba->link_state = LPFC_WARM_START; 3951 return 0; 3952 } 3953 3954 /** 3955 * lpfc_sli4_brdreset - Reset a sli-4 HBA 3956 * @phba: Pointer to HBA context object. 3957 * 3958 * This function resets a SLI4 HBA. This function disables PCI layer parity 3959 * checking during resets the device. The caller is not required to hold 3960 * any locks. 3961 * 3962 * This function returns 0 always. 3963 **/ 3964 int 3965 lpfc_sli4_brdreset(struct lpfc_hba *phba) 3966 { 3967 struct lpfc_sli *psli = &phba->sli; 3968 uint16_t cfg_value; 3969 int rc; 3970 3971 /* Reset HBA */ 3972 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3973 "0295 Reset HBA Data: x%x x%x\n", 3974 phba->pport->port_state, psli->sli_flag); 3975 3976 /* perform board reset */ 3977 phba->fc_eventTag = 0; 3978 phba->link_events = 0; 3979 phba->pport->fc_myDID = 0; 3980 phba->pport->fc_prevDID = 0; 3981 3982 spin_lock_irq(&phba->hbalock); 3983 psli->sli_flag &= ~(LPFC_PROCESS_LA); 3984 phba->fcf.fcf_flag = 0; 3985 spin_unlock_irq(&phba->hbalock); 3986 3987 /* Now physically reset the device */ 3988 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3989 "0389 Performing PCI function reset!\n"); 3990 3991 /* Turn off parity checking and serr during the physical reset */ 3992 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 3993 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & 3994 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3995 3996 /* Perform FCoE PCI function reset before freeing queue memory */ 3997 rc = lpfc_pci_function_reset(phba); 3998 lpfc_sli4_queue_destroy(phba); 3999 4000 /* Restore PCI cmd register */ 4001 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 4002 4003 return rc; 4004 } 4005 4006 /** 4007 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba 4008 * @phba: Pointer to HBA context object. 4009 * 4010 * This function is called in the SLI initialization code path to 4011 * restart the HBA. The caller is not required to hold any lock. 
4012 * This function writes MBX_RESTART mailbox command to the SLIM and 4013 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post 4014 * function to free any pending commands. The function enables 4015 * POST only during the first initialization. The function returns zero. 4016 * The function does not guarantee completion of MBX_RESTART mailbox 4017 * command before the return of this function. 4018 **/ 4019 static int 4020 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) 4021 { 4022 MAILBOX_t *mb; 4023 struct lpfc_sli *psli; 4024 volatile uint32_t word0; 4025 void __iomem *to_slim; 4026 uint32_t hba_aer_enabled; 4027 4028 spin_lock_irq(&phba->hbalock); 4029 4030 /* Take PCIe device Advanced Error Reporting (AER) state */ 4031 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 4032 4033 psli = &phba->sli; 4034 4035 /* Restart HBA */ 4036 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4037 "0337 Restart HBA Data: x%x x%x\n", 4038 phba->pport->port_state, psli->sli_flag); 4039 4040 word0 = 0; 4041 mb = (MAILBOX_t *) &word0; 4042 mb->mbxCommand = MBX_RESTART; 4043 mb->mbxHc = 1; 4044 4045 lpfc_reset_barrier(phba); 4046 4047 to_slim = phba->MBslimaddr; 4048 writel(*(uint32_t *) mb, to_slim); 4049 readl(to_slim); /* flush */ 4050 4051 /* Only skip post after fc_ffinit is completed */ 4052 if (phba->pport->port_state) 4053 word0 = 1; /* This is really setting up word1 */ 4054 else 4055 word0 = 0; /* This is really setting up word1 */ 4056 to_slim = phba->MBslimaddr + sizeof (uint32_t); 4057 writel(*(uint32_t *) mb, to_slim); 4058 readl(to_slim); /* flush */ 4059 4060 lpfc_sli_brdreset(phba); 4061 phba->pport->stopped = 0; 4062 phba->link_state = LPFC_INIT_START; 4063 phba->hba_flag = 0; 4064 spin_unlock_irq(&phba->hbalock); 4065 4066 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 4067 psli->stats_start = get_seconds(); 4068 4069 /* Give the INITFF and Post time to settle. */ 4070 mdelay(100); 4071 4072 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 4073 if (hba_aer_enabled) 4074 pci_disable_pcie_error_reporting(phba->pcidev); 4075 4076 lpfc_hba_down_post(phba); 4077 4078 return 0; 4079 } 4080 4081 /** 4082 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba 4083 * @phba: Pointer to HBA context object. 4084 * 4085 * This function is called in the SLI initialization code path to restart 4086 * a SLI4 HBA. The caller is not required to hold any lock. 4087 * At the end of the function, it calls lpfc_hba_down_post function to 4088 * free any pending commands. 
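 *
 * Callers normally reach this routine through the lpfc_sli_brdrestart()
 * wrapper below rather than by calling it directly. A hedged sketch of the
 * dispatch (the jump-table assignment is assumed to be made during SLI API
 * table setup, outside this section):
 *
 *     phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
 *     ...
 *     rc = lpfc_sli_brdrestart(phba);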
4089 **/ 4090 static int 4091 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) 4092 { 4093 struct lpfc_sli *psli = &phba->sli; 4094 uint32_t hba_aer_enabled; 4095 int rc; 4096 4097 /* Restart HBA */ 4098 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4099 "0296 Restart HBA Data: x%x x%x\n", 4100 phba->pport->port_state, psli->sli_flag); 4101 4102 /* Take PCIe device Advanced Error Reporting (AER) state */ 4103 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 4104 4105 rc = lpfc_sli4_brdreset(phba); 4106 4107 spin_lock_irq(&phba->hbalock); 4108 phba->pport->stopped = 0; 4109 phba->link_state = LPFC_INIT_START; 4110 phba->hba_flag = 0; 4111 spin_unlock_irq(&phba->hbalock); 4112 4113 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 4114 psli->stats_start = get_seconds(); 4115 4116 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 4117 if (hba_aer_enabled) 4118 pci_disable_pcie_error_reporting(phba->pcidev); 4119 4120 lpfc_hba_down_post(phba); 4121 4122 return rc; 4123 } 4124 4125 /** 4126 * lpfc_sli_brdrestart - Wrapper func for restarting hba 4127 * @phba: Pointer to HBA context object. 4128 * 4129 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the 4130 * API jump table function pointer from the lpfc_hba struct. 4131 **/ 4132 int 4133 lpfc_sli_brdrestart(struct lpfc_hba *phba) 4134 { 4135 return phba->lpfc_sli_brdrestart(phba); 4136 } 4137 4138 /** 4139 * lpfc_sli_chipset_init - Wait for the HBA to become ready after a restart 4140 * @phba: Pointer to HBA context object. 4141 * 4142 * This function is called after an HBA restart to wait for a successful 4143 * restart of the HBA. Successful restart of the HBA is indicated by the 4144 * HS_FFRDY and HS_MBRDY bits. If the HBA is still not ready after 150 4145 * polling iterations (roughly 60 seconds), the function restarts the HBA 4146 * again. The function returns zero if the HBA restarted successfully, else it returns a negative error code. 4147 **/ 4148 static int 4149 lpfc_sli_chipset_init(struct lpfc_hba *phba) 4150 { 4151 uint32_t status, i = 0; 4152 4153 /* Read the HBA Host Status Register */ 4154 if (lpfc_readl(phba->HSregaddr, &status)) 4155 return -EIO; 4156 4157 /* Check status register to see what current state is */ 4158 i = 0; 4159 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) { 4160 4161 /* Check every 10ms for 10 retries, then every 100ms for 90 4162 * retries, then every 1 sec for 50 retries, for a total of 4163 * ~60 seconds, before resetting the board again and checking 4164 * every 1 sec for another 50 retries. The up-to-60-second wait 4165 * before the board is ready is required for the Falcon FIPS 4166 * zeroization to complete; any board reset in between would 4167 * restart the zeroization and further delay the board becoming ready. */
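		/*
		 * (Worked check against the sleeps below: 10 x 10ms +
		 * 90 x 100ms + ~50 x 1s comes to roughly 60 seconds before
		 * the restart at iteration 150, followed by up to 50 more
		 * one-second polls before the 200-iteration timeout.)
		 */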
4168 */ 4169 if (i++ >= 200) { 4170 /* Adapter failed to init, timeout, status reg 4171 <status> */ 4172 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4173 "0436 Adapter failed to init, " 4174 "timeout, status reg x%x, " 4175 "FW Data: A8 x%x AC x%x\n", status, 4176 readl(phba->MBslimaddr + 0xa8), 4177 readl(phba->MBslimaddr + 0xac)); 4178 phba->link_state = LPFC_HBA_ERROR; 4179 return -ETIMEDOUT; 4180 } 4181 4182 /* Check to see if any errors occurred during init */ 4183 if (status & HS_FFERM) { 4184 /* ERROR: During chipset initialization */ 4185 /* Adapter failed to init, chipset, status reg 4186 <status> */ 4187 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4188 "0437 Adapter failed to init, " 4189 "chipset, status reg x%x, " 4190 "FW Data: A8 x%x AC x%x\n", status, 4191 readl(phba->MBslimaddr + 0xa8), 4192 readl(phba->MBslimaddr + 0xac)); 4193 phba->link_state = LPFC_HBA_ERROR; 4194 return -EIO; 4195 } 4196 4197 if (i <= 10) 4198 msleep(10); 4199 else if (i <= 100) 4200 msleep(100); 4201 else 4202 msleep(1000); 4203 4204 if (i == 150) { 4205 /* Do post */ 4206 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4207 lpfc_sli_brdrestart(phba); 4208 } 4209 /* Read the HBA Host Status Register */ 4210 if (lpfc_readl(phba->HSregaddr, &status)) 4211 return -EIO; 4212 } 4213 4214 /* Check to see if any errors occurred during init */ 4215 if (status & HS_FFERM) { 4216 /* ERROR: During chipset initialization */ 4217 /* Adapter failed to init, chipset, status reg <status> */ 4218 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4219 "0438 Adapter failed to init, chipset, " 4220 "status reg x%x, " 4221 "FW Data: A8 x%x AC x%x\n", status, 4222 readl(phba->MBslimaddr + 0xa8), 4223 readl(phba->MBslimaddr + 0xac)); 4224 phba->link_state = LPFC_HBA_ERROR; 4225 return -EIO; 4226 } 4227 4228 /* Clear all interrupt enable conditions */ 4229 writel(0, phba->HCregaddr); 4230 readl(phba->HCregaddr); /* flush */ 4231 4232 /* setup host attn register */ 4233 writel(0xffffffff, phba->HAregaddr); 4234 readl(phba->HAregaddr); /* flush */ 4235 return 0; 4236 } 4237 4238 /** 4239 * lpfc_sli_hbq_count - Get the number of HBQs to be configured 4240 * 4241 * This function calculates and returns the number of HBQs required to be 4242 * configured. 4243 **/ 4244 int 4245 lpfc_sli_hbq_count(void) 4246 { 4247 return ARRAY_SIZE(lpfc_hbq_defs); 4248 } 4249 4250 /** 4251 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries 4252 * 4253 * This function adds the number of hbq entries in every HBQ to get 4254 * the total number of hbq entries required for the HBA and returns 4255 * the total count. 4256 **/ 4257 static int 4258 lpfc_sli_hbq_entry_count(void) 4259 { 4260 int hbq_count = lpfc_sli_hbq_count(); 4261 int count = 0; 4262 int i; 4263 4264 for (i = 0; i < hbq_count; ++i) 4265 count += lpfc_hbq_defs[i]->entry_count; 4266 return count; 4267 } 4268 4269 /** 4270 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries 4271 * 4272 * This function calculates amount of memory required for all hbq entries 4273 * to be configured and returns the total memory required. 4274 **/ 4275 int 4276 lpfc_sli_hbq_size(void) 4277 { 4278 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 4279 } 4280 4281 /** 4282 * lpfc_sli_hbq_setup - configure and initialize HBQs 4283 * @phba: Pointer to HBA context object. 4284 * 4285 * This function is called during the SLI initialization to configure 4286 * all the HBQs and post buffers to the HBQ. The caller is not 4287 * required to hold any locks. 
This function will return zero if successful 4288 * else it will return negative error code. 4289 **/ 4290 static int 4291 lpfc_sli_hbq_setup(struct lpfc_hba *phba) 4292 { 4293 int hbq_count = lpfc_sli_hbq_count(); 4294 LPFC_MBOXQ_t *pmb; 4295 MAILBOX_t *pmbox; 4296 uint32_t hbqno; 4297 uint32_t hbq_entry_index; 4298 4299 /* Get a Mailbox buffer to setup mailbox 4300 * commands for HBA initialization 4301 */ 4302 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4303 4304 if (!pmb) 4305 return -ENOMEM; 4306 4307 pmbox = &pmb->u.mb; 4308 4309 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 4310 phba->link_state = LPFC_INIT_MBX_CMDS; 4311 phba->hbq_in_use = 1; 4312 4313 hbq_entry_index = 0; 4314 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 4315 phba->hbqs[hbqno].next_hbqPutIdx = 0; 4316 phba->hbqs[hbqno].hbqPutIdx = 0; 4317 phba->hbqs[hbqno].local_hbqGetIdx = 0; 4318 phba->hbqs[hbqno].entry_count = 4319 lpfc_hbq_defs[hbqno]->entry_count; 4320 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], 4321 hbq_entry_index, pmb); 4322 hbq_entry_index += phba->hbqs[hbqno].entry_count; 4323 4324 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 4325 /* Adapter failed to init, mbxCmd <cmd> CFG_RING, 4326 mbxStatus <status>, ring <num> */ 4327 4328 lpfc_printf_log(phba, KERN_ERR, 4329 LOG_SLI | LOG_VPORT, 4330 "1805 Adapter failed to init. " 4331 "Data: x%x x%x x%x\n", 4332 pmbox->mbxCommand, 4333 pmbox->mbxStatus, hbqno); 4334 4335 phba->link_state = LPFC_HBA_ERROR; 4336 mempool_free(pmb, phba->mbox_mem_pool); 4337 return -ENXIO; 4338 } 4339 } 4340 phba->hbq_count = hbq_count; 4341 4342 mempool_free(pmb, phba->mbox_mem_pool); 4343 4344 /* Initially populate or replenish the HBQs */ 4345 for (hbqno = 0; hbqno < hbq_count; ++hbqno) 4346 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno); 4347 return 0; 4348 } 4349 4350 /** 4351 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA 4352 * @phba: Pointer to HBA context object. 4353 * 4354 * This function is called during the SLI initialization to configure 4355 * all the HBQs and post buffers to the HBQ. The caller is not 4356 * required to hold any locks. This function will return zero if successful 4357 * else it will return negative error code. 4358 **/ 4359 static int 4360 lpfc_sli4_rb_setup(struct lpfc_hba *phba) 4361 { 4362 phba->hbq_in_use = 1; 4363 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count; 4364 phba->hbq_count = 1; 4365 /* Initially populate or replenish the HBQs */ 4366 lpfc_sli_hbqbuf_init_hbqs(phba, 0); 4367 return 0; 4368 } 4369 4370 /** 4371 * lpfc_sli_config_port - Issue config port mailbox command 4372 * @phba: Pointer to HBA context object. 4373 * @sli_mode: sli mode - 2/3 4374 * 4375 * This function is called by the sli intialization code path 4376 * to issue config_port mailbox command. This function restarts the 4377 * HBA firmware and issues a config_port mailbox command to configure 4378 * the SLI interface in the sli mode specified by sli_mode 4379 * variable. The caller is not required to hold any locks. 4380 * The function returns 0 if successful, else returns negative error 4381 * code. 
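 *
 * A usage sketch mirroring (in simplified form) lpfc_sli_hba_setup() later
 * in this file; illustrative only:
 *
 *     rc = lpfc_sli_config_port(phba, 3);
 *     if (rc)
 *         rc = lpfc_sli_config_port(phba, 2);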
4382 **/ 4383 int 4384 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) 4385 { 4386 LPFC_MBOXQ_t *pmb; 4387 uint32_t resetcount = 0, rc = 0, done = 0; 4388 4389 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4390 if (!pmb) { 4391 phba->link_state = LPFC_HBA_ERROR; 4392 return -ENOMEM; 4393 } 4394 4395 phba->sli_rev = sli_mode; 4396 while (resetcount < 2 && !done) { 4397 spin_lock_irq(&phba->hbalock); 4398 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 4399 spin_unlock_irq(&phba->hbalock); 4400 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4401 lpfc_sli_brdrestart(phba); 4402 rc = lpfc_sli_chipset_init(phba); 4403 if (rc) 4404 break; 4405 4406 spin_lock_irq(&phba->hbalock); 4407 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4408 spin_unlock_irq(&phba->hbalock); 4409 resetcount++; 4410 4411 /* Call pre CONFIG_PORT mailbox command initialization. A 4412 * value of 0 means the call was successful. Any other 4413 * nonzero value is a failure, but if ERESTART is returned, 4414 * the driver may reset the HBA and try again. 4415 */ 4416 rc = lpfc_config_port_prep(phba); 4417 if (rc == -ERESTART) { 4418 phba->link_state = LPFC_LINK_UNKNOWN; 4419 continue; 4420 } else if (rc) 4421 break; 4422 4423 phba->link_state = LPFC_INIT_MBX_CMDS; 4424 lpfc_config_port(phba, pmb); 4425 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 4426 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 4427 LPFC_SLI3_HBQ_ENABLED | 4428 LPFC_SLI3_CRP_ENABLED | 4429 LPFC_SLI3_BG_ENABLED | 4430 LPFC_SLI3_DSS_ENABLED); 4431 if (rc != MBX_SUCCESS) { 4432 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4433 "0442 Adapter failed to init, mbxCmd x%x " 4434 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 4435 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); 4436 spin_lock_irq(&phba->hbalock); 4437 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 4438 spin_unlock_irq(&phba->hbalock); 4439 rc = -ENXIO; 4440 } else { 4441 /* Allow asynchronous mailbox command to go through */ 4442 spin_lock_irq(&phba->hbalock); 4443 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 4444 spin_unlock_irq(&phba->hbalock); 4445 done = 1; 4446 4447 if ((pmb->u.mb.un.varCfgPort.casabt == 1) && 4448 (pmb->u.mb.un.varCfgPort.gasabt == 0)) 4449 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4450 "3110 Port did not grant ASABT\n"); 4451 } 4452 } 4453 if (!done) { 4454 rc = -EINVAL; 4455 goto do_prep_failed; 4456 } 4457 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { 4458 if (!pmb->u.mb.un.varCfgPort.cMA) { 4459 rc = -ENXIO; 4460 goto do_prep_failed; 4461 } 4462 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { 4463 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 4464 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; 4465 phba->max_vports = (phba->max_vpi > phba->max_vports) ? 4466 phba->max_vpi : phba->max_vports; 4467 4468 } else 4469 phba->max_vpi = 0; 4470 phba->fips_level = 0; 4471 phba->fips_spec_rev = 0; 4472 if (pmb->u.mb.un.varCfgPort.gdss) { 4473 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; 4474 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level; 4475 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev; 4476 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4477 "2850 Security Crypto Active. 
FIPS x%d " 4478 "(Spec Rev: x%d)", 4479 phba->fips_level, phba->fips_spec_rev); 4480 } 4481 if (pmb->u.mb.un.varCfgPort.sec_err) { 4482 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4483 "2856 Config Port Security Crypto " 4484 "Error: x%x ", 4485 pmb->u.mb.un.varCfgPort.sec_err); 4486 } 4487 if (pmb->u.mb.un.varCfgPort.gerbm) 4488 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 4489 if (pmb->u.mb.un.varCfgPort.gcrp) 4490 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 4491 4492 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; 4493 phba->port_gp = phba->mbox->us.s3_pgp.port; 4494 4495 if (phba->cfg_enable_bg) { 4496 if (pmb->u.mb.un.varCfgPort.gbg) 4497 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 4498 else 4499 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4500 "0443 Adapter did not grant " 4501 "BlockGuard\n"); 4502 } 4503 } else { 4504 phba->hbq_get = NULL; 4505 phba->port_gp = phba->mbox->us.s2.port; 4506 phba->max_vpi = 0; 4507 } 4508 do_prep_failed: 4509 mempool_free(pmb, phba->mbox_mem_pool); 4510 return rc; 4511 } 4512 4513 4514 /** 4515 * lpfc_sli_hba_setup - SLI intialization function 4516 * @phba: Pointer to HBA context object. 4517 * 4518 * This function is the main SLI intialization function. This function 4519 * is called by the HBA intialization code, HBA reset code and HBA 4520 * error attention handler code. Caller is not required to hold any 4521 * locks. This function issues config_port mailbox command to configure 4522 * the SLI, setup iocb rings and HBQ rings. In the end the function 4523 * calls the config_port_post function to issue init_link mailbox 4524 * command and to start the discovery. The function will return zero 4525 * if successful, else it will return negative error code. 4526 **/ 4527 int 4528 lpfc_sli_hba_setup(struct lpfc_hba *phba) 4529 { 4530 uint32_t rc; 4531 int mode = 3, i; 4532 int longs; 4533 4534 switch (lpfc_sli_mode) { 4535 case 2: 4536 if (phba->cfg_enable_npiv) { 4537 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4538 "1824 NPIV enabled: Override lpfc_sli_mode " 4539 "parameter (%d) to auto (0).\n", 4540 lpfc_sli_mode); 4541 break; 4542 } 4543 mode = 2; 4544 break; 4545 case 0: 4546 case 3: 4547 break; 4548 default: 4549 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4550 "1819 Unrecognized lpfc_sli_mode " 4551 "parameter: %d.\n", lpfc_sli_mode); 4552 4553 break; 4554 } 4555 4556 rc = lpfc_sli_config_port(phba, mode); 4557 4558 if (rc && lpfc_sli_mode == 3) 4559 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4560 "1820 Unable to select SLI-3. 
" 4561 "Not supported by adapter.\n"); 4562 if (rc && mode != 2) 4563 rc = lpfc_sli_config_port(phba, 2); 4564 if (rc) 4565 goto lpfc_sli_hba_setup_error; 4566 4567 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 4568 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 4569 rc = pci_enable_pcie_error_reporting(phba->pcidev); 4570 if (!rc) { 4571 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4572 "2709 This device supports " 4573 "Advanced Error Reporting (AER)\n"); 4574 spin_lock_irq(&phba->hbalock); 4575 phba->hba_flag |= HBA_AER_ENABLED; 4576 spin_unlock_irq(&phba->hbalock); 4577 } else { 4578 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4579 "2708 This device does not support " 4580 "Advanced Error Reporting (AER): %d\n", 4581 rc); 4582 phba->cfg_aer_support = 0; 4583 } 4584 } 4585 4586 if (phba->sli_rev == 3) { 4587 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 4588 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 4589 } else { 4590 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 4591 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 4592 phba->sli3_options = 0; 4593 } 4594 4595 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4596 "0444 Firmware in SLI %x mode. Max_vpi %d\n", 4597 phba->sli_rev, phba->max_vpi); 4598 rc = lpfc_sli_ring_map(phba); 4599 4600 if (rc) 4601 goto lpfc_sli_hba_setup_error; 4602 4603 /* Initialize VPIs. */ 4604 if (phba->sli_rev == LPFC_SLI_REV3) { 4605 /* 4606 * The VPI bitmask and physical ID array are allocated 4607 * and initialized once only - at driver load. A port 4608 * reset doesn't need to reinitialize this memory. 4609 */ 4610 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) { 4611 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG; 4612 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), 4613 GFP_KERNEL); 4614 if (!phba->vpi_bmask) { 4615 rc = -ENOMEM; 4616 goto lpfc_sli_hba_setup_error; 4617 } 4618 4619 phba->vpi_ids = kzalloc( 4620 (phba->max_vpi+1) * sizeof(uint16_t), 4621 GFP_KERNEL); 4622 if (!phba->vpi_ids) { 4623 kfree(phba->vpi_bmask); 4624 rc = -ENOMEM; 4625 goto lpfc_sli_hba_setup_error; 4626 } 4627 for (i = 0; i < phba->max_vpi; i++) 4628 phba->vpi_ids[i] = i; 4629 } 4630 } 4631 4632 /* Init HBQs */ 4633 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 4634 rc = lpfc_sli_hbq_setup(phba); 4635 if (rc) 4636 goto lpfc_sli_hba_setup_error; 4637 } 4638 spin_lock_irq(&phba->hbalock); 4639 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4640 spin_unlock_irq(&phba->hbalock); 4641 4642 rc = lpfc_config_port_post(phba); 4643 if (rc) 4644 goto lpfc_sli_hba_setup_error; 4645 4646 return rc; 4647 4648 lpfc_sli_hba_setup_error: 4649 phba->link_state = LPFC_HBA_ERROR; 4650 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4651 "0445 Firmware initialization failed\n"); 4652 return rc; 4653 } 4654 4655 /** 4656 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region 4657 * @phba: Pointer to HBA context object. 4658 * @mboxq: mailbox pointer. 4659 * This function issue a dump mailbox command to read config region 4660 * 23 and parse the records in the region and populate driver 4661 * data structure. 
4662 **/ 4663 static int 4664 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba) 4665 { 4666 LPFC_MBOXQ_t *mboxq; 4667 struct lpfc_dmabuf *mp; 4668 struct lpfc_mqe *mqe; 4669 uint32_t data_length; 4670 int rc; 4671 4672 /* Program the default value of vlan_id and fc_map */ 4673 phba->valid_vlan = 0; 4674 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 4675 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 4676 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 4677 4678 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4679 if (!mboxq) 4680 return -ENOMEM; 4681 4682 mqe = &mboxq->u.mqe; 4683 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) { 4684 rc = -ENOMEM; 4685 goto out_free_mboxq; 4686 } 4687 4688 mp = (struct lpfc_dmabuf *) mboxq->context1; 4689 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4690 4691 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 4692 "(%d):2571 Mailbox cmd x%x Status x%x " 4693 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 4694 "x%x x%x x%x x%x x%x x%x x%x x%x x%x " 4695 "CQ: x%x x%x x%x x%x\n", 4696 mboxq->vport ? mboxq->vport->vpi : 0, 4697 bf_get(lpfc_mqe_command, mqe), 4698 bf_get(lpfc_mqe_status, mqe), 4699 mqe->un.mb_words[0], mqe->un.mb_words[1], 4700 mqe->un.mb_words[2], mqe->un.mb_words[3], 4701 mqe->un.mb_words[4], mqe->un.mb_words[5], 4702 mqe->un.mb_words[6], mqe->un.mb_words[7], 4703 mqe->un.mb_words[8], mqe->un.mb_words[9], 4704 mqe->un.mb_words[10], mqe->un.mb_words[11], 4705 mqe->un.mb_words[12], mqe->un.mb_words[13], 4706 mqe->un.mb_words[14], mqe->un.mb_words[15], 4707 mqe->un.mb_words[16], mqe->un.mb_words[50], 4708 mboxq->mcqe.word0, 4709 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 4710 mboxq->mcqe.trailer); 4711 4712 if (rc) { 4713 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4714 kfree(mp); 4715 rc = -EIO; 4716 goto out_free_mboxq; 4717 } 4718 data_length = mqe->un.mb_words[5]; 4719 if (data_length > DMP_RGN23_SIZE) { 4720 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4721 kfree(mp); 4722 rc = -EIO; 4723 goto out_free_mboxq; 4724 } 4725 4726 lpfc_parse_fcoe_conf(phba, mp->virt, data_length); 4727 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4728 kfree(mp); 4729 rc = 0; 4730 4731 out_free_mboxq: 4732 mempool_free(mboxq, phba->mbox_mem_pool); 4733 return rc; 4734 } 4735 4736 /** 4737 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data 4738 * @phba: pointer to lpfc hba data structure. 4739 * @mboxq: pointer to the LPFC_MBOXQ_t structure. 4740 * @vpd: pointer to the memory to hold resulting port vpd data. 4741 * @vpd_size: On input, the number of bytes allocated to @vpd. 4742 * On output, the number of data bytes in @vpd. 4743 * 4744 * This routine executes a READ_REV SLI4 mailbox command. In 4745 * addition, this routine gets the port vpd data. 4746 * 4747 * Return codes 4748 * 0 - successful 4749 * -ENOMEM - could not allocated memory. 4750 **/ 4751 static int 4752 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 4753 uint8_t *vpd, uint32_t *vpd_size) 4754 { 4755 int rc = 0; 4756 uint32_t dma_size; 4757 struct lpfc_dmabuf *dmabuf; 4758 struct lpfc_mqe *mqe; 4759 4760 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4761 if (!dmabuf) 4762 return -ENOMEM; 4763 4764 /* 4765 * Get a DMA buffer for the vpd data resulting from the READ_REV 4766 * mailbox command. 
4767 */ 4768 dma_size = *vpd_size; 4769 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 4770 dma_size, 4771 &dmabuf->phys, 4772 GFP_KERNEL); 4773 if (!dmabuf->virt) { 4774 kfree(dmabuf); 4775 return -ENOMEM; 4776 } 4777 memset(dmabuf->virt, 0, dma_size); 4778 4779 /* 4780 * The SLI4 implementation of READ_REV conflicts at word1, 4781 * bits 31:16 and SLI4 adds vpd functionality not present 4782 * in SLI3. This code corrects the conflicts. 4783 */ 4784 lpfc_read_rev(phba, mboxq); 4785 mqe = &mboxq->u.mqe; 4786 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); 4787 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); 4788 mqe->un.read_rev.word1 &= 0x0000FFFF; 4789 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); 4790 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); 4791 4792 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4793 if (rc) { 4794 dma_free_coherent(&phba->pcidev->dev, dma_size, 4795 dmabuf->virt, dmabuf->phys); 4796 kfree(dmabuf); 4797 return -EIO; 4798 } 4799 4800 /* 4801 * The available vpd length cannot be bigger than the 4802 * DMA buffer passed to the port. Catch the less than 4803 * case and update the caller's size. 4804 */ 4805 if (mqe->un.read_rev.avail_vpd_len < *vpd_size) 4806 *vpd_size = mqe->un.read_rev.avail_vpd_len; 4807 4808 memcpy(vpd, dmabuf->virt, *vpd_size); 4809 4810 dma_free_coherent(&phba->pcidev->dev, dma_size, 4811 dmabuf->virt, dmabuf->phys); 4812 kfree(dmabuf); 4813 return 0; 4814 } 4815 4816 /** 4817 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name 4818 * @phba: pointer to lpfc hba data structure. 4819 * 4820 * This routine retrieves SLI4 device physical port name this PCI function 4821 * is attached to. 4822 * 4823 * Return codes 4824 * 0 - successful 4825 * otherwise - failed to retrieve physical port name 4826 **/ 4827 static int 4828 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba) 4829 { 4830 LPFC_MBOXQ_t *mboxq; 4831 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr; 4832 struct lpfc_controller_attribute *cntl_attr; 4833 struct lpfc_mbx_get_port_name *get_port_name; 4834 void *virtaddr = NULL; 4835 uint32_t alloclen, reqlen; 4836 uint32_t shdr_status, shdr_add_status; 4837 union lpfc_sli4_cfg_shdr *shdr; 4838 char cport_name = 0; 4839 int rc; 4840 4841 /* We assume nothing at this point */ 4842 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 4843 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON; 4844 4845 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4846 if (!mboxq) 4847 return -ENOMEM; 4848 /* obtain link type and link number via READ_CONFIG */ 4849 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 4850 lpfc_sli4_read_config(phba); 4851 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) 4852 goto retrieve_ppname; 4853 4854 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */ 4855 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes); 4856 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 4857 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen, 4858 LPFC_SLI4_MBX_NEMBED); 4859 if (alloclen < reqlen) { 4860 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4861 "3084 Allocated DMA memory size (%d) is " 4862 "less than the requested DMA memory size " 4863 "(%d)\n", alloclen, reqlen); 4864 rc = -ENOMEM; 4865 goto out_free_mboxq; 4866 } 4867 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4868 virtaddr = mboxq->sge_array->addr[0]; 4869 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr; 4870 shdr = 
&mbx_cntl_attr->cfg_shdr; 4871 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 4872 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 4873 if (shdr_status || shdr_add_status || rc) { 4874 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 4875 "3085 Mailbox x%x (x%x/x%x) failed, " 4876 "rc:x%x, status:x%x, add_status:x%x\n", 4877 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4878 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 4879 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 4880 rc, shdr_status, shdr_add_status); 4881 rc = -ENXIO; 4882 goto out_free_mboxq; 4883 } 4884 cntl_attr = &mbx_cntl_attr->cntl_attr; 4885 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 4886 phba->sli4_hba.lnk_info.lnk_tp = 4887 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr); 4888 phba->sli4_hba.lnk_info.lnk_no = 4889 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr); 4890 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4891 "3086 lnk_type:%d, lnk_numb:%d\n", 4892 phba->sli4_hba.lnk_info.lnk_tp, 4893 phba->sli4_hba.lnk_info.lnk_no); 4894 4895 retrieve_ppname: 4896 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 4897 LPFC_MBOX_OPCODE_GET_PORT_NAME, 4898 sizeof(struct lpfc_mbx_get_port_name) - 4899 sizeof(struct lpfc_sli4_cfg_mhdr), 4900 LPFC_SLI4_MBX_EMBED); 4901 get_port_name = &mboxq->u.mqe.un.get_port_name; 4902 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr; 4903 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1); 4904 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request, 4905 phba->sli4_hba.lnk_info.lnk_tp); 4906 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4907 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 4908 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 4909 if (shdr_status || shdr_add_status || rc) { 4910 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 4911 "3087 Mailbox x%x (x%x/x%x) failed: " 4912 "rc:x%x, status:x%x, add_status:x%x\n", 4913 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4914 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 4915 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 4916 rc, shdr_status, shdr_add_status); 4917 rc = -ENXIO; 4918 goto out_free_mboxq; 4919 } 4920 switch (phba->sli4_hba.lnk_info.lnk_no) { 4921 case LPFC_LINK_NUMBER_0: 4922 cport_name = bf_get(lpfc_mbx_get_port_name_name0, 4923 &get_port_name->u.response); 4924 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 4925 break; 4926 case LPFC_LINK_NUMBER_1: 4927 cport_name = bf_get(lpfc_mbx_get_port_name_name1, 4928 &get_port_name->u.response); 4929 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 4930 break; 4931 case LPFC_LINK_NUMBER_2: 4932 cport_name = bf_get(lpfc_mbx_get_port_name_name2, 4933 &get_port_name->u.response); 4934 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 4935 break; 4936 case LPFC_LINK_NUMBER_3: 4937 cport_name = bf_get(lpfc_mbx_get_port_name_name3, 4938 &get_port_name->u.response); 4939 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 4940 break; 4941 default: 4942 break; 4943 } 4944 4945 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) { 4946 phba->Port[0] = cport_name; 4947 phba->Port[1] = '\0'; 4948 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4949 "3091 SLI get port name: %s\n", phba->Port); 4950 } 4951 4952 out_free_mboxq: 4953 if (rc != MBX_TIMEOUT) { 4954 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) 4955 lpfc_sli4_mbox_cmd_free(phba, mboxq); 4956 else 4957 mempool_free(mboxq, phba->mbox_mem_pool); 4958 } 4959 return rc; 4960 } 4961 4962 /** 4963 * 
lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues 4964 * @phba: pointer to lpfc hba data structure. 4965 * 4966 * This routine is called to explicitly arm the SLI4 device's completion and 4967 * event queues 4968 **/ 4969 static void 4970 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) 4971 { 4972 int fcp_eqidx; 4973 4974 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); 4975 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); 4976 fcp_eqidx = 0; 4977 if (phba->sli4_hba.fcp_cq) { 4978 do { 4979 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], 4980 LPFC_QUEUE_REARM); 4981 } while (++fcp_eqidx < phba->cfg_fcp_io_channel); 4982 } 4983 4984 if (phba->cfg_EnableXLane) 4985 lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM); 4986 4987 if (phba->sli4_hba.hba_eq) { 4988 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; 4989 fcp_eqidx++) 4990 lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx], 4991 LPFC_QUEUE_REARM); 4992 } 4993 4994 if (phba->cfg_fof) 4995 lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM); 4996 } 4997 4998 /** 4999 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count. 5000 * @phba: Pointer to HBA context object. 5001 * @type: The resource extent type. 5002 * @extnt_count: buffer to hold port available extent count. 5003 * @extnt_size: buffer to hold element count per extent. 5004 * 5005 * This function calls the port and retrievs the number of available 5006 * extents and their size for a particular extent type. 5007 * 5008 * Returns: 0 if successful. Nonzero otherwise. 5009 **/ 5010 int 5011 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type, 5012 uint16_t *extnt_count, uint16_t *extnt_size) 5013 { 5014 int rc = 0; 5015 uint32_t length; 5016 uint32_t mbox_tmo; 5017 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info; 5018 LPFC_MBOXQ_t *mbox; 5019 5020 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5021 if (!mbox) 5022 return -ENOMEM; 5023 5024 /* Find out how many extents are available for this resource type */ 5025 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) - 5026 sizeof(struct lpfc_sli4_cfg_mhdr)); 5027 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5028 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO, 5029 length, LPFC_SLI4_MBX_EMBED); 5030 5031 /* Send an extents count of 0 - the GET doesn't use it. 
*/ 5032 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 5033 LPFC_SLI4_MBX_EMBED); 5034 if (unlikely(rc)) { 5035 rc = -EIO; 5036 goto err_exit; 5037 } 5038 5039 if (!phba->sli4_hba.intr_enable) 5040 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5041 else { 5042 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5043 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5044 } 5045 if (unlikely(rc)) { 5046 rc = -EIO; 5047 goto err_exit; 5048 } 5049 5050 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info; 5051 if (bf_get(lpfc_mbox_hdr_status, 5052 &rsrc_info->header.cfg_shdr.response)) { 5053 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5054 "2930 Failed to get resource extents " 5055 "Status 0x%x Add'l Status 0x%x\n", 5056 bf_get(lpfc_mbox_hdr_status, 5057 &rsrc_info->header.cfg_shdr.response), 5058 bf_get(lpfc_mbox_hdr_add_status, 5059 &rsrc_info->header.cfg_shdr.response)); 5060 rc = -EIO; 5061 goto err_exit; 5062 } 5063 5064 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt, 5065 &rsrc_info->u.rsp); 5066 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size, 5067 &rsrc_info->u.rsp); 5068 5069 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5070 "3162 Retrieved extents type-%d from port: count:%d, " 5071 "size:%d\n", type, *extnt_count, *extnt_size); 5072 5073 err_exit: 5074 mempool_free(mbox, phba->mbox_mem_pool); 5075 return rc; 5076 } 5077 5078 /** 5079 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents. 5080 * @phba: Pointer to HBA context object. 5081 * @type: The extent type to check. 5082 * 5083 * This function reads the current available extents from the port and checks 5084 * if the extent count or extent size has changed since the last access. 5085 * Callers use this routine post port reset to understand if there is an 5086 * extent reprovisioning requirement. 5087 * 5088 * Returns: 5089 * -Error: error value indicates a problem. 5090 * 1: Extent count or size has changed. 5091 * 0: No changes. 5092 **/ 5093 static int 5094 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type) 5095 { 5096 uint16_t curr_ext_cnt, rsrc_ext_cnt; 5097 uint16_t size_diff, rsrc_ext_size; 5098 int rc = 0; 5099 struct lpfc_rsrc_blks *rsrc_entry; 5100 struct list_head *rsrc_blk_list = NULL; 5101 5102 size_diff = 0; 5103 curr_ext_cnt = 0; 5104 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5105 &rsrc_ext_cnt, 5106 &rsrc_ext_size); 5107 if (unlikely(rc)) 5108 return -EIO; 5109 5110 switch (type) { 5111 case LPFC_RSC_TYPE_FCOE_RPI: 5112 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5113 break; 5114 case LPFC_RSC_TYPE_FCOE_VPI: 5115 rsrc_blk_list = &phba->lpfc_vpi_blk_list; 5116 break; 5117 case LPFC_RSC_TYPE_FCOE_XRI: 5118 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5119 break; 5120 case LPFC_RSC_TYPE_FCOE_VFI: 5121 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5122 break; 5123 default: 5124 break; 5125 } 5126 5127 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) { 5128 curr_ext_cnt++; 5129 if (rsrc_entry->rsrc_size != rsrc_ext_size) 5130 size_diff++; 5131 } 5132 5133 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0) 5134 rc = 1; 5135 5136 return rc; 5137 } 5138 5139 /** 5140 * lpfc_sli4_cfg_post_extnts - Issue the resource extent allocation request 5141 * @phba: Pointer to HBA context object. 5142 * @extnt_cnt: number of available extents. 5143 * @type: the extent type (rpi, xri, vfi, vpi). 5144 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation. 5145 * @mbox: pointer to the caller's allocated mailbox structure. 
5146 * 5147 * This function executes the extents allocation request. It also 5148 * takes care of the amount of memory needed to allocate or get the 5149 * allocated extents. It is the caller's responsibility to evaluate 5150 * the response. 5151 * 5152 * Returns: 5153 * -Error: Error value describes the condition found. 5154 * 0: if successful 5155 **/ 5156 static int 5157 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt, 5158 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox) 5159 { 5160 int rc = 0; 5161 uint32_t req_len; 5162 uint32_t emb_len; 5163 uint32_t alloc_len, mbox_tmo; 5164 5165 /* Calculate the total requested length of the dma memory */ 5166 req_len = extnt_cnt * sizeof(uint16_t); 5167 5168 /* 5169 * Calculate the size of an embedded mailbox. The uint32_t 5170 * accounts for extents-specific word. 5171 */ 5172 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 5173 sizeof(uint32_t); 5174 5175 /* 5176 * Presume the allocation and response will fit into an embedded 5177 * mailbox. If not true, reconfigure to a non-embedded mailbox. 5178 */ 5179 *emb = LPFC_SLI4_MBX_EMBED; 5180 if (req_len > emb_len) { 5181 req_len = extnt_cnt * sizeof(uint16_t) + 5182 sizeof(union lpfc_sli4_cfg_shdr) + 5183 sizeof(uint32_t); 5184 *emb = LPFC_SLI4_MBX_NEMBED; 5185 } 5186 5187 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5188 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT, 5189 req_len, *emb); 5190 if (alloc_len < req_len) { 5191 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5192 "2982 Allocated DMA memory size (x%x) is " 5193 "less than the requested DMA memory " 5194 "size (x%x)\n", alloc_len, req_len); 5195 return -ENOMEM; 5196 } 5197 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb); 5198 if (unlikely(rc)) 5199 return -EIO; 5200 5201 if (!phba->sli4_hba.intr_enable) 5202 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5203 else { 5204 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5205 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5206 } 5207 5208 if (unlikely(rc)) 5209 rc = -EIO; 5210 return rc; 5211 } 5212 5213 /** 5214 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent. 5215 * @phba: Pointer to HBA context object. 5216 * @type: The resource extent type to allocate. 5217 * 5218 * This function allocates the number of elements for the specified 5219 * resource type. 
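 *
 * A hedged usage sketch (illustrative only), using one of the resource
 * types handled in the switch below:
 *
 *     rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
 *     if (rc)
 *         return rc;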
5220 **/ 5221 static int 5222 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) 5223 { 5224 bool emb = false; 5225 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size; 5226 uint16_t rsrc_id, rsrc_start, j, k; 5227 uint16_t *ids; 5228 int i, rc; 5229 unsigned long longs; 5230 unsigned long *bmask; 5231 struct lpfc_rsrc_blks *rsrc_blks; 5232 LPFC_MBOXQ_t *mbox; 5233 uint32_t length; 5234 struct lpfc_id_range *id_array = NULL; 5235 void *virtaddr = NULL; 5236 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 5237 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 5238 struct list_head *ext_blk_list; 5239 5240 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5241 &rsrc_cnt, 5242 &rsrc_size); 5243 if (unlikely(rc)) 5244 return -EIO; 5245 5246 if ((rsrc_cnt == 0) || (rsrc_size == 0)) { 5247 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5248 "3009 No available Resource Extents " 5249 "for resource type 0x%x: Count: 0x%x, " 5250 "Size 0x%x\n", type, rsrc_cnt, 5251 rsrc_size); 5252 return -ENOMEM; 5253 } 5254 5255 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI, 5256 "2903 Post resource extents type-0x%x: " 5257 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size); 5258 5259 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5260 if (!mbox) 5261 return -ENOMEM; 5262 5263 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox); 5264 if (unlikely(rc)) { 5265 rc = -EIO; 5266 goto err_exit; 5267 } 5268 5269 /* 5270 * Figure out where the response is located. Then get local pointers 5271 * to the response data. The port does not guarantee to respond to 5272 * all extents counts request so update the local variable with the 5273 * allocated count from the port. 5274 */ 5275 if (emb == LPFC_SLI4_MBX_EMBED) { 5276 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 5277 id_array = &rsrc_ext->u.rsp.id[0]; 5278 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 5279 } else { 5280 virtaddr = mbox->sge_array->addr[0]; 5281 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 5282 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 5283 id_array = &n_rsrc->id; 5284 } 5285 5286 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG; 5287 rsrc_id_cnt = rsrc_cnt * rsrc_size; 5288 5289 /* 5290 * Based on the resource size and count, correct the base and max 5291 * resource values. 5292 */ 5293 length = sizeof(struct lpfc_rsrc_blks); 5294 switch (type) { 5295 case LPFC_RSC_TYPE_FCOE_RPI: 5296 phba->sli4_hba.rpi_bmask = kzalloc(longs * 5297 sizeof(unsigned long), 5298 GFP_KERNEL); 5299 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 5300 rc = -ENOMEM; 5301 goto err_exit; 5302 } 5303 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt * 5304 sizeof(uint16_t), 5305 GFP_KERNEL); 5306 if (unlikely(!phba->sli4_hba.rpi_ids)) { 5307 kfree(phba->sli4_hba.rpi_bmask); 5308 rc = -ENOMEM; 5309 goto err_exit; 5310 } 5311 5312 /* 5313 * The next_rpi was initialized with the maximum available 5314 * count but the port may allocate a smaller number. Catch 5315 * that case and update the next_rpi. 5316 */ 5317 phba->sli4_hba.next_rpi = rsrc_id_cnt; 5318 5319 /* Initialize local ptrs for common extent processing later. 
*/ 5320 bmask = phba->sli4_hba.rpi_bmask; 5321 ids = phba->sli4_hba.rpi_ids; 5322 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5323 break; 5324 case LPFC_RSC_TYPE_FCOE_VPI: 5325 phba->vpi_bmask = kzalloc(longs * 5326 sizeof(unsigned long), 5327 GFP_KERNEL); 5328 if (unlikely(!phba->vpi_bmask)) { 5329 rc = -ENOMEM; 5330 goto err_exit; 5331 } 5332 phba->vpi_ids = kzalloc(rsrc_id_cnt * 5333 sizeof(uint16_t), 5334 GFP_KERNEL); 5335 if (unlikely(!phba->vpi_ids)) { 5336 kfree(phba->vpi_bmask); 5337 rc = -ENOMEM; 5338 goto err_exit; 5339 } 5340 5341 /* Initialize local ptrs for common extent processing later. */ 5342 bmask = phba->vpi_bmask; 5343 ids = phba->vpi_ids; 5344 ext_blk_list = &phba->lpfc_vpi_blk_list; 5345 break; 5346 case LPFC_RSC_TYPE_FCOE_XRI: 5347 phba->sli4_hba.xri_bmask = kzalloc(longs * 5348 sizeof(unsigned long), 5349 GFP_KERNEL); 5350 if (unlikely(!phba->sli4_hba.xri_bmask)) { 5351 rc = -ENOMEM; 5352 goto err_exit; 5353 } 5354 phba->sli4_hba.max_cfg_param.xri_used = 0; 5355 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt * 5356 sizeof(uint16_t), 5357 GFP_KERNEL); 5358 if (unlikely(!phba->sli4_hba.xri_ids)) { 5359 kfree(phba->sli4_hba.xri_bmask); 5360 rc = -ENOMEM; 5361 goto err_exit; 5362 } 5363 5364 /* Initialize local ptrs for common extent processing later. */ 5365 bmask = phba->sli4_hba.xri_bmask; 5366 ids = phba->sli4_hba.xri_ids; 5367 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5368 break; 5369 case LPFC_RSC_TYPE_FCOE_VFI: 5370 phba->sli4_hba.vfi_bmask = kzalloc(longs * 5371 sizeof(unsigned long), 5372 GFP_KERNEL); 5373 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 5374 rc = -ENOMEM; 5375 goto err_exit; 5376 } 5377 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt * 5378 sizeof(uint16_t), 5379 GFP_KERNEL); 5380 if (unlikely(!phba->sli4_hba.vfi_ids)) { 5381 kfree(phba->sli4_hba.vfi_bmask); 5382 rc = -ENOMEM; 5383 goto err_exit; 5384 } 5385 5386 /* Initialize local ptrs for common extent processing later. */ 5387 bmask = phba->sli4_hba.vfi_bmask; 5388 ids = phba->sli4_hba.vfi_ids; 5389 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5390 break; 5391 default: 5392 /* Unsupported Opcode. Fail call. */ 5393 id_array = NULL; 5394 bmask = NULL; 5395 ids = NULL; 5396 ext_blk_list = NULL; 5397 goto err_exit; 5398 } 5399 5400 /* 5401 * Complete initializing the extent configuration with the 5402 * allocated ids assigned to this function. The bitmask serves 5403 * as an index into the array and manages the available ids. The 5404 * array just stores the ids communicated to the port via the wqes. 5405 */ 5406 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) { 5407 if ((i % 2) == 0) 5408 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0, 5409 &id_array[k]); 5410 else 5411 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1, 5412 &id_array[k]); 5413 5414 rsrc_blks = kzalloc(length, GFP_KERNEL); 5415 if (unlikely(!rsrc_blks)) { 5416 rc = -ENOMEM; 5417 kfree(bmask); 5418 kfree(ids); 5419 goto err_exit; 5420 } 5421 rsrc_blks->rsrc_start = rsrc_id; 5422 rsrc_blks->rsrc_size = rsrc_size; 5423 list_add_tail(&rsrc_blks->list, ext_blk_list); 5424 rsrc_start = rsrc_id; 5425 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) 5426 phba->sli4_hba.scsi_xri_start = rsrc_start + 5427 lpfc_sli4_get_els_iocb_cnt(phba); 5428 5429 while (rsrc_id < (rsrc_start + rsrc_size)) { 5430 ids[j] = rsrc_id; 5431 rsrc_id++; 5432 j++; 5433 } 5434 /* Entire word processed. 
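Each id_array[] word carries two 16-bit extent base ids (the
lpfc_mbx_rsrc_id_word4_0 and lpfc_mbx_rsrc_id_word4_1 fields), so the word
index k is only advanced after the odd-numbered pass.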
Get next word.*/ 5435 if ((i % 2) == 1) 5436 k++; 5437 } 5438 err_exit: 5439 lpfc_sli4_mbox_cmd_free(phba, mbox); 5440 return rc; 5441 } 5442 5443 /** 5444 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent. 5445 * @phba: Pointer to HBA context object. 5446 * @type: the extent's type. 5447 * 5448 * This function deallocates all extents of a particular resource type. 5449 * SLI4 does not allow for deallocating a particular extent range. It 5450 * is the caller's responsibility to release all kernel memory resources. 5451 **/ 5452 static int 5453 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) 5454 { 5455 int rc; 5456 uint32_t length, mbox_tmo = 0; 5457 LPFC_MBOXQ_t *mbox; 5458 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc; 5459 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next; 5460 5461 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5462 if (!mbox) 5463 return -ENOMEM; 5464 5465 /* 5466 * This function sends an embedded mailbox because it only sends the 5467 * the resource type. All extents of this type are released by the 5468 * port. 5469 */ 5470 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) - 5471 sizeof(struct lpfc_sli4_cfg_mhdr)); 5472 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5473 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT, 5474 length, LPFC_SLI4_MBX_EMBED); 5475 5476 /* Send an extents count of 0 - the dealloc doesn't use it. */ 5477 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 5478 LPFC_SLI4_MBX_EMBED); 5479 if (unlikely(rc)) { 5480 rc = -EIO; 5481 goto out_free_mbox; 5482 } 5483 if (!phba->sli4_hba.intr_enable) 5484 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5485 else { 5486 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5487 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5488 } 5489 if (unlikely(rc)) { 5490 rc = -EIO; 5491 goto out_free_mbox; 5492 } 5493 5494 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents; 5495 if (bf_get(lpfc_mbox_hdr_status, 5496 &dealloc_rsrc->header.cfg_shdr.response)) { 5497 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5498 "2919 Failed to release resource extents " 5499 "for type %d - Status 0x%x Add'l Status 0x%x. " 5500 "Resource memory not released.\n", 5501 type, 5502 bf_get(lpfc_mbox_hdr_status, 5503 &dealloc_rsrc->header.cfg_shdr.response), 5504 bf_get(lpfc_mbox_hdr_add_status, 5505 &dealloc_rsrc->header.cfg_shdr.response)); 5506 rc = -EIO; 5507 goto out_free_mbox; 5508 } 5509 5510 /* Release kernel memory resources for the specific type. 
*/ 5511 switch (type) { 5512 case LPFC_RSC_TYPE_FCOE_VPI: 5513 kfree(phba->vpi_bmask); 5514 kfree(phba->vpi_ids); 5515 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5516 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5517 &phba->lpfc_vpi_blk_list, list) { 5518 list_del_init(&rsrc_blk->list); 5519 kfree(rsrc_blk); 5520 } 5521 phba->sli4_hba.max_cfg_param.vpi_used = 0; 5522 break; 5523 case LPFC_RSC_TYPE_FCOE_XRI: 5524 kfree(phba->sli4_hba.xri_bmask); 5525 kfree(phba->sli4_hba.xri_ids); 5526 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5527 &phba->sli4_hba.lpfc_xri_blk_list, list) { 5528 list_del_init(&rsrc_blk->list); 5529 kfree(rsrc_blk); 5530 } 5531 break; 5532 case LPFC_RSC_TYPE_FCOE_VFI: 5533 kfree(phba->sli4_hba.vfi_bmask); 5534 kfree(phba->sli4_hba.vfi_ids); 5535 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5536 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5537 &phba->sli4_hba.lpfc_vfi_blk_list, list) { 5538 list_del_init(&rsrc_blk->list); 5539 kfree(rsrc_blk); 5540 } 5541 break; 5542 case LPFC_RSC_TYPE_FCOE_RPI: 5543 /* RPI bitmask and physical id array are cleaned up earlier. */ 5544 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5545 &phba->sli4_hba.lpfc_rpi_blk_list, list) { 5546 list_del_init(&rsrc_blk->list); 5547 kfree(rsrc_blk); 5548 } 5549 break; 5550 default: 5551 break; 5552 } 5553 5554 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5555 5556 out_free_mbox: 5557 mempool_free(mbox, phba->mbox_mem_pool); 5558 return rc; 5559 } 5560 5561 /** 5562 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents. 5563 * @phba: Pointer to HBA context object. 5564 * 5565 * This function allocates all SLI4 resource identifiers. 5566 **/ 5567 int 5568 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) 5569 { 5570 int i, rc, error = 0; 5571 uint16_t count, base; 5572 unsigned long longs; 5573 5574 if (!phba->sli4_hba.rpi_hdrs_in_use) 5575 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 5576 if (phba->sli4_hba.extents_in_use) { 5577 /* 5578 * The port supports resource extents. The XRI, VPI, VFI, RPI 5579 * resource extent count must be read and allocated before 5580 * provisioning the resource id arrays. 5581 */ 5582 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 5583 LPFC_IDX_RSRC_RDY) { 5584 /* 5585 * Extent-based resources are set - the driver could 5586 * be in a port reset. Figure out if any corrective 5587 * actions need to be taken. 5588 */ 5589 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5590 LPFC_RSC_TYPE_FCOE_VFI); 5591 if (rc != 0) 5592 error++; 5593 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5594 LPFC_RSC_TYPE_FCOE_VPI); 5595 if (rc != 0) 5596 error++; 5597 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5598 LPFC_RSC_TYPE_FCOE_XRI); 5599 if (rc != 0) 5600 error++; 5601 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5602 LPFC_RSC_TYPE_FCOE_RPI); 5603 if (rc != 0) 5604 error++; 5605 5606 /* 5607 * It's possible that the number of resources 5608 * provided to this port instance changed between 5609 * resets. Detect this condition and reallocate 5610 * resources. Otherwise, there is no action. 5611 */ 5612 if (error) { 5613 lpfc_printf_log(phba, KERN_INFO, 5614 LOG_MBOX | LOG_INIT, 5615 "2931 Detected extent resource " 5616 "change. 
Reallocating all " 5617 "extents.\n"); 5618 rc = lpfc_sli4_dealloc_extent(phba, 5619 LPFC_RSC_TYPE_FCOE_VFI); 5620 rc = lpfc_sli4_dealloc_extent(phba, 5621 LPFC_RSC_TYPE_FCOE_VPI); 5622 rc = lpfc_sli4_dealloc_extent(phba, 5623 LPFC_RSC_TYPE_FCOE_XRI); 5624 rc = lpfc_sli4_dealloc_extent(phba, 5625 LPFC_RSC_TYPE_FCOE_RPI); 5626 } else 5627 return 0; 5628 } 5629 5630 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 5631 if (unlikely(rc)) 5632 goto err_exit; 5633 5634 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 5635 if (unlikely(rc)) 5636 goto err_exit; 5637 5638 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 5639 if (unlikely(rc)) 5640 goto err_exit; 5641 5642 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 5643 if (unlikely(rc)) 5644 goto err_exit; 5645 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 5646 LPFC_IDX_RSRC_RDY); 5647 return rc; 5648 } else { 5649 /* 5650 * The port does not support resource extents. The XRI, VPI, 5651 * VFI, RPI resource ids were determined from READ_CONFIG. 5652 * Just allocate the bitmasks and provision the resource id 5653 * arrays. If a port reset is active, the resources don't 5654 * need any action - just exit. 5655 */ 5656 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 5657 LPFC_IDX_RSRC_RDY) { 5658 lpfc_sli4_dealloc_resource_identifiers(phba); 5659 lpfc_sli4_remove_rpis(phba); 5660 } 5661 /* RPIs. */ 5662 count = phba->sli4_hba.max_cfg_param.max_rpi; 5663 if (count <= 0) { 5664 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5665 "3279 Invalid provisioning of " 5666 "rpi:%d\n", count); 5667 rc = -EINVAL; 5668 goto err_exit; 5669 } 5670 base = phba->sli4_hba.max_cfg_param.rpi_base; 5671 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5672 phba->sli4_hba.rpi_bmask = kzalloc(longs * 5673 sizeof(unsigned long), 5674 GFP_KERNEL); 5675 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 5676 rc = -ENOMEM; 5677 goto err_exit; 5678 } 5679 phba->sli4_hba.rpi_ids = kzalloc(count * 5680 sizeof(uint16_t), 5681 GFP_KERNEL); 5682 if (unlikely(!phba->sli4_hba.rpi_ids)) { 5683 rc = -ENOMEM; 5684 goto free_rpi_bmask; 5685 } 5686 5687 for (i = 0; i < count; i++) 5688 phba->sli4_hba.rpi_ids[i] = base + i; 5689 5690 /* VPIs. */ 5691 count = phba->sli4_hba.max_cfg_param.max_vpi; 5692 if (count <= 0) { 5693 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5694 "3280 Invalid provisioning of " 5695 "vpi:%d\n", count); 5696 rc = -EINVAL; 5697 goto free_rpi_ids; 5698 } 5699 base = phba->sli4_hba.max_cfg_param.vpi_base; 5700 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5701 phba->vpi_bmask = kzalloc(longs * 5702 sizeof(unsigned long), 5703 GFP_KERNEL); 5704 if (unlikely(!phba->vpi_bmask)) { 5705 rc = -ENOMEM; 5706 goto free_rpi_ids; 5707 } 5708 phba->vpi_ids = kzalloc(count * 5709 sizeof(uint16_t), 5710 GFP_KERNEL); 5711 if (unlikely(!phba->vpi_ids)) { 5712 rc = -ENOMEM; 5713 goto free_vpi_bmask; 5714 } 5715 5716 for (i = 0; i < count; i++) 5717 phba->vpi_ids[i] = base + i; 5718 5719 /* XRIs. 
*/
5720 count = phba->sli4_hba.max_cfg_param.max_xri;
5721 if (count <= 0) {
5722 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5723 "3281 Invalid provisioning of "
5724 "xri:%d\n", count);
5725 rc = -EINVAL;
5726 goto free_vpi_ids;
5727 }
5728 base = phba->sli4_hba.max_cfg_param.xri_base;
5729 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5730 phba->sli4_hba.xri_bmask = kzalloc(longs *
5731 sizeof(unsigned long),
5732 GFP_KERNEL);
5733 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5734 rc = -ENOMEM;
5735 goto free_vpi_ids;
5736 }
5737 phba->sli4_hba.max_cfg_param.xri_used = 0;
5738 phba->sli4_hba.xri_ids = kzalloc(count *
5739 sizeof(uint16_t),
5740 GFP_KERNEL);
5741 if (unlikely(!phba->sli4_hba.xri_ids)) {
5742 rc = -ENOMEM;
5743 goto free_xri_bmask;
5744 }
5745 
5746 for (i = 0; i < count; i++)
5747 phba->sli4_hba.xri_ids[i] = base + i;
5748 
5749 /* VFIs. */
5750 count = phba->sli4_hba.max_cfg_param.max_vfi;
5751 if (count <= 0) {
5752 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5753 "3282 Invalid provisioning of "
5754 "vfi:%d\n", count);
5755 rc = -EINVAL;
5756 goto free_xri_ids;
5757 }
5758 base = phba->sli4_hba.max_cfg_param.vfi_base;
5759 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5760 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5761 sizeof(unsigned long),
5762 GFP_KERNEL);
5763 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5764 rc = -ENOMEM;
5765 goto free_xri_ids;
5766 }
5767 phba->sli4_hba.vfi_ids = kzalloc(count *
5768 sizeof(uint16_t),
5769 GFP_KERNEL);
5770 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5771 rc = -ENOMEM;
5772 goto free_vfi_bmask;
5773 }
5774 
5775 for (i = 0; i < count; i++)
5776 phba->sli4_hba.vfi_ids[i] = base + i;
5777 
5778 /*
5779 * Mark all resources ready. An HBA reset doesn't need
5780 * to reset the initialization.
5781 */
5782 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5783 LPFC_IDX_RSRC_RDY);
5784 return 0;
5785 }
5786 
5787 free_vfi_bmask:
5788 kfree(phba->sli4_hba.vfi_bmask);
5789 free_xri_ids:
5790 kfree(phba->sli4_hba.xri_ids);
5791 free_xri_bmask:
5792 kfree(phba->sli4_hba.xri_bmask);
5793 free_vpi_ids:
5794 kfree(phba->vpi_ids);
5795 free_vpi_bmask:
5796 kfree(phba->vpi_bmask);
5797 free_rpi_ids:
5798 kfree(phba->sli4_hba.rpi_ids);
5799 free_rpi_bmask:
5800 kfree(phba->sli4_hba.rpi_bmask);
5801 err_exit:
5802 return rc;
5803 }
5804 
5805 /**
5806 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
5807 * @phba: Pointer to HBA context object.
5808 *
5809 * This function deallocates all previously provisioned SLI4 resource
5810 * identifiers (rpi, vpi, xri and vfi) and their associated memory.
5811 **/
5812 int
5813 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
5814 {
5815 if (phba->sli4_hba.extents_in_use) {
5816 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5817 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5818 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5819 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5820 } else {
5821 kfree(phba->vpi_bmask);
5822 phba->sli4_hba.max_cfg_param.vpi_used = 0;
5823 kfree(phba->vpi_ids);
5824 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5825 kfree(phba->sli4_hba.xri_bmask);
5826 kfree(phba->sli4_hba.xri_ids);
5827 kfree(phba->sli4_hba.vfi_bmask);
5828 kfree(phba->sli4_hba.vfi_ids);
5829 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5830 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5831 }
5832 
5833 return 0;
5834 }
5835 
5836 /**
5837 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
5838 * @phba: Pointer to HBA context object.
5839 * @type: The resource extent type.
5840 * @extnt_cnt: buffer to hold port extent count response.
5841 * @extnt_size: buffer to hold port extent size response.
5842 *
5843 * This function calls the port to read the host allocated extents
5844 * for a particular type.
5845 **/
5846 int
5847 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
5848 uint16_t *extnt_cnt, uint16_t *extnt_size)
5849 {
5850 bool emb;
5851 int rc = 0;
5852 uint16_t curr_blks = 0;
5853 uint32_t req_len, emb_len;
5854 uint32_t alloc_len, mbox_tmo;
5855 struct list_head *blk_list_head;
5856 struct lpfc_rsrc_blks *rsrc_blk;
5857 LPFC_MBOXQ_t *mbox;
5858 void *virtaddr = NULL;
5859 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5860 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5861 union lpfc_sli4_cfg_shdr *shdr;
5862 
5863 switch (type) {
5864 case LPFC_RSC_TYPE_FCOE_VPI:
5865 blk_list_head = &phba->lpfc_vpi_blk_list;
5866 break;
5867 case LPFC_RSC_TYPE_FCOE_XRI:
5868 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
5869 break;
5870 case LPFC_RSC_TYPE_FCOE_VFI:
5871 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
5872 break;
5873 case LPFC_RSC_TYPE_FCOE_RPI:
5874 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
5875 break;
5876 default:
5877 return -EIO;
5878 }
5879 
5880 /* Count the number of extents currently allocated for this type. */
5881 list_for_each_entry(rsrc_blk, blk_list_head, list) {
5882 if (curr_blks == 0) {
5883 /*
5884 * The GET_ALLOCATED mailbox does not return the size,
5885 * just the count. The size should be just the size
5886 * stored in the current allocated block and all sizes
5887 * for an extent type are the same so set the return
5888 * value now.
5889 */
5890 *extnt_size = rsrc_blk->rsrc_size;
5891 }
5892 curr_blks++;
5893 }
5894 
5895 /* Calculate the total requested length of the dma memory. */
5896 req_len = curr_blks * sizeof(uint16_t);
5897 
5898 /*
5899 * Calculate the size of an embedded mailbox. The uint32_t
5900 * accounts for the extents-specific word.
5901 */
5902 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5903 sizeof(uint32_t);
5904 
5905 /*
5906 * Presume the allocation and response will fit into an embedded
5907 * mailbox. If not true, reconfigure to a non-embedded mailbox.
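 *
 * Worked example with illustrative numbers: for curr_blks = 4 the request
 * is 4 * sizeof(uint16_t) = 8 bytes, well below the embedded payload size
 * computed above, so the command stays LPFC_SLI4_MBX_EMBED; only a large
 * extent count would force the non-embedded (sge based) form.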
5908 */
5909 emb = LPFC_SLI4_MBX_EMBED;
5910 
5911 if (req_len > emb_len) {
5912 req_len = curr_blks * sizeof(uint16_t) +
5913 sizeof(union lpfc_sli4_cfg_shdr) +
5914 sizeof(uint32_t);
5915 emb = LPFC_SLI4_MBX_NEMBED;
5916 }
5917 
5918 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5919 if (!mbox)
5920 return -ENOMEM;
5921 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
5922 
5923 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5924 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
5925 req_len, emb);
5926 if (alloc_len < req_len) {
5927 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5928 "2983 Allocated DMA memory size (x%x) is "
5929 "less than the requested DMA memory "
5930 "size (x%x)\n", alloc_len, req_len);
5931 rc = -ENOMEM;
5932 goto err_exit;
5933 }
5934 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
5935 if (unlikely(rc)) {
5936 rc = -EIO;
5937 goto err_exit;
5938 }
5939 
5940 if (!phba->sli4_hba.intr_enable)
5941 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5942 else {
5943 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5944 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5945 }
5946 
5947 if (unlikely(rc)) {
5948 rc = -EIO;
5949 goto err_exit;
5950 }
5951 
5952 /*
5953 * Figure out where the response is located. Then get local pointers
5954 * to the response data. The port does not guarantee to respond to
5955 * all extent count requests, so update the local variable with the
5956 * allocated count from the port.
5957 */
5958 if (emb == LPFC_SLI4_MBX_EMBED) {
5959 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5960 shdr = &rsrc_ext->header.cfg_shdr;
5961 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5962 } else {
5963 virtaddr = mbox->sge_array->addr[0];
5964 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5965 shdr = &n_rsrc->cfg_shdr;
5966 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5967 }
5968 
5969 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
5970 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5971 "2984 Failed to read allocated resources "
5972 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
5973 type,
5974 bf_get(lpfc_mbox_hdr_status, &shdr->response),
5975 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
5976 rc = -EIO;
5977 goto err_exit;
5978 }
5979 err_exit:
5980 lpfc_sli4_mbox_cmd_free(phba, mbox);
5981 return rc;
5982 }
5983 
5984 /**
5985 * lpfc_sli4_repost_els_sgl_list - Repost the els buffers sgl pages as a block
5986 * @phba: pointer to lpfc hba data structure.
5987 *
5988 * This routine walks the list of els buffers that have been allocated and
5989 * reposts them to the port by using SGL block post. This is needed after a
5990 * pci_function_reset/warm_start or start. It attempts to construct blocks
5991 * of els buffer sgls that contain contiguous xris and uses the non-embedded
5992 * SGL block post mailbox commands to post them to the port. For a single els
5993 * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL post
5994 * mailbox command for posting.
5995 *
5996 * Returns: 0 = success, non-zero failure.
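 *
 * For illustration (xritag values made up): sgls with xritags 100, 101,
 * 102 and 104 would be posted as one non-embedded block {100, 101, 102},
 * since the hole before 104 closes that block, while 104, ending up as a
 * block of one, is posted with a single embedded SGL post.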
5997 **/ 5998 static int 5999 lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) 6000 { 6001 struct lpfc_sglq *sglq_entry = NULL; 6002 struct lpfc_sglq *sglq_entry_next = NULL; 6003 struct lpfc_sglq *sglq_entry_first = NULL; 6004 int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0; 6005 int last_xritag = NO_XRI; 6006 LIST_HEAD(prep_sgl_list); 6007 LIST_HEAD(blck_sgl_list); 6008 LIST_HEAD(allc_sgl_list); 6009 LIST_HEAD(post_sgl_list); 6010 LIST_HEAD(free_sgl_list); 6011 6012 spin_lock_irq(&phba->hbalock); 6013 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list); 6014 spin_unlock_irq(&phba->hbalock); 6015 6016 total_cnt = phba->sli4_hba.els_xri_cnt; 6017 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 6018 &allc_sgl_list, list) { 6019 list_del_init(&sglq_entry->list); 6020 block_cnt++; 6021 if ((last_xritag != NO_XRI) && 6022 (sglq_entry->sli4_xritag != last_xritag + 1)) { 6023 /* a hole in xri block, form a sgl posting block */ 6024 list_splice_init(&prep_sgl_list, &blck_sgl_list); 6025 post_cnt = block_cnt - 1; 6026 /* prepare list for next posting block */ 6027 list_add_tail(&sglq_entry->list, &prep_sgl_list); 6028 block_cnt = 1; 6029 } else { 6030 /* prepare list for next posting block */ 6031 list_add_tail(&sglq_entry->list, &prep_sgl_list); 6032 /* enough sgls for non-embed sgl mbox command */ 6033 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { 6034 list_splice_init(&prep_sgl_list, 6035 &blck_sgl_list); 6036 post_cnt = block_cnt; 6037 block_cnt = 0; 6038 } 6039 } 6040 num_posted++; 6041 6042 /* keep track of last sgl's xritag */ 6043 last_xritag = sglq_entry->sli4_xritag; 6044 6045 /* end of repost sgl list condition for els buffers */ 6046 if (num_posted == phba->sli4_hba.els_xri_cnt) { 6047 if (post_cnt == 0) { 6048 list_splice_init(&prep_sgl_list, 6049 &blck_sgl_list); 6050 post_cnt = block_cnt; 6051 } else if (block_cnt == 1) { 6052 status = lpfc_sli4_post_sgl(phba, 6053 sglq_entry->phys, 0, 6054 sglq_entry->sli4_xritag); 6055 if (!status) { 6056 /* successful, put sgl to posted list */ 6057 list_add_tail(&sglq_entry->list, 6058 &post_sgl_list); 6059 } else { 6060 /* Failure, put sgl to free list */ 6061 lpfc_printf_log(phba, KERN_WARNING, 6062 LOG_SLI, 6063 "3159 Failed to post els " 6064 "sgl, xritag:x%x\n", 6065 sglq_entry->sli4_xritag); 6066 list_add_tail(&sglq_entry->list, 6067 &free_sgl_list); 6068 total_cnt--; 6069 } 6070 } 6071 } 6072 6073 /* continue until a nembed page worth of sgls */ 6074 if (post_cnt == 0) 6075 continue; 6076 6077 /* post the els buffer list sgls as a block */ 6078 status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list, 6079 post_cnt); 6080 6081 if (!status) { 6082 /* success, put sgl list to posted sgl list */ 6083 list_splice_init(&blck_sgl_list, &post_sgl_list); 6084 } else { 6085 /* Failure, put sgl list to free sgl list */ 6086 sglq_entry_first = list_first_entry(&blck_sgl_list, 6087 struct lpfc_sglq, 6088 list); 6089 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6090 "3160 Failed to post els sgl-list, " 6091 "xritag:x%x-x%x\n", 6092 sglq_entry_first->sli4_xritag, 6093 (sglq_entry_first->sli4_xritag + 6094 post_cnt - 1)); 6095 list_splice_init(&blck_sgl_list, &free_sgl_list); 6096 total_cnt -= post_cnt; 6097 } 6098 6099 /* don't reset xirtag due to hole in xri block */ 6100 if (block_cnt == 0) 6101 last_xritag = NO_XRI; 6102 6103 /* reset els sgl post count for next round of posting */ 6104 post_cnt = 0; 6105 } 6106 /* update the number of XRIs posted for ELS */ 6107 phba->sli4_hba.els_xri_cnt = total_cnt; 6108 
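/* total_cnt was reduced for any sgls that failed to post, so els_xri_cnt now counts only those the port accepted */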
6109 /* free the els sgls failed to post */
6110 lpfc_free_sgl_list(phba, &free_sgl_list);
6111 
6112 /* push els sgls posted to the available list */
6113 if (!list_empty(&post_sgl_list)) {
6114 spin_lock_irq(&phba->hbalock);
6115 list_splice_init(&post_sgl_list,
6116 &phba->sli4_hba.lpfc_sgl_list);
6117 spin_unlock_irq(&phba->hbalock);
6118 } else {
6119 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6120 "3161 Failure to post els sgl to port.\n");
6121 return -EIO;
6122 }
6123 return 0;
6124 }
6125 
6126 /**
6127 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
6128 * @phba: Pointer to HBA context object.
6129 *
6130 * This function is the main SLI4 device initialization PCI function. This
6131 * function is called by the HBA initialization code, HBA reset code and
6132 * HBA error attention handler code. Caller is not required to hold any
6133 * locks.
6134 **/
6135 int
6136 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6137 {
6138 int rc;
6139 LPFC_MBOXQ_t *mboxq;
6140 struct lpfc_mqe *mqe;
6141 uint8_t *vpd;
6142 uint32_t vpd_size;
6143 uint32_t ftr_rsp = 0;
6144 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
6145 struct lpfc_vport *vport = phba->pport;
6146 struct lpfc_dmabuf *mp;
6147 
6148 /* Perform a PCI function reset to start from clean */
6149 rc = lpfc_pci_function_reset(phba);
6150 if (unlikely(rc))
6151 return -ENODEV;
6152 
6153 /* Check the HBA Host Status Register for readiness */
6154 rc = lpfc_sli4_post_status_check(phba);
6155 if (unlikely(rc))
6156 return -ENODEV;
6157 else {
6158 spin_lock_irq(&phba->hbalock);
6159 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
6160 spin_unlock_irq(&phba->hbalock);
6161 }
6162 
6163 /*
6164 * Allocate a single mailbox container for initializing the
6165 * port.
6166 */
6167 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6168 if (!mboxq)
6169 return -ENOMEM;
6170 
6171 /* Issue READ_REV to collect vpd and FW information. */
6172 vpd_size = SLI4_PAGE_SIZE;
6173 vpd = kzalloc(vpd_size, GFP_KERNEL);
6174 if (!vpd) {
6175 rc = -ENOMEM;
6176 goto out_free_mbox;
6177 }
6178 
6179 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
6180 if (unlikely(rc)) {
6181 kfree(vpd);
6182 goto out_free_mbox;
6183 }
6184 
6185 mqe = &mboxq->u.mqe;
6186 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
6187 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
6188 phba->hba_flag |= HBA_FCOE_MODE;
6189 else
6190 phba->hba_flag &= ~HBA_FCOE_MODE;
6191 
6192 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
6193 LPFC_DCBX_CEE_MODE)
6194 phba->hba_flag |= HBA_FIP_SUPPORT;
6195 else
6196 phba->hba_flag &= ~HBA_FIP_SUPPORT;
6197 
6198 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
6199 
6200 if (phba->sli_rev != LPFC_SLI_REV4) {
6201 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6202 "0376 READ_REV Error. SLI Level %d "
6203 "FCoE enabled %d\n",
6204 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
6205 rc = -EIO;
6206 kfree(vpd);
6207 goto out_free_mbox;
6208 }
6209 
6210 /*
6211 * Continue initialization with default values even if the driver failed
6212 * to read FCoE param config regions; only read the parameters if the
6213 * board is FCoE.
6214 */
6215 if (phba->hba_flag & HBA_FCOE_MODE &&
6216 lpfc_sli4_read_fcoe_params(phba))
6217 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
6218 "2570 Failed to read FCoE parameters\n");
6219 
6220 /*
6221 * Retrieve the sli4 device physical port name; failure to do so
6222 * is considered non-fatal.
6223 */ 6224 rc = lpfc_sli4_retrieve_pport_name(phba); 6225 if (!rc) 6226 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6227 "3080 Successful retrieving SLI4 device " 6228 "physical port name: %s.\n", phba->Port); 6229 6230 /* 6231 * Evaluate the read rev and vpd data. Populate the driver 6232 * state with the results. If this routine fails, the failure 6233 * is not fatal as the driver will use generic values. 6234 */ 6235 rc = lpfc_parse_vpd(phba, vpd, vpd_size); 6236 if (unlikely(!rc)) { 6237 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6238 "0377 Error %d parsing vpd. " 6239 "Using defaults.\n", rc); 6240 rc = 0; 6241 } 6242 kfree(vpd); 6243 6244 /* Save information as VPD data */ 6245 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; 6246 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; 6247 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; 6248 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, 6249 &mqe->un.read_rev); 6250 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, 6251 &mqe->un.read_rev); 6252 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, 6253 &mqe->un.read_rev); 6254 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, 6255 &mqe->un.read_rev); 6256 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; 6257 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); 6258 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; 6259 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); 6260 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; 6261 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); 6262 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6263 "(%d):0380 READ_REV Status x%x " 6264 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", 6265 mboxq->vport ? mboxq->vport->vpi : 0, 6266 bf_get(lpfc_mqe_status, mqe), 6267 phba->vpd.rev.opFwName, 6268 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, 6269 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); 6270 6271 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ 6272 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3); 6273 if (phba->pport->cfg_lun_queue_depth > rc) { 6274 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6275 "3362 LUN queue depth changed from %d to %d\n", 6276 phba->pport->cfg_lun_queue_depth, rc); 6277 phba->pport->cfg_lun_queue_depth = rc; 6278 } 6279 6280 6281 /* 6282 * Discover the port's supported feature set and match it against the 6283 * hosts requests. 6284 */ 6285 lpfc_request_features(phba, mboxq); 6286 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6287 if (unlikely(rc)) { 6288 rc = -EIO; 6289 goto out_free_mbox; 6290 } 6291 6292 /* 6293 * The port must support FCP initiator mode as this is the 6294 * only mode running in the host. 6295 */ 6296 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { 6297 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 6298 "0378 No support for fcpi mode.\n"); 6299 ftr_rsp++; 6300 } 6301 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) 6302 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; 6303 else 6304 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED; 6305 /* 6306 * If the port cannot support the host's requested features 6307 * then turn off the global config parameters to disable the 6308 * feature in the driver. This is not a fatal error. 
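 * For example, if BlockGuard was requested (cfg_enable_bg) but the port
 * does not report DIF support, cfg_enable_bg is cleared below; likewise
 * cfg_enable_npiv is cleared when NPIV is not granted.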
6309 */ 6310 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 6311 if (phba->cfg_enable_bg) { 6312 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)) 6313 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 6314 else 6315 ftr_rsp++; 6316 } 6317 6318 if (phba->max_vpi && phba->cfg_enable_npiv && 6319 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 6320 ftr_rsp++; 6321 6322 if (ftr_rsp) { 6323 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 6324 "0379 Feature Mismatch Data: x%08x %08x " 6325 "x%x x%x x%x\n", mqe->un.req_ftrs.word2, 6326 mqe->un.req_ftrs.word3, phba->cfg_enable_bg, 6327 phba->cfg_enable_npiv, phba->max_vpi); 6328 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 6329 phba->cfg_enable_bg = 0; 6330 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 6331 phba->cfg_enable_npiv = 0; 6332 } 6333 6334 /* These SLI3 features are assumed in SLI4 */ 6335 spin_lock_irq(&phba->hbalock); 6336 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 6337 spin_unlock_irq(&phba->hbalock); 6338 6339 /* 6340 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent 6341 * calls depends on these resources to complete port setup. 6342 */ 6343 rc = lpfc_sli4_alloc_resource_identifiers(phba); 6344 if (rc) { 6345 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6346 "2920 Failed to alloc Resource IDs " 6347 "rc = x%x\n", rc); 6348 goto out_free_mbox; 6349 } 6350 6351 /* Read the port's service parameters. */ 6352 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 6353 if (rc) { 6354 phba->link_state = LPFC_HBA_ERROR; 6355 rc = -ENOMEM; 6356 goto out_free_mbox; 6357 } 6358 6359 mboxq->vport = vport; 6360 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6361 mp = (struct lpfc_dmabuf *) mboxq->context1; 6362 if (rc == MBX_SUCCESS) { 6363 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); 6364 rc = 0; 6365 } 6366 6367 /* 6368 * This memory was allocated by the lpfc_read_sparam routine. Release 6369 * it to the mbuf pool. 6370 */ 6371 lpfc_mbuf_free(phba, mp->virt, mp->phys); 6372 kfree(mp); 6373 mboxq->context1 = NULL; 6374 if (unlikely(rc)) { 6375 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6376 "0382 READ_SPARAM command failed " 6377 "status %d, mbxStatus x%x\n", 6378 rc, bf_get(lpfc_mqe_status, mqe)); 6379 phba->link_state = LPFC_HBA_ERROR; 6380 rc = -EIO; 6381 goto out_free_mbox; 6382 } 6383 6384 lpfc_update_vport_wwn(vport); 6385 6386 /* Update the fc_host data structures with new wwn. 
*/ 6387 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 6388 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 6389 6390 /* update host els and scsi xri-sgl sizes and mappings */ 6391 rc = lpfc_sli4_xri_sgl_update(phba); 6392 if (unlikely(rc)) { 6393 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6394 "1400 Failed to update xri-sgl size and " 6395 "mapping: %d\n", rc); 6396 goto out_free_mbox; 6397 } 6398 6399 /* register the els sgl pool to the port */ 6400 rc = lpfc_sli4_repost_els_sgl_list(phba); 6401 if (unlikely(rc)) { 6402 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6403 "0582 Error %d during els sgl post " 6404 "operation\n", rc); 6405 rc = -ENODEV; 6406 goto out_free_mbox; 6407 } 6408 6409 /* register the allocated scsi sgl pool to the port */ 6410 rc = lpfc_sli4_repost_scsi_sgl_list(phba); 6411 if (unlikely(rc)) { 6412 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6413 "0383 Error %d during scsi sgl post " 6414 "operation\n", rc); 6415 /* Some Scsi buffers were moved to the abort scsi list */ 6416 /* A pci function reset will repost them */ 6417 rc = -ENODEV; 6418 goto out_free_mbox; 6419 } 6420 6421 /* Post the rpi header region to the device. */ 6422 rc = lpfc_sli4_post_all_rpi_hdrs(phba); 6423 if (unlikely(rc)) { 6424 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6425 "0393 Error %d during rpi post operation\n", 6426 rc); 6427 rc = -ENODEV; 6428 goto out_free_mbox; 6429 } 6430 lpfc_sli4_node_prep(phba); 6431 6432 /* Create all the SLI4 queues */ 6433 rc = lpfc_sli4_queue_create(phba); 6434 if (rc) { 6435 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6436 "3089 Failed to allocate queues\n"); 6437 rc = -ENODEV; 6438 goto out_stop_timers; 6439 } 6440 /* Set up all the queues to the device */ 6441 rc = lpfc_sli4_queue_setup(phba); 6442 if (unlikely(rc)) { 6443 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6444 "0381 Error %d during queue setup.\n ", rc); 6445 goto out_destroy_queue; 6446 } 6447 6448 /* Arm the CQs and then EQs on device */ 6449 lpfc_sli4_arm_cqeq_intr(phba); 6450 6451 /* Indicate device interrupt mode */ 6452 phba->sli4_hba.intr_enable = 1; 6453 6454 /* Allow asynchronous mailbox command to go through */ 6455 spin_lock_irq(&phba->hbalock); 6456 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 6457 spin_unlock_irq(&phba->hbalock); 6458 6459 /* Post receive buffers to the device */ 6460 lpfc_sli4_rb_setup(phba); 6461 6462 /* Reset HBA FCF states after HBA reset */ 6463 phba->fcf.fcf_flag = 0; 6464 phba->fcf.current_rec.flag = 0; 6465 6466 /* Start the ELS watchdog timer */ 6467 mod_timer(&vport->els_tmofunc, 6468 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2))); 6469 6470 /* Start heart beat timer */ 6471 mod_timer(&phba->hb_tmofunc, 6472 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 6473 phba->hb_outstanding = 0; 6474 phba->last_completion_time = jiffies; 6475 6476 /* Start error attention (ERATT) polling timer */ 6477 mod_timer(&phba->eratt_poll, 6478 jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); 6479 6480 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 6481 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 6482 rc = pci_enable_pcie_error_reporting(phba->pcidev); 6483 if (!rc) { 6484 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6485 "2829 This device supports " 6486 "Advanced Error Reporting (AER)\n"); 6487 spin_lock_irq(&phba->hbalock); 6488 phba->hba_flag |= HBA_AER_ENABLED; 6489 spin_unlock_irq(&phba->hbalock); 6490 } else { 
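/* AER could not be enabled - log it and turn the option back off */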
6491 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6492 "2830 This device does not support " 6493 "Advanced Error Reporting (AER)\n"); 6494 phba->cfg_aer_support = 0; 6495 } 6496 rc = 0; 6497 } 6498 6499 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 6500 /* 6501 * The FC Port needs to register FCFI (index 0) 6502 */ 6503 lpfc_reg_fcfi(phba, mboxq); 6504 mboxq->vport = phba->pport; 6505 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6506 if (rc != MBX_SUCCESS) 6507 goto out_unset_queue; 6508 rc = 0; 6509 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, 6510 &mboxq->u.mqe.un.reg_fcfi); 6511 6512 /* Check if the port is configured to be disabled */ 6513 lpfc_sli_read_link_ste(phba); 6514 } 6515 6516 /* 6517 * The port is ready, set the host's link state to LINK_DOWN 6518 * in preparation for link interrupts. 6519 */ 6520 spin_lock_irq(&phba->hbalock); 6521 phba->link_state = LPFC_LINK_DOWN; 6522 spin_unlock_irq(&phba->hbalock); 6523 if (!(phba->hba_flag & HBA_FCOE_MODE) && 6524 (phba->hba_flag & LINK_DISABLED)) { 6525 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 6526 "3103 Adapter Link is disabled.\n"); 6527 lpfc_down_link(phba, mboxq); 6528 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6529 if (rc != MBX_SUCCESS) { 6530 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 6531 "3104 Adapter failed to issue " 6532 "DOWN_LINK mbox cmd, rc:x%x\n", rc); 6533 goto out_unset_queue; 6534 } 6535 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 6536 /* don't perform init_link on SLI4 FC port loopback test */ 6537 if (!(phba->link_flag & LS_LOOPBACK_MODE)) { 6538 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 6539 if (rc) 6540 goto out_unset_queue; 6541 } 6542 } 6543 mempool_free(mboxq, phba->mbox_mem_pool); 6544 return rc; 6545 out_unset_queue: 6546 /* Unset all the queues set up in this routine when error out */ 6547 lpfc_sli4_queue_unset(phba); 6548 out_destroy_queue: 6549 lpfc_sli4_queue_destroy(phba); 6550 out_stop_timers: 6551 lpfc_stop_hba_timers(phba); 6552 out_free_mbox: 6553 mempool_free(mboxq, phba->mbox_mem_pool); 6554 return rc; 6555 } 6556 6557 /** 6558 * lpfc_mbox_timeout - Timeout call back function for mbox timer 6559 * @ptr: context object - pointer to hba structure. 6560 * 6561 * This is the callback function for mailbox timer. The mailbox 6562 * timer is armed when a new mailbox command is issued and the timer 6563 * is deleted when the mailbox complete. The function is called by 6564 * the kernel timer code when a mailbox does not complete within 6565 * expected time. This function wakes up the worker thread to 6566 * process the mailbox timeout and returns. All the processing is 6567 * done by the worker thread function lpfc_mbox_timeout_handler. 6568 **/ 6569 void 6570 lpfc_mbox_timeout(unsigned long ptr) 6571 { 6572 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 6573 unsigned long iflag; 6574 uint32_t tmo_posted; 6575 6576 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 6577 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; 6578 if (!tmo_posted) 6579 phba->pport->work_port_events |= WORKER_MBOX_TMO; 6580 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 6581 6582 if (!tmo_posted) 6583 lpfc_worker_wake_up(phba); 6584 return; 6585 } 6586 6587 /** 6588 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions 6589 * are pending 6590 * @phba: Pointer to HBA context object. 6591 * 6592 * This function checks if any mailbox completions are present on the mailbox 6593 * completion queue. 
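 *
 * Returns true if at least one completed, non-async mailbox CQE is found,
 * false otherwise. The scan is read-only: no CQE is consumed here, they
 * are left for the normal completion path to process.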
6594 **/ 6595 bool 6596 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba) 6597 { 6598 6599 uint32_t idx; 6600 struct lpfc_queue *mcq; 6601 struct lpfc_mcqe *mcqe; 6602 bool pending_completions = false; 6603 6604 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 6605 return false; 6606 6607 /* Check for completions on mailbox completion queue */ 6608 6609 mcq = phba->sli4_hba.mbx_cq; 6610 idx = mcq->hba_index; 6611 while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) { 6612 mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe; 6613 if (bf_get_le32(lpfc_trailer_completed, mcqe) && 6614 (!bf_get_le32(lpfc_trailer_async, mcqe))) { 6615 pending_completions = true; 6616 break; 6617 } 6618 idx = (idx + 1) % mcq->entry_count; 6619 if (mcq->hba_index == idx) 6620 break; 6621 } 6622 return pending_completions; 6623 6624 } 6625 6626 /** 6627 * lpfc_sli4_process_missed_mbox_completions - process mbox completions 6628 * that were missed. 6629 * @phba: Pointer to HBA context object. 6630 * 6631 * For sli4, it is possible to miss an interrupt. As such mbox completions 6632 * maybe missed causing erroneous mailbox timeouts to occur. This function 6633 * checks to see if mbox completions are on the mailbox completion queue 6634 * and will process all the completions associated with the eq for the 6635 * mailbox completion queue. 6636 **/ 6637 bool 6638 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) 6639 { 6640 6641 uint32_t eqidx; 6642 struct lpfc_queue *fpeq = NULL; 6643 struct lpfc_eqe *eqe; 6644 bool mbox_pending; 6645 6646 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 6647 return false; 6648 6649 /* Find the eq associated with the mcq */ 6650 6651 if (phba->sli4_hba.hba_eq) 6652 for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++) 6653 if (phba->sli4_hba.hba_eq[eqidx]->queue_id == 6654 phba->sli4_hba.mbx_cq->assoc_qid) { 6655 fpeq = phba->sli4_hba.hba_eq[eqidx]; 6656 break; 6657 } 6658 if (!fpeq) 6659 return false; 6660 6661 /* Turn off interrupts from this EQ */ 6662 6663 lpfc_sli4_eq_clr_intr(fpeq); 6664 6665 /* Check to see if a mbox completion is pending */ 6666 6667 mbox_pending = lpfc_sli4_mbox_completions_pending(phba); 6668 6669 /* 6670 * If a mbox completion is pending, process all the events on EQ 6671 * associated with the mbox completion queue (this could include 6672 * mailbox commands, async events, els commands, receive queue data 6673 * and fcp commands) 6674 */ 6675 6676 if (mbox_pending) 6677 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 6678 lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx); 6679 fpeq->EQ_processed++; 6680 } 6681 6682 /* Always clear and re-arm the EQ */ 6683 6684 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); 6685 6686 return mbox_pending; 6687 6688 } 6689 6690 /** 6691 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout 6692 * @phba: Pointer to HBA context object. 6693 * 6694 * This function is called from worker thread when a mailbox command times out. 6695 * The caller is not required to hold any locks. This function will reset the 6696 * HBA and recover all the pending commands. 6697 **/ 6698 void 6699 lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 6700 { 6701 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 6702 MAILBOX_t *mb = &pmbox->u.mb; 6703 struct lpfc_sli *psli = &phba->sli; 6704 struct lpfc_sli_ring *pring; 6705 6706 /* If the mailbox completed, process the completion and return */ 6707 if (lpfc_sli4_process_missed_mbox_completions(phba)) 6708 return; 6709 6710 /* Check the pmbox pointer first. 
There is a race condition 6711 * between the mbox timeout handler getting executed in the 6712 * worklist and the mailbox actually completing. When this 6713 * race condition occurs, the mbox_active will be NULL. 6714 */ 6715 spin_lock_irq(&phba->hbalock); 6716 if (pmbox == NULL) { 6717 lpfc_printf_log(phba, KERN_WARNING, 6718 LOG_MBOX | LOG_SLI, 6719 "0353 Active Mailbox cleared - mailbox timeout " 6720 "exiting\n"); 6721 spin_unlock_irq(&phba->hbalock); 6722 return; 6723 } 6724 6725 /* Mbox cmd <mbxCommand> timeout */ 6726 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6727 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 6728 mb->mbxCommand, 6729 phba->pport->port_state, 6730 phba->sli.sli_flag, 6731 phba->sli.mbox_active); 6732 spin_unlock_irq(&phba->hbalock); 6733 6734 /* Setting state unknown so lpfc_sli_abort_iocb_ring 6735 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 6736 * it to fail all outstanding SCSI IO. 6737 */ 6738 spin_lock_irq(&phba->pport->work_port_lock); 6739 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 6740 spin_unlock_irq(&phba->pport->work_port_lock); 6741 spin_lock_irq(&phba->hbalock); 6742 phba->link_state = LPFC_LINK_UNKNOWN; 6743 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 6744 spin_unlock_irq(&phba->hbalock); 6745 6746 pring = &psli->ring[psli->fcp_ring]; 6747 lpfc_sli_abort_iocb_ring(phba, pring); 6748 6749 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6750 "0345 Resetting board due to mailbox timeout\n"); 6751 6752 /* Reset the HBA device */ 6753 lpfc_reset_hba(phba); 6754 } 6755 6756 /** 6757 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware 6758 * @phba: Pointer to HBA context object. 6759 * @pmbox: Pointer to mailbox object. 6760 * @flag: Flag indicating how the mailbox need to be processed. 6761 * 6762 * This function is called by discovery code and HBA management code 6763 * to submit a mailbox command to firmware with SLI-3 interface spec. This 6764 * function gets the hbalock to protect the data structures. 6765 * The mailbox command can be submitted in polling mode, in which case 6766 * this function will wait in a polling loop for the completion of the 6767 * mailbox. 6768 * If the mailbox is submitted in no_wait mode (not polling) the 6769 * function will submit the command and returns immediately without waiting 6770 * for the mailbox completion. The no_wait is supported only when HBA 6771 * is in SLI2/SLI3 mode - interrupts are enabled. 6772 * The SLI interface allows only one mailbox pending at a time. If the 6773 * mailbox is issued in polling mode and there is already a mailbox 6774 * pending, then the function will return an error. If the mailbox is issued 6775 * in NO_WAIT mode and there is a mailbox pending already, the function 6776 * will return MBX_BUSY after queuing the mailbox into mailbox queue. 6777 * The sli layer owns the mailbox object until the completion of mailbox 6778 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other 6779 * return codes the caller owns the mailbox command after the return of 6780 * the function. 
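 *
 * A minimal polling-mode sketch (hypothetical caller; allocation checks and
 * error handling trimmed), mirroring how this driver issues synchronous
 * commands elsewhere:
 *
 *	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	lpfc_request_features(phba, mboxq);
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *	if (rc == MBX_SUCCESS)
 *		... inspect mboxq->u.mb ...
 *	mempool_free(mboxq, phba->mbox_mem_pool);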
6781 **/ 6782 static int 6783 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, 6784 uint32_t flag) 6785 { 6786 MAILBOX_t *mbx; 6787 struct lpfc_sli *psli = &phba->sli; 6788 uint32_t status, evtctr; 6789 uint32_t ha_copy, hc_copy; 6790 int i; 6791 unsigned long timeout; 6792 unsigned long drvr_flag = 0; 6793 uint32_t word0, ldata; 6794 void __iomem *to_slim; 6795 int processing_queue = 0; 6796 6797 spin_lock_irqsave(&phba->hbalock, drvr_flag); 6798 if (!pmbox) { 6799 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6800 /* processing mbox queue from intr_handler */ 6801 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 6802 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6803 return MBX_SUCCESS; 6804 } 6805 processing_queue = 1; 6806 pmbox = lpfc_mbox_get(phba); 6807 if (!pmbox) { 6808 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6809 return MBX_SUCCESS; 6810 } 6811 } 6812 6813 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && 6814 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { 6815 if(!pmbox->vport) { 6816 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6817 lpfc_printf_log(phba, KERN_ERR, 6818 LOG_MBOX | LOG_VPORT, 6819 "1806 Mbox x%x failed. No vport\n", 6820 pmbox->u.mb.mbxCommand); 6821 dump_stack(); 6822 goto out_not_finished; 6823 } 6824 } 6825 6826 /* If the PCI channel is in offline state, do not post mbox. */ 6827 if (unlikely(pci_channel_offline(phba->pcidev))) { 6828 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6829 goto out_not_finished; 6830 } 6831 6832 /* If HBA has a deferred error attention, fail the iocb. */ 6833 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 6834 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6835 goto out_not_finished; 6836 } 6837 6838 psli = &phba->sli; 6839 6840 mbx = &pmbox->u.mb; 6841 status = MBX_SUCCESS; 6842 6843 if (phba->link_state == LPFC_HBA_ERROR) { 6844 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6845 6846 /* Mbox command <mbxCommand> cannot issue */ 6847 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6848 "(%d):0311 Mailbox command x%x cannot " 6849 "issue Data: x%x x%x\n", 6850 pmbox->vport ? pmbox->vport->vpi : 0, 6851 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 6852 goto out_not_finished; 6853 } 6854 6855 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { 6856 if (lpfc_readl(phba->HCregaddr, &hc_copy) || 6857 !(hc_copy & HC_MBINT_ENA)) { 6858 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6859 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6860 "(%d):2528 Mailbox command x%x cannot " 6861 "issue Data: x%x x%x\n", 6862 pmbox->vport ? pmbox->vport->vpi : 0, 6863 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 6864 goto out_not_finished; 6865 } 6866 } 6867 6868 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 6869 /* Polling for a mbox command when another one is already active 6870 * is not allowed in SLI. Also, the driver must have established 6871 * SLI2 mode to queue and process multiple mbox commands. 6872 */ 6873 6874 if (flag & MBX_POLL) { 6875 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6876 6877 /* Mbox command <mbxCommand> cannot issue */ 6878 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6879 "(%d):2529 Mailbox command x%x " 6880 "cannot issue Data: x%x x%x\n", 6881 pmbox->vport ? 
pmbox->vport->vpi : 0, 6882 pmbox->u.mb.mbxCommand, 6883 psli->sli_flag, flag); 6884 goto out_not_finished; 6885 } 6886 6887 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { 6888 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6889 /* Mbox command <mbxCommand> cannot issue */ 6890 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6891 "(%d):2530 Mailbox command x%x " 6892 "cannot issue Data: x%x x%x\n", 6893 pmbox->vport ? pmbox->vport->vpi : 0, 6894 pmbox->u.mb.mbxCommand, 6895 psli->sli_flag, flag); 6896 goto out_not_finished; 6897 } 6898 6899 /* Another mailbox command is still being processed, queue this 6900 * command to be processed later. 6901 */ 6902 lpfc_mbox_put(phba, pmbox); 6903 6904 /* Mbox cmd issue - BUSY */ 6905 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6906 "(%d):0308 Mbox cmd issue - BUSY Data: " 6907 "x%x x%x x%x x%x\n", 6908 pmbox->vport ? pmbox->vport->vpi : 0xffffff, 6909 mbx->mbxCommand, phba->pport->port_state, 6910 psli->sli_flag, flag); 6911 6912 psli->slistat.mbox_busy++; 6913 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6914 6915 if (pmbox->vport) { 6916 lpfc_debugfs_disc_trc(pmbox->vport, 6917 LPFC_DISC_TRC_MBOX_VPORT, 6918 "MBOX Bsy vport: cmd:x%x mb:x%x x%x", 6919 (uint32_t)mbx->mbxCommand, 6920 mbx->un.varWords[0], mbx->un.varWords[1]); 6921 } 6922 else { 6923 lpfc_debugfs_disc_trc(phba->pport, 6924 LPFC_DISC_TRC_MBOX, 6925 "MBOX Bsy: cmd:x%x mb:x%x x%x", 6926 (uint32_t)mbx->mbxCommand, 6927 mbx->un.varWords[0], mbx->un.varWords[1]); 6928 } 6929 6930 return MBX_BUSY; 6931 } 6932 6933 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 6934 6935 /* If we are not polling, we MUST be in SLI2 mode */ 6936 if (flag != MBX_POLL) { 6937 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && 6938 (mbx->mbxCommand != MBX_KILL_BOARD)) { 6939 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6940 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6941 /* Mbox command <mbxCommand> cannot issue */ 6942 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6943 "(%d):2531 Mailbox command x%x " 6944 "cannot issue Data: x%x x%x\n", 6945 pmbox->vport ? pmbox->vport->vpi : 0, 6946 pmbox->u.mb.mbxCommand, 6947 psli->sli_flag, flag); 6948 goto out_not_finished; 6949 } 6950 /* timeout active mbox command */ 6951 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 6952 1000); 6953 mod_timer(&psli->mbox_tmo, jiffies + timeout); 6954 } 6955 6956 /* Mailbox cmd <cmd> issue */ 6957 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6958 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " 6959 "x%x\n", 6960 pmbox->vport ? pmbox->vport->vpi : 0, 6961 mbx->mbxCommand, phba->pport->port_state, 6962 psli->sli_flag, flag); 6963 6964 if (mbx->mbxCommand != MBX_HEARTBEAT) { 6965 if (pmbox->vport) { 6966 lpfc_debugfs_disc_trc(pmbox->vport, 6967 LPFC_DISC_TRC_MBOX_VPORT, 6968 "MBOX Send vport: cmd:x%x mb:x%x x%x", 6969 (uint32_t)mbx->mbxCommand, 6970 mbx->un.varWords[0], mbx->un.varWords[1]); 6971 } 6972 else { 6973 lpfc_debugfs_disc_trc(phba->pport, 6974 LPFC_DISC_TRC_MBOX, 6975 "MBOX Send: cmd:x%x mb:x%x x%x", 6976 (uint32_t)mbx->mbxCommand, 6977 mbx->un.varWords[0], mbx->un.varWords[1]); 6978 } 6979 } 6980 6981 psli->slistat.mbox_cmd++; 6982 evtctr = psli->slistat.mbox_event; 6983 6984 /* next set own bit for the adapter and copy over command word */ 6985 mbx->mbxOwner = OWN_CHIP; 6986 6987 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 6988 /* Populate mbox extension offset word. 
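The word selected by mbox_offset_word receives the byte offset of the
extension area: relative to the start of the host mailbox here, or
MAILBOX_HBA_EXT_OFFSET in the SLIM path below.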
*/ 6989 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { 6990 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 6991 = (uint8_t *)phba->mbox_ext 6992 - (uint8_t *)phba->mbox; 6993 } 6994 6995 /* Copy the mailbox extension data */ 6996 if (pmbox->in_ext_byte_len && pmbox->context2) { 6997 lpfc_sli_pcimem_bcopy(pmbox->context2, 6998 (uint8_t *)phba->mbox_ext, 6999 pmbox->in_ext_byte_len); 7000 } 7001 /* Copy command data to host SLIM area */ 7002 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); 7003 } else { 7004 /* Populate mbox extension offset word. */ 7005 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) 7006 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 7007 = MAILBOX_HBA_EXT_OFFSET; 7008 7009 /* Copy the mailbox extension data */ 7010 if (pmbox->in_ext_byte_len && pmbox->context2) { 7011 lpfc_memcpy_to_slim(phba->MBslimaddr + 7012 MAILBOX_HBA_EXT_OFFSET, 7013 pmbox->context2, pmbox->in_ext_byte_len); 7014 7015 } 7016 if (mbx->mbxCommand == MBX_CONFIG_PORT) { 7017 /* copy command data into host mbox for cmpl */ 7018 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); 7019 } 7020 7021 /* First copy mbox command data to HBA SLIM, skip past first 7022 word */ 7023 to_slim = phba->MBslimaddr + sizeof (uint32_t); 7024 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0], 7025 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 7026 7027 /* Next copy over first word, with mbxOwner set */ 7028 ldata = *((uint32_t *)mbx); 7029 to_slim = phba->MBslimaddr; 7030 writel(ldata, to_slim); 7031 readl(to_slim); /* flush */ 7032 7033 if (mbx->mbxCommand == MBX_CONFIG_PORT) { 7034 /* switch over to host mailbox */ 7035 psli->sli_flag |= LPFC_SLI_ACTIVE; 7036 } 7037 } 7038 7039 wmb(); 7040 7041 switch (flag) { 7042 case MBX_NOWAIT: 7043 /* Set up reference to mailbox command */ 7044 psli->mbox_active = pmbox; 7045 /* Interrupt board to do it */ 7046 writel(CA_MBATT, phba->CAregaddr); 7047 readl(phba->CAregaddr); /* flush */ 7048 /* Don't wait for it to finish, just return */ 7049 break; 7050 7051 case MBX_POLL: 7052 /* Set up null reference to mailbox command */ 7053 psli->mbox_active = NULL; 7054 /* Interrupt board to do it */ 7055 writel(CA_MBATT, phba->CAregaddr); 7056 readl(phba->CAregaddr); /* flush */ 7057 7058 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 7059 /* First read mbox status word */ 7060 word0 = *((uint32_t *)phba->mbox); 7061 word0 = le32_to_cpu(word0); 7062 } else { 7063 /* First read mbox status word */ 7064 if (lpfc_readl(phba->MBslimaddr, &word0)) { 7065 spin_unlock_irqrestore(&phba->hbalock, 7066 drvr_flag); 7067 goto out_not_finished; 7068 } 7069 } 7070 7071 /* Read the HBA Host Attention Register */ 7072 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 7073 spin_unlock_irqrestore(&phba->hbalock, 7074 drvr_flag); 7075 goto out_not_finished; 7076 } 7077 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 7078 1000) + jiffies; 7079 i = 0; 7080 /* Wait for command to complete */ 7081 while (((word0 & OWN_CHIP) == OWN_CHIP) || 7082 (!(ha_copy & HA_MBATT) && 7083 (phba->link_state > LPFC_WARM_START))) { 7084 if (time_after(jiffies, timeout)) { 7085 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7086 spin_unlock_irqrestore(&phba->hbalock, 7087 drvr_flag); 7088 goto out_not_finished; 7089 } 7090 7091 /* Check if we took a mbox interrupt while we were 7092 polling */ 7093 if (((word0 & OWN_CHIP) != OWN_CHIP) 7094 && (evtctr != psli->slistat.mbox_event)) 7095 break; 7096 7097 if (i++ > 10) { 7098 spin_unlock_irqrestore(&phba->hbalock, 7099 drvr_flag); 7100 msleep(1); 7101 
spin_lock_irqsave(&phba->hbalock, drvr_flag); 7102 } 7103 7104 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 7105 /* First copy command data */ 7106 word0 = *((uint32_t *)phba->mbox); 7107 word0 = le32_to_cpu(word0); 7108 if (mbx->mbxCommand == MBX_CONFIG_PORT) { 7109 MAILBOX_t *slimmb; 7110 uint32_t slimword0; 7111 /* Check real SLIM for any errors */ 7112 slimword0 = readl(phba->MBslimaddr); 7113 slimmb = (MAILBOX_t *) & slimword0; 7114 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 7115 && slimmb->mbxStatus) { 7116 psli->sli_flag &= 7117 ~LPFC_SLI_ACTIVE; 7118 word0 = slimword0; 7119 } 7120 } 7121 } else { 7122 /* First copy command data */ 7123 word0 = readl(phba->MBslimaddr); 7124 } 7125 /* Read the HBA Host Attention Register */ 7126 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 7127 spin_unlock_irqrestore(&phba->hbalock, 7128 drvr_flag); 7129 goto out_not_finished; 7130 } 7131 } 7132 7133 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 7134 /* copy results back to user */ 7135 lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE); 7136 /* Copy the mailbox extension data */ 7137 if (pmbox->out_ext_byte_len && pmbox->context2) { 7138 lpfc_sli_pcimem_bcopy(phba->mbox_ext, 7139 pmbox->context2, 7140 pmbox->out_ext_byte_len); 7141 } 7142 } else { 7143 /* First copy command data */ 7144 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr, 7145 MAILBOX_CMD_SIZE); 7146 /* Copy the mailbox extension data */ 7147 if (pmbox->out_ext_byte_len && pmbox->context2) { 7148 lpfc_memcpy_from_slim(pmbox->context2, 7149 phba->MBslimaddr + 7150 MAILBOX_HBA_EXT_OFFSET, 7151 pmbox->out_ext_byte_len); 7152 } 7153 } 7154 7155 writel(HA_MBATT, phba->HAregaddr); 7156 readl(phba->HAregaddr); /* flush */ 7157 7158 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7159 status = mbx->mbxStatus; 7160 } 7161 7162 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7163 return status; 7164 7165 out_not_finished: 7166 if (processing_queue) { 7167 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; 7168 lpfc_mbox_cmpl_put(phba, pmbox); 7169 } 7170 return MBX_NOT_FINISHED; 7171 } 7172 7173 /** 7174 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command 7175 * @phba: Pointer to HBA context object. 7176 * 7177 * The function blocks the posting of SLI4 asynchronous mailbox commands from 7178 * the driver internal pending mailbox queue. It will then try to wait out the 7179 * possible outstanding mailbox command before return. 7180 * 7181 * Returns: 7182 * 0 - the outstanding mailbox command completed; otherwise, the wait for 7183 * the outstanding mailbox command timed out. 7184 **/ 7185 static int 7186 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) 7187 { 7188 struct lpfc_sli *psli = &phba->sli; 7189 int rc = 0; 7190 unsigned long timeout = 0; 7191 7192 /* Mark the asynchronous mailbox command posting as blocked */ 7193 spin_lock_irq(&phba->hbalock); 7194 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 7195 /* Determine how long we might wait for the active mailbox 7196 * command to be gracefully completed by firmware. 
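 * (The wait bound used below is the command-specific mailbox timeout from lpfc_mbox_tmo_val(), converted to jiffies.)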
7197 */ 7198 if (phba->sli.mbox_active) 7199 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 7200 phba->sli.mbox_active) * 7201 1000) + jiffies; 7202 spin_unlock_irq(&phba->hbalock); 7203 7204 /* Make sure the mailbox is really active */ 7205 if (timeout) 7206 lpfc_sli4_process_missed_mbox_completions(phba); 7207 7208 /* Wait for the outstanding mailbox command to complete */ 7209 while (phba->sli.mbox_active) { 7210 /* Check active mailbox complete status every 2ms */ 7211 msleep(2); 7212 if (time_after(jiffies, timeout)) { 7213 /* Timeout, mark the outstanding cmd as not complete */ 7214 rc = 1; 7215 break; 7216 } 7217 } 7218 7219 /* Could not cleanly block async mailbox commands; fail the request */ 7220 if (rc) { 7221 spin_lock_irq(&phba->hbalock); 7222 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 7223 spin_unlock_irq(&phba->hbalock); 7224 } 7225 return rc; 7226 } 7227 7228 /** 7229 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command 7230 * @phba: Pointer to HBA context object. 7231 * 7232 * The function unblocks and resumes posting of SLI4 asynchronous mailbox 7233 * commands from the driver internal pending mailbox queue. It makes sure 7234 * that there is no outstanding mailbox command before resuming posting 7235 * asynchronous mailbox commands. If, for any reason, there is an outstanding 7236 * mailbox command, it will try to wait it out before resuming asynchronous 7237 * mailbox command posting. 7238 **/ 7239 static void 7240 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba) 7241 { 7242 struct lpfc_sli *psli = &phba->sli; 7243 7244 spin_lock_irq(&phba->hbalock); 7245 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 7246 /* Asynchronous mailbox posting is not blocked, do nothing */ 7247 spin_unlock_irq(&phba->hbalock); 7248 return; 7249 } 7250 7251 /* The outstanding synchronous mailbox command is guaranteed to be done, 7252 * either successfully or by timeout; after a timeout the outstanding 7253 * command is always removed, so just unblock posting of async 7254 * mailbox commands and resume. 7255 */ 7256 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 7257 spin_unlock_irq(&phba->hbalock); 7258 7259 /* wake up worker thread to post asynchronous mailbox command */ 7260 lpfc_worker_wake_up(phba); 7261 } 7262 7263 /** 7264 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready 7265 * @phba: Pointer to HBA context object. 7266 * @mboxq: Pointer to mailbox object. 7267 * 7268 * The function waits for the bootstrap mailbox register ready bit from 7269 * the port for twice the regular mailbox command timeout value. 7270 * Returns: 7271 * 0 - no timeout on waiting for bootstrap mailbox register ready. 7272 * MBXERR_ERROR - wait for bootstrap mailbox register timed out. 7273 **/ 7274 static int 7275 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 7276 { 7277 uint32_t db_ready; 7278 unsigned long timeout; 7279 struct lpfc_register bmbx_reg; 7280 7281 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) 7282 * 1000) + jiffies; 7283 7284 do { 7285 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 7286 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); 7287 if (!db_ready) 7288 msleep(2); 7289 7290 if (time_after(jiffies, timeout)) 7291 return MBXERR_ERROR; 7292 } while (!db_ready); 7293 7294 return 0; 7295 } 7296 7297 /** 7298 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox 7299 * @phba: Pointer to HBA context object. 7300 * @mboxq: Pointer to mailbox object. 7301 * 7302 * The function posts a mailbox to the port.
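 * In outline, the handshake implemented below is: wait for the bootstrap register to report ready, write the high and then the low half of the bootstrap DMA address (waiting for ready after each write), then copy the MQE and MCQE back out of the bootstrap region and derive the completion status from the MCQE.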
The mailbox is expected 7303 * to be completely filled in and ready for the port to operate on it. 7304 * This routine executes a synchronous completion operation on the 7305 * mailbox by polling for its completion. 7306 * 7307 * The caller must not be holding any locks when calling this routine. 7308 * 7309 * Returns: 7310 * MBX_SUCCESS - mailbox posted successfully 7311 * Any of the MBX error values. 7312 **/ 7313 static int 7314 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 7315 { 7316 int rc = MBX_SUCCESS; 7317 unsigned long iflag; 7318 uint32_t mcqe_status; 7319 uint32_t mbx_cmnd; 7320 struct lpfc_sli *psli = &phba->sli; 7321 struct lpfc_mqe *mb = &mboxq->u.mqe; 7322 struct lpfc_bmbx_create *mbox_rgn; 7323 struct dma_address *dma_address; 7324 7325 /* 7326 * Only one mailbox can be active to the bootstrap mailbox region 7327 * at a time and there is no queueing provided. 7328 */ 7329 spin_lock_irqsave(&phba->hbalock, iflag); 7330 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 7331 spin_unlock_irqrestore(&phba->hbalock, iflag); 7332 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7333 "(%d):2532 Mailbox command x%x (x%x/x%x) " 7334 "cannot issue Data: x%x x%x\n", 7335 mboxq->vport ? mboxq->vport->vpi : 0, 7336 mboxq->u.mb.mbxCommand, 7337 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7338 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7339 psli->sli_flag, MBX_POLL); 7340 return MBXERR_ERROR; 7341 } 7342 /* Grab the token and own it until release */ 7343 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 7344 phba->sli.mbox_active = mboxq; 7345 spin_unlock_irqrestore(&phba->hbalock, iflag); 7346 7347 /* wait for the bootstrap mbox register to become ready */ 7348 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 7349 if (rc) 7350 goto exit; 7351 7352 /* 7353 * Initialize the bootstrap memory region to avoid stale data areas 7354 * in the mailbox post. Then copy the caller's mailbox contents to 7355 * the bmbx mailbox region. 7356 */ 7357 mbx_cmnd = bf_get(lpfc_mqe_command, mb); 7358 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create)); 7359 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt, 7360 sizeof(struct lpfc_mqe)); 7361 7362 /* Post the high mailbox dma address to the port and wait for ready. */ 7363 dma_address = &phba->sli4_hba.bmbx.dma_address; 7364 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); 7365 7366 /* wait for the bootstrap mbox register hi-address write to complete */ 7367 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 7368 if (rc) 7369 goto exit; 7370 7371 /* Post the low mailbox dma address to the port. */ 7372 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); 7373 7374 /* wait for the bootstrap mbox register low-address write to complete */ 7375 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 7376 if (rc) 7377 goto exit; 7378 7379 /* 7380 * Read the CQ to ensure the mailbox has completed. 7381 * If so, update the mailbox status so that the upper layers 7382 * can complete the request normally. 7383 */ 7384 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, 7385 sizeof(struct lpfc_mqe)); 7386 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; 7387 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, 7388 sizeof(struct lpfc_mcqe)); 7389 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); 7390 /* 7391 * When the CQE status indicates a failure and the mailbox status 7392 * indicates success then copy the CQE status into the mailbox status 7393 * (and prefix it with x4000).
7394 */ 7395 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 7396 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS) 7397 bf_set(lpfc_mqe_status, mb, 7398 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 7399 rc = MBXERR_ERROR; 7400 } else 7401 lpfc_sli4_swap_str(phba, mboxq); 7402 7403 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7404 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x " 7405 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" 7406 " x%x x%x CQ: x%x x%x x%x x%x\n", 7407 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 7408 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7409 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7410 bf_get(lpfc_mqe_status, mb), 7411 mb->un.mb_words[0], mb->un.mb_words[1], 7412 mb->un.mb_words[2], mb->un.mb_words[3], 7413 mb->un.mb_words[4], mb->un.mb_words[5], 7414 mb->un.mb_words[6], mb->un.mb_words[7], 7415 mb->un.mb_words[8], mb->un.mb_words[9], 7416 mb->un.mb_words[10], mb->un.mb_words[11], 7417 mb->un.mb_words[12], mboxq->mcqe.word0, 7418 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 7419 mboxq->mcqe.trailer); 7420 exit: 7421 /* We are holding the token, no needed for lock when release */ 7422 spin_lock_irqsave(&phba->hbalock, iflag); 7423 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7424 phba->sli.mbox_active = NULL; 7425 spin_unlock_irqrestore(&phba->hbalock, iflag); 7426 return rc; 7427 } 7428 7429 /** 7430 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware 7431 * @phba: Pointer to HBA context object. 7432 * @pmbox: Pointer to mailbox object. 7433 * @flag: Flag indicating how the mailbox need to be processed. 7434 * 7435 * This function is called by discovery code and HBA management code to submit 7436 * a mailbox command to firmware with SLI-4 interface spec. 7437 * 7438 * Return codes the caller owns the mailbox command after the return of the 7439 * function. 7440 **/ 7441 static int 7442 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 7443 uint32_t flag) 7444 { 7445 struct lpfc_sli *psli = &phba->sli; 7446 unsigned long iflags; 7447 int rc; 7448 7449 /* dump from issue mailbox command if setup */ 7450 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb); 7451 7452 rc = lpfc_mbox_dev_check(phba); 7453 if (unlikely(rc)) { 7454 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7455 "(%d):2544 Mailbox command x%x (x%x/x%x) " 7456 "cannot issue Data: x%x x%x\n", 7457 mboxq->vport ? mboxq->vport->vpi : 0, 7458 mboxq->u.mb.mbxCommand, 7459 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7460 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7461 psli->sli_flag, flag); 7462 goto out_not_finished; 7463 } 7464 7465 /* Detect polling mode and jump to a handler */ 7466 if (!phba->sli4_hba.intr_enable) { 7467 if (flag == MBX_POLL) 7468 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 7469 else 7470 rc = -EIO; 7471 if (rc != MBX_SUCCESS) 7472 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7473 "(%d):2541 Mailbox command x%x " 7474 "(x%x/x%x) failure: " 7475 "mqe_sta: x%x mcqe_sta: x%x/x%x " 7476 "Data: x%x x%x\n,", 7477 mboxq->vport ? 
mboxq->vport->vpi : 0, 7478 mboxq->u.mb.mbxCommand, 7479 lpfc_sli_config_mbox_subsys_get(phba, 7480 mboxq), 7481 lpfc_sli_config_mbox_opcode_get(phba, 7482 mboxq), 7483 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 7484 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 7485 bf_get(lpfc_mcqe_ext_status, 7486 &mboxq->mcqe), 7487 psli->sli_flag, flag); 7488 return rc; 7489 } else if (flag == MBX_POLL) { 7490 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7491 "(%d):2542 Try to issue mailbox command " 7492 "x%x (x%x/x%x) synchronously ahead of async" 7493 "mailbox command queue: x%x x%x\n", 7494 mboxq->vport ? mboxq->vport->vpi : 0, 7495 mboxq->u.mb.mbxCommand, 7496 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7497 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7498 psli->sli_flag, flag); 7499 /* Try to block the asynchronous mailbox posting */ 7500 rc = lpfc_sli4_async_mbox_block(phba); 7501 if (!rc) { 7502 /* Successfully blocked, now issue sync mbox cmd */ 7503 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 7504 if (rc != MBX_SUCCESS) 7505 lpfc_printf_log(phba, KERN_WARNING, 7506 LOG_MBOX | LOG_SLI, 7507 "(%d):2597 Sync Mailbox command " 7508 "x%x (x%x/x%x) failure: " 7509 "mqe_sta: x%x mcqe_sta: x%x/x%x " 7510 "Data: x%x x%x\n,", 7511 mboxq->vport ? mboxq->vport->vpi : 0, 7512 mboxq->u.mb.mbxCommand, 7513 lpfc_sli_config_mbox_subsys_get(phba, 7514 mboxq), 7515 lpfc_sli_config_mbox_opcode_get(phba, 7516 mboxq), 7517 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 7518 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 7519 bf_get(lpfc_mcqe_ext_status, 7520 &mboxq->mcqe), 7521 psli->sli_flag, flag); 7522 /* Unblock the async mailbox posting afterward */ 7523 lpfc_sli4_async_mbox_unblock(phba); 7524 } 7525 return rc; 7526 } 7527 7528 /* Now, interrupt mode asynchrous mailbox command */ 7529 rc = lpfc_mbox_cmd_check(phba, mboxq); 7530 if (rc) { 7531 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7532 "(%d):2543 Mailbox command x%x (x%x/x%x) " 7533 "cannot issue Data: x%x x%x\n", 7534 mboxq->vport ? mboxq->vport->vpi : 0, 7535 mboxq->u.mb.mbxCommand, 7536 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7537 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7538 psli->sli_flag, flag); 7539 goto out_not_finished; 7540 } 7541 7542 /* Put the mailbox command to the driver internal FIFO */ 7543 psli->slistat.mbox_busy++; 7544 spin_lock_irqsave(&phba->hbalock, iflags); 7545 lpfc_mbox_put(phba, mboxq); 7546 spin_unlock_irqrestore(&phba->hbalock, iflags); 7547 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7548 "(%d):0354 Mbox cmd issue - Enqueue Data: " 7549 "x%x (x%x/x%x) x%x x%x x%x\n", 7550 mboxq->vport ? mboxq->vport->vpi : 0xffffff, 7551 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 7552 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7553 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7554 phba->pport->port_state, 7555 psli->sli_flag, MBX_NOWAIT); 7556 /* Wake up worker thread to transport mailbox command from head */ 7557 lpfc_worker_wake_up(phba); 7558 7559 return MBX_BUSY; 7560 7561 out_not_finished: 7562 return MBX_NOT_FINISHED; 7563 } 7564 7565 /** 7566 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device 7567 * @phba: Pointer to HBA context object. 7568 * 7569 * This function is called by worker thread to send a mailbox command to 7570 * SLI4 HBA firmware. 
7571 * 7572 **/ 7573 int 7574 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) 7575 { 7576 struct lpfc_sli *psli = &phba->sli; 7577 LPFC_MBOXQ_t *mboxq; 7578 int rc = MBX_SUCCESS; 7579 unsigned long iflags; 7580 struct lpfc_mqe *mqe; 7581 uint32_t mbx_cmnd; 7582 7583 /* Check interrupt mode before post async mailbox command */ 7584 if (unlikely(!phba->sli4_hba.intr_enable)) 7585 return MBX_NOT_FINISHED; 7586 7587 /* Check for mailbox command service token */ 7588 spin_lock_irqsave(&phba->hbalock, iflags); 7589 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 7590 spin_unlock_irqrestore(&phba->hbalock, iflags); 7591 return MBX_NOT_FINISHED; 7592 } 7593 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 7594 spin_unlock_irqrestore(&phba->hbalock, iflags); 7595 return MBX_NOT_FINISHED; 7596 } 7597 if (unlikely(phba->sli.mbox_active)) { 7598 spin_unlock_irqrestore(&phba->hbalock, iflags); 7599 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7600 "0384 There is pending active mailbox cmd\n"); 7601 return MBX_NOT_FINISHED; 7602 } 7603 /* Take the mailbox command service token */ 7604 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 7605 7606 /* Get the next mailbox command from head of queue */ 7607 mboxq = lpfc_mbox_get(phba); 7608 7609 /* If no more mailbox command waiting for post, we're done */ 7610 if (!mboxq) { 7611 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7612 spin_unlock_irqrestore(&phba->hbalock, iflags); 7613 return MBX_SUCCESS; 7614 } 7615 phba->sli.mbox_active = mboxq; 7616 spin_unlock_irqrestore(&phba->hbalock, iflags); 7617 7618 /* Check device readiness for posting mailbox command */ 7619 rc = lpfc_mbox_dev_check(phba); 7620 if (unlikely(rc)) 7621 /* Driver clean routine will clean up pending mailbox */ 7622 goto out_not_finished; 7623 7624 /* Prepare the mbox command to be posted */ 7625 mqe = &mboxq->u.mqe; 7626 mbx_cmnd = bf_get(lpfc_mqe_command, mqe); 7627 7628 /* Start timer for the mbox_tmo and log some mailbox post messages */ 7629 mod_timer(&psli->mbox_tmo, (jiffies + 7630 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq)))); 7631 7632 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7633 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " 7634 "x%x x%x\n", 7635 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 7636 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7637 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7638 phba->pport->port_state, psli->sli_flag); 7639 7640 if (mbx_cmnd != MBX_HEARTBEAT) { 7641 if (mboxq->vport) { 7642 lpfc_debugfs_disc_trc(mboxq->vport, 7643 LPFC_DISC_TRC_MBOX_VPORT, 7644 "MBOX Send vport: cmd:x%x mb:x%x x%x", 7645 mbx_cmnd, mqe->un.mb_words[0], 7646 mqe->un.mb_words[1]); 7647 } else { 7648 lpfc_debugfs_disc_trc(phba->pport, 7649 LPFC_DISC_TRC_MBOX, 7650 "MBOX Send: cmd:x%x mb:x%x x%x", 7651 mbx_cmnd, mqe->un.mb_words[0], 7652 mqe->un.mb_words[1]); 7653 } 7654 } 7655 psli->slistat.mbox_cmd++; 7656 7657 /* Post the mailbox command to the port */ 7658 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); 7659 if (rc != MBX_SUCCESS) { 7660 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7661 "(%d):2533 Mailbox command x%x (x%x/x%x) " 7662 "cannot issue Data: x%x x%x\n", 7663 mboxq->vport ? 
mboxq->vport->vpi : 0, 7664 mboxq->u.mb.mbxCommand, 7665 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7666 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7667 psli->sli_flag, MBX_NOWAIT); 7668 goto out_not_finished; 7669 } 7670 7671 return rc; 7672 7673 out_not_finished: 7674 spin_lock_irqsave(&phba->hbalock, iflags); 7675 if (phba->sli.mbox_active) { 7676 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 7677 __lpfc_mbox_cmpl_put(phba, mboxq); 7678 /* Release the token */ 7679 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7680 phba->sli.mbox_active = NULL; 7681 } 7682 spin_unlock_irqrestore(&phba->hbalock, iflags); 7683 7684 return MBX_NOT_FINISHED; 7685 } 7686 7687 /** 7688 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command 7689 * @phba: Pointer to HBA context object. 7690 * @pmbox: Pointer to mailbox object. 7691 * @flag: Flag indicating how the mailbox need to be processed. 7692 * 7693 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from 7694 * the API jump table function pointer from the lpfc_hba struct. 7695 * 7696 * Return codes the caller owns the mailbox command after the return of the 7697 * function. 7698 **/ 7699 int 7700 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 7701 { 7702 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); 7703 } 7704 7705 /** 7706 * lpfc_mbox_api_table_setup - Set up mbox api function jump table 7707 * @phba: The hba struct for which this call is being executed. 7708 * @dev_grp: The HBA PCI-Device group number. 7709 * 7710 * This routine sets up the mbox interface API function jump table in @phba 7711 * struct. 7712 * Returns: 0 - success, -ENODEV - failure. 7713 **/ 7714 int 7715 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 7716 { 7717 7718 switch (dev_grp) { 7719 case LPFC_PCI_DEV_LP: 7720 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; 7721 phba->lpfc_sli_handle_slow_ring_event = 7722 lpfc_sli_handle_slow_ring_event_s3; 7723 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; 7724 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; 7725 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; 7726 break; 7727 case LPFC_PCI_DEV_OC: 7728 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; 7729 phba->lpfc_sli_handle_slow_ring_event = 7730 lpfc_sli_handle_slow_ring_event_s4; 7731 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; 7732 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; 7733 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; 7734 break; 7735 default: 7736 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7737 "1420 Invalid HBA PCI-device group: 0x%x\n", 7738 dev_grp); 7739 return -ENODEV; 7740 break; 7741 } 7742 return 0; 7743 } 7744 7745 /** 7746 * __lpfc_sli_ringtx_put - Add an iocb to the txq 7747 * @phba: Pointer to HBA context object. 7748 * @pring: Pointer to driver SLI ring object. 7749 * @piocb: Pointer to address of newly added command iocb. 7750 * 7751 * This function is called with hbalock held to add a command 7752 * iocb to the txq when SLI layer cannot submit the command iocb 7753 * to the ring. 7754 **/ 7755 void 7756 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7757 struct lpfc_iocbq *piocb) 7758 { 7759 /* Insert the caller's iocb in the txq tail for later processing. */ 7760 list_add_tail(&piocb->list, &pring->txq); 7761 } 7762 7763 /** 7764 * lpfc_sli_next_iocb - Get the next iocb in the txq 7765 * @phba: Pointer to HBA context object. 7766 * @pring: Pointer to driver SLI ring object. 
7767 * @piocb: Pointer to address of newly added command iocb. 7768 * 7769 * This function is called with hbalock held before a new 7770 * iocb is submitted to the firmware. This function checks 7771 * txq to flush the iocbs in txq to Firmware before 7772 * submitting new iocbs to the Firmware. 7773 * If there are iocbs in the txq which need to be submitted 7774 * to firmware, lpfc_sli_next_iocb returns the first element 7775 * of the txq after dequeuing it from txq. 7776 * If there is no iocb in the txq then the function will return 7777 * *piocb and *piocb is set to NULL. Caller needs to check 7778 * *piocb to find if there are more commands in the txq. 7779 **/ 7780 static struct lpfc_iocbq * 7781 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7782 struct lpfc_iocbq **piocb) 7783 { 7784 struct lpfc_iocbq * nextiocb; 7785 7786 nextiocb = lpfc_sli_ringtx_get(phba, pring); 7787 if (!nextiocb) { 7788 nextiocb = *piocb; 7789 *piocb = NULL; 7790 } 7791 7792 return nextiocb; 7793 } 7794 7795 /** 7796 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb 7797 * @phba: Pointer to HBA context object. 7798 * @ring_number: SLI ring number to issue iocb on. 7799 * @piocb: Pointer to command iocb. 7800 * @flag: Flag indicating if this command can be put into txq. 7801 * 7802 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue 7803 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is 7804 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT 7805 * flag is turned on, the function returns IOCB_ERROR. When the link is down, 7806 * this function allows only iocbs for posting buffers. This function finds 7807 * next available slot in the command ring and posts the command to the 7808 * available slot and writes the port attention register to request HBA start 7809 * processing new iocb. If there is no slot available in the ring and 7810 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise 7811 * the function returns IOCB_BUSY. 7812 * 7813 * This function is called with hbalock held. The function will return success 7814 * after it successfully submit the iocb to firmware or after adding to the 7815 * txq. 7816 **/ 7817 static int 7818 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, 7819 struct lpfc_iocbq *piocb, uint32_t flag) 7820 { 7821 struct lpfc_iocbq *nextiocb; 7822 IOCB_t *iocb; 7823 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; 7824 7825 if (piocb->iocb_cmpl && (!piocb->vport) && 7826 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 7827 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 7828 lpfc_printf_log(phba, KERN_ERR, 7829 LOG_SLI | LOG_VPORT, 7830 "1807 IOCB x%x failed. No vport\n", 7831 piocb->iocb.ulpCommand); 7832 dump_stack(); 7833 return IOCB_ERROR; 7834 } 7835 7836 7837 /* If the PCI channel is in offline state, do not post iocbs. */ 7838 if (unlikely(pci_channel_offline(phba->pcidev))) 7839 return IOCB_ERROR; 7840 7841 /* If HBA has a deferred error attention, fail the iocb. */ 7842 if (unlikely(phba->hba_flag & DEFER_ERATT)) 7843 return IOCB_ERROR; 7844 7845 /* 7846 * We should never get an IOCB if we are in a < LINK_DOWN state 7847 */ 7848 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 7849 return IOCB_ERROR; 7850 7851 /* 7852 * Check to see if we are blocking IOCB processing because of a 7853 * outstanding event. 
7854 */ 7855 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) 7856 goto iocb_busy; 7857 7858 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { 7859 /* 7860 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF 7861 * can be issued if the link is not up. 7862 */ 7863 switch (piocb->iocb.ulpCommand) { 7864 case CMD_GEN_REQUEST64_CR: 7865 case CMD_GEN_REQUEST64_CX: 7866 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || 7867 (piocb->iocb.un.genreq64.w5.hcsw.Rctl != 7868 FC_RCTL_DD_UNSOL_CMD) || 7869 (piocb->iocb.un.genreq64.w5.hcsw.Type != 7870 MENLO_TRANSPORT_TYPE)) 7871 7872 goto iocb_busy; 7873 break; 7874 case CMD_QUE_RING_BUF_CN: 7875 case CMD_QUE_RING_BUF64_CN: 7876 /* 7877 * For IOCBs, like QUE_RING_BUF, that have no rsp ring 7878 * completion, iocb_cmpl MUST be 0. 7879 */ 7880 if (piocb->iocb_cmpl) 7881 piocb->iocb_cmpl = NULL; 7882 /*FALLTHROUGH*/ 7883 case CMD_CREATE_XRI_CR: 7884 case CMD_CLOSE_XRI_CN: 7885 case CMD_CLOSE_XRI_CX: 7886 break; 7887 default: 7888 goto iocb_busy; 7889 } 7890 7891 /* 7892 * For FCP commands, we must be in a state where we can process link 7893 * attention events. 7894 */ 7895 } else if (unlikely(pring->ringno == phba->sli.fcp_ring && 7896 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { 7897 goto iocb_busy; 7898 } 7899 7900 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 7901 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 7902 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 7903 7904 if (iocb) 7905 lpfc_sli_update_ring(phba, pring); 7906 else 7907 lpfc_sli_update_full_ring(phba, pring); 7908 7909 if (!piocb) 7910 return IOCB_SUCCESS; 7911 7912 goto out_busy; 7913 7914 iocb_busy: 7915 pring->stats.iocb_cmd_delay++; 7916 7917 out_busy: 7918 7919 if (!(flag & SLI_IOCB_RET_IOCB)) { 7920 __lpfc_sli_ringtx_put(phba, pring, piocb); 7921 return IOCB_SUCCESS; 7922 } 7923 7924 return IOCB_BUSY; 7925 } 7926 7927 /** 7928 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl. 7929 * @phba: Pointer to HBA context object. 7930 * @piocb: Pointer to command iocb. 7931 * @sglq: Pointer to the scatter gather queue object. 7932 * 7933 * This routine converts the bpl or bde that is in the IOCB 7934 * to a sgl list for the sli4 hardware. The physical address 7935 * of the bpl/bde is converted back to a virtual address. 7936 * If the IOCB contains a BPL then the list of BDE's is 7937 * converted to sli4_sge's. If the IOCB contains a single 7938 * BDE then it is converted to a single sli_sge. 7939 * The IOCB is still in cpu endianess so the contents of 7940 * the bpl can be used without byte swapping. 7941 * 7942 * Returns valid XRI = Success, NO_XRI = Failure. 
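 * Roughly, each BDE in the BPL maps to one SGE: addr_hi/addr_lo are copied as-is (already little-endian), the BDE size becomes sge_len, the final entry gets the LAST bit set, and for GEN_REQUEST64_CR the sge offset is accumulated separately across the request and reply portions of the list.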
7943 **/ 7944 static uint16_t 7945 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, 7946 struct lpfc_sglq *sglq) 7947 { 7948 uint16_t xritag = NO_XRI; 7949 struct ulp_bde64 *bpl = NULL; 7950 struct ulp_bde64 bde; 7951 struct sli4_sge *sgl = NULL; 7952 struct lpfc_dmabuf *dmabuf; 7953 IOCB_t *icmd; 7954 int numBdes = 0; 7955 int i = 0; 7956 uint32_t offset = 0; /* accumulated offset in the sg request list */ 7957 int inbound = 0; /* number of sg reply entries inbound from firmware */ 7958 7959 if (!piocbq || !sglq) 7960 return xritag; 7961 7962 sgl = (struct sli4_sge *)sglq->sgl; 7963 icmd = &piocbq->iocb; 7964 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX) 7965 return sglq->sli4_xritag; 7966 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 7967 numBdes = icmd->un.genreq64.bdl.bdeSize / 7968 sizeof(struct ulp_bde64); 7969 /* The addrHigh and addrLow fields within the IOCB 7970 * have not been byteswapped yet so there is no 7971 * need to swap them back. 7972 */ 7973 if (piocbq->context3) 7974 dmabuf = (struct lpfc_dmabuf *)piocbq->context3; 7975 else 7976 return xritag; 7977 7978 bpl = (struct ulp_bde64 *)dmabuf->virt; 7979 if (!bpl) 7980 return xritag; 7981 7982 for (i = 0; i < numBdes; i++) { 7983 /* Should already be byte swapped. */ 7984 sgl->addr_hi = bpl->addrHigh; 7985 sgl->addr_lo = bpl->addrLow; 7986 7987 sgl->word2 = le32_to_cpu(sgl->word2); 7988 if ((i+1) == numBdes) 7989 bf_set(lpfc_sli4_sge_last, sgl, 1); 7990 else 7991 bf_set(lpfc_sli4_sge_last, sgl, 0); 7992 /* swap the size field back to the cpu so we 7993 * can assign it to the sgl. 7994 */ 7995 bde.tus.w = le32_to_cpu(bpl->tus.w); 7996 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 7997 /* The offsets in the sgl need to be accumulated 7998 * separately for the request and reply lists. 7999 * The request is always first, the reply follows. 8000 */ 8001 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) { 8002 /* add up the reply sg entries */ 8003 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 8004 inbound++; 8005 /* first inbound? reset the offset */ 8006 if (inbound == 1) 8007 offset = 0; 8008 bf_set(lpfc_sli4_sge_offset, sgl, offset); 8009 bf_set(lpfc_sli4_sge_type, sgl, 8010 LPFC_SGE_TYPE_DATA); 8011 offset += bde.tus.f.bdeSize; 8012 } 8013 sgl->word2 = cpu_to_le32(sgl->word2); 8014 bpl++; 8015 sgl++; 8016 } 8017 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) { 8018 /* The addrHigh and addrLow fields of the BDE have not 8019 * been byteswapped yet so they need to be swapped 8020 * before putting them in the sgl. 8021 */ 8022 sgl->addr_hi = 8023 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); 8024 sgl->addr_lo = 8025 cpu_to_le32(icmd->un.genreq64.bdl.addrLow); 8026 sgl->word2 = le32_to_cpu(sgl->word2); 8027 bf_set(lpfc_sli4_sge_last, sgl, 1); 8028 sgl->word2 = cpu_to_le32(sgl->word2); 8029 sgl->sge_len = 8030 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize); 8031 } 8032 return sglq->sli4_xritag; 8033 } 8034 8035 /** 8036 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution 8037 * @phba: Pointer to HBA context object. 8038 * 8039 * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index 8040 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock 8041 * held. 8042 * 8043 * Return: index into SLI4 fast-path FCP queue index. 
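 * For example, with cfg_fcp_io_channel == 4 and the round-robin path taken, successive calls return 1, 2, 3, 0, 1, ...; when LPFC_FCP_SCHED_BY_CPU is configured and the issuing CPU is present in the cpu_map, that CPU's channel_id is returned instead.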
8044 **/ 8045 static inline uint32_t 8046 lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba) 8047 { 8048 struct lpfc_vector_map_info *cpup; 8049 int chann, cpu; 8050 8051 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU 8052 && phba->cfg_fcp_io_channel > 1) { 8053 cpu = smp_processor_id(); 8054 if (cpu < phba->sli4_hba.num_present_cpu) { 8055 cpup = phba->sli4_hba.cpu_map; 8056 cpup += cpu; 8057 return cpup->channel_id; 8058 } 8059 chann = cpu; 8060 } 8061 chann = atomic_add_return(1, &phba->fcp_qidx); 8062 chann = (chann % phba->cfg_fcp_io_channel); 8063 return chann; 8064 } 8065 8066 /** 8067 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry. 8068 * @phba: Pointer to HBA context object. 8069 * @piocb: Pointer to command iocb. 8070 * @wqe: Pointer to the work queue entry. 8071 * 8072 * This routine converts the iocb command to its Work Queue Entry 8073 * equivalent. The wqe pointer should not have any fields set when 8074 * this routine is called because it will memcpy over them. 8075 * This routine does not set the CQ_ID or the WQEC bits in the 8076 * wqe. 8077 * 8078 * Returns: 0 = Success, IOCB_ERROR = Failure. 8079 **/ 8080 static int 8081 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, 8082 union lpfc_wqe *wqe) 8083 { 8084 uint32_t xmit_len = 0, total_len = 0; 8085 uint8_t ct = 0; 8086 uint32_t fip; 8087 uint32_t abort_tag; 8088 uint8_t command_type = ELS_COMMAND_NON_FIP; 8089 uint8_t cmnd; 8090 uint16_t xritag; 8091 uint16_t abrt_iotag; 8092 struct lpfc_iocbq *abrtiocbq; 8093 struct ulp_bde64 *bpl = NULL; 8094 uint32_t els_id = LPFC_ELS_ID_DEFAULT; 8095 int numBdes, i; 8096 struct ulp_bde64 bde; 8097 struct lpfc_nodelist *ndlp; 8098 uint32_t *pcmd; 8099 uint32_t if_type; 8100 8101 fip = phba->hba_flag & HBA_FIP_SUPPORT; 8102 /* The fcp commands will set command type */ 8103 if (iocbq->iocb_flag & LPFC_IO_FCP) 8104 command_type = FCP_COMMAND; 8105 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)) 8106 command_type = ELS_COMMAND_FIP; 8107 else 8108 command_type = ELS_COMMAND_NON_FIP; 8109 8110 /* Some of the fields are in the right position already */ 8111 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 8112 abort_tag = (uint32_t) iocbq->iotag; 8113 xritag = iocbq->sli4_xritag; 8114 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ 8115 /* words0-2 bpl convert bde */ 8116 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 8117 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 8118 sizeof(struct ulp_bde64); 8119 bpl = (struct ulp_bde64 *) 8120 ((struct lpfc_dmabuf *)iocbq->context3)->virt; 8121 if (!bpl) 8122 return IOCB_ERROR; 8123 8124 /* Should already be byte swapped. */ 8125 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh); 8126 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow); 8127 /* swap the size field back to the cpu so we 8128 * can assign it to the sgl. 
8129 */ 8130 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); 8131 xmit_len = wqe->generic.bde.tus.f.bdeSize; 8132 total_len = 0; 8133 for (i = 0; i < numBdes; i++) { 8134 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 8135 total_len += bde.tus.f.bdeSize; 8136 } 8137 } else 8138 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; 8139 8140 iocbq->iocb.ulpIoTag = iocbq->iotag; 8141 cmnd = iocbq->iocb.ulpCommand; 8142 8143 switch (iocbq->iocb.ulpCommand) { 8144 case CMD_ELS_REQUEST64_CR: 8145 if (iocbq->iocb_flag & LPFC_IO_LIBDFC) 8146 ndlp = iocbq->context_un.ndlp; 8147 else 8148 ndlp = (struct lpfc_nodelist *)iocbq->context1; 8149 if (!iocbq->iocb.ulpLe) { 8150 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8151 "2007 Only Limited Edition cmd Format" 8152 " supported 0x%x\n", 8153 iocbq->iocb.ulpCommand); 8154 return IOCB_ERROR; 8155 } 8156 8157 wqe->els_req.payload_len = xmit_len; 8158 /* Els_reguest64 has a TMO */ 8159 bf_set(wqe_tmo, &wqe->els_req.wqe_com, 8160 iocbq->iocb.ulpTimeout); 8161 /* Need a VF for word 4 set the vf bit*/ 8162 bf_set(els_req64_vf, &wqe->els_req, 0); 8163 /* And a VFID for word 12 */ 8164 bf_set(els_req64_vfid, &wqe->els_req, 0); 8165 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 8166 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 8167 iocbq->iocb.ulpContext); 8168 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 8169 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0); 8170 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 8171 if (command_type == ELS_COMMAND_FIP) 8172 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 8173 >> LPFC_FIP_ELS_ID_SHIFT); 8174 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 8175 iocbq->context2)->virt); 8176 if_type = bf_get(lpfc_sli_intf_if_type, 8177 &phba->sli4_hba.sli_intf); 8178 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 8179 if (pcmd && (*pcmd == ELS_CMD_FLOGI || 8180 *pcmd == ELS_CMD_SCR || 8181 *pcmd == ELS_CMD_FDISC || 8182 *pcmd == ELS_CMD_LOGO || 8183 *pcmd == ELS_CMD_PLOGI)) { 8184 bf_set(els_req64_sp, &wqe->els_req, 1); 8185 bf_set(els_req64_sid, &wqe->els_req, 8186 iocbq->vport->fc_myDID); 8187 if ((*pcmd == ELS_CMD_FLOGI) && 8188 !(phba->fc_topology == 8189 LPFC_TOPOLOGY_LOOP)) 8190 bf_set(els_req64_sid, &wqe->els_req, 0); 8191 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); 8192 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 8193 phba->vpi_ids[iocbq->vport->vpi]); 8194 } else if (pcmd && iocbq->context1) { 8195 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0); 8196 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 8197 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 8198 } 8199 } 8200 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, 8201 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 8202 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 8203 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 8204 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 8205 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); 8206 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); 8207 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); 8208 wqe->els_req.max_response_payload_len = total_len - xmit_len; 8209 break; 8210 case CMD_XMIT_SEQUENCE64_CX: 8211 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, 8212 iocbq->iocb.un.ulpWord[3]); 8213 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, 8214 iocbq->iocb.unsli3.rcvsli3.ox_id); 8215 /* The entire sequence is transmitted for this IOCB */ 8216 xmit_len = total_len; 8217 cmnd = CMD_XMIT_SEQUENCE64_CR; 8218 if (phba->link_flag & LS_LOOPBACK_MODE) 8219 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); 8220 case CMD_XMIT_SEQUENCE64_CR: 8221 /* word3 iocb=io_tag32 
wqe=reserved */ 8222 wqe->xmit_sequence.rsvd3 = 0; 8223 /* word4 relative_offset memcpy */ 8224 /* word5 r_ctl/df_ctl memcpy */ 8225 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); 8226 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); 8227 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, 8228 LPFC_WQE_IOD_WRITE); 8229 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, 8230 LPFC_WQE_LENLOC_WORD12); 8231 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 8232 wqe->xmit_sequence.xmit_len = xmit_len; 8233 command_type = OTHER_COMMAND; 8234 break; 8235 case CMD_XMIT_BCAST64_CN: 8236 /* word3 iocb=iotag32 wqe=seq_payload_len */ 8237 wqe->xmit_bcast64.seq_payload_len = xmit_len; 8238 /* word4 iocb=rsvd wqe=rsvd */ 8239 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ 8240 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ 8241 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com, 8242 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 8243 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1); 8244 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE); 8245 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, 8246 LPFC_WQE_LENLOC_WORD3); 8247 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); 8248 break; 8249 case CMD_FCP_IWRITE64_CR: 8250 command_type = FCP_COMMAND_DATA_OUT; 8251 /* word3 iocb=iotag wqe=payload_offset_len */ 8252 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 8253 bf_set(payload_offset_len, &wqe->fcp_iwrite, 8254 xmit_len + sizeof(struct fcp_rsp)); 8255 bf_set(cmd_buff_len, &wqe->fcp_iwrite, 8256 0); 8257 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 8258 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 8259 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, 8260 iocbq->iocb.ulpFCP2Rcvy); 8261 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); 8262 /* Always open the exchange */ 8263 bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0); 8264 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); 8265 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, 8266 LPFC_WQE_LENLOC_WORD4); 8267 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0); 8268 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); 8269 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); 8270 if (iocbq->iocb_flag & LPFC_IO_OAS) { 8271 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1); 8272 if (phba->cfg_XLanePriority) { 8273 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); 8274 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, 8275 (phba->cfg_XLanePriority << 1)); 8276 } 8277 } 8278 break; 8279 case CMD_FCP_IREAD64_CR: 8280 /* word3 iocb=iotag wqe=payload_offset_len */ 8281 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 8282 bf_set(payload_offset_len, &wqe->fcp_iread, 8283 xmit_len + sizeof(struct fcp_rsp)); 8284 bf_set(cmd_buff_len, &wqe->fcp_iread, 8285 0); 8286 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 8287 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 8288 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, 8289 iocbq->iocb.ulpFCP2Rcvy); 8290 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); 8291 /* Always open the exchange */ 8292 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); 8293 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); 8294 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, 8295 LPFC_WQE_LENLOC_WORD4); 8296 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0); 8297 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 8298 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); 8299 if (iocbq->iocb_flag & LPFC_IO_OAS) { 8300 bf_set(wqe_oas, 
&wqe->fcp_iread.wqe_com, 1); 8301 if (phba->cfg_XLanePriority) { 8302 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1); 8303 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, 8304 (phba->cfg_XLanePriority << 1)); 8305 } 8306 } 8307 break; 8308 case CMD_FCP_ICMND64_CR: 8309 /* word3 iocb=iotag wqe=payload_offset_len */ 8310 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 8311 bf_set(payload_offset_len, &wqe->fcp_icmd, 8312 xmit_len + sizeof(struct fcp_rsp)); 8313 bf_set(cmd_buff_len, &wqe->fcp_icmd, 8314 0); 8315 /* word3 iocb=IO_TAG wqe=reserved */ 8316 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); 8317 /* Always open the exchange */ 8318 bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0); 8319 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); 8320 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); 8321 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); 8322 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 8323 LPFC_WQE_LENLOC_NONE); 8324 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); 8325 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, 8326 iocbq->iocb.ulpFCP2Rcvy); 8327 if (iocbq->iocb_flag & LPFC_IO_OAS) { 8328 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1); 8329 if (phba->cfg_XLanePriority) { 8330 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1); 8331 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, 8332 (phba->cfg_XLanePriority << 1)); 8333 } 8334 } 8335 break; 8336 case CMD_GEN_REQUEST64_CR: 8337 /* For this command calculate the xmit length of the 8338 * request bde. 8339 */ 8340 xmit_len = 0; 8341 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 8342 sizeof(struct ulp_bde64); 8343 for (i = 0; i < numBdes; i++) { 8344 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 8345 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 8346 break; 8347 xmit_len += bde.tus.f.bdeSize; 8348 } 8349 /* word3 iocb=IO_TAG wqe=request_payload_len */ 8350 wqe->gen_req.request_payload_len = xmit_len; 8351 /* word4 iocb=parameter wqe=relative_offset memcpy */ 8352 /* word5 [rctl, type, df_ctl, la] copied in memcpy */ 8353 /* word6 context tag copied in memcpy */ 8354 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { 8355 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 8356 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8357 "2015 Invalid CT %x command 0x%x\n", 8358 ct, iocbq->iocb.ulpCommand); 8359 return IOCB_ERROR; 8360 } 8361 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0); 8362 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout); 8363 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU); 8364 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); 8365 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); 8366 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); 8367 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 8368 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 8369 wqe->gen_req.max_response_payload_len = total_len - xmit_len; 8370 command_type = OTHER_COMMAND; 8371 break; 8372 case CMD_XMIT_ELS_RSP64_CX: 8373 ndlp = (struct lpfc_nodelist *)iocbq->context1; 8374 /* words0-2 BDE memcpy */ 8375 /* word3 iocb=iotag32 wqe=response_payload_len */ 8376 wqe->xmit_els_rsp.response_payload_len = xmit_len; 8377 /* word4 */ 8378 wqe->xmit_els_rsp.word4 = 0; 8379 /* word5 iocb=rsvd wge=did */ 8380 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, 8381 iocbq->iocb.un.xseq64.xmit_els_remoteID); 8382 8383 if_type = bf_get(lpfc_sli_intf_if_type, 8384 &phba->sli4_hba.sli_intf); 8385 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 8386 if (iocbq->vport->fc_flag & FC_PT2PT) { 8387 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 8388 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 8389 
iocbq->vport->fc_myDID); 8390 if (iocbq->vport->fc_myDID == Fabric_DID) { 8391 bf_set(wqe_els_did, 8392 &wqe->xmit_els_rsp.wqe_dest, 0); 8393 } 8394 } 8395 } 8396 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 8397 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 8398 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); 8399 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 8400 iocbq->iocb.unsli3.rcvsli3.ox_id); 8401 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 8402 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 8403 phba->vpi_ids[iocbq->vport->vpi]); 8404 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1); 8405 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); 8406 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); 8407 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, 8408 LPFC_WQE_LENLOC_WORD3); 8409 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 8410 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, 8411 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 8412 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 8413 iocbq->context2)->virt); 8414 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 8415 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 8416 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 8417 iocbq->vport->fc_myDID); 8418 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1); 8419 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 8420 phba->vpi_ids[phba->pport->vpi]); 8421 } 8422 command_type = OTHER_COMMAND; 8423 break; 8424 case CMD_CLOSE_XRI_CN: 8425 case CMD_ABORT_XRI_CN: 8426 case CMD_ABORT_XRI_CX: 8427 /* words 0-2 memcpy should be 0 rserved */ 8428 /* port will send abts */ 8429 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag; 8430 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) { 8431 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag]; 8432 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK; 8433 } else 8434 fip = 0; 8435 8436 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip) 8437 /* 8438 * The link is down, or the command was ELS_FIP 8439 * so the fw does not need to send abts 8440 * on the wire. 8441 */ 8442 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); 8443 else 8444 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 8445 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 8446 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */ 8447 wqe->abort_cmd.rsrvd5 = 0; 8448 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com, 8449 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 8450 abort_tag = iocbq->iocb.un.acxri.abortIoTag; 8451 /* 8452 * The abort handler will send us CMD_ABORT_XRI_CN or 8453 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX 8454 */ 8455 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 8456 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); 8457 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, 8458 LPFC_WQE_LENLOC_NONE); 8459 cmnd = CMD_ABORT_XRI_CX; 8460 command_type = OTHER_COMMAND; 8461 xritag = 0; 8462 break; 8463 case CMD_XMIT_BLS_RSP64_CX: 8464 ndlp = (struct lpfc_nodelist *)iocbq->context1; 8465 /* As BLS ABTS RSP WQE is very different from other WQEs, 8466 * we re-construct this WQE here based on information in 8467 * iocbq from scratch. 
8468 */ 8469 memset(wqe, 0, sizeof(union lpfc_wqe)); 8470 /* OX_ID is invariable to who sent ABTS to CT exchange */ 8471 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, 8472 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); 8473 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) == 8474 LPFC_ABTS_UNSOL_INT) { 8475 /* ABTS sent by initiator to CT exchange, the 8476 * RX_ID field will be filled with the newly 8477 * allocated responder XRI. 8478 */ 8479 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 8480 iocbq->sli4_xritag); 8481 } else { 8482 /* ABTS sent by responder to CT exchange, the 8483 * RX_ID field will be filled with the responder 8484 * RX_ID from ABTS. 8485 */ 8486 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 8487 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp)); 8488 } 8489 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); 8490 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); 8491 8492 /* Use CT=VPI */ 8493 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest, 8494 ndlp->nlp_DID); 8495 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp, 8496 iocbq->iocb.ulpContext); 8497 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1); 8498 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, 8499 phba->vpi_ids[phba->pport->vpi]); 8500 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1); 8501 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com, 8502 LPFC_WQE_LENLOC_NONE); 8503 /* Overwrite the pre-set comnd type with OTHER_COMMAND */ 8504 command_type = OTHER_COMMAND; 8505 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) { 8506 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp, 8507 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp)); 8508 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp, 8509 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp)); 8510 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp, 8511 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); 8512 } 8513 8514 break; 8515 case CMD_XRI_ABORTED_CX: 8516 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 8517 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ 8518 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */ 8519 case CMD_FCP_TRSP64_CX: /* Target mode rcv */ 8520 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */ 8521 default: 8522 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8523 "2014 Invalid command 0x%x\n", 8524 iocbq->iocb.ulpCommand); 8525 return IOCB_ERROR; 8526 break; 8527 } 8528 8529 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS) 8530 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU); 8531 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP) 8532 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP); 8533 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT) 8534 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT); 8535 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP | 8536 LPFC_IO_DIF_INSERT); 8537 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 8538 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 8539 wqe->generic.wqe_com.abort_tag = abort_tag; 8540 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type); 8541 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd); 8542 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass); 8543 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 8544 return 0; 8545 } 8546 8547 /** 8548 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb 8549 * @phba: Pointer to HBA context object. 8550 * @ring_number: SLI ring number to issue iocb on. 8551 * @piocb: Pointer to command iocb. 
8552 * @flag: Flag indicating if this command can be put into txq. 8553 * 8554 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue 8555 * an iocb command to an HBA with SLI-4 interface spec. 8556 * 8557 * This function is called with hbalock held. The function will return success 8558 * after it successfully submit the iocb to firmware or after adding to the 8559 * txq. 8560 **/ 8561 static int 8562 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, 8563 struct lpfc_iocbq *piocb, uint32_t flag) 8564 { 8565 struct lpfc_sglq *sglq; 8566 union lpfc_wqe wqe; 8567 struct lpfc_queue *wq; 8568 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; 8569 8570 if (piocb->sli4_xritag == NO_XRI) { 8571 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 8572 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 8573 sglq = NULL; 8574 else { 8575 if (!list_empty(&pring->txq)) { 8576 if (!(flag & SLI_IOCB_RET_IOCB)) { 8577 __lpfc_sli_ringtx_put(phba, 8578 pring, piocb); 8579 return IOCB_SUCCESS; 8580 } else { 8581 return IOCB_BUSY; 8582 } 8583 } else { 8584 sglq = __lpfc_sli_get_sglq(phba, piocb); 8585 if (!sglq) { 8586 if (!(flag & SLI_IOCB_RET_IOCB)) { 8587 __lpfc_sli_ringtx_put(phba, 8588 pring, 8589 piocb); 8590 return IOCB_SUCCESS; 8591 } else 8592 return IOCB_BUSY; 8593 } 8594 } 8595 } 8596 } else if (piocb->iocb_flag & LPFC_IO_FCP) { 8597 /* These IO's already have an XRI and a mapped sgl. */ 8598 sglq = NULL; 8599 } else { 8600 /* 8601 * This is a continuation of a commandi,(CX) so this 8602 * sglq is on the active list 8603 */ 8604 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag); 8605 if (!sglq) 8606 return IOCB_ERROR; 8607 } 8608 8609 if (sglq) { 8610 piocb->sli4_lxritag = sglq->sli4_lxritag; 8611 piocb->sli4_xritag = sglq->sli4_xritag; 8612 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq)) 8613 return IOCB_ERROR; 8614 } 8615 8616 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe)) 8617 return IOCB_ERROR; 8618 8619 if ((piocb->iocb_flag & LPFC_IO_FCP) || 8620 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 8621 if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag & 8622 LPFC_IO_OAS))) { 8623 wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx]; 8624 } else { 8625 wq = phba->sli4_hba.oas_wq; 8626 } 8627 if (lpfc_sli4_wq_put(wq, &wqe)) 8628 return IOCB_ERROR; 8629 } else { 8630 if (unlikely(!phba->sli4_hba.els_wq)) 8631 return IOCB_ERROR; 8632 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) 8633 return IOCB_ERROR; 8634 } 8635 lpfc_sli_ringtxcmpl_put(phba, pring, piocb); 8636 8637 return 0; 8638 } 8639 8640 /** 8641 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb 8642 * 8643 * This routine wraps the actual lockless version for issusing IOCB function 8644 * pointer from the lpfc_hba struct. 8645 * 8646 * Return codes: 8647 * IOCB_ERROR - Error 8648 * IOCB_SUCCESS - Success 8649 * IOCB_BUSY - Busy 8650 **/ 8651 int 8652 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 8653 struct lpfc_iocbq *piocb, uint32_t flag) 8654 { 8655 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 8656 } 8657 8658 /** 8659 * lpfc_sli_api_table_setup - Set up sli api function jump table 8660 * @phba: The hba struct for which this call is being executed. 8661 * @dev_grp: The HBA PCI-Device group number. 8662 * 8663 * This routine sets up the SLI interface API function jump table in @phba 8664 * struct. 8665 * Returns: 0 - success, -ENODEV - failure. 
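 * As sketched below, LPFC_PCI_DEV_LP installs the SLI-3 (_s3) iocb issue/release routines and LPFC_PCI_DEV_OC installs the SLI-4 (_s4) ones; any other device group is rejected with -ENODEV.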
8666 **/ 8667 int 8668 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 8669 { 8670 8671 switch (dev_grp) { 8672 case LPFC_PCI_DEV_LP: 8673 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3; 8674 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3; 8675 break; 8676 case LPFC_PCI_DEV_OC: 8677 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4; 8678 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4; 8679 break; 8680 default: 8681 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8682 "1419 Invalid HBA PCI-device group: 0x%x\n", 8683 dev_grp); 8684 return -ENODEV; 8685 break; 8686 } 8687 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq; 8688 return 0; 8689 } 8690 8691 /** 8692 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb 8693 * @phba: Pointer to HBA context object. 8694 * @pring: Pointer to driver SLI ring object. 8695 * @piocb: Pointer to command iocb. 8696 * @flag: Flag indicating if this command can be put into txq. 8697 * 8698 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb 8699 * function. This function gets the hbalock and calls 8700 * __lpfc_sli_issue_iocb function and will return the error returned 8701 * by __lpfc_sli_issue_iocb function. This wrapper is used by 8702 * functions which do not hold hbalock. 8703 **/ 8704 int 8705 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 8706 struct lpfc_iocbq *piocb, uint32_t flag) 8707 { 8708 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; 8709 struct lpfc_sli_ring *pring; 8710 struct lpfc_queue *fpeq; 8711 struct lpfc_eqe *eqe; 8712 unsigned long iflags; 8713 int rc, idx; 8714 8715 if (phba->sli_rev == LPFC_SLI_REV4) { 8716 if (piocb->iocb_flag & LPFC_IO_FCP) { 8717 if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag & 8718 LPFC_IO_OAS))) { 8719 if (unlikely(!phba->sli4_hba.fcp_wq)) 8720 return IOCB_ERROR; 8721 idx = lpfc_sli4_scmd_to_wqidx_distr(phba); 8722 piocb->fcp_wqidx = idx; 8723 ring_number = MAX_SLI3_CONFIGURED_RINGS + idx; 8724 } else { 8725 if (unlikely(!phba->sli4_hba.oas_wq)) 8726 return IOCB_ERROR; 8727 idx = 0; 8728 piocb->fcp_wqidx = 0; 8729 ring_number = LPFC_FCP_OAS_RING; 8730 } 8731 pring = &phba->sli.ring[ring_number]; 8732 spin_lock_irqsave(&pring->ring_lock, iflags); 8733 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, 8734 flag); 8735 spin_unlock_irqrestore(&pring->ring_lock, iflags); 8736 8737 if (lpfc_fcp_look_ahead) { 8738 fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx]; 8739 8740 if (atomic_dec_and_test(&fcp_eq_hdl-> 8741 fcp_eq_in_use)) { 8742 8743 /* Get associated EQ with this index */ 8744 fpeq = phba->sli4_hba.hba_eq[idx]; 8745 8746 /* Turn off interrupts from this EQ */ 8747 lpfc_sli4_eq_clr_intr(fpeq); 8748 8749 /* 8750 * Process all the events on FCP EQ 8751 */ 8752 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 8753 lpfc_sli4_hba_handle_eqe(phba, 8754 eqe, idx); 8755 fpeq->EQ_processed++; 8756 } 8757 8758 /* Always clear and re-arm the EQ */ 8759 lpfc_sli4_eq_release(fpeq, 8760 LPFC_QUEUE_REARM); 8761 } 8762 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); 8763 } 8764 } else { 8765 pring = &phba->sli.ring[ring_number]; 8766 spin_lock_irqsave(&pring->ring_lock, iflags); 8767 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, 8768 flag); 8769 spin_unlock_irqrestore(&pring->ring_lock, iflags); 8770 8771 } 8772 } else { 8773 /* For now, SLI2/3 will still use hbalock */ 8774 spin_lock_irqsave(&phba->hbalock, iflags); 8775 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 8776 spin_unlock_irqrestore(&phba->hbalock, iflags); 8777 
} 8778 return rc; 8779 } 8780 8781 /** 8782 * lpfc_extra_ring_setup - Extra ring setup function 8783 * @phba: Pointer to HBA context object. 8784 * 8785 * This function is called while driver attaches with the 8786 * HBA to setup the extra ring. The extra ring is used 8787 * only when driver needs to support target mode functionality 8788 * or IP over FC functionalities. 8789 * 8790 * This function is called with no lock held. 8791 **/ 8792 static int 8793 lpfc_extra_ring_setup( struct lpfc_hba *phba) 8794 { 8795 struct lpfc_sli *psli; 8796 struct lpfc_sli_ring *pring; 8797 8798 psli = &phba->sli; 8799 8800 /* Adjust cmd/rsp ring iocb entries more evenly */ 8801 8802 /* Take some away from the FCP ring */ 8803 pring = &psli->ring[psli->fcp_ring]; 8804 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 8805 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 8806 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 8807 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 8808 8809 /* and give them to the extra ring */ 8810 pring = &psli->ring[psli->extra_ring]; 8811 8812 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 8813 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 8814 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 8815 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 8816 8817 /* Setup default profile for this ring */ 8818 pring->iotag_max = 4096; 8819 pring->num_mask = 1; 8820 pring->prt[0].profile = 0; /* Mask 0 */ 8821 pring->prt[0].rctl = phba->cfg_multi_ring_rctl; 8822 pring->prt[0].type = phba->cfg_multi_ring_type; 8823 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 8824 return 0; 8825 } 8826 8827 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port. 8828 * @phba: Pointer to HBA context object. 8829 * @iocbq: Pointer to iocb object. 8830 * 8831 * The async_event handler calls this routine when it receives 8832 * an ASYNC_STATUS_CN event from the port. The port generates 8833 * this event when an Abort Sequence request to an rport fails 8834 * twice in succession. The abort could be originated by the 8835 * driver or by the port. The ABTS could have been for an ELS 8836 * or FCP IO. The port only generates this event when an ABTS 8837 * fails to complete after one retry. 8838 */ 8839 static void 8840 lpfc_sli_abts_err_handler(struct lpfc_hba *phba, 8841 struct lpfc_iocbq *iocbq) 8842 { 8843 struct lpfc_nodelist *ndlp = NULL; 8844 uint16_t rpi = 0, vpi = 0; 8845 struct lpfc_vport *vport = NULL; 8846 8847 /* The rpi in the ulpContext is vport-sensitive. */ 8848 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag; 8849 rpi = iocbq->iocb.ulpContext; 8850 8851 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8852 "3092 Port generated ABTS async event " 8853 "on vpi %d rpi %d status 0x%x\n", 8854 vpi, rpi, iocbq->iocb.ulpStatus); 8855 8856 vport = lpfc_find_vport_by_vpid(phba, vpi); 8857 if (!vport) 8858 goto err_exit; 8859 ndlp = lpfc_findnode_rpi(vport, rpi); 8860 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 8861 goto err_exit; 8862 8863 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) 8864 lpfc_sli_abts_recover_port(vport, ndlp); 8865 return; 8866 8867 err_exit: 8868 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8869 "3095 Event Context not found, no " 8870 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n", 8871 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus, 8872 vpi, rpi); 8873 } 8874 8875 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port. 8876 * @phba: pointer to HBA context object. 
8877 * @ndlp: nodelist pointer for the impacted rport. 8878 * @axri: pointer to the wcqe containing the failed exchange. 8879 * 8880 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the 8881 * port. The port generates this event when an abort exchange request to an 8882 * rport fails twice in succession with no reply. The abort could be originated 8883 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO. 8884 */ 8885 void 8886 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, 8887 struct lpfc_nodelist *ndlp, 8888 struct sli4_wcqe_xri_aborted *axri) 8889 { 8890 struct lpfc_vport *vport; 8891 uint32_t ext_status = 0; 8892 8893 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 8894 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8895 "3115 Node Context not found, driver " 8896 "ignoring abts err event\n"); 8897 return; 8898 } 8899 8900 vport = ndlp->vport; 8901 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8902 "3116 Port generated FCP XRI ABORT event on " 8903 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n", 8904 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], 8905 bf_get(lpfc_wcqe_xa_xri, axri), 8906 bf_get(lpfc_wcqe_xa_status, axri), 8907 axri->parameter); 8908 8909 /* 8910 * Catch the ABTS protocol failure case. Older OCe FW releases returned 8911 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and 8912 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT. 8913 */ 8914 ext_status = axri->parameter & IOERR_PARAM_MASK; 8915 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && 8916 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) 8917 lpfc_sli_abts_recover_port(vport, ndlp); 8918 } 8919 8920 /** 8921 * lpfc_sli_async_event_handler - ASYNC iocb handler function 8922 * @phba: Pointer to HBA context object. 8923 * @pring: Pointer to driver SLI ring object. 8924 * @iocbq: Pointer to iocb object. 8925 * 8926 * This function is called by the slow ring event handler 8927 * function when there is an ASYNC event iocb in the ring. 8928 * This function is called with no lock held. 8929 * Currently this function handles only temperature related 8930 * ASYNC events. The function decodes the temperature sensor 8931 * event message and posts events for the management applications. 8932 **/ 8933 static void 8934 lpfc_sli_async_event_handler(struct lpfc_hba * phba, 8935 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) 8936 { 8937 IOCB_t *icmd; 8938 uint16_t evt_code; 8939 struct temp_event temp_event_data; 8940 struct Scsi_Host *shost; 8941 uint32_t *iocb_w; 8942 8943 icmd = &iocbq->iocb; 8944 evt_code = icmd->un.asyncstat.evt_code; 8945 8946 switch (evt_code) { 8947 case ASYNC_TEMP_WARN: 8948 case ASYNC_TEMP_SAFE: 8949 temp_event_data.data = (uint32_t) icmd->ulpContext; 8950 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 8951 if (evt_code == ASYNC_TEMP_WARN) { 8952 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 8953 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 8954 "0347 Adapter is very hot, please take " 8955 "corrective action. temperature : %d Celsius\n", 8956 (uint32_t) icmd->ulpContext); 8957 } else { 8958 temp_event_data.event_code = LPFC_NORMAL_TEMP; 8959 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 8960 "0340 Adapter temperature is OK now. 
" 8961 "temperature : %d Celsius\n", 8962 (uint32_t) icmd->ulpContext); 8963 } 8964 8965 /* Send temperature change event to applications */ 8966 shost = lpfc_shost_from_vport(phba->pport); 8967 fc_host_post_vendor_event(shost, fc_get_event_number(), 8968 sizeof(temp_event_data), (char *) &temp_event_data, 8969 LPFC_NL_VENDOR_ID); 8970 break; 8971 case ASYNC_STATUS_CN: 8972 lpfc_sli_abts_err_handler(phba, iocbq); 8973 break; 8974 default: 8975 iocb_w = (uint32_t *) icmd; 8976 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8977 "0346 Ring %d handler: unexpected ASYNC_STATUS" 8978 " evt_code 0x%x\n" 8979 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" 8980 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" 8981 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" 8982 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n", 8983 pring->ringno, icmd->un.asyncstat.evt_code, 8984 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3], 8985 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7], 8986 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11], 8987 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]); 8988 8989 break; 8990 } 8991 } 8992 8993 8994 /** 8995 * lpfc_sli_setup - SLI ring setup function 8996 * @phba: Pointer to HBA context object. 8997 * 8998 * lpfc_sli_setup sets up rings of the SLI interface with 8999 * number of iocbs per ring and iotags. This function is 9000 * called while driver attach to the HBA and before the 9001 * interrupts are enabled. So there is no need for locking. 9002 * 9003 * This function always returns 0. 9004 **/ 9005 int 9006 lpfc_sli_setup(struct lpfc_hba *phba) 9007 { 9008 int i, totiocbsize = 0; 9009 struct lpfc_sli *psli = &phba->sli; 9010 struct lpfc_sli_ring *pring; 9011 9012 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS; 9013 if (phba->sli_rev == LPFC_SLI_REV4) 9014 psli->num_rings += phba->cfg_fcp_io_channel; 9015 psli->sli_flag = 0; 9016 psli->fcp_ring = LPFC_FCP_RING; 9017 psli->next_ring = LPFC_FCP_NEXT_RING; 9018 psli->extra_ring = LPFC_EXTRA_RING; 9019 9020 psli->iocbq_lookup = NULL; 9021 psli->iocbq_lookup_len = 0; 9022 psli->last_iotag = 0; 9023 9024 for (i = 0; i < psli->num_rings; i++) { 9025 pring = &psli->ring[i]; 9026 switch (i) { 9027 case LPFC_FCP_RING: /* ring 0 - FCP */ 9028 /* numCiocb and numRiocb are used in config_port */ 9029 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 9030 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 9031 pring->sli.sli3.numCiocb += 9032 SLI2_IOCB_CMD_R1XTRA_ENTRIES; 9033 pring->sli.sli3.numRiocb += 9034 SLI2_IOCB_RSP_R1XTRA_ENTRIES; 9035 pring->sli.sli3.numCiocb += 9036 SLI2_IOCB_CMD_R3XTRA_ENTRIES; 9037 pring->sli.sli3.numRiocb += 9038 SLI2_IOCB_RSP_R3XTRA_ENTRIES; 9039 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 9040 SLI3_IOCB_CMD_SIZE : 9041 SLI2_IOCB_CMD_SIZE; 9042 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 9043 SLI3_IOCB_RSP_SIZE : 9044 SLI2_IOCB_RSP_SIZE; 9045 pring->iotag_ctr = 0; 9046 pring->iotag_max = 9047 (phba->cfg_hba_queue_depth * 2); 9048 pring->fast_iotag = pring->iotag_max; 9049 pring->num_mask = 0; 9050 break; 9051 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ 9052 /* numCiocb and numRiocb are used in config_port */ 9053 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 9054 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 9055 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 9056 SLI3_IOCB_CMD_SIZE : 9057 SLI2_IOCB_CMD_SIZE; 9058 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 
9059 SLI3_IOCB_RSP_SIZE : 9060 SLI2_IOCB_RSP_SIZE; 9061 pring->iotag_max = phba->cfg_hba_queue_depth; 9062 pring->num_mask = 0; 9063 break; 9064 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 9065 /* numCiocb and numRiocb are used in config_port */ 9066 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 9067 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 9068 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 9069 SLI3_IOCB_CMD_SIZE : 9070 SLI2_IOCB_CMD_SIZE; 9071 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 9072 SLI3_IOCB_RSP_SIZE : 9073 SLI2_IOCB_RSP_SIZE; 9074 pring->fast_iotag = 0; 9075 pring->iotag_ctr = 0; 9076 pring->iotag_max = 4096; 9077 pring->lpfc_sli_rcv_async_status = 9078 lpfc_sli_async_event_handler; 9079 pring->num_mask = LPFC_MAX_RING_MASK; 9080 pring->prt[0].profile = 0; /* Mask 0 */ 9081 pring->prt[0].rctl = FC_RCTL_ELS_REQ; 9082 pring->prt[0].type = FC_TYPE_ELS; 9083 pring->prt[0].lpfc_sli_rcv_unsol_event = 9084 lpfc_els_unsol_event; 9085 pring->prt[1].profile = 0; /* Mask 1 */ 9086 pring->prt[1].rctl = FC_RCTL_ELS_REP; 9087 pring->prt[1].type = FC_TYPE_ELS; 9088 pring->prt[1].lpfc_sli_rcv_unsol_event = 9089 lpfc_els_unsol_event; 9090 pring->prt[2].profile = 0; /* Mask 2 */ 9091 /* NameServer Inquiry */ 9092 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; 9093 /* NameServer */ 9094 pring->prt[2].type = FC_TYPE_CT; 9095 pring->prt[2].lpfc_sli_rcv_unsol_event = 9096 lpfc_ct_unsol_event; 9097 pring->prt[3].profile = 0; /* Mask 3 */ 9098 /* NameServer response */ 9099 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; 9100 /* NameServer */ 9101 pring->prt[3].type = FC_TYPE_CT; 9102 pring->prt[3].lpfc_sli_rcv_unsol_event = 9103 lpfc_ct_unsol_event; 9104 break; 9105 } 9106 totiocbsize += (pring->sli.sli3.numCiocb * 9107 pring->sli.sli3.sizeCiocb) + 9108 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb); 9109 } 9110 if (totiocbsize > MAX_SLIM_IOCB_SIZE) { 9111 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 9112 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in " 9113 "SLI2 SLIM Data: x%x x%lx\n", 9114 phba->brd_no, totiocbsize, 9115 (unsigned long) MAX_SLIM_IOCB_SIZE); 9116 } 9117 if (phba->cfg_multi_ring_support == 2) 9118 lpfc_extra_ring_setup(phba); 9119 9120 return 0; 9121 } 9122 9123 /** 9124 * lpfc_sli_queue_setup - Queue initialization function 9125 * @phba: Pointer to HBA context object. 9126 * 9127 * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each 9128 * ring. This function also initializes ring indices of each ring. 9129 * This function is called during the initialization of the SLI 9130 * interface of an HBA. 9131 * This function is called with no lock held and always returns 9132 * 1. 
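 *
 * Assumed attach-time ordering (illustrative sketch; the actual call site
 * lives in the driver's init path, which is not shown in this file):
 *
 *	lpfc_sli_setup(phba);		sizes the rings and iotag limits
 *	lpfc_sli_queue_setup(phba);	initializes list heads and indices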
9133 **/ 9134 int 9135 lpfc_sli_queue_setup(struct lpfc_hba *phba) 9136 { 9137 struct lpfc_sli *psli; 9138 struct lpfc_sli_ring *pring; 9139 int i; 9140 9141 psli = &phba->sli; 9142 spin_lock_irq(&phba->hbalock); 9143 INIT_LIST_HEAD(&psli->mboxq); 9144 INIT_LIST_HEAD(&psli->mboxq_cmpl); 9145 /* Initialize list headers for txq and txcmplq as double linked lists */ 9146 for (i = 0; i < psli->num_rings; i++) { 9147 pring = &psli->ring[i]; 9148 pring->ringno = i; 9149 pring->sli.sli3.next_cmdidx = 0; 9150 pring->sli.sli3.local_getidx = 0; 9151 pring->sli.sli3.cmdidx = 0; 9152 INIT_LIST_HEAD(&pring->txq); 9153 INIT_LIST_HEAD(&pring->txcmplq); 9154 INIT_LIST_HEAD(&pring->iocb_continueq); 9155 INIT_LIST_HEAD(&pring->iocb_continue_saveq); 9156 INIT_LIST_HEAD(&pring->postbufq); 9157 spin_lock_init(&pring->ring_lock); 9158 } 9159 spin_unlock_irq(&phba->hbalock); 9160 return 1; 9161 } 9162 9163 /** 9164 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system 9165 * @phba: Pointer to HBA context object. 9166 * 9167 * This routine flushes the mailbox command subsystem. It will unconditionally 9168 * flush all the mailbox commands in the three possible stages in the mailbox 9169 * command sub-system: pending mailbox command queue; the outstanding mailbox 9170 * command; and completed mailbox command queue. It is caller's responsibility 9171 * to make sure that the driver is in the proper state to flush the mailbox 9172 * command sub-system. Namely, the posting of mailbox commands into the 9173 * pending mailbox command queue from the various clients must be stopped; 9174 * either the HBA is in a state that it will never works on the outstanding 9175 * mailbox command (such as in EEH or ERATT conditions) or the outstanding 9176 * mailbox command has been completed. 9177 **/ 9178 static void 9179 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) 9180 { 9181 LIST_HEAD(completions); 9182 struct lpfc_sli *psli = &phba->sli; 9183 LPFC_MBOXQ_t *pmb; 9184 unsigned long iflag; 9185 9186 /* Flush all the mailbox commands in the mbox system */ 9187 spin_lock_irqsave(&phba->hbalock, iflag); 9188 /* The pending mailbox command queue */ 9189 list_splice_init(&phba->sli.mboxq, &completions); 9190 /* The outstanding active mailbox command */ 9191 if (psli->mbox_active) { 9192 list_add_tail(&psli->mbox_active->list, &completions); 9193 psli->mbox_active = NULL; 9194 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 9195 } 9196 /* The completed mailbox command queue */ 9197 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 9198 spin_unlock_irqrestore(&phba->hbalock, iflag); 9199 9200 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ 9201 while (!list_empty(&completions)) { 9202 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); 9203 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; 9204 if (pmb->mbox_cmpl) 9205 pmb->mbox_cmpl(phba, pmb); 9206 } 9207 } 9208 9209 /** 9210 * lpfc_sli_host_down - Vport cleanup function 9211 * @vport: Pointer to virtual port object. 9212 * 9213 * lpfc_sli_host_down is called to clean up the resources 9214 * associated with a vport before destroying virtual 9215 * port data structures. 9216 * This function does following operations: 9217 * - Free discovery resources associated with this virtual 9218 * port. 9219 * - Free iocbs associated with this virtual port in 9220 * the txq. 9221 * - Send abort for all iocb commands associated with this 9222 * vport in txcmplq. 9223 * 9224 * This function is called with no lock held and always returns 1. 
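 *
 * Hypothetical teardown sketch (the real vport delete path is elsewhere in
 * the driver):
 *
 *	lpfc_sli_host_down(vport);
 *	... only then destroy the vport data structures ...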
9225 **/ 9226 int 9227 lpfc_sli_host_down(struct lpfc_vport *vport) 9228 { 9229 LIST_HEAD(completions); 9230 struct lpfc_hba *phba = vport->phba; 9231 struct lpfc_sli *psli = &phba->sli; 9232 struct lpfc_sli_ring *pring; 9233 struct lpfc_iocbq *iocb, *next_iocb; 9234 int i; 9235 unsigned long flags = 0; 9236 uint16_t prev_pring_flag; 9237 9238 lpfc_cleanup_discovery_resources(vport); 9239 9240 spin_lock_irqsave(&phba->hbalock, flags); 9241 for (i = 0; i < psli->num_rings; i++) { 9242 pring = &psli->ring[i]; 9243 prev_pring_flag = pring->flag; 9244 /* Only slow rings */ 9245 if (pring->ringno == LPFC_ELS_RING) { 9246 pring->flag |= LPFC_DEFERRED_RING_EVENT; 9247 /* Set the lpfc data pending flag */ 9248 set_bit(LPFC_DATA_READY, &phba->data_flags); 9249 } 9250 /* 9251 * Error everything on the txq since these iocbs have not been 9252 * given to the FW yet. 9253 */ 9254 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 9255 if (iocb->vport != vport) 9256 continue; 9257 list_move_tail(&iocb->list, &completions); 9258 } 9259 9260 /* Next issue ABTS for everything on the txcmplq */ 9261 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, 9262 list) { 9263 if (iocb->vport != vport) 9264 continue; 9265 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 9266 } 9267 9268 pring->flag = prev_pring_flag; 9269 } 9270 9271 spin_unlock_irqrestore(&phba->hbalock, flags); 9272 9273 /* Cancel all the IOCBs from the completions list */ 9274 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 9275 IOERR_SLI_DOWN); 9276 return 1; 9277 } 9278 9279 /** 9280 * lpfc_sli_hba_down - Resource cleanup function for the HBA 9281 * @phba: Pointer to HBA context object. 9282 * 9283 * This function cleans up all iocb, buffers, mailbox commands 9284 * while shutting down the HBA. This function is called with no 9285 * lock held and always returns 1. 9286 * This function does the following to cleanup driver resources: 9287 * - Free discovery resources for each virtual port 9288 * - Cleanup any pending fabric iocbs 9289 * - Iterate through the iocb txq and free each entry 9290 * in the list. 9291 * - Free up any buffer posted to the HBA 9292 * - Free mailbox commands in the mailbox queue. 9293 **/ 9294 int 9295 lpfc_sli_hba_down(struct lpfc_hba *phba) 9296 { 9297 LIST_HEAD(completions); 9298 struct lpfc_sli *psli = &phba->sli; 9299 struct lpfc_sli_ring *pring; 9300 struct lpfc_dmabuf *buf_ptr; 9301 unsigned long flags = 0; 9302 int i; 9303 9304 /* Shutdown the mailbox command sub-system */ 9305 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT); 9306 9307 lpfc_hba_down_prep(phba); 9308 9309 lpfc_fabric_abort_hba(phba); 9310 9311 spin_lock_irqsave(&phba->hbalock, flags); 9312 for (i = 0; i < psli->num_rings; i++) { 9313 pring = &psli->ring[i]; 9314 /* Only slow rings */ 9315 if (pring->ringno == LPFC_ELS_RING) { 9316 pring->flag |= LPFC_DEFERRED_RING_EVENT; 9317 /* Set the lpfc data pending flag */ 9318 set_bit(LPFC_DATA_READY, &phba->data_flags); 9319 } 9320 9321 /* 9322 * Error everything on the txq since these iocbs have not been 9323 * given to the FW yet. 
9324 */ 9325 list_splice_init(&pring->txq, &completions); 9326 } 9327 spin_unlock_irqrestore(&phba->hbalock, flags); 9328 9329 /* Cancel all the IOCBs from the completions list */ 9330 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 9331 IOERR_SLI_DOWN); 9332 9333 spin_lock_irqsave(&phba->hbalock, flags); 9334 list_splice_init(&phba->elsbuf, &completions); 9335 phba->elsbuf_cnt = 0; 9336 phba->elsbuf_prev_cnt = 0; 9337 spin_unlock_irqrestore(&phba->hbalock, flags); 9338 9339 while (!list_empty(&completions)) { 9340 list_remove_head(&completions, buf_ptr, 9341 struct lpfc_dmabuf, list); 9342 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 9343 kfree(buf_ptr); 9344 } 9345 9346 /* Return any active mbox cmds */ 9347 del_timer_sync(&psli->mbox_tmo); 9348 9349 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 9350 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 9351 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 9352 9353 return 1; 9354 } 9355 9356 /** 9357 * lpfc_sli_pcimem_bcopy - SLI memory copy function 9358 * @srcp: Source memory pointer. 9359 * @destp: Destination memory pointer. 9360 * @cnt: Number of words required to be copied. 9361 * 9362 * This function is used for copying data between driver memory 9363 * and the SLI memory. This function also changes the endianness 9364 * of each word if native endianness is different from SLI 9365 * endianness. This function can be called with or without 9366 * lock. 9367 **/ 9368 void 9369 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) 9370 { 9371 uint32_t *src = srcp; 9372 uint32_t *dest = destp; 9373 uint32_t ldata; 9374 int i; 9375 9376 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { 9377 ldata = *src; 9378 ldata = le32_to_cpu(ldata); 9379 *dest = ldata; 9380 src++; 9381 dest++; 9382 } 9383 } 9384 9385 9386 /** 9387 * lpfc_sli_bemem_bcopy - SLI memory copy function 9388 * @srcp: Source memory pointer. 9389 * @destp: Destination memory pointer. 9390 * @cnt: Number of words required to be copied. 9391 * 9392 * This function is used for copying data between a data structure 9393 * with big endian representation to local endianness. 9394 * This function can be called with or without lock. 9395 **/ 9396 void 9397 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt) 9398 { 9399 uint32_t *src = srcp; 9400 uint32_t *dest = destp; 9401 uint32_t ldata; 9402 int i; 9403 9404 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) { 9405 ldata = *src; 9406 ldata = be32_to_cpu(ldata); 9407 *dest = ldata; 9408 src++; 9409 dest++; 9410 } 9411 } 9412 9413 /** 9414 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq 9415 * @phba: Pointer to HBA context object. 9416 * @pring: Pointer to driver SLI ring object. 9417 * @mp: Pointer to driver buffer object. 9418 * 9419 * This function is called with no lock held. 9420 * It always return zero after adding the buffer to the postbufq 9421 * buffer list. 9422 **/ 9423 int 9424 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9425 struct lpfc_dmabuf *mp) 9426 { 9427 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up 9428 later */ 9429 spin_lock_irq(&phba->hbalock); 9430 list_add_tail(&mp->list, &pring->postbufq); 9431 pring->postbufq_cnt++; 9432 spin_unlock_irq(&phba->hbalock); 9433 return 0; 9434 } 9435 9436 /** 9437 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer 9438 * @phba: Pointer to HBA context object. 
9439 * 9440 * When HBQ is enabled, buffers are searched based on tags. This function 9441 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The 9442 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag 9443 * does not conflict with tags of buffer posted for unsolicited events. 9444 * The function returns the allocated tag. The function is called with 9445 * no locks held. 9446 **/ 9447 uint32_t 9448 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba) 9449 { 9450 spin_lock_irq(&phba->hbalock); 9451 phba->buffer_tag_count++; 9452 /* 9453 * Always set the QUE_BUFTAG_BIT to distiguish between 9454 * a tag assigned by HBQ. 9455 */ 9456 phba->buffer_tag_count |= QUE_BUFTAG_BIT; 9457 spin_unlock_irq(&phba->hbalock); 9458 return phba->buffer_tag_count; 9459 } 9460 9461 /** 9462 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag 9463 * @phba: Pointer to HBA context object. 9464 * @pring: Pointer to driver SLI ring object. 9465 * @tag: Buffer tag. 9466 * 9467 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq 9468 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX 9469 * iocb is posted to the response ring with the tag of the buffer. 9470 * This function searches the pring->postbufq list using the tag 9471 * to find buffer associated with CMD_IOCB_RET_XRI64_CX 9472 * iocb. If the buffer is found then lpfc_dmabuf object of the 9473 * buffer is returned to the caller else NULL is returned. 9474 * This function is called with no lock held. 9475 **/ 9476 struct lpfc_dmabuf * 9477 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9478 uint32_t tag) 9479 { 9480 struct lpfc_dmabuf *mp, *next_mp; 9481 struct list_head *slp = &pring->postbufq; 9482 9483 /* Search postbufq, from the beginning, looking for a match on tag */ 9484 spin_lock_irq(&phba->hbalock); 9485 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 9486 if (mp->buffer_tag == tag) { 9487 list_del_init(&mp->list); 9488 pring->postbufq_cnt--; 9489 spin_unlock_irq(&phba->hbalock); 9490 return mp; 9491 } 9492 } 9493 9494 spin_unlock_irq(&phba->hbalock); 9495 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9496 "0402 Cannot find virtual addr for buffer tag on " 9497 "ring %d Data x%lx x%p x%p x%x\n", 9498 pring->ringno, (unsigned long) tag, 9499 slp->next, slp->prev, pring->postbufq_cnt); 9500 9501 return NULL; 9502 } 9503 9504 /** 9505 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events 9506 * @phba: Pointer to HBA context object. 9507 * @pring: Pointer to driver SLI ring object. 9508 * @phys: DMA address of the buffer. 9509 * 9510 * This function searches the buffer list using the dma_address 9511 * of unsolicited event to find the driver's lpfc_dmabuf object 9512 * corresponding to the dma_address. The function returns the 9513 * lpfc_dmabuf object if a buffer is found else it returns NULL. 9514 * This function is called by the ct and els unsolicited event 9515 * handlers to get the buffer associated with the unsolicited 9516 * event. 9517 * 9518 * This function is called with no lock held. 
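 *
 * Illustrative lookup from an unsolicited event handler (sketch; how the
 * caller recovers the DMA address from the IOCB is omitted here):
 *
 *	mp = lpfc_sli_ringpostbuf_get(phba, pring, phys);
 *	if (!mp)
 *		the buffer was never posted or was already claimed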
9519 **/ 9520 struct lpfc_dmabuf * 9521 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9522 dma_addr_t phys) 9523 { 9524 struct lpfc_dmabuf *mp, *next_mp; 9525 struct list_head *slp = &pring->postbufq; 9526 9527 /* Search postbufq, from the beginning, looking for a match on phys */ 9528 spin_lock_irq(&phba->hbalock); 9529 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 9530 if (mp->phys == phys) { 9531 list_del_init(&mp->list); 9532 pring->postbufq_cnt--; 9533 spin_unlock_irq(&phba->hbalock); 9534 return mp; 9535 } 9536 } 9537 9538 spin_unlock_irq(&phba->hbalock); 9539 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9540 "0410 Cannot find virtual addr for mapped buf on " 9541 "ring %d Data x%llx x%p x%p x%x\n", 9542 pring->ringno, (unsigned long long)phys, 9543 slp->next, slp->prev, pring->postbufq_cnt); 9544 return NULL; 9545 } 9546 9547 /** 9548 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs 9549 * @phba: Pointer to HBA context object. 9550 * @cmdiocb: Pointer to driver command iocb object. 9551 * @rspiocb: Pointer to driver response iocb object. 9552 * 9553 * This function is the completion handler for the abort iocbs for 9554 * ELS commands. This function is called from the ELS ring event 9555 * handler with no lock held. This function frees memory resources 9556 * associated with the abort iocb. 9557 **/ 9558 static void 9559 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 9560 struct lpfc_iocbq *rspiocb) 9561 { 9562 IOCB_t *irsp = &rspiocb->iocb; 9563 uint16_t abort_iotag, abort_context; 9564 struct lpfc_iocbq *abort_iocb = NULL; 9565 9566 if (irsp->ulpStatus) { 9567 9568 /* 9569 * Assume that the port already completed and returned, or 9570 * will return the iocb. Just Log the message. 9571 */ 9572 abort_context = cmdiocb->iocb.un.acxri.abortContextTag; 9573 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 9574 9575 spin_lock_irq(&phba->hbalock); 9576 if (phba->sli_rev < LPFC_SLI_REV4) { 9577 if (abort_iotag != 0 && 9578 abort_iotag <= phba->sli.last_iotag) 9579 abort_iocb = 9580 phba->sli.iocbq_lookup[abort_iotag]; 9581 } else 9582 /* For sli4 the abort_tag is the XRI, 9583 * so the abort routine puts the iotag of the iocb 9584 * being aborted in the context field of the abort 9585 * IOCB. 9586 */ 9587 abort_iocb = phba->sli.iocbq_lookup[abort_context]; 9588 9589 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, 9590 "0327 Cannot abort els iocb %p " 9591 "with tag %x context %x, abort status %x, " 9592 "abort code %x\n", 9593 abort_iocb, abort_iotag, abort_context, 9594 irsp->ulpStatus, irsp->un.ulpWord[4]); 9595 9596 spin_unlock_irq(&phba->hbalock); 9597 } 9598 lpfc_sli_release_iocbq(phba, cmdiocb); 9599 return; 9600 } 9601 9602 /** 9603 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command 9604 * @phba: Pointer to HBA context object. 9605 * @cmdiocb: Pointer to driver command iocb object. 9606 * @rspiocb: Pointer to driver response iocb object. 9607 * 9608 * The function is called from SLI ring event handler with no 9609 * lock held. This function is the completion handler for ELS commands 9610 * which are aborted. The function frees memory resources used for 9611 * the aborted ELS commands. 
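 *
 * The abort path installs this handler instead of issuing a real abort
 * while the driver is unloading, e.g. (mirrors lpfc_sli_issue_abort_iotag
 * below):
 *
 *	if ((vport->load_flag & FC_UNLOADING) &&
 *	    (pring->ringno == LPFC_ELS_RING))
 *		cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;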
9612 **/ 9613 static void 9614 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 9615 struct lpfc_iocbq *rspiocb) 9616 { 9617 IOCB_t *irsp = &rspiocb->iocb; 9618 9619 /* ELS cmd tag <ulpIoTag> completes */ 9620 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9621 "0139 Ignoring ELS cmd tag x%x completion Data: " 9622 "x%x x%x x%x\n", 9623 irsp->ulpIoTag, irsp->ulpStatus, 9624 irsp->un.ulpWord[4], irsp->ulpTimeout); 9625 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) 9626 lpfc_ct_free_iocb(phba, cmdiocb); 9627 else 9628 lpfc_els_free_iocb(phba, cmdiocb); 9629 return; 9630 } 9631 9632 /** 9633 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb 9634 * @phba: Pointer to HBA context object. 9635 * @pring: Pointer to driver SLI ring object. 9636 * @cmdiocb: Pointer to driver command iocb object. 9637 * 9638 * This function issues an abort iocb for the provided command iocb down to 9639 * the port. Other than the case the outstanding command iocb is an abort 9640 * request, this function issues abort out unconditionally. This function is 9641 * called with hbalock held. The function returns 0 when it fails due to 9642 * memory allocation failure or when the command iocb is an abort request. 9643 **/ 9644 static int 9645 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9646 struct lpfc_iocbq *cmdiocb) 9647 { 9648 struct lpfc_vport *vport = cmdiocb->vport; 9649 struct lpfc_iocbq *abtsiocbp; 9650 IOCB_t *icmd = NULL; 9651 IOCB_t *iabt = NULL; 9652 int retval; 9653 unsigned long iflags; 9654 9655 /* 9656 * There are certain command types we don't want to abort. And we 9657 * don't want to abort commands that are already in the process of 9658 * being aborted. 9659 */ 9660 icmd = &cmdiocb->iocb; 9661 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 9662 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 9663 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 9664 return 0; 9665 9666 /* issue ABTS for this IOCB based on iotag */ 9667 abtsiocbp = __lpfc_sli_get_iocbq(phba); 9668 if (abtsiocbp == NULL) 9669 return 0; 9670 9671 /* This signals the response to set the correct status 9672 * before calling the completion handler 9673 */ 9674 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 9675 9676 iabt = &abtsiocbp->iocb; 9677 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 9678 iabt->un.acxri.abortContextTag = icmd->ulpContext; 9679 if (phba->sli_rev == LPFC_SLI_REV4) { 9680 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; 9681 iabt->un.acxri.abortContextTag = cmdiocb->iotag; 9682 } 9683 else 9684 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 9685 iabt->ulpLe = 1; 9686 iabt->ulpClass = icmd->ulpClass; 9687 9688 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 9689 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx; 9690 if (cmdiocb->iocb_flag & LPFC_IO_FCP) 9691 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX; 9692 9693 if (phba->link_state >= LPFC_LINK_UP) 9694 iabt->ulpCommand = CMD_ABORT_XRI_CN; 9695 else 9696 iabt->ulpCommand = CMD_CLOSE_XRI_CN; 9697 9698 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; 9699 9700 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 9701 "0339 Abort xri x%x, original iotag x%x, " 9702 "abort cmd iotag x%x\n", 9703 iabt->un.acxri.abortIoTag, 9704 iabt->un.acxri.abortContextTag, 9705 abtsiocbp->iotag); 9706 9707 if (phba->sli_rev == LPFC_SLI_REV4) { 9708 /* Note: both hbalock and ring_lock need to be set here */ 9709 spin_lock_irqsave(&pring->ring_lock, iflags); 9710 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, 9711 abtsiocbp, 0); 9712 
spin_unlock_irqrestore(&pring->ring_lock, iflags);
9713 } else {
9714 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
9715 abtsiocbp, 0);
9716 }
9717
9718 if (retval)
9719 __lpfc_sli_release_iocbq(phba, abtsiocbp);
9720
9721 /*
9722 * The caller of this routine should check for IOCB_ERROR
9723 * and handle it properly. This routine no longer removes the
9724 * iocb from the txcmplq or calls the completion handler on IOCB_ERROR.
9725 */
9726 return retval;
9727 }
9728
9729 /**
9730 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
9731 * @phba: Pointer to HBA context object.
9732 * @pring: Pointer to driver SLI ring object.
9733 * @cmdiocb: Pointer to driver command iocb object.
9734 *
9735 * This function issues an abort iocb for the provided command iocb. In case
9736 * of unloading, the abort iocb will not be issued to commands on the ELS
9737 * ring. Instead, the completion callback of those commands is changed
9738 * so that nothing happens when they finish. This function is called with
9739 * hbalock held. The function returns 0 when the command iocb is an abort
9740 * request.
9741 **/
9742 int
9743 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9744 struct lpfc_iocbq *cmdiocb)
9745 {
9746 struct lpfc_vport *vport = cmdiocb->vport;
9747 int retval = IOCB_ERROR;
9748 IOCB_t *icmd = NULL;
9749
9750 /*
9751 * There are certain command types we don't want to abort. And we
9752 * don't want to abort commands that are already in the process of
9753 * being aborted.
9754 */
9755 icmd = &cmdiocb->iocb;
9756 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
9757 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
9758 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
9759 return 0;
9760
9761 /*
9762 * If we're unloading, don't abort iocb on the ELS ring, but change
9763 * the callback so that nothing happens when it finishes.
9764 */
9765 if ((vport->load_flag & FC_UNLOADING) &&
9766 (pring->ringno == LPFC_ELS_RING)) {
9767 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
9768 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
9769 else
9770 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
9771 goto abort_iotag_exit;
9772 }
9773
9774 /* Now, try to issue the abort for the cmdiocb */
9775 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
9776
9777 abort_iotag_exit:
9778 /*
9779 * The caller of this routine should check for IOCB_ERROR
9780 * and handle it properly. This routine no longer removes the
9781 * iocb from the txcmplq or calls the completion handler on IOCB_ERROR.
9782 */
9783 return retval;
9784 }
9785
9786 /**
9787 * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
9788 * @phba: Pointer to HBA context object.
9789 * @pring: Pointer to driver SLI ring object.
9790 *
9791 * This function aborts all iocbs in the given ring and frees all the iocb
9792 * objects in txq. This function issues abort iocbs unconditionally for all
9793 * the iocb commands in txcmplq. The iocbs in the txcmplq are not guaranteed
9794 * to complete before the return of this function. The caller is not required
9795 * to hold any locks.
9796 **/ 9797 static void 9798 lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 9799 { 9800 LIST_HEAD(completions); 9801 struct lpfc_iocbq *iocb, *next_iocb; 9802 9803 if (pring->ringno == LPFC_ELS_RING) 9804 lpfc_fabric_abort_hba(phba); 9805 9806 spin_lock_irq(&phba->hbalock); 9807 9808 /* Take off all the iocbs on txq for cancelling */ 9809 list_splice_init(&pring->txq, &completions); 9810 pring->txq_cnt = 0; 9811 9812 /* Next issue ABTS for everything on the txcmplq */ 9813 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 9814 lpfc_sli_abort_iotag_issue(phba, pring, iocb); 9815 9816 spin_unlock_irq(&phba->hbalock); 9817 9818 /* Cancel all the IOCBs from the completions list */ 9819 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 9820 IOERR_SLI_ABORTED); 9821 } 9822 9823 /** 9824 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 9825 * @phba: pointer to lpfc HBA data structure. 9826 * 9827 * This routine will abort all pending and outstanding iocbs to an HBA. 9828 **/ 9829 void 9830 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) 9831 { 9832 struct lpfc_sli *psli = &phba->sli; 9833 struct lpfc_sli_ring *pring; 9834 int i; 9835 9836 for (i = 0; i < psli->num_rings; i++) { 9837 pring = &psli->ring[i]; 9838 lpfc_sli_iocb_ring_abort(phba, pring); 9839 } 9840 } 9841 9842 /** 9843 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN 9844 * @iocbq: Pointer to driver iocb object. 9845 * @vport: Pointer to driver virtual port object. 9846 * @tgt_id: SCSI ID of the target. 9847 * @lun_id: LUN ID of the scsi device. 9848 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST 9849 * 9850 * This function acts as an iocb filter for functions which abort or count 9851 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return 9852 * 0 if the filtering criteria is met for the given iocb and will return 9853 * 1 if the filtering criteria is not met. 9854 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the 9855 * given iocb is for the SCSI device specified by vport, tgt_id and 9856 * lun_id parameter. 9857 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the 9858 * given iocb is for the SCSI target specified by vport and tgt_id 9859 * parameters. 9860 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the 9861 * given iocb is for the SCSI host associated with the given vport. 9862 * This function is called with no locks held. 
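 *
 * Typical use is as a filter inside a scan of the iotag lookup array
 * (sketch, mirroring lpfc_sli_sum_iocb and lpfc_sli_abort_iocb below):
 *
 *	for (i = 1; i <= phba->sli.last_iotag; i++) {
 *		iocbq = phba->sli.iocbq_lookup[i];
 *		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id,
 *					       lun_id, LPFC_CTX_LUN) == 0)
 *			count or abort this matching iocb
 *	}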
9863 **/ 9864 static int 9865 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, 9866 uint16_t tgt_id, uint64_t lun_id, 9867 lpfc_ctx_cmd ctx_cmd) 9868 { 9869 struct lpfc_scsi_buf *lpfc_cmd; 9870 int rc = 1; 9871 9872 if (!(iocbq->iocb_flag & LPFC_IO_FCP)) 9873 return rc; 9874 9875 if (iocbq->vport != vport) 9876 return rc; 9877 9878 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 9879 9880 if (lpfc_cmd->pCmd == NULL) 9881 return rc; 9882 9883 switch (ctx_cmd) { 9884 case LPFC_CTX_LUN: 9885 if ((lpfc_cmd->rdata->pnode) && 9886 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && 9887 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) 9888 rc = 0; 9889 break; 9890 case LPFC_CTX_TGT: 9891 if ((lpfc_cmd->rdata->pnode) && 9892 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) 9893 rc = 0; 9894 break; 9895 case LPFC_CTX_HOST: 9896 rc = 0; 9897 break; 9898 default: 9899 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 9900 __func__, ctx_cmd); 9901 break; 9902 } 9903 9904 return rc; 9905 } 9906 9907 /** 9908 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending 9909 * @vport: Pointer to virtual port. 9910 * @tgt_id: SCSI ID of the target. 9911 * @lun_id: LUN ID of the scsi device. 9912 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 9913 * 9914 * This function returns number of FCP commands pending for the vport. 9915 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP 9916 * commands pending on the vport associated with SCSI device specified 9917 * by tgt_id and lun_id parameters. 9918 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP 9919 * commands pending on the vport associated with SCSI target specified 9920 * by tgt_id parameter. 9921 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP 9922 * commands pending on the vport. 9923 * This function returns the number of iocbs which satisfy the filter. 9924 * This function is called without any lock held. 9925 **/ 9926 int 9927 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 9928 lpfc_ctx_cmd ctx_cmd) 9929 { 9930 struct lpfc_hba *phba = vport->phba; 9931 struct lpfc_iocbq *iocbq; 9932 int sum, i; 9933 9934 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 9935 iocbq = phba->sli.iocbq_lookup[i]; 9936 9937 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id, 9938 ctx_cmd) == 0) 9939 sum++; 9940 } 9941 9942 return sum; 9943 } 9944 9945 /** 9946 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs 9947 * @phba: Pointer to HBA context object 9948 * @cmdiocb: Pointer to command iocb object. 9949 * @rspiocb: Pointer to response iocb object. 9950 * 9951 * This function is called when an aborted FCP iocb completes. This 9952 * function is called by the ring event handler with no lock held. 9953 * This function frees the iocb. 
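 *
 * The abort loop in lpfc_sli_abort_iocb wires this handler up before
 * issuing the ABTS (sketch):
 *
 *	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
 *	lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocb, 0);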
9954 **/ 9955 void 9956 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 9957 struct lpfc_iocbq *rspiocb) 9958 { 9959 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9960 "3096 ABORT_XRI_CN completing on rpi x%x " 9961 "original iotag x%x, abort cmd iotag x%x " 9962 "status 0x%x, reason 0x%x\n", 9963 cmdiocb->iocb.un.acxri.abortContextTag, 9964 cmdiocb->iocb.un.acxri.abortIoTag, 9965 cmdiocb->iotag, rspiocb->iocb.ulpStatus, 9966 rspiocb->iocb.un.ulpWord[4]); 9967 lpfc_sli_release_iocbq(phba, cmdiocb); 9968 return; 9969 } 9970 9971 /** 9972 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN 9973 * @vport: Pointer to virtual port. 9974 * @pring: Pointer to driver SLI ring object. 9975 * @tgt_id: SCSI ID of the target. 9976 * @lun_id: LUN ID of the scsi device. 9977 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 9978 * 9979 * This function sends an abort command for every SCSI command 9980 * associated with the given virtual port pending on the ring 9981 * filtered by lpfc_sli_validate_fcp_iocb function. 9982 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the 9983 * FCP iocbs associated with lun specified by tgt_id and lun_id 9984 * parameters 9985 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the 9986 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 9987 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all 9988 * FCP iocbs associated with virtual port. 9989 * This function returns number of iocbs it failed to abort. 9990 * This function is called with no locks held. 9991 **/ 9992 int 9993 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 9994 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) 9995 { 9996 struct lpfc_hba *phba = vport->phba; 9997 struct lpfc_iocbq *iocbq; 9998 struct lpfc_iocbq *abtsiocb; 9999 IOCB_t *cmd = NULL; 10000 int errcnt = 0, ret_val = 0; 10001 int i; 10002 10003 for (i = 1; i <= phba->sli.last_iotag; i++) { 10004 iocbq = phba->sli.iocbq_lookup[i]; 10005 10006 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 10007 abort_cmd) != 0) 10008 continue; 10009 10010 /* 10011 * If the iocbq is already being aborted, don't take a second 10012 * action, but do count it. 10013 */ 10014 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) 10015 continue; 10016 10017 /* issue ABTS for this IOCB based on iotag */ 10018 abtsiocb = lpfc_sli_get_iocbq(phba); 10019 if (abtsiocb == NULL) { 10020 errcnt++; 10021 continue; 10022 } 10023 10024 /* indicate the IO is being aborted by the driver. */ 10025 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; 10026 10027 cmd = &iocbq->iocb; 10028 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 10029 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 10030 if (phba->sli_rev == LPFC_SLI_REV4) 10031 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag; 10032 else 10033 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 10034 abtsiocb->iocb.ulpLe = 1; 10035 abtsiocb->iocb.ulpClass = cmd->ulpClass; 10036 abtsiocb->vport = vport; 10037 10038 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 10039 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; 10040 if (iocbq->iocb_flag & LPFC_IO_FCP) 10041 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; 10042 10043 if (lpfc_is_link_up(phba)) 10044 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 10045 else 10046 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 10047 10048 /* Setup callback routine and issue the command. 
*/ 10049 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 10050 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, 10051 abtsiocb, 0); 10052 if (ret_val == IOCB_ERROR) { 10053 lpfc_sli_release_iocbq(phba, abtsiocb); 10054 errcnt++; 10055 continue; 10056 } 10057 } 10058 10059 return errcnt; 10060 } 10061 10062 /** 10063 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler 10064 * @phba: Pointer to HBA context object. 10065 * @cmdiocbq: Pointer to command iocb. 10066 * @rspiocbq: Pointer to response iocb. 10067 * 10068 * This function is the completion handler for iocbs issued using 10069 * lpfc_sli_issue_iocb_wait function. This function is called by the 10070 * ring event handler function without any lock held. This function 10071 * can be called from both worker thread context and interrupt 10072 * context. This function also can be called from other thread which 10073 * cleans up the SLI layer objects. 10074 * This function copy the contents of the response iocb to the 10075 * response iocb memory object provided by the caller of 10076 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 10077 * sleeps for the iocb completion. 10078 **/ 10079 static void 10080 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, 10081 struct lpfc_iocbq *cmdiocbq, 10082 struct lpfc_iocbq *rspiocbq) 10083 { 10084 wait_queue_head_t *pdone_q; 10085 unsigned long iflags; 10086 struct lpfc_scsi_buf *lpfc_cmd; 10087 10088 spin_lock_irqsave(&phba->hbalock, iflags); 10089 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) { 10090 10091 /* 10092 * A time out has occurred for the iocb. If a time out 10093 * completion handler has been supplied, call it. Otherwise, 10094 * just free the iocbq. 10095 */ 10096 10097 spin_unlock_irqrestore(&phba->hbalock, iflags); 10098 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl; 10099 cmdiocbq->wait_iocb_cmpl = NULL; 10100 if (cmdiocbq->iocb_cmpl) 10101 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL); 10102 else 10103 lpfc_sli_release_iocbq(phba, cmdiocbq); 10104 return; 10105 } 10106 10107 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 10108 if (cmdiocbq->context2 && rspiocbq) 10109 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 10110 &rspiocbq->iocb, sizeof(IOCB_t)); 10111 10112 /* Set the exchange busy flag for task management commands */ 10113 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && 10114 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { 10115 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf, 10116 cur_iocbq); 10117 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; 10118 } 10119 10120 pdone_q = cmdiocbq->context_un.wait_queue; 10121 if (pdone_q) 10122 wake_up(pdone_q); 10123 spin_unlock_irqrestore(&phba->hbalock, iflags); 10124 return; 10125 } 10126 10127 /** 10128 * lpfc_chk_iocb_flg - Test IOCB flag with lock held. 10129 * @phba: Pointer to HBA context object.. 10130 * @piocbq: Pointer to command iocb. 10131 * @flag: Flag to test. 10132 * 10133 * This routine grabs the hbalock and then test the iocb_flag to 10134 * see if the passed in flag is set. 10135 * Returns: 10136 * 1 if flag is set. 10137 * 0 if flag is not set. 
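 *
 * Used as the condition of a timed wait (sketch, mirroring
 * lpfc_sli_issue_iocb_wait below):
 *
 *	timeleft = wait_event_timeout(done_q,
 *			lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
 *			timeout_req);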
10138 **/
10139 static int
10140 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
10141 struct lpfc_iocbq *piocbq, uint32_t flag)
10142 {
10143 unsigned long iflags;
10144 int ret;
10145
10146 spin_lock_irqsave(&phba->hbalock, iflags);
10147 ret = piocbq->iocb_flag & flag;
10148 spin_unlock_irqrestore(&phba->hbalock, iflags);
10149 return ret;
10150
10151 }
10152
10153 /**
10154 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
10155 * @phba: Pointer to HBA context object.
10156 * @ring_number: SLI ring number to issue the iocb on.
10157 * @piocb: Pointer to command iocb.
10158 * @prspiocbq: Pointer to response iocb.
10159 * @timeout: Timeout in number of seconds.
10160 *
10161 * This function issues the iocb to firmware and waits for the
10162 * iocb to complete. The iocb_cmpl field of the iocb shall be used
10163 * to handle iocbs which time out. If the field is NULL, the
10164 * function shall free the iocbq structure. If more clean up is
10165 * needed, the caller is expected to provide a completion function
10166 * that will provide the needed clean up. If the iocb command is
10167 * not completed within timeout seconds, the function will either
10168 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
10169 * completion function set in the iocb_cmpl field and then return
10170 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
10171 * resources if this function returns IOCB_TIMEDOUT.
10172 * The function waits for the iocb completion using a
10173 * non-interruptible wait.
10174 * This function will sleep while waiting for iocb completion.
10175 * So, this function should not be called from any context which
10176 * does not allow sleeping. For the same reason, this function
10177 * cannot be called with interrupts disabled.
10178 * This function assumes that iocb completions occur while
10179 * this function sleeps. So, this function cannot be called from
10180 * the thread which processes iocb completions for this ring.
10181 * This function clears the iocb_flag of the iocb object before
10182 * issuing the iocb and the iocb completion handler sets this
10183 * flag and wakes this thread when the iocb completes.
10184 * The contents of the response iocb will be copied to prspiocbq
10185 * by the completion handler when the command completes.
10186 * This function returns IOCB_SUCCESS on success.
10187 * This function is called with no lock held.
10188 **/
10189 int
10190 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
10191 uint32_t ring_number,
10192 struct lpfc_iocbq *piocb,
10193 struct lpfc_iocbq *prspiocbq,
10194 uint32_t timeout)
10195 {
10196 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
10197 long timeleft, timeout_req = 0;
10198 int retval = IOCB_SUCCESS;
10199 uint32_t creg_val;
10200 struct lpfc_iocbq *iocb;
10201 int txq_cnt = 0;
10202 int txcmplq_cnt = 0;
10203 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
10204 unsigned long iflags;
10205 bool iocb_completed = true;
10206
10207 /*
10208 * If the caller has provided a response iocbq buffer, then context2
10209 * must be NULL or it is an error.
10210 */ 10211 if (prspiocbq) { 10212 if (piocb->context2) 10213 return IOCB_ERROR; 10214 piocb->context2 = prspiocbq; 10215 } 10216 10217 piocb->wait_iocb_cmpl = piocb->iocb_cmpl; 10218 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; 10219 piocb->context_un.wait_queue = &done_q; 10220 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO); 10221 10222 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 10223 if (lpfc_readl(phba->HCregaddr, &creg_val)) 10224 return IOCB_ERROR; 10225 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 10226 writel(creg_val, phba->HCregaddr); 10227 readl(phba->HCregaddr); /* flush */ 10228 } 10229 10230 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 10231 SLI_IOCB_RET_IOCB); 10232 if (retval == IOCB_SUCCESS) { 10233 timeout_req = msecs_to_jiffies(timeout * 1000); 10234 timeleft = wait_event_timeout(done_q, 10235 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), 10236 timeout_req); 10237 spin_lock_irqsave(&phba->hbalock, iflags); 10238 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) { 10239 10240 /* 10241 * IOCB timed out. Inform the wake iocb wait 10242 * completion function and set local status 10243 */ 10244 10245 iocb_completed = false; 10246 piocb->iocb_flag |= LPFC_IO_WAKE_TMO; 10247 } 10248 spin_unlock_irqrestore(&phba->hbalock, iflags); 10249 if (iocb_completed) { 10250 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10251 "0331 IOCB wake signaled\n"); 10252 /* Note: we are not indicating if the IOCB has a success 10253 * status or not - that's for the caller to check. 10254 * IOCB_SUCCESS means just that the command was sent and 10255 * completed. Not that it completed successfully. 10256 * */ 10257 } else if (timeleft == 0) { 10258 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10259 "0338 IOCB wait timeout error - no " 10260 "wake response Data x%x\n", timeout); 10261 retval = IOCB_TIMEDOUT; 10262 } else { 10263 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10264 "0330 IOCB wake NOT set, " 10265 "Data x%x x%lx\n", 10266 timeout, (timeleft / jiffies)); 10267 retval = IOCB_TIMEDOUT; 10268 } 10269 } else if (retval == IOCB_BUSY) { 10270 if (phba->cfg_log_verbose & LOG_SLI) { 10271 list_for_each_entry(iocb, &pring->txq, list) { 10272 txq_cnt++; 10273 } 10274 list_for_each_entry(iocb, &pring->txcmplq, list) { 10275 txcmplq_cnt++; 10276 } 10277 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10278 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", 10279 phba->iocb_cnt, txq_cnt, txcmplq_cnt); 10280 } 10281 return retval; 10282 } else { 10283 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10284 "0332 IOCB wait issue failed, Data x%x\n", 10285 retval); 10286 retval = IOCB_ERROR; 10287 } 10288 10289 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 10290 if (lpfc_readl(phba->HCregaddr, &creg_val)) 10291 return IOCB_ERROR; 10292 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 10293 writel(creg_val, phba->HCregaddr); 10294 readl(phba->HCregaddr); /* flush */ 10295 } 10296 10297 if (prspiocbq) 10298 piocb->context2 = NULL; 10299 10300 piocb->context_un.wait_queue = NULL; 10301 piocb->iocb_cmpl = NULL; 10302 return retval; 10303 } 10304 10305 /** 10306 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox 10307 * @phba: Pointer to HBA context object. 10308 * @pmboxq: Pointer to driver mailbox object. 10309 * @timeout: Timeout in number of seconds. 10310 * 10311 * This function issues the mailbox to firmware and waits for the 10312 * mailbox command to complete. If the mailbox command is not 10313 * completed within timeout seconds, it returns MBX_TIMEOUT. 
10314 * The function waits for the mailbox completion using an 10315 * interruptible wait. If the thread is woken up due to a 10316 * signal, MBX_TIMEOUT error is returned to the caller. Caller 10317 * should not free the mailbox resources, if this function returns 10318 * MBX_TIMEOUT. 10319 * This function will sleep while waiting for mailbox completion. 10320 * So, this function should not be called from any context which 10321 * does not allow sleeping. Due to the same reason, this function 10322 * cannot be called with interrupt disabled. 10323 * This function assumes that the mailbox completion occurs while 10324 * this function sleep. So, this function cannot be called from 10325 * the worker thread which processes mailbox completion. 10326 * This function is called in the context of HBA management 10327 * applications. 10328 * This function returns MBX_SUCCESS when successful. 10329 * This function is called with no lock held. 10330 **/ 10331 int 10332 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, 10333 uint32_t timeout) 10334 { 10335 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 10336 MAILBOX_t *mb = NULL; 10337 int retval; 10338 unsigned long flag; 10339 10340 /* The caller might set context1 for extended buffer */ 10341 if (pmboxq->context1) 10342 mb = (MAILBOX_t *)pmboxq->context1; 10343 10344 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE; 10345 /* setup wake call as IOCB callback */ 10346 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; 10347 /* setup context field to pass wait_queue pointer to wake function */ 10348 pmboxq->context1 = &done_q; 10349 10350 /* now issue the command */ 10351 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 10352 if (retval == MBX_BUSY || retval == MBX_SUCCESS) { 10353 wait_event_interruptible_timeout(done_q, 10354 pmboxq->mbox_flag & LPFC_MBX_WAKE, 10355 msecs_to_jiffies(timeout * 1000)); 10356 10357 spin_lock_irqsave(&phba->hbalock, flag); 10358 /* restore the possible extended buffer for free resource */ 10359 pmboxq->context1 = (uint8_t *)mb; 10360 /* 10361 * if LPFC_MBX_WAKE flag is set the mailbox is completed 10362 * else do not free the resources. 10363 */ 10364 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) { 10365 retval = MBX_SUCCESS; 10366 } else { 10367 retval = MBX_TIMEOUT; 10368 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10369 } 10370 spin_unlock_irqrestore(&phba->hbalock, flag); 10371 } else { 10372 /* restore the possible extended buffer for free resource */ 10373 pmboxq->context1 = (uint8_t *)mb; 10374 } 10375 10376 return retval; 10377 } 10378 10379 /** 10380 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system 10381 * @phba: Pointer to HBA context. 10382 * 10383 * This function is called to shutdown the driver's mailbox sub-system. 10384 * It first marks the mailbox sub-system is in a block state to prevent 10385 * the asynchronous mailbox command from issued off the pending mailbox 10386 * command queue. If the mailbox command sub-system shutdown is due to 10387 * HBA error conditions such as EEH or ERATT, this routine shall invoke 10388 * the mailbox sub-system flush routine to forcefully bring down the 10389 * mailbox sub-system. Otherwise, if it is due to normal condition (such 10390 * as with offline or HBA function reset), this routine will wait for the 10391 * outstanding mailbox command to complete before invoking the mailbox 10392 * sub-system flush routine to gracefully bring down mailbox sub-system. 
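 * When @mbx_action is LPFC_MBX_NO_WAIT the routine only pauses briefly
 * before flushing; any other action value (LPFC_MBX_WAIT is assumed to be
 * the usual graceful one) takes the wait-then-flush path described above,
 * for example:
 *
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);	graceful offline
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_NO_WAIT);	EEH/error paths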
10393 **/ 10394 void 10395 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action) 10396 { 10397 struct lpfc_sli *psli = &phba->sli; 10398 unsigned long timeout; 10399 10400 if (mbx_action == LPFC_MBX_NO_WAIT) { 10401 /* delay 100ms for port state */ 10402 msleep(100); 10403 lpfc_sli_mbox_sys_flush(phba); 10404 return; 10405 } 10406 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 10407 10408 spin_lock_irq(&phba->hbalock); 10409 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 10410 10411 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 10412 /* Determine how long we might wait for the active mailbox 10413 * command to be gracefully completed by firmware. 10414 */ 10415 if (phba->sli.mbox_active) 10416 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 10417 phba->sli.mbox_active) * 10418 1000) + jiffies; 10419 spin_unlock_irq(&phba->hbalock); 10420 10421 while (phba->sli.mbox_active) { 10422 /* Check active mailbox complete status every 2ms */ 10423 msleep(2); 10424 if (time_after(jiffies, timeout)) 10425 /* Timeout, let the mailbox flush routine to 10426 * forcefully release active mailbox command 10427 */ 10428 break; 10429 } 10430 } else 10431 spin_unlock_irq(&phba->hbalock); 10432 10433 lpfc_sli_mbox_sys_flush(phba); 10434 } 10435 10436 /** 10437 * lpfc_sli_eratt_read - read sli-3 error attention events 10438 * @phba: Pointer to HBA context. 10439 * 10440 * This function is called to read the SLI3 device error attention registers 10441 * for possible error attention events. The caller must hold the hostlock 10442 * with spin_lock_irq(). 10443 * 10444 * This function returns 1 when there is Error Attention in the Host Attention 10445 * Register and returns 0 otherwise. 10446 **/ 10447 static int 10448 lpfc_sli_eratt_read(struct lpfc_hba *phba) 10449 { 10450 uint32_t ha_copy; 10451 10452 /* Read chip Host Attention (HA) register */ 10453 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 10454 goto unplug_err; 10455 10456 if (ha_copy & HA_ERATT) { 10457 /* Read host status register to retrieve error event */ 10458 if (lpfc_sli_read_hs(phba)) 10459 goto unplug_err; 10460 10461 /* Check if there is a deferred error condition is active */ 10462 if ((HS_FFER1 & phba->work_hs) && 10463 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 10464 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { 10465 phba->hba_flag |= DEFER_ERATT; 10466 /* Clear all interrupt enable conditions */ 10467 writel(0, phba->HCregaddr); 10468 readl(phba->HCregaddr); 10469 } 10470 10471 /* Set the driver HA work bitmap */ 10472 phba->work_ha |= HA_ERATT; 10473 /* Indicate polling handles this ERATT */ 10474 phba->hba_flag |= HBA_ERATT_HANDLED; 10475 return 1; 10476 } 10477 return 0; 10478 10479 unplug_err: 10480 /* Set the driver HS work bitmap */ 10481 phba->work_hs |= UNPLUG_ERR; 10482 /* Set the driver HA work bitmap */ 10483 phba->work_ha |= HA_ERATT; 10484 /* Indicate polling handles this ERATT */ 10485 phba->hba_flag |= HBA_ERATT_HANDLED; 10486 return 1; 10487 } 10488 10489 /** 10490 * lpfc_sli4_eratt_read - read sli-4 error attention events 10491 * @phba: Pointer to HBA context. 10492 * 10493 * This function is called to read the SLI4 device error attention registers 10494 * for possible error attention events. The caller must hold the hostlock 10495 * with spin_lock_irq(). 10496 * 10497 * This function returns 1 when there is Error Attention in the Host Attention 10498 * Register and returns 0 otherwise. 
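 * A minimal caller sketch (illustrative only; the real caller is
 * lpfc_sli_check_eratt() further down in this file):
 *
 *	spin_lock_irq(&phba->hbalock);
 *	if (lpfc_sli4_eratt_read(phba))
 *		(HA_ERATT is now set in phba->work_ha for the worker thread)
 *	spin_unlock_irq(&phba->hbalock);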
10499 **/ 10500 static int 10501 lpfc_sli4_eratt_read(struct lpfc_hba *phba) 10502 { 10503 uint32_t uerr_sta_hi, uerr_sta_lo; 10504 uint32_t if_type, portsmphr; 10505 struct lpfc_register portstat_reg; 10506 10507 /* 10508 * For now, use the SLI4 device internal unrecoverable error 10509 * registers for error attention. This can be changed later. 10510 */ 10511 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10512 switch (if_type) { 10513 case LPFC_SLI_INTF_IF_TYPE_0: 10514 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, 10515 &uerr_sta_lo) || 10516 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, 10517 &uerr_sta_hi)) { 10518 phba->work_hs |= UNPLUG_ERR; 10519 phba->work_ha |= HA_ERATT; 10520 phba->hba_flag |= HBA_ERATT_HANDLED; 10521 return 1; 10522 } 10523 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 10524 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 10525 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10526 "1423 HBA Unrecoverable error: " 10527 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 10528 "ue_mask_lo_reg=0x%x, " 10529 "ue_mask_hi_reg=0x%x\n", 10530 uerr_sta_lo, uerr_sta_hi, 10531 phba->sli4_hba.ue_mask_lo, 10532 phba->sli4_hba.ue_mask_hi); 10533 phba->work_status[0] = uerr_sta_lo; 10534 phba->work_status[1] = uerr_sta_hi; 10535 phba->work_ha |= HA_ERATT; 10536 phba->hba_flag |= HBA_ERATT_HANDLED; 10537 return 1; 10538 } 10539 break; 10540 case LPFC_SLI_INTF_IF_TYPE_2: 10541 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 10542 &portstat_reg.word0) || 10543 lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 10544 &portsmphr)){ 10545 phba->work_hs |= UNPLUG_ERR; 10546 phba->work_ha |= HA_ERATT; 10547 phba->hba_flag |= HBA_ERATT_HANDLED; 10548 return 1; 10549 } 10550 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { 10551 phba->work_status[0] = 10552 readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 10553 phba->work_status[1] = 10554 readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 10555 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10556 "2885 Port Status Event: " 10557 "port status reg 0x%x, " 10558 "port smphr reg 0x%x, " 10559 "error 1=0x%x, error 2=0x%x\n", 10560 portstat_reg.word0, 10561 portsmphr, 10562 phba->work_status[0], 10563 phba->work_status[1]); 10564 phba->work_ha |= HA_ERATT; 10565 phba->hba_flag |= HBA_ERATT_HANDLED; 10566 return 1; 10567 } 10568 break; 10569 case LPFC_SLI_INTF_IF_TYPE_1: 10570 default: 10571 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10572 "2886 HBA Error Attention on unsupported " 10573 "if type %d.", if_type); 10574 return 1; 10575 } 10576 10577 return 0; 10578 } 10579 10580 /** 10581 * lpfc_sli_check_eratt - check error attention events 10582 * @phba: Pointer to HBA context. 10583 * 10584 * This function is called from timer soft interrupt context to check HBA's 10585 * error attention register bit for error attention events. 10586 * 10587 * This function returns 1 when there is Error Attention in the Host Attention 10588 * Register and returns 0 otherwise. 10589 **/ 10590 int 10591 lpfc_sli_check_eratt(struct lpfc_hba *phba) 10592 { 10593 uint32_t ha_copy; 10594 10595 /* If somebody is waiting to handle an eratt, don't process it 10596 * here. The brdkill function will do this. 
10597 */ 10598 if (phba->link_flag & LS_IGNORE_ERATT) 10599 return 0; 10600 10601 /* Check if interrupt handler handles this ERATT */ 10602 spin_lock_irq(&phba->hbalock); 10603 if (phba->hba_flag & HBA_ERATT_HANDLED) { 10604 /* Interrupt handler has handled ERATT */ 10605 spin_unlock_irq(&phba->hbalock); 10606 return 0; 10607 } 10608 10609 /* 10610 * If there is deferred error attention, do not check for error 10611 * attention 10612 */ 10613 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 10614 spin_unlock_irq(&phba->hbalock); 10615 return 0; 10616 } 10617 10618 /* If PCI channel is offline, don't process it */ 10619 if (unlikely(pci_channel_offline(phba->pcidev))) { 10620 spin_unlock_irq(&phba->hbalock); 10621 return 0; 10622 } 10623 10624 switch (phba->sli_rev) { 10625 case LPFC_SLI_REV2: 10626 case LPFC_SLI_REV3: 10627 /* Read chip Host Attention (HA) register */ 10628 ha_copy = lpfc_sli_eratt_read(phba); 10629 break; 10630 case LPFC_SLI_REV4: 10631 /* Read device Uncoverable Error (UERR) registers */ 10632 ha_copy = lpfc_sli4_eratt_read(phba); 10633 break; 10634 default: 10635 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10636 "0299 Invalid SLI revision (%d)\n", 10637 phba->sli_rev); 10638 ha_copy = 0; 10639 break; 10640 } 10641 spin_unlock_irq(&phba->hbalock); 10642 10643 return ha_copy; 10644 } 10645 10646 /** 10647 * lpfc_intr_state_check - Check device state for interrupt handling 10648 * @phba: Pointer to HBA context. 10649 * 10650 * This inline routine checks whether a device or its PCI slot is in a state 10651 * that the interrupt should be handled. 10652 * 10653 * This function returns 0 if the device or the PCI slot is in a state that 10654 * interrupt should be handled, otherwise -EIO. 10655 */ 10656 static inline int 10657 lpfc_intr_state_check(struct lpfc_hba *phba) 10658 { 10659 /* If the pci channel is offline, ignore all the interrupts */ 10660 if (unlikely(pci_channel_offline(phba->pcidev))) 10661 return -EIO; 10662 10663 /* Update device level interrupt statistics */ 10664 phba->sli.slistat.sli_intr++; 10665 10666 /* Ignore all interrupts during initialization. */ 10667 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 10668 return -EIO; 10669 10670 return 0; 10671 } 10672 10673 /** 10674 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device 10675 * @irq: Interrupt number. 10676 * @dev_id: The device context pointer. 10677 * 10678 * This function is directly called from the PCI layer as an interrupt 10679 * service routine when device with SLI-3 interface spec is enabled with 10680 * MSI-X multi-message interrupt mode and there are slow-path events in 10681 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 10682 * interrupt mode, this function is called as part of the device-level 10683 * interrupt handler. When the PCI slot is in error recovery or the HBA 10684 * is undergoing initialization, the interrupt handler will not process 10685 * the interrupt. The link attention and ELS ring attention events are 10686 * handled by the worker thread. The interrupt handler signals the worker 10687 * thread and returns for these events. This function is called without 10688 * any lock held. It gets the hbalock to access and update SLI data 10689 * structures. 10690 * 10691 * This function returns IRQ_HANDLED when interrupt is handled else it 10692 * returns IRQ_NONE. 
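 * For context, an illustrative (not verbatim) MSI-X registration of this
 * handler elsewhere in the driver would look roughly like:
 *
 *	rc = request_irq(phba->msix_entries[0].vector,
 *			 &lpfc_sli_sp_intr_handler, 0, "lpfc:sp", phba);
 *
 * where the msix_entries field name and the "lpfc:sp" label are assumptions
 * made for this sketch.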
10693 **/ 10694 irqreturn_t 10695 lpfc_sli_sp_intr_handler(int irq, void *dev_id) 10696 { 10697 struct lpfc_hba *phba; 10698 uint32_t ha_copy, hc_copy; 10699 uint32_t work_ha_copy; 10700 unsigned long status; 10701 unsigned long iflag; 10702 uint32_t control; 10703 10704 MAILBOX_t *mbox, *pmbox; 10705 struct lpfc_vport *vport; 10706 struct lpfc_nodelist *ndlp; 10707 struct lpfc_dmabuf *mp; 10708 LPFC_MBOXQ_t *pmb; 10709 int rc; 10710 10711 /* 10712 * Get the driver's phba structure from the dev_id and 10713 * assume the HBA is not interrupting. 10714 */ 10715 phba = (struct lpfc_hba *)dev_id; 10716 10717 if (unlikely(!phba)) 10718 return IRQ_NONE; 10719 10720 /* 10721 * Stuff needs to be attented to when this function is invoked as an 10722 * individual interrupt handler in MSI-X multi-message interrupt mode 10723 */ 10724 if (phba->intr_type == MSIX) { 10725 /* Check device state for handling interrupt */ 10726 if (lpfc_intr_state_check(phba)) 10727 return IRQ_NONE; 10728 /* Need to read HA REG for slow-path events */ 10729 spin_lock_irqsave(&phba->hbalock, iflag); 10730 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 10731 goto unplug_error; 10732 /* If somebody is waiting to handle an eratt don't process it 10733 * here. The brdkill function will do this. 10734 */ 10735 if (phba->link_flag & LS_IGNORE_ERATT) 10736 ha_copy &= ~HA_ERATT; 10737 /* Check the need for handling ERATT in interrupt handler */ 10738 if (ha_copy & HA_ERATT) { 10739 if (phba->hba_flag & HBA_ERATT_HANDLED) 10740 /* ERATT polling has handled ERATT */ 10741 ha_copy &= ~HA_ERATT; 10742 else 10743 /* Indicate interrupt handler handles ERATT */ 10744 phba->hba_flag |= HBA_ERATT_HANDLED; 10745 } 10746 10747 /* 10748 * If there is deferred error attention, do not check for any 10749 * interrupt. 10750 */ 10751 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 10752 spin_unlock_irqrestore(&phba->hbalock, iflag); 10753 return IRQ_NONE; 10754 } 10755 10756 /* Clear up only attention source related to slow-path */ 10757 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 10758 goto unplug_error; 10759 10760 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | 10761 HC_LAINT_ENA | HC_ERINT_ENA), 10762 phba->HCregaddr); 10763 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 10764 phba->HAregaddr); 10765 writel(hc_copy, phba->HCregaddr); 10766 readl(phba->HAregaddr); /* flush */ 10767 spin_unlock_irqrestore(&phba->hbalock, iflag); 10768 } else 10769 ha_copy = phba->ha_copy; 10770 10771 work_ha_copy = ha_copy & phba->work_ha_mask; 10772 10773 if (work_ha_copy) { 10774 if (work_ha_copy & HA_LATT) { 10775 if (phba->sli.sli_flag & LPFC_PROCESS_LA) { 10776 /* 10777 * Turn off Link Attention interrupts 10778 * until CLEAR_LA done 10779 */ 10780 spin_lock_irqsave(&phba->hbalock, iflag); 10781 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 10782 if (lpfc_readl(phba->HCregaddr, &control)) 10783 goto unplug_error; 10784 control &= ~HC_LAINT_ENA; 10785 writel(control, phba->HCregaddr); 10786 readl(phba->HCregaddr); /* flush */ 10787 spin_unlock_irqrestore(&phba->hbalock, iflag); 10788 } 10789 else 10790 work_ha_copy &= ~HA_LATT; 10791 } 10792 10793 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) { 10794 /* 10795 * Turn off Slow Rings interrupts, LPFC_ELS_RING is 10796 * the only slow ring. 
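 * Each ring owns a four-bit nibble of attention bits in the HA register,
 * so the ELS ring's receive attention is isolated with
 *	status = work_ha_copy & (HA_RXMASK << (4 * LPFC_ELS_RING));
 *	status >>= (4 * LPFC_ELS_RING);
 * exactly as done below before testing against HA_RXMASK.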
10797 */ 10798 status = (work_ha_copy & 10799 (HA_RXMASK << (4*LPFC_ELS_RING))); 10800 status >>= (4*LPFC_ELS_RING); 10801 if (status & HA_RXMASK) { 10802 spin_lock_irqsave(&phba->hbalock, iflag); 10803 if (lpfc_readl(phba->HCregaddr, &control)) 10804 goto unplug_error; 10805 10806 lpfc_debugfs_slow_ring_trc(phba, 10807 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", 10808 control, status, 10809 (uint32_t)phba->sli.slistat.sli_intr); 10810 10811 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { 10812 lpfc_debugfs_slow_ring_trc(phba, 10813 "ISR Disable ring:" 10814 "pwork:x%x hawork:x%x wait:x%x", 10815 phba->work_ha, work_ha_copy, 10816 (uint32_t)((unsigned long) 10817 &phba->work_waitq)); 10818 10819 control &= 10820 ~(HC_R0INT_ENA << LPFC_ELS_RING); 10821 writel(control, phba->HCregaddr); 10822 readl(phba->HCregaddr); /* flush */ 10823 } 10824 else { 10825 lpfc_debugfs_slow_ring_trc(phba, 10826 "ISR slow ring: pwork:" 10827 "x%x hawork:x%x wait:x%x", 10828 phba->work_ha, work_ha_copy, 10829 (uint32_t)((unsigned long) 10830 &phba->work_waitq)); 10831 } 10832 spin_unlock_irqrestore(&phba->hbalock, iflag); 10833 } 10834 } 10835 spin_lock_irqsave(&phba->hbalock, iflag); 10836 if (work_ha_copy & HA_ERATT) { 10837 if (lpfc_sli_read_hs(phba)) 10838 goto unplug_error; 10839 /* 10840 * Check if there is a deferred error condition 10841 * is active 10842 */ 10843 if ((HS_FFER1 & phba->work_hs) && 10844 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 10845 HS_FFER6 | HS_FFER7 | HS_FFER8) & 10846 phba->work_hs)) { 10847 phba->hba_flag |= DEFER_ERATT; 10848 /* Clear all interrupt enable conditions */ 10849 writel(0, phba->HCregaddr); 10850 readl(phba->HCregaddr); 10851 } 10852 } 10853 10854 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 10855 pmb = phba->sli.mbox_active; 10856 pmbox = &pmb->u.mb; 10857 mbox = phba->mbox; 10858 vport = pmb->vport; 10859 10860 /* First check out the status word */ 10861 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); 10862 if (pmbox->mbxOwner != OWN_HOST) { 10863 spin_unlock_irqrestore(&phba->hbalock, iflag); 10864 /* 10865 * Stray Mailbox Interrupt, mbxCommand <cmd> 10866 * mbxStatus <status> 10867 */ 10868 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 10869 LOG_SLI, 10870 "(%d):0304 Stray Mailbox " 10871 "Interrupt mbxCommand x%x " 10872 "mbxStatus x%x\n", 10873 (vport ? vport->vpi : 0), 10874 pmbox->mbxCommand, 10875 pmbox->mbxStatus); 10876 /* clear mailbox attention bit */ 10877 work_ha_copy &= ~HA_MBATT; 10878 } else { 10879 phba->sli.mbox_active = NULL; 10880 spin_unlock_irqrestore(&phba->hbalock, iflag); 10881 phba->last_completion_time = jiffies; 10882 del_timer(&phba->sli.mbox_tmo); 10883 if (pmb->mbox_cmpl) { 10884 lpfc_sli_pcimem_bcopy(mbox, pmbox, 10885 MAILBOX_CMD_SIZE); 10886 if (pmb->out_ext_byte_len && 10887 pmb->context2) 10888 lpfc_sli_pcimem_bcopy( 10889 phba->mbox_ext, 10890 pmb->context2, 10891 pmb->out_ext_byte_len); 10892 } 10893 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 10894 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 10895 10896 lpfc_debugfs_disc_trc(vport, 10897 LPFC_DISC_TRC_MBOX_VPORT, 10898 "MBOX dflt rpi: : " 10899 "status:x%x rpi:x%x", 10900 (uint32_t)pmbox->mbxStatus, 10901 pmbox->un.varWords[0], 0); 10902 10903 if (!pmbox->mbxStatus) { 10904 mp = (struct lpfc_dmabuf *) 10905 (pmb->context1); 10906 ndlp = (struct lpfc_nodelist *) 10907 pmb->context2; 10908 10909 /* Reg_LOGIN of dflt RPI was 10910 * successful. new lets get 10911 * rid of the RPI using the 10912 * same mbox buffer. 
10913 */ 10914 lpfc_unreg_login(phba, 10915 vport->vpi, 10916 pmbox->un.varWords[0], 10917 pmb); 10918 pmb->mbox_cmpl = 10919 lpfc_mbx_cmpl_dflt_rpi; 10920 pmb->context1 = mp; 10921 pmb->context2 = ndlp; 10922 pmb->vport = vport; 10923 rc = lpfc_sli_issue_mbox(phba, 10924 pmb, 10925 MBX_NOWAIT); 10926 if (rc != MBX_BUSY) 10927 lpfc_printf_log(phba, 10928 KERN_ERR, 10929 LOG_MBOX | LOG_SLI, 10930 "0350 rc should have" 10931 "been MBX_BUSY\n"); 10932 if (rc != MBX_NOT_FINISHED) 10933 goto send_current_mbox; 10934 } 10935 } 10936 spin_lock_irqsave( 10937 &phba->pport->work_port_lock, 10938 iflag); 10939 phba->pport->work_port_events &= 10940 ~WORKER_MBOX_TMO; 10941 spin_unlock_irqrestore( 10942 &phba->pport->work_port_lock, 10943 iflag); 10944 lpfc_mbox_cmpl_put(phba, pmb); 10945 } 10946 } else 10947 spin_unlock_irqrestore(&phba->hbalock, iflag); 10948 10949 if ((work_ha_copy & HA_MBATT) && 10950 (phba->sli.mbox_active == NULL)) { 10951 send_current_mbox: 10952 /* Process next mailbox command if there is one */ 10953 do { 10954 rc = lpfc_sli_issue_mbox(phba, NULL, 10955 MBX_NOWAIT); 10956 } while (rc == MBX_NOT_FINISHED); 10957 if (rc != MBX_SUCCESS) 10958 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 10959 LOG_SLI, "0349 rc should be " 10960 "MBX_SUCCESS\n"); 10961 } 10962 10963 spin_lock_irqsave(&phba->hbalock, iflag); 10964 phba->work_ha |= work_ha_copy; 10965 spin_unlock_irqrestore(&phba->hbalock, iflag); 10966 lpfc_worker_wake_up(phba); 10967 } 10968 return IRQ_HANDLED; 10969 unplug_error: 10970 spin_unlock_irqrestore(&phba->hbalock, iflag); 10971 return IRQ_HANDLED; 10972 10973 } /* lpfc_sli_sp_intr_handler */ 10974 10975 /** 10976 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device. 10977 * @irq: Interrupt number. 10978 * @dev_id: The device context pointer. 10979 * 10980 * This function is directly called from the PCI layer as an interrupt 10981 * service routine when device with SLI-3 interface spec is enabled with 10982 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 10983 * ring event in the HBA. However, when the device is enabled with either 10984 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 10985 * device-level interrupt handler. When the PCI slot is in error recovery 10986 * or the HBA is undergoing initialization, the interrupt handler will not 10987 * process the interrupt. The SCSI FCP fast-path ring event are handled in 10988 * the intrrupt context. This function is called without any lock held. 10989 * It gets the hbalock to access and update SLI data structures. 10990 * 10991 * This function returns IRQ_HANDLED when interrupt is handled else it 10992 * returns IRQ_NONE. 10993 **/ 10994 irqreturn_t 10995 lpfc_sli_fp_intr_handler(int irq, void *dev_id) 10996 { 10997 struct lpfc_hba *phba; 10998 uint32_t ha_copy; 10999 unsigned long status; 11000 unsigned long iflag; 11001 11002 /* Get the driver's phba structure from the dev_id and 11003 * assume the HBA is not interrupting. 
11004 */ 11005 phba = (struct lpfc_hba *) dev_id; 11006 11007 if (unlikely(!phba)) 11008 return IRQ_NONE; 11009 11010 /* 11011 * Stuff needs to be attented to when this function is invoked as an 11012 * individual interrupt handler in MSI-X multi-message interrupt mode 11013 */ 11014 if (phba->intr_type == MSIX) { 11015 /* Check device state for handling interrupt */ 11016 if (lpfc_intr_state_check(phba)) 11017 return IRQ_NONE; 11018 /* Need to read HA REG for FCP ring and other ring events */ 11019 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 11020 return IRQ_HANDLED; 11021 /* Clear up only attention source related to fast-path */ 11022 spin_lock_irqsave(&phba->hbalock, iflag); 11023 /* 11024 * If there is deferred error attention, do not check for 11025 * any interrupt. 11026 */ 11027 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 11028 spin_unlock_irqrestore(&phba->hbalock, iflag); 11029 return IRQ_NONE; 11030 } 11031 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 11032 phba->HAregaddr); 11033 readl(phba->HAregaddr); /* flush */ 11034 spin_unlock_irqrestore(&phba->hbalock, iflag); 11035 } else 11036 ha_copy = phba->ha_copy; 11037 11038 /* 11039 * Process all events on FCP ring. Take the optimized path for FCP IO. 11040 */ 11041 ha_copy &= ~(phba->work_ha_mask); 11042 11043 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 11044 status >>= (4*LPFC_FCP_RING); 11045 if (status & HA_RXMASK) 11046 lpfc_sli_handle_fast_ring_event(phba, 11047 &phba->sli.ring[LPFC_FCP_RING], 11048 status); 11049 11050 if (phba->cfg_multi_ring_support == 2) { 11051 /* 11052 * Process all events on extra ring. Take the optimized path 11053 * for extra ring IO. 11054 */ 11055 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 11056 status >>= (4*LPFC_EXTRA_RING); 11057 if (status & HA_RXMASK) { 11058 lpfc_sli_handle_fast_ring_event(phba, 11059 &phba->sli.ring[LPFC_EXTRA_RING], 11060 status); 11061 } 11062 } 11063 return IRQ_HANDLED; 11064 } /* lpfc_sli_fp_intr_handler */ 11065 11066 /** 11067 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device 11068 * @irq: Interrupt number. 11069 * @dev_id: The device context pointer. 11070 * 11071 * This function is the HBA device-level interrupt handler to device with 11072 * SLI-3 interface spec, called from the PCI layer when either MSI or 11073 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which 11074 * requires driver attention. This function invokes the slow-path interrupt 11075 * attention handling function and fast-path interrupt attention handling 11076 * function in turn to process the relevant HBA attention events. This 11077 * function is called without any lock held. It gets the hbalock to access 11078 * and update SLI data structures. 11079 * 11080 * This function returns IRQ_HANDLED when interrupt is handled, else it 11081 * returns IRQ_NONE. 11082 **/ 11083 irqreturn_t 11084 lpfc_sli_intr_handler(int irq, void *dev_id) 11085 { 11086 struct lpfc_hba *phba; 11087 irqreturn_t sp_irq_rc, fp_irq_rc; 11088 unsigned long status1, status2; 11089 uint32_t hc_copy; 11090 11091 /* 11092 * Get the driver's phba structure from the dev_id and 11093 * assume the HBA is not interrupting. 
11094 */ 11095 phba = (struct lpfc_hba *) dev_id; 11096 11097 if (unlikely(!phba)) 11098 return IRQ_NONE; 11099 11100 /* Check device state for handling interrupt */ 11101 if (lpfc_intr_state_check(phba)) 11102 return IRQ_NONE; 11103 11104 spin_lock(&phba->hbalock); 11105 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { 11106 spin_unlock(&phba->hbalock); 11107 return IRQ_HANDLED; 11108 } 11109 11110 if (unlikely(!phba->ha_copy)) { 11111 spin_unlock(&phba->hbalock); 11112 return IRQ_NONE; 11113 } else if (phba->ha_copy & HA_ERATT) { 11114 if (phba->hba_flag & HBA_ERATT_HANDLED) 11115 /* ERATT polling has handled ERATT */ 11116 phba->ha_copy &= ~HA_ERATT; 11117 else 11118 /* Indicate interrupt handler handles ERATT */ 11119 phba->hba_flag |= HBA_ERATT_HANDLED; 11120 } 11121 11122 /* 11123 * If there is deferred error attention, do not check for any interrupt. 11124 */ 11125 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 11126 spin_unlock(&phba->hbalock); 11127 return IRQ_NONE; 11128 } 11129 11130 /* Clear attention sources except link and error attentions */ 11131 if (lpfc_readl(phba->HCregaddr, &hc_copy)) { 11132 spin_unlock(&phba->hbalock); 11133 return IRQ_HANDLED; 11134 } 11135 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA 11136 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), 11137 phba->HCregaddr); 11138 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 11139 writel(hc_copy, phba->HCregaddr); 11140 readl(phba->HAregaddr); /* flush */ 11141 spin_unlock(&phba->hbalock); 11142 11143 /* 11144 * Invokes slow-path host attention interrupt handling as appropriate. 11145 */ 11146 11147 /* status of events with mailbox and link attention */ 11148 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); 11149 11150 /* status of events with ELS ring */ 11151 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 11152 status2 >>= (4*LPFC_ELS_RING); 11153 11154 if (status1 || (status2 & HA_RXMASK)) 11155 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); 11156 else 11157 sp_irq_rc = IRQ_NONE; 11158 11159 /* 11160 * Invoke fast-path host attention interrupt handling as appropriate. 11161 */ 11162 11163 /* status of events with FCP ring */ 11164 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 11165 status1 >>= (4*LPFC_FCP_RING); 11166 11167 /* status of events with extra ring */ 11168 if (phba->cfg_multi_ring_support == 2) { 11169 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 11170 status2 >>= (4*LPFC_EXTRA_RING); 11171 } else 11172 status2 = 0; 11173 11174 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 11175 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); 11176 else 11177 fp_irq_rc = IRQ_NONE; 11178 11179 /* Return device-level interrupt handling status */ 11180 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 11181 } /* lpfc_sli_intr_handler */ 11182 11183 /** 11184 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event 11185 * @phba: pointer to lpfc hba data structure. 11186 * 11187 * This routine is invoked by the worker thread to process all the pending 11188 * SLI4 FCP abort XRI events. 
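 * The events drained here are queued by lpfc_sli4_sp_handle_abort_xri_wcqe()
 * later in this file, which also sets FCP_XRI_ABORT_EVENT in phba->hba_flag;
 * the worker thread then calls this routine (lpfc_work_done() is assumed to
 * be the dispatch point) to empty the list.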
11189 **/ 11190 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba) 11191 { 11192 struct lpfc_cq_event *cq_event; 11193 11194 /* First, declare the fcp xri abort event has been handled */ 11195 spin_lock_irq(&phba->hbalock); 11196 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT; 11197 spin_unlock_irq(&phba->hbalock); 11198 /* Now, handle all the fcp xri abort events */ 11199 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) { 11200 /* Get the first event from the head of the event queue */ 11201 spin_lock_irq(&phba->hbalock); 11202 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 11203 cq_event, struct lpfc_cq_event, list); 11204 spin_unlock_irq(&phba->hbalock); 11205 /* Notify aborted XRI for FCP work queue */ 11206 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 11207 /* Free the event processed back to the free pool */ 11208 lpfc_sli4_cq_event_release(phba, cq_event); 11209 } 11210 } 11211 11212 /** 11213 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event 11214 * @phba: pointer to lpfc hba data structure. 11215 * 11216 * This routine is invoked by the worker thread to process all the pending 11217 * SLI4 els abort xri events. 11218 **/ 11219 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) 11220 { 11221 struct lpfc_cq_event *cq_event; 11222 11223 /* First, declare the els xri abort event has been handled */ 11224 spin_lock_irq(&phba->hbalock); 11225 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; 11226 spin_unlock_irq(&phba->hbalock); 11227 /* Now, handle all the els xri abort events */ 11228 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { 11229 /* Get the first event from the head of the event queue */ 11230 spin_lock_irq(&phba->hbalock); 11231 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 11232 cq_event, struct lpfc_cq_event, list); 11233 spin_unlock_irq(&phba->hbalock); 11234 /* Notify aborted XRI for ELS work queue */ 11235 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 11236 /* Free the event processed back to the free pool */ 11237 lpfc_sli4_cq_event_release(phba, cq_event); 11238 } 11239 } 11240 11241 /** 11242 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn 11243 * @phba: pointer to lpfc hba data structure 11244 * @pIocbIn: pointer to the rspiocbq 11245 * @pIocbOut: pointer to the cmdiocbq 11246 * @wcqe: pointer to the complete wcqe 11247 * 11248 * This routine transfers the fields of a command iocbq to a response iocbq 11249 * by copying all the IOCB fields from command iocbq and transferring the 11250 * completion status information from the complete wcqe. 
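 * The copy starts at offsetof(struct lpfc_iocbq, iocb), so the fields that
 * precede the embedded IOCB in the response iocbq (list linkage and similar
 * bookkeeping) are preserved, and only the IOCB payload and the fields after
 * it are overwritten from the command iocbq.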
11251 **/ 11252 static void 11253 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, 11254 struct lpfc_iocbq *pIocbIn, 11255 struct lpfc_iocbq *pIocbOut, 11256 struct lpfc_wcqe_complete *wcqe) 11257 { 11258 int numBdes, i; 11259 unsigned long iflags; 11260 uint32_t status, max_response; 11261 struct lpfc_dmabuf *dmabuf; 11262 struct ulp_bde64 *bpl, bde; 11263 size_t offset = offsetof(struct lpfc_iocbq, iocb); 11264 11265 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, 11266 sizeof(struct lpfc_iocbq) - offset); 11267 /* Map WCQE parameters into irspiocb parameters */ 11268 status = bf_get(lpfc_wcqe_c_status, wcqe); 11269 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK); 11270 if (pIocbOut->iocb_flag & LPFC_IO_FCP) 11271 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 11272 pIocbIn->iocb.un.fcpi.fcpi_parm = 11273 pIocbOut->iocb.un.fcpi.fcpi_parm - 11274 wcqe->total_data_placed; 11275 else 11276 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 11277 else { 11278 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 11279 switch (pIocbOut->iocb.ulpCommand) { 11280 case CMD_ELS_REQUEST64_CR: 11281 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 11282 bpl = (struct ulp_bde64 *)dmabuf->virt; 11283 bde.tus.w = le32_to_cpu(bpl[1].tus.w); 11284 max_response = bde.tus.f.bdeSize; 11285 break; 11286 case CMD_GEN_REQUEST64_CR: 11287 max_response = 0; 11288 if (!pIocbOut->context3) 11289 break; 11290 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/ 11291 sizeof(struct ulp_bde64); 11292 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 11293 bpl = (struct ulp_bde64 *)dmabuf->virt; 11294 for (i = 0; i < numBdes; i++) { 11295 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 11296 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 11297 max_response += bde.tus.f.bdeSize; 11298 } 11299 break; 11300 default: 11301 max_response = wcqe->total_data_placed; 11302 break; 11303 } 11304 if (max_response < wcqe->total_data_placed) 11305 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response; 11306 else 11307 pIocbIn->iocb.un.genreq64.bdl.bdeSize = 11308 wcqe->total_data_placed; 11309 } 11310 11311 /* Convert BG errors for completion status */ 11312 if (status == CQE_STATUS_DI_ERROR) { 11313 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; 11314 11315 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe)) 11316 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED; 11317 else 11318 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED; 11319 11320 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0; 11321 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */ 11322 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 11323 BGS_GUARD_ERR_MASK; 11324 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */ 11325 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 11326 BGS_APPTAG_ERR_MASK; 11327 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */ 11328 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 11329 BGS_REFTAG_ERR_MASK; 11330 11331 /* Check to see if there was any good data before the error */ 11332 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) { 11333 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 11334 BGS_HI_WATER_MARK_PRESENT_MASK; 11335 pIocbIn->iocb.unsli3.sli3_bg.bghm = 11336 wcqe->total_data_placed; 11337 } 11338 11339 /* 11340 * Set ALL the error bits to indicate we don't know what 11341 * type of error it is. 
11342 */ 11343 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat) 11344 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 11345 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK | 11346 BGS_GUARD_ERR_MASK); 11347 } 11348 11349 /* Pick up HBA exchange busy condition */ 11350 if (bf_get(lpfc_wcqe_c_xb, wcqe)) { 11351 spin_lock_irqsave(&phba->hbalock, iflags); 11352 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY; 11353 spin_unlock_irqrestore(&phba->hbalock, iflags); 11354 } 11355 } 11356 11357 /** 11358 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe 11359 * @phba: Pointer to HBA context object. 11360 * @irspiocbq: Pointer to the response iocbq carrying the SLI4 ELS WCQE. 11361 * 11362 * This routine handles an ELS work-queue completion event and constructs 11363 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common 11364 * discovery engine to handle. 11365 * 11366 * Return: Pointer to the response IOCBQ, NULL otherwise. 11367 **/ 11368 static struct lpfc_iocbq * 11369 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, 11370 struct lpfc_iocbq *irspiocbq) 11371 { 11372 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 11373 struct lpfc_iocbq *cmdiocbq; 11374 struct lpfc_wcqe_complete *wcqe; 11375 unsigned long iflags; 11376 11377 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; 11378 spin_lock_irqsave(&pring->ring_lock, iflags); 11379 pring->stats.iocb_event++; 11380 /* Look up the ELS command IOCB and create pseudo response IOCB */ 11381 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 11382 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 11383 spin_unlock_irqrestore(&pring->ring_lock, iflags); 11384 11385 if (unlikely(!cmdiocbq)) { 11386 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11387 "0386 ELS complete with no corresponding " 11388 "cmdiocb: iotag (%d)\n", 11389 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 11390 lpfc_sli_release_iocbq(phba, irspiocbq); 11391 return NULL; 11392 } 11393 11394 /* Fake the irspiocbq and copy necessary response information */ 11395 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe); 11396 11397 return irspiocbq; 11398 } 11399 11400 /** 11401 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event 11402 * @phba: Pointer to HBA context object. 11403 * @mcqe: Pointer to mailbox completion queue entry. 11404 * 11405 * This routine processes a mailbox completion queue entry with an 11406 * asynchronous event. 11407 * 11408 * Return: true if work posted to worker thread, otherwise false.
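 * The entry queued here goes on phba->sli4_hba.sp_asynce_work_queue with
 * ASYNC_EVENT set in phba->hba_flag; the worker thread is then expected to
 * drain that list (lpfc_sli4_async_event_proc() elsewhere in the driver is
 * assumed to be the routine that does so).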
11409 **/ 11410 static bool 11411 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 11412 { 11413 struct lpfc_cq_event *cq_event; 11414 unsigned long iflags; 11415 11416 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11417 "0392 Async Event: word0:x%x, word1:x%x, " 11418 "word2:x%x, word3:x%x\n", mcqe->word0, 11419 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); 11420 11421 /* Allocate a new internal CQ_EVENT entry */ 11422 cq_event = lpfc_sli4_cq_event_alloc(phba); 11423 if (!cq_event) { 11424 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11425 "0394 Failed to allocate CQ_EVENT entry\n"); 11426 return false; 11427 } 11428 11429 /* Move the CQE into an asynchronous event entry */ 11430 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe)); 11431 spin_lock_irqsave(&phba->hbalock, iflags); 11432 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); 11433 /* Set the async event flag */ 11434 phba->hba_flag |= ASYNC_EVENT; 11435 spin_unlock_irqrestore(&phba->hbalock, iflags); 11436 11437 return true; 11438 } 11439 11440 /** 11441 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event 11442 * @phba: Pointer to HBA context object. 11443 * @cqe: Pointer to mailbox completion queue entry. 11444 * 11445 * This routine process a mailbox completion queue entry with mailbox 11446 * completion event. 11447 * 11448 * Return: true if work posted to worker thread, otherwise false. 11449 **/ 11450 static bool 11451 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 11452 { 11453 uint32_t mcqe_status; 11454 MAILBOX_t *mbox, *pmbox; 11455 struct lpfc_mqe *mqe; 11456 struct lpfc_vport *vport; 11457 struct lpfc_nodelist *ndlp; 11458 struct lpfc_dmabuf *mp; 11459 unsigned long iflags; 11460 LPFC_MBOXQ_t *pmb; 11461 bool workposted = false; 11462 int rc; 11463 11464 /* If not a mailbox complete MCQE, out by checking mailbox consume */ 11465 if (!bf_get(lpfc_trailer_completed, mcqe)) 11466 goto out_no_mqe_complete; 11467 11468 /* Get the reference to the active mbox command */ 11469 spin_lock_irqsave(&phba->hbalock, iflags); 11470 pmb = phba->sli.mbox_active; 11471 if (unlikely(!pmb)) { 11472 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 11473 "1832 No pending MBOX command to handle\n"); 11474 spin_unlock_irqrestore(&phba->hbalock, iflags); 11475 goto out_no_mqe_complete; 11476 } 11477 spin_unlock_irqrestore(&phba->hbalock, iflags); 11478 mqe = &pmb->u.mqe; 11479 pmbox = (MAILBOX_t *)&pmb->u.mqe; 11480 mbox = phba->mbox; 11481 vport = pmb->vport; 11482 11483 /* Reset heartbeat timer */ 11484 phba->last_completion_time = jiffies; 11485 del_timer(&phba->sli.mbox_tmo); 11486 11487 /* Move mbox data to caller's mailbox region, do endian swapping */ 11488 if (pmb->mbox_cmpl && mbox) 11489 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe)); 11490 11491 /* 11492 * For mcqe errors, conditionally move a modified error code to 11493 * the mbox so that the error will not be missed. 
11494 */ 11495 mcqe_status = bf_get(lpfc_mcqe_status, mcqe); 11496 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 11497 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS) 11498 bf_set(lpfc_mqe_status, mqe, 11499 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 11500 } 11501 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 11502 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 11503 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT, 11504 "MBOX dflt rpi: status:x%x rpi:x%x", 11505 mcqe_status, 11506 pmbox->un.varWords[0], 0); 11507 if (mcqe_status == MB_CQE_STATUS_SUCCESS) { 11508 mp = (struct lpfc_dmabuf *)(pmb->context1); 11509 ndlp = (struct lpfc_nodelist *)pmb->context2; 11510 /* Reg_LOGIN of dflt RPI was successful. Now lets get 11511 * RID of the PPI using the same mbox buffer. 11512 */ 11513 lpfc_unreg_login(phba, vport->vpi, 11514 pmbox->un.varWords[0], pmb); 11515 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 11516 pmb->context1 = mp; 11517 pmb->context2 = ndlp; 11518 pmb->vport = vport; 11519 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 11520 if (rc != MBX_BUSY) 11521 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 11522 LOG_SLI, "0385 rc should " 11523 "have been MBX_BUSY\n"); 11524 if (rc != MBX_NOT_FINISHED) 11525 goto send_current_mbox; 11526 } 11527 } 11528 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 11529 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 11530 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 11531 11532 /* There is mailbox completion work to do */ 11533 spin_lock_irqsave(&phba->hbalock, iflags); 11534 __lpfc_mbox_cmpl_put(phba, pmb); 11535 phba->work_ha |= HA_MBATT; 11536 spin_unlock_irqrestore(&phba->hbalock, iflags); 11537 workposted = true; 11538 11539 send_current_mbox: 11540 spin_lock_irqsave(&phba->hbalock, iflags); 11541 /* Release the mailbox command posting token */ 11542 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 11543 /* Setting active mailbox pointer need to be in sync to flag clear */ 11544 phba->sli.mbox_active = NULL; 11545 spin_unlock_irqrestore(&phba->hbalock, iflags); 11546 /* Wake up worker thread to post the next pending mailbox command */ 11547 lpfc_worker_wake_up(phba); 11548 out_no_mqe_complete: 11549 if (bf_get(lpfc_trailer_consumed, mcqe)) 11550 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); 11551 return workposted; 11552 } 11553 11554 /** 11555 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry 11556 * @phba: Pointer to HBA context object. 11557 * @cqe: Pointer to mailbox completion queue entry. 11558 * 11559 * This routine process a mailbox completion queue entry, it invokes the 11560 * proper mailbox complete handling or asynchrous event handling routine 11561 * according to the MCQE's async bit. 11562 * 11563 * Return: true if work posted to worker thread, otherwise false. 11564 **/ 11565 static bool 11566 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) 11567 { 11568 struct lpfc_mcqe mcqe; 11569 bool workposted; 11570 11571 /* Copy the mailbox MCQE and convert endian order as needed */ 11572 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); 11573 11574 /* Invoke the proper event handling routine */ 11575 if (!bf_get(lpfc_trailer_async, &mcqe)) 11576 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe); 11577 else 11578 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe); 11579 return workposted; 11580 } 11581 11582 /** 11583 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event 11584 * @phba: Pointer to HBA context object. 
11585 * @cq: Pointer to associated CQ 11586 * @wcqe: Pointer to work-queue completion queue entry. 11587 * 11588 * This routine handles an ELS work-queue completion event. 11589 * 11590 * Return: true if work posted to worker thread, otherwise false. 11591 **/ 11592 static bool 11593 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 11594 struct lpfc_wcqe_complete *wcqe) 11595 { 11596 struct lpfc_iocbq *irspiocbq; 11597 unsigned long iflags; 11598 struct lpfc_sli_ring *pring = cq->pring; 11599 int txq_cnt = 0; 11600 int txcmplq_cnt = 0; 11601 int fcp_txcmplq_cnt = 0; 11602 11603 /* Get an irspiocbq for later ELS response processing use */ 11604 irspiocbq = lpfc_sli_get_iocbq(phba); 11605 if (!irspiocbq) { 11606 if (!list_empty(&pring->txq)) 11607 txq_cnt++; 11608 if (!list_empty(&pring->txcmplq)) 11609 txcmplq_cnt++; 11610 if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq)) 11611 fcp_txcmplq_cnt++; 11612 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11613 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " 11614 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n", 11615 txq_cnt, phba->iocb_cnt, 11616 fcp_txcmplq_cnt, 11617 txcmplq_cnt); 11618 return false; 11619 } 11620 11621 /* Save off the slow-path queue event for work thread to process */ 11622 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe)); 11623 spin_lock_irqsave(&phba->hbalock, iflags); 11624 list_add_tail(&irspiocbq->cq_event.list, 11625 &phba->sli4_hba.sp_queue_event); 11626 phba->hba_flag |= HBA_SP_QUEUE_EVT; 11627 spin_unlock_irqrestore(&phba->hbalock, iflags); 11628 11629 return true; 11630 } 11631 11632 /** 11633 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event 11634 * @phba: Pointer to HBA context object. 11635 * @wcqe: Pointer to work-queue completion queue entry. 11636 * 11637 * This routine handles slow-path WQ entry comsumed event by invoking the 11638 * proper WQ release routine to the slow-path WQ. 11639 **/ 11640 static void 11641 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, 11642 struct lpfc_wcqe_release *wcqe) 11643 { 11644 /* sanity check on queue memory */ 11645 if (unlikely(!phba->sli4_hba.els_wq)) 11646 return; 11647 /* Check for the slow-path ELS work queue */ 11648 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) 11649 lpfc_sli4_wq_release(phba->sli4_hba.els_wq, 11650 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 11651 else 11652 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11653 "2579 Slow-path wqe consume event carries " 11654 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n", 11655 bf_get(lpfc_wcqe_r_wqe_index, wcqe), 11656 phba->sli4_hba.els_wq->queue_id); 11657 } 11658 11659 /** 11660 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event 11661 * @phba: Pointer to HBA context object. 11662 * @cq: Pointer to a WQ completion queue. 11663 * @wcqe: Pointer to work-queue completion queue entry. 11664 * 11665 * This routine handles an XRI abort event. 11666 * 11667 * Return: true if work posted to worker thread, otherwise false. 
11668 **/ 11669 static bool 11670 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, 11671 struct lpfc_queue *cq, 11672 struct sli4_wcqe_xri_aborted *wcqe) 11673 { 11674 bool workposted = false; 11675 struct lpfc_cq_event *cq_event; 11676 unsigned long iflags; 11677 11678 /* Allocate a new internal CQ_EVENT entry */ 11679 cq_event = lpfc_sli4_cq_event_alloc(phba); 11680 if (!cq_event) { 11681 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11682 "0602 Failed to allocate CQ_EVENT entry\n"); 11683 return false; 11684 } 11685 11686 /* Move the CQE into the proper xri abort event list */ 11687 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); 11688 switch (cq->subtype) { 11689 case LPFC_FCP: 11690 spin_lock_irqsave(&phba->hbalock, iflags); 11691 list_add_tail(&cq_event->list, 11692 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 11693 /* Set the fcp xri abort event flag */ 11694 phba->hba_flag |= FCP_XRI_ABORT_EVENT; 11695 spin_unlock_irqrestore(&phba->hbalock, iflags); 11696 workposted = true; 11697 break; 11698 case LPFC_ELS: 11699 spin_lock_irqsave(&phba->hbalock, iflags); 11700 list_add_tail(&cq_event->list, 11701 &phba->sli4_hba.sp_els_xri_aborted_work_queue); 11702 /* Set the els xri abort event flag */ 11703 phba->hba_flag |= ELS_XRI_ABORT_EVENT; 11704 spin_unlock_irqrestore(&phba->hbalock, iflags); 11705 workposted = true; 11706 break; 11707 default: 11708 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11709 "0603 Invalid work queue CQE subtype (x%x)\n", 11710 cq->subtype); 11711 workposted = false; 11712 break; 11713 } 11714 return workposted; 11715 } 11716 11717 /** 11718 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry 11719 * @phba: Pointer to HBA context object. 11720 * @rcqe: Pointer to receive-queue completion queue entry. 11721 * 11722 * This routine process a receive-queue completion queue entry. 11723 * 11724 * Return: true if work posted to worker thread, otherwise false. 
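 * Receive queues come in pairs (phba->sli4_hba.hdr_rq for frame headers,
 * phba->sli4_hba.dat_rq for payload) and one RCQE consumes a buffer from
 * each. Note that a truncated frame is still handed to the worker thread:
 * the FC_STATUS_RQ_BUF_LEN_EXCEEDED case below logs the event, bumps
 * RQ_buf_trunc and then falls through to the success case.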
11725 **/ 11726 static bool 11727 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) 11728 { 11729 bool workposted = false; 11730 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 11731 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 11732 struct hbq_dmabuf *dma_buf; 11733 uint32_t status, rq_id; 11734 unsigned long iflags; 11735 11736 /* sanity check on queue memory */ 11737 if (unlikely(!hrq) || unlikely(!drq)) 11738 return workposted; 11739 11740 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 11741 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 11742 else 11743 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); 11744 if (rq_id != hrq->queue_id) 11745 goto out; 11746 11747 status = bf_get(lpfc_rcqe_status, rcqe); 11748 switch (status) { 11749 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 11750 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11751 "2537 Receive Frame Truncated!!\n"); 11752 hrq->RQ_buf_trunc++; /* fall through */ 11753 case FC_STATUS_RQ_SUCCESS: 11754 lpfc_sli4_rq_release(hrq, drq); 11755 spin_lock_irqsave(&phba->hbalock, iflags); 11756 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 11757 if (!dma_buf) { 11758 hrq->RQ_no_buf_found++; 11759 spin_unlock_irqrestore(&phba->hbalock, iflags); 11760 goto out; 11761 } 11762 hrq->RQ_rcv_buf++; 11763 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); 11764 /* save off the frame for the worker thread to process */ 11765 list_add_tail(&dma_buf->cq_event.list, 11766 &phba->sli4_hba.sp_queue_event); 11767 /* Frame received */ 11768 phba->hba_flag |= HBA_SP_QUEUE_EVT; 11769 spin_unlock_irqrestore(&phba->hbalock, iflags); 11770 workposted = true; 11771 break; 11772 case FC_STATUS_INSUFF_BUF_NEED_BUF: 11773 case FC_STATUS_INSUFF_BUF_FRM_DISC: 11774 hrq->RQ_no_posted_buf++; 11775 /* Post more buffers if possible */ 11776 spin_lock_irqsave(&phba->hbalock, iflags); 11777 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; 11778 spin_unlock_irqrestore(&phba->hbalock, iflags); 11779 workposted = true; 11780 break; 11781 } 11782 out: 11783 return workposted; 11784 } 11785 11786 /** 11787 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry 11788 * @phba: Pointer to HBA context object. 11789 * @cq: Pointer to the completion queue. 11790 * @cqe: Pointer to a completion queue entry. 11791 * 11792 * This routine processes a slow-path work-queue or receive-queue completion 11793 * queue entry. 11794 * 11795 * Return: true if work posted to worker thread, otherwise false.
11796 **/ 11797 static bool 11798 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 11799 struct lpfc_cqe *cqe) 11800 { 11801 struct lpfc_cqe cqevt; 11802 bool workposted = false; 11803 11804 /* Copy the work queue CQE and convert endian order if needed */ 11805 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe)); 11806 11807 /* Check and process for different type of WCQE and dispatch */ 11808 switch (bf_get(lpfc_cqe_code, &cqevt)) { 11809 case CQE_CODE_COMPL_WQE: 11810 /* Process the WQ/RQ complete event */ 11811 phba->last_completion_time = jiffies; 11812 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq, 11813 (struct lpfc_wcqe_complete *)&cqevt); 11814 break; 11815 case CQE_CODE_RELEASE_WQE: 11816 /* Process the WQ release event */ 11817 lpfc_sli4_sp_handle_rel_wcqe(phba, 11818 (struct lpfc_wcqe_release *)&cqevt); 11819 break; 11820 case CQE_CODE_XRI_ABORTED: 11821 /* Process the WQ XRI abort event */ 11822 phba->last_completion_time = jiffies; 11823 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 11824 (struct sli4_wcqe_xri_aborted *)&cqevt); 11825 break; 11826 case CQE_CODE_RECEIVE: 11827 case CQE_CODE_RECEIVE_V1: 11828 /* Process the RQ event */ 11829 phba->last_completion_time = jiffies; 11830 workposted = lpfc_sli4_sp_handle_rcqe(phba, 11831 (struct lpfc_rcqe *)&cqevt); 11832 break; 11833 default: 11834 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11835 "0388 Not a valid WCQE code: x%x\n", 11836 bf_get(lpfc_cqe_code, &cqevt)); 11837 break; 11838 } 11839 return workposted; 11840 } 11841 11842 /** 11843 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry 11844 * @phba: Pointer to HBA context object. 11845 * @eqe: Pointer to slow-path event queue entry. 11846 * @speq: Pointer to the slow-path event queue being serviced. 11847 * This routine processes an event queue entry from the slow-path event queue. 11848 * It checks the MajorCode and MinorCode to determine whether this is a 11849 * completion event on a completion queue; if not, an error is logged 11850 * and the routine just returns. Otherwise, it gets the corresponding completion 11851 * queue, processes all the entries on that completion queue, rearms the 11852 * completion queue, and then returns.
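 * While walking the CQ it releases processed entries back to the hardware in
 * batches (every cq->entry_repost entries, via LPFC_QUEUE_NOARM) and only
 * re-arms the completion queue once, after the last entry has been handled.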
11853 * 11854 **/ 11855 static void 11856 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 11857 struct lpfc_queue *speq) 11858 { 11859 struct lpfc_queue *cq = NULL, *childq; 11860 struct lpfc_cqe *cqe; 11861 bool workposted = false; 11862 int ecount = 0; 11863 uint16_t cqid; 11864 11865 /* Get the reference to the corresponding CQ */ 11866 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 11867 11868 list_for_each_entry(childq, &speq->child_list, list) { 11869 if (childq->queue_id == cqid) { 11870 cq = childq; 11871 break; 11872 } 11873 } 11874 if (unlikely(!cq)) { 11875 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 11876 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11877 "0365 Slow-path CQ identifier " 11878 "(%d) does not exist\n", cqid); 11879 return; 11880 } 11881 11882 /* Process all the entries to the CQ */ 11883 switch (cq->type) { 11884 case LPFC_MCQ: 11885 while ((cqe = lpfc_sli4_cq_get(cq))) { 11886 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); 11887 if (!(++ecount % cq->entry_repost)) 11888 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 11889 cq->CQ_mbox++; 11890 } 11891 break; 11892 case LPFC_WCQ: 11893 while ((cqe = lpfc_sli4_cq_get(cq))) { 11894 if (cq->subtype == LPFC_FCP) 11895 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, 11896 cqe); 11897 else 11898 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, 11899 cqe); 11900 if (!(++ecount % cq->entry_repost)) 11901 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 11902 } 11903 11904 /* Track the max number of CQEs processed in 1 EQ */ 11905 if (ecount > cq->CQ_max_cqe) 11906 cq->CQ_max_cqe = ecount; 11907 break; 11908 default: 11909 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11910 "0370 Invalid completion queue type (%d)\n", 11911 cq->type); 11912 return; 11913 } 11914 11915 /* Catch the no cq entry condition, log an error */ 11916 if (unlikely(ecount == 0)) 11917 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11918 "0371 No entry from the CQ: identifier " 11919 "(x%x), type (%d)\n", cq->queue_id, cq->type); 11920 11921 /* In any case, flash and re-arm the RCQ */ 11922 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 11923 11924 /* wake up worker thread if there are works to be done */ 11925 if (workposted) 11926 lpfc_worker_wake_up(phba); 11927 } 11928 11929 /** 11930 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry 11931 * @phba: Pointer to HBA context object. 11932 * @cq: Pointer to associated CQ 11933 * @wcqe: Pointer to work-queue completion queue entry. 11934 * 11935 * This routine process a fast-path work queue completion entry from fast-path 11936 * event queue for FCP command response completion. 11937 **/ 11938 static void 11939 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 11940 struct lpfc_wcqe_complete *wcqe) 11941 { 11942 struct lpfc_sli_ring *pring = cq->pring; 11943 struct lpfc_iocbq *cmdiocbq; 11944 struct lpfc_iocbq irspiocbq; 11945 unsigned long iflags; 11946 11947 /* Check for response status */ 11948 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 11949 /* If resource errors reported from HBA, reduce queue 11950 * depth of the SCSI device. 
11951 */ 11952 if (((bf_get(lpfc_wcqe_c_status, wcqe) == 11953 IOSTAT_LOCAL_REJECT)) && 11954 ((wcqe->parameter & IOERR_PARAM_MASK) == 11955 IOERR_NO_RESOURCES)) 11956 phba->lpfc_rampdown_queue_depth(phba); 11957 11958 /* Log the error status */ 11959 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11960 "0373 FCP complete error: status=x%x, " 11961 "hw_status=x%x, total_data_specified=%d, " 11962 "parameter=x%x, word3=x%x\n", 11963 bf_get(lpfc_wcqe_c_status, wcqe), 11964 bf_get(lpfc_wcqe_c_hw_status, wcqe), 11965 wcqe->total_data_placed, wcqe->parameter, 11966 wcqe->word3); 11967 } 11968 11969 /* Look up the FCP command IOCB and create pseudo response IOCB */ 11970 spin_lock_irqsave(&pring->ring_lock, iflags); 11971 pring->stats.iocb_event++; 11972 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 11973 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 11974 spin_unlock_irqrestore(&pring->ring_lock, iflags); 11975 if (unlikely(!cmdiocbq)) { 11976 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11977 "0374 FCP complete with no corresponding " 11978 "cmdiocb: iotag (%d)\n", 11979 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 11980 return; 11981 } 11982 if (unlikely(!cmdiocbq->iocb_cmpl)) { 11983 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11984 "0375 FCP cmdiocb not callback function " 11985 "iotag: (%d)\n", 11986 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 11987 return; 11988 } 11989 11990 /* Fake the irspiocb and copy necessary response information */ 11991 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); 11992 11993 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 11994 spin_lock_irqsave(&phba->hbalock, iflags); 11995 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 11996 spin_unlock_irqrestore(&phba->hbalock, iflags); 11997 } 11998 11999 /* Pass the cmd_iocb and the rsp state to the upper layer */ 12000 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); 12001 } 12002 12003 /** 12004 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event 12005 * @phba: Pointer to HBA context object. 12006 * @cq: Pointer to completion queue. 12007 * @wcqe: Pointer to work-queue completion queue entry. 12008 * 12009 * This routine handles an fast-path WQ entry comsumed event by invoking the 12010 * proper WQ release routine to the slow-path WQ. 12011 **/ 12012 static void 12013 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 12014 struct lpfc_wcqe_release *wcqe) 12015 { 12016 struct lpfc_queue *childwq; 12017 bool wqid_matched = false; 12018 uint16_t fcp_wqid; 12019 12020 /* Check for fast-path FCP work queue release */ 12021 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); 12022 list_for_each_entry(childwq, &cq->child_list, list) { 12023 if (childwq->queue_id == fcp_wqid) { 12024 lpfc_sli4_wq_release(childwq, 12025 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 12026 wqid_matched = true; 12027 break; 12028 } 12029 } 12030 /* Report warning log message if no match found */ 12031 if (wqid_matched != true) 12032 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 12033 "2580 Fast-path wqe consume event carries " 12034 "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid); 12035 } 12036 12037 /** 12038 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry 12039 * @cq: Pointer to the completion queue. 12040 * @eqe: Pointer to fast-path completion queue entry. 12041 * 12042 * This routine process a fast-path work queue completion entry from fast-path 12043 * event queue for FCP command response completion. 
12044  **/
12045 static int
12046 lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12047                          struct lpfc_cqe *cqe)
12048 {
12049         struct lpfc_wcqe_release wcqe;
12050         bool workposted = false;
12051 
12052         /* Copy the work queue CQE and convert endian order if needed */
12053         lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
12054 
12055         /* Check the CQE type and dispatch to the proper handler */
12056         switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
12057         case CQE_CODE_COMPL_WQE:
12058                 cq->CQ_wq++;
12059                 /* Process the WQ complete event */
12060                 phba->last_completion_time = jiffies;
12061                 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
12062                                 (struct lpfc_wcqe_complete *)&wcqe);
12063                 break;
12064         case CQE_CODE_RELEASE_WQE:
12065                 cq->CQ_release_wqe++;
12066                 /* Process the WQ release event */
12067                 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
12068                                 (struct lpfc_wcqe_release *)&wcqe);
12069                 break;
12070         case CQE_CODE_XRI_ABORTED:
12071                 cq->CQ_xri_aborted++;
12072                 /* Process the WQ XRI abort event */
12073                 phba->last_completion_time = jiffies;
12074                 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
12075                                 (struct sli4_wcqe_xri_aborted *)&wcqe);
12076                 break;
12077         default:
12078                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12079                                 "0144 Not a valid WCQE code: x%x\n",
12080                                 bf_get(lpfc_wcqe_c_code, &wcqe));
12081                 break;
12082         }
12083         return workposted;
12084 }
12085 
12086 /**
12087  * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
12088  * @phba: Pointer to HBA context object.
12089  * @eqe: Pointer to fast-path event queue entry.
12090  *
12091  * This routine processes an event queue entry from the fast-path event queue.
12092  * It checks the MajorCode and MinorCode to determine whether this is a
12093  * completion event on a completion queue; if not, an error is logged and the
12094  * routine simply returns. Otherwise, it looks up the corresponding completion
12095  * queue, processes all the entries on the completion queue, rearms the
12096  * completion queue, and then returns.
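 *
 * The fast-path versus slow-path routing described above can be sketched
 * as follows; this is illustrative only and mirrors the checks performed
 * in the function body below:
 *
 *   cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
 *   if (cqid != phba->sli4_hba.fcp_cq_map[qidx]) {
 *           lpfc_sli4_sp_handle_eqe(phba, eqe,
 *                                   phba->sli4_hba.hba_eq[qidx]);
 *           return;
 *   }
 *   cq = phba->sli4_hba.fcp_cq[qidx];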
12097 **/ 12098 static void 12099 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 12100 uint32_t qidx) 12101 { 12102 struct lpfc_queue *cq; 12103 struct lpfc_cqe *cqe; 12104 bool workposted = false; 12105 uint16_t cqid; 12106 int ecount = 0; 12107 12108 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 12109 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12110 "0366 Not a valid completion " 12111 "event: majorcode=x%x, minorcode=x%x\n", 12112 bf_get_le32(lpfc_eqe_major_code, eqe), 12113 bf_get_le32(lpfc_eqe_minor_code, eqe)); 12114 return; 12115 } 12116 12117 /* Get the reference to the corresponding CQ */ 12118 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 12119 12120 /* Check if this is a Slow path event */ 12121 if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) { 12122 lpfc_sli4_sp_handle_eqe(phba, eqe, 12123 phba->sli4_hba.hba_eq[qidx]); 12124 return; 12125 } 12126 12127 if (unlikely(!phba->sli4_hba.fcp_cq)) { 12128 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 12129 "3146 Fast-path completion queues " 12130 "does not exist\n"); 12131 return; 12132 } 12133 cq = phba->sli4_hba.fcp_cq[qidx]; 12134 if (unlikely(!cq)) { 12135 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 12136 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12137 "0367 Fast-path completion queue " 12138 "(%d) does not exist\n", qidx); 12139 return; 12140 } 12141 12142 if (unlikely(cqid != cq->queue_id)) { 12143 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12144 "0368 Miss-matched fast-path completion " 12145 "queue identifier: eqcqid=%d, fcpcqid=%d\n", 12146 cqid, cq->queue_id); 12147 return; 12148 } 12149 12150 /* Process all the entries to the CQ */ 12151 while ((cqe = lpfc_sli4_cq_get(cq))) { 12152 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe); 12153 if (!(++ecount % cq->entry_repost)) 12154 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 12155 } 12156 12157 /* Track the max number of CQEs processed in 1 EQ */ 12158 if (ecount > cq->CQ_max_cqe) 12159 cq->CQ_max_cqe = ecount; 12160 12161 /* Catch the no cq entry condition */ 12162 if (unlikely(ecount == 0)) 12163 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12164 "0369 No entry from fast-path completion " 12165 "queue fcpcqid=%d\n", cq->queue_id); 12166 12167 /* In any case, flash and re-arm the CQ */ 12168 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 12169 12170 /* wake up worker thread if there are works to be done */ 12171 if (workposted) 12172 lpfc_worker_wake_up(phba); 12173 } 12174 12175 static void 12176 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) 12177 { 12178 struct lpfc_eqe *eqe; 12179 12180 /* walk all the EQ entries and drop on the floor */ 12181 while ((eqe = lpfc_sli4_eq_get(eq))) 12182 ; 12183 12184 /* Clear and re-arm the EQ */ 12185 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM); 12186 } 12187 12188 12189 /** 12190 * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue 12191 * entry 12192 * @phba: Pointer to HBA context object. 12193 * @eqe: Pointer to fast-path event queue entry. 12194 * 12195 * This routine process a event queue entry from the Flash Optimized Fabric 12196 * event queue. It will check the MajorCode and MinorCode to determine this 12197 * is for a completion event on a completion queue, if not, an error shall be 12198 * logged and just return. Otherwise, it will get to the corresponding 12199 * completion queue and process all the entries on the completion queue, rearm 12200 * the completion queue, and then return. 
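 *
 * CQEs are released back to the HBA in batches of cq->entry_repost, and a
 * final release that re-arms the queue is issued once it has been drained.
 * A minimal sketch of that pattern, matching the loop in the function body
 * below (illustrative only):
 *
 *   while ((cqe = lpfc_sli4_cq_get(cq))) {
 *           workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
 *           if (!(++ecount % cq->entry_repost))
 *                   lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
 *   }
 *   lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);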
12201 **/ 12202 static void 12203 lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) 12204 { 12205 struct lpfc_queue *cq; 12206 struct lpfc_cqe *cqe; 12207 bool workposted = false; 12208 uint16_t cqid; 12209 int ecount = 0; 12210 12211 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 12212 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12213 "9147 Not a valid completion " 12214 "event: majorcode=x%x, minorcode=x%x\n", 12215 bf_get_le32(lpfc_eqe_major_code, eqe), 12216 bf_get_le32(lpfc_eqe_minor_code, eqe)); 12217 return; 12218 } 12219 12220 /* Get the reference to the corresponding CQ */ 12221 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 12222 12223 /* Next check for OAS */ 12224 cq = phba->sli4_hba.oas_cq; 12225 if (unlikely(!cq)) { 12226 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 12227 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12228 "9148 OAS completion queue " 12229 "does not exist\n"); 12230 return; 12231 } 12232 12233 if (unlikely(cqid != cq->queue_id)) { 12234 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12235 "9149 Miss-matched fast-path compl " 12236 "queue id: eqcqid=%d, fcpcqid=%d\n", 12237 cqid, cq->queue_id); 12238 return; 12239 } 12240 12241 /* Process all the entries to the OAS CQ */ 12242 while ((cqe = lpfc_sli4_cq_get(cq))) { 12243 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe); 12244 if (!(++ecount % cq->entry_repost)) 12245 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 12246 } 12247 12248 /* Track the max number of CQEs processed in 1 EQ */ 12249 if (ecount > cq->CQ_max_cqe) 12250 cq->CQ_max_cqe = ecount; 12251 12252 /* Catch the no cq entry condition */ 12253 if (unlikely(ecount == 0)) 12254 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12255 "9153 No entry from fast-path completion " 12256 "queue fcpcqid=%d\n", cq->queue_id); 12257 12258 /* In any case, flash and re-arm the CQ */ 12259 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 12260 12261 /* wake up worker thread if there are works to be done */ 12262 if (workposted) 12263 lpfc_worker_wake_up(phba); 12264 } 12265 12266 /** 12267 * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device 12268 * @irq: Interrupt number. 12269 * @dev_id: The device context pointer. 12270 * 12271 * This function is directly called from the PCI layer as an interrupt 12272 * service routine when device with SLI-4 interface spec is enabled with 12273 * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric 12274 * IOCB ring event in the HBA. However, when the device is enabled with either 12275 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 12276 * device-level interrupt handler. When the PCI slot is in error recovery 12277 * or the HBA is undergoing initialization, the interrupt handler will not 12278 * process the interrupt. The Flash Optimized Fabric ring event are handled in 12279 * the intrrupt context. This function is called without any lock held. 12280 * It gets the hbalock to access and update SLI data structures. Note that, 12281 * the EQ to CQ are one-to-one map such that the EQ index is 12282 * equal to that of CQ index. 12283 * 12284 * This function returns IRQ_HANDLED when interrupt is handled else it 12285 * returns IRQ_NONE. 
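 *
 * A hypothetical registration sketch is shown below; the actual
 * request_irq() call is made by the driver's initialization code, not by
 * this routine, and the vector and name string used here are illustrative:
 *
 *   rc = request_irq(vector, lpfc_sli4_fof_intr_handler, 0,
 *                    "lpfc:fof", &phba->sli4_hba.fcp_eq_hdl[0]);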
12286 **/ 12287 irqreturn_t 12288 lpfc_sli4_fof_intr_handler(int irq, void *dev_id) 12289 { 12290 struct lpfc_hba *phba; 12291 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; 12292 struct lpfc_queue *eq; 12293 struct lpfc_eqe *eqe; 12294 unsigned long iflag; 12295 int ecount = 0; 12296 uint32_t eqidx; 12297 12298 /* Get the driver's phba structure from the dev_id */ 12299 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id; 12300 phba = fcp_eq_hdl->phba; 12301 eqidx = fcp_eq_hdl->idx; 12302 12303 if (unlikely(!phba)) 12304 return IRQ_NONE; 12305 12306 /* Get to the EQ struct associated with this vector */ 12307 eq = phba->sli4_hba.fof_eq; 12308 if (unlikely(!eq)) 12309 return IRQ_NONE; 12310 12311 /* Check device state for handling interrupt */ 12312 if (unlikely(lpfc_intr_state_check(phba))) { 12313 eq->EQ_badstate++; 12314 /* Check again for link_state with lock held */ 12315 spin_lock_irqsave(&phba->hbalock, iflag); 12316 if (phba->link_state < LPFC_LINK_DOWN) 12317 /* Flush, clear interrupt, and rearm the EQ */ 12318 lpfc_sli4_eq_flush(phba, eq); 12319 spin_unlock_irqrestore(&phba->hbalock, iflag); 12320 return IRQ_NONE; 12321 } 12322 12323 /* 12324 * Process all the event on FCP fast-path EQ 12325 */ 12326 while ((eqe = lpfc_sli4_eq_get(eq))) { 12327 lpfc_sli4_fof_handle_eqe(phba, eqe); 12328 if (!(++ecount % eq->entry_repost)) 12329 lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM); 12330 eq->EQ_processed++; 12331 } 12332 12333 /* Track the max number of EQEs processed in 1 intr */ 12334 if (ecount > eq->EQ_max_eqe) 12335 eq->EQ_max_eqe = ecount; 12336 12337 12338 if (unlikely(ecount == 0)) { 12339 eq->EQ_no_entry++; 12340 12341 if (phba->intr_type == MSIX) 12342 /* MSI-X treated interrupt served as no EQ share INT */ 12343 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 12344 "9145 MSI-X interrupt with no EQE\n"); 12345 else { 12346 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12347 "9146 ISR interrupt with no EQE\n"); 12348 /* Non MSI-X treated on interrupt as EQ share INT */ 12349 return IRQ_NONE; 12350 } 12351 } 12352 /* Always clear and re-arm the fast-path EQ */ 12353 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM); 12354 return IRQ_HANDLED; 12355 } 12356 12357 /** 12358 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device 12359 * @irq: Interrupt number. 12360 * @dev_id: The device context pointer. 12361 * 12362 * This function is directly called from the PCI layer as an interrupt 12363 * service routine when device with SLI-4 interface spec is enabled with 12364 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 12365 * ring event in the HBA. However, when the device is enabled with either 12366 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 12367 * device-level interrupt handler. When the PCI slot is in error recovery 12368 * or the HBA is undergoing initialization, the interrupt handler will not 12369 * process the interrupt. The SCSI FCP fast-path ring event are handled in 12370 * the intrrupt context. This function is called without any lock held. 12371 * It gets the hbalock to access and update SLI data structures. Note that, 12372 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is 12373 * equal to that of FCP CQ index. 12374 * 12375 * The link attention and ELS ring attention events are handled 12376 * by the worker thread. The interrupt handler signals the worker thread 12377 * and returns for these events. This function is called without any lock 12378 * held. It gets the hbalock to access and update SLI data structures. 
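 *
 * When lpfc_fcp_look_ahead is set, a per-EQ atomic counter ensures that
 * only one context services a given EQ at a time. A sketch of that gate,
 * mirroring the code in the function body below (illustrative only):
 *
 *   if (lpfc_fcp_look_ahead) {
 *           if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use))
 *                   lpfc_sli4_eq_clr_intr(fpeq);
 *           else {
 *                   atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
 *                   return IRQ_NONE;
 *           }
 *   }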
12379 * 12380 * This function returns IRQ_HANDLED when interrupt is handled else it 12381 * returns IRQ_NONE. 12382 **/ 12383 irqreturn_t 12384 lpfc_sli4_hba_intr_handler(int irq, void *dev_id) 12385 { 12386 struct lpfc_hba *phba; 12387 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; 12388 struct lpfc_queue *fpeq; 12389 struct lpfc_eqe *eqe; 12390 unsigned long iflag; 12391 int ecount = 0; 12392 int fcp_eqidx; 12393 12394 /* Get the driver's phba structure from the dev_id */ 12395 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id; 12396 phba = fcp_eq_hdl->phba; 12397 fcp_eqidx = fcp_eq_hdl->idx; 12398 12399 if (unlikely(!phba)) 12400 return IRQ_NONE; 12401 if (unlikely(!phba->sli4_hba.hba_eq)) 12402 return IRQ_NONE; 12403 12404 /* Get to the EQ struct associated with this vector */ 12405 fpeq = phba->sli4_hba.hba_eq[fcp_eqidx]; 12406 if (unlikely(!fpeq)) 12407 return IRQ_NONE; 12408 12409 if (lpfc_fcp_look_ahead) { 12410 if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use)) 12411 lpfc_sli4_eq_clr_intr(fpeq); 12412 else { 12413 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); 12414 return IRQ_NONE; 12415 } 12416 } 12417 12418 /* Check device state for handling interrupt */ 12419 if (unlikely(lpfc_intr_state_check(phba))) { 12420 fpeq->EQ_badstate++; 12421 /* Check again for link_state with lock held */ 12422 spin_lock_irqsave(&phba->hbalock, iflag); 12423 if (phba->link_state < LPFC_LINK_DOWN) 12424 /* Flush, clear interrupt, and rearm the EQ */ 12425 lpfc_sli4_eq_flush(phba, fpeq); 12426 spin_unlock_irqrestore(&phba->hbalock, iflag); 12427 if (lpfc_fcp_look_ahead) 12428 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); 12429 return IRQ_NONE; 12430 } 12431 12432 /* 12433 * Process all the event on FCP fast-path EQ 12434 */ 12435 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 12436 lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx); 12437 if (!(++ecount % fpeq->entry_repost)) 12438 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); 12439 fpeq->EQ_processed++; 12440 } 12441 12442 /* Track the max number of EQEs processed in 1 intr */ 12443 if (ecount > fpeq->EQ_max_eqe) 12444 fpeq->EQ_max_eqe = ecount; 12445 12446 /* Always clear and re-arm the fast-path EQ */ 12447 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); 12448 12449 if (unlikely(ecount == 0)) { 12450 fpeq->EQ_no_entry++; 12451 12452 if (lpfc_fcp_look_ahead) { 12453 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); 12454 return IRQ_NONE; 12455 } 12456 12457 if (phba->intr_type == MSIX) 12458 /* MSI-X treated interrupt served as no EQ share INT */ 12459 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 12460 "0358 MSI-X interrupt with no EQE\n"); 12461 else 12462 /* Non MSI-X treated on interrupt as EQ share INT */ 12463 return IRQ_NONE; 12464 } 12465 12466 if (lpfc_fcp_look_ahead) 12467 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); 12468 return IRQ_HANDLED; 12469 } /* lpfc_sli4_fp_intr_handler */ 12470 12471 /** 12472 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device 12473 * @irq: Interrupt number. 12474 * @dev_id: The device context pointer. 12475 * 12476 * This function is the device-level interrupt handler to device with SLI-4 12477 * interface spec, called from the PCI layer when either MSI or Pin-IRQ 12478 * interrupt mode is enabled and there is an event in the HBA which requires 12479 * driver attention. This function invokes the slow-path interrupt attention 12480 * handling function and fast-path interrupt attention handling function in 12481 * turn to process the relevant HBA attention events. This function is called 12482 * without any lock held. 
It gets the hbalock to access and update SLI data 12483 * structures. 12484 * 12485 * This function returns IRQ_HANDLED when interrupt is handled, else it 12486 * returns IRQ_NONE. 12487 **/ 12488 irqreturn_t 12489 lpfc_sli4_intr_handler(int irq, void *dev_id) 12490 { 12491 struct lpfc_hba *phba; 12492 irqreturn_t hba_irq_rc; 12493 bool hba_handled = false; 12494 int fcp_eqidx; 12495 12496 /* Get the driver's phba structure from the dev_id */ 12497 phba = (struct lpfc_hba *)dev_id; 12498 12499 if (unlikely(!phba)) 12500 return IRQ_NONE; 12501 12502 /* 12503 * Invoke fast-path host attention interrupt handling as appropriate. 12504 */ 12505 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) { 12506 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq, 12507 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]); 12508 if (hba_irq_rc == IRQ_HANDLED) 12509 hba_handled |= true; 12510 } 12511 12512 if (phba->cfg_fof) { 12513 hba_irq_rc = lpfc_sli4_fof_intr_handler(irq, 12514 &phba->sli4_hba.fcp_eq_hdl[0]); 12515 if (hba_irq_rc == IRQ_HANDLED) 12516 hba_handled |= true; 12517 } 12518 12519 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE; 12520 } /* lpfc_sli4_intr_handler */ 12521 12522 /** 12523 * lpfc_sli4_queue_free - free a queue structure and associated memory 12524 * @queue: The queue structure to free. 12525 * 12526 * This function frees a queue structure and the DMAable memory used for 12527 * the host resident queue. This function must be called after destroying the 12528 * queue on the HBA. 12529 **/ 12530 void 12531 lpfc_sli4_queue_free(struct lpfc_queue *queue) 12532 { 12533 struct lpfc_dmabuf *dmabuf; 12534 12535 if (!queue) 12536 return; 12537 12538 while (!list_empty(&queue->page_list)) { 12539 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, 12540 list); 12541 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE, 12542 dmabuf->virt, dmabuf->phys); 12543 kfree(dmabuf); 12544 } 12545 kfree(queue); 12546 return; 12547 } 12548 12549 /** 12550 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure 12551 * @phba: The HBA that this queue is being created on. 12552 * @entry_size: The size of each queue entry for this queue. 12553 * @entry count: The number of entries that this queue will handle. 12554 * 12555 * This function allocates a queue structure and the DMAable memory used for 12556 * the host resident queue. This function must be called before creating the 12557 * queue on the HBA. 
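 *
 * Typical usage, sketched with example entry size/count values (real
 * callers derive these from the SLI4 parameters reported by the port);
 * the error handling shown is illustrative:
 *
 *   struct lpfc_queue *eq;
 *
 *   eq = lpfc_sli4_queue_alloc(phba, 4, 1024);
 *   if (!eq)
 *           return -ENOMEM;
 *   rc = lpfc_eq_create(phba, eq, phba->cfg_fcp_imax);
 *   if (rc)
 *           lpfc_sli4_queue_free(eq);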
12558 **/ 12559 struct lpfc_queue * 12560 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, 12561 uint32_t entry_count) 12562 { 12563 struct lpfc_queue *queue; 12564 struct lpfc_dmabuf *dmabuf; 12565 int x, total_qe_count; 12566 void *dma_pointer; 12567 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12568 12569 if (!phba->sli4_hba.pc_sli4_params.supported) 12570 hw_page_size = SLI4_PAGE_SIZE; 12571 12572 queue = kzalloc(sizeof(struct lpfc_queue) + 12573 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL); 12574 if (!queue) 12575 return NULL; 12576 queue->page_count = (ALIGN(entry_size * entry_count, 12577 hw_page_size))/hw_page_size; 12578 INIT_LIST_HEAD(&queue->list); 12579 INIT_LIST_HEAD(&queue->page_list); 12580 INIT_LIST_HEAD(&queue->child_list); 12581 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) { 12582 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 12583 if (!dmabuf) 12584 goto out_fail; 12585 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 12586 hw_page_size, &dmabuf->phys, 12587 GFP_KERNEL); 12588 if (!dmabuf->virt) { 12589 kfree(dmabuf); 12590 goto out_fail; 12591 } 12592 memset(dmabuf->virt, 0, hw_page_size); 12593 dmabuf->buffer_tag = x; 12594 list_add_tail(&dmabuf->list, &queue->page_list); 12595 /* initialize queue's entry array */ 12596 dma_pointer = dmabuf->virt; 12597 for (; total_qe_count < entry_count && 12598 dma_pointer < (hw_page_size + dmabuf->virt); 12599 total_qe_count++, dma_pointer += entry_size) { 12600 queue->qe[total_qe_count].address = dma_pointer; 12601 } 12602 } 12603 queue->entry_size = entry_size; 12604 queue->entry_count = entry_count; 12605 12606 /* 12607 * entry_repost is calculated based on the number of entries in the 12608 * queue. This works out except for RQs. If buffers are NOT initially 12609 * posted for every RQE, entry_repost should be adjusted accordingly. 12610 */ 12611 queue->entry_repost = (entry_count >> 3); 12612 if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST) 12613 queue->entry_repost = LPFC_QUEUE_MIN_REPOST; 12614 queue->phba = phba; 12615 12616 return queue; 12617 out_fail: 12618 lpfc_sli4_queue_free(queue); 12619 return NULL; 12620 } 12621 12622 /** 12623 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory 12624 * @phba: HBA structure that indicates port to create a queue on. 12625 * @pci_barset: PCI BAR set flag. 12626 * 12627 * This function shall perform iomap of the specified PCI BAR address to host 12628 * memory address if not already done so and return it. The returned host 12629 * memory address can be NULL. 12630 */ 12631 static void __iomem * 12632 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) 12633 { 12634 struct pci_dev *pdev; 12635 12636 if (!phba->pcidev) 12637 return NULL; 12638 else 12639 pdev = phba->pcidev; 12640 12641 switch (pci_barset) { 12642 case WQ_PCI_BAR_0_AND_1: 12643 return phba->pci_bar0_memmap_p; 12644 case WQ_PCI_BAR_2_AND_3: 12645 return phba->pci_bar2_memmap_p; 12646 case WQ_PCI_BAR_4_AND_5: 12647 return phba->pci_bar4_memmap_p; 12648 default: 12649 break; 12650 } 12651 return NULL; 12652 } 12653 12654 /** 12655 * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs 12656 * @phba: HBA structure that indicates port to create a queue on. 12657 * @startq: The starting FCP EQ to modify 12658 * 12659 * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA. 12660 * 12661 * The @phba struct is used to send mailbox command to HBA. 
The @startq 12662 * is used to get the starting FCP EQ to change. 12663 * This function is asynchronous and will wait for the mailbox 12664 * command to finish before continuing. 12665 * 12666 * On success this function will return a zero. If unable to allocate enough 12667 * memory this function will return -ENOMEM. If the queue create mailbox command 12668 * fails this function will return -ENXIO. 12669 **/ 12670 uint32_t 12671 lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq) 12672 { 12673 struct lpfc_mbx_modify_eq_delay *eq_delay; 12674 LPFC_MBOXQ_t *mbox; 12675 struct lpfc_queue *eq; 12676 int cnt, rc, length, status = 0; 12677 uint32_t shdr_status, shdr_add_status; 12678 uint32_t result; 12679 int fcp_eqidx; 12680 union lpfc_sli4_cfg_shdr *shdr; 12681 uint16_t dmult; 12682 12683 if (startq >= phba->cfg_fcp_io_channel) 12684 return 0; 12685 12686 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12687 if (!mbox) 12688 return -ENOMEM; 12689 length = (sizeof(struct lpfc_mbx_modify_eq_delay) - 12690 sizeof(struct lpfc_sli4_cfg_mhdr)); 12691 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12692 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY, 12693 length, LPFC_SLI4_MBX_EMBED); 12694 eq_delay = &mbox->u.mqe.un.eq_delay; 12695 12696 /* Calculate delay multiper from maximum interrupt per second */ 12697 result = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel; 12698 if (result > LPFC_DMULT_CONST) 12699 dmult = 0; 12700 else 12701 dmult = LPFC_DMULT_CONST/result - 1; 12702 12703 cnt = 0; 12704 for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel; 12705 fcp_eqidx++) { 12706 eq = phba->sli4_hba.hba_eq[fcp_eqidx]; 12707 if (!eq) 12708 continue; 12709 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; 12710 eq_delay->u.request.eq[cnt].phase = 0; 12711 eq_delay->u.request.eq[cnt].delay_multi = dmult; 12712 cnt++; 12713 if (cnt >= LPFC_MAX_EQ_DELAY) 12714 break; 12715 } 12716 eq_delay->u.request.num_eq = cnt; 12717 12718 mbox->vport = phba->pport; 12719 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12720 mbox->context1 = NULL; 12721 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12722 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr; 12723 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12724 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12725 if (shdr_status || shdr_add_status || rc) { 12726 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12727 "2512 MODIFY_EQ_DELAY mailbox failed with " 12728 "status x%x add_status x%x, mbx status x%x\n", 12729 shdr_status, shdr_add_status, rc); 12730 status = -ENXIO; 12731 } 12732 mempool_free(mbox, phba->mbox_mem_pool); 12733 return status; 12734 } 12735 12736 /** 12737 * lpfc_eq_create - Create an Event Queue on the HBA 12738 * @phba: HBA structure that indicates port to create a queue on. 12739 * @eq: The queue structure to use to create the event queue. 12740 * @imax: The maximum interrupt per second limit. 12741 * 12742 * This function creates an event queue, as detailed in @eq, on a port, 12743 * described by @phba by sending an EQ_CREATE mailbox command to the HBA. 12744 * 12745 * The @phba struct is used to send mailbox command to HBA. The @eq struct 12746 * is used to get the entry count and entry size that are necessary to 12747 * determine the number of pages to allocate and use for this queue. This 12748 * function will send the EQ_CREATE mailbox command to the HBA to setup the 12749 * event queue. 
This function is asynchronous and will wait for the mailbox 12750 * command to finish before continuing. 12751 * 12752 * On success this function will return a zero. If unable to allocate enough 12753 * memory this function will return -ENOMEM. If the queue create mailbox command 12754 * fails this function will return -ENXIO. 12755 **/ 12756 uint32_t 12757 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) 12758 { 12759 struct lpfc_mbx_eq_create *eq_create; 12760 LPFC_MBOXQ_t *mbox; 12761 int rc, length, status = 0; 12762 struct lpfc_dmabuf *dmabuf; 12763 uint32_t shdr_status, shdr_add_status; 12764 union lpfc_sli4_cfg_shdr *shdr; 12765 uint16_t dmult; 12766 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12767 12768 /* sanity check on queue memory */ 12769 if (!eq) 12770 return -ENODEV; 12771 if (!phba->sli4_hba.pc_sli4_params.supported) 12772 hw_page_size = SLI4_PAGE_SIZE; 12773 12774 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12775 if (!mbox) 12776 return -ENOMEM; 12777 length = (sizeof(struct lpfc_mbx_eq_create) - 12778 sizeof(struct lpfc_sli4_cfg_mhdr)); 12779 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12780 LPFC_MBOX_OPCODE_EQ_CREATE, 12781 length, LPFC_SLI4_MBX_EMBED); 12782 eq_create = &mbox->u.mqe.un.eq_create; 12783 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, 12784 eq->page_count); 12785 bf_set(lpfc_eq_context_size, &eq_create->u.request.context, 12786 LPFC_EQE_SIZE); 12787 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); 12788 /* Calculate delay multiper from maximum interrupt per second */ 12789 if (imax > LPFC_DMULT_CONST) 12790 dmult = 0; 12791 else 12792 dmult = LPFC_DMULT_CONST/imax - 1; 12793 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, 12794 dmult); 12795 switch (eq->entry_count) { 12796 default: 12797 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12798 "0360 Unsupported EQ count. 
(%d)\n", 12799 eq->entry_count); 12800 if (eq->entry_count < 256) 12801 return -EINVAL; 12802 /* otherwise default to smallest count (drop through) */ 12803 case 256: 12804 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 12805 LPFC_EQ_CNT_256); 12806 break; 12807 case 512: 12808 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 12809 LPFC_EQ_CNT_512); 12810 break; 12811 case 1024: 12812 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 12813 LPFC_EQ_CNT_1024); 12814 break; 12815 case 2048: 12816 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 12817 LPFC_EQ_CNT_2048); 12818 break; 12819 case 4096: 12820 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 12821 LPFC_EQ_CNT_4096); 12822 break; 12823 } 12824 list_for_each_entry(dmabuf, &eq->page_list, list) { 12825 memset(dmabuf->virt, 0, hw_page_size); 12826 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 12827 putPaddrLow(dmabuf->phys); 12828 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 12829 putPaddrHigh(dmabuf->phys); 12830 } 12831 mbox->vport = phba->pport; 12832 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12833 mbox->context1 = NULL; 12834 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12835 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; 12836 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12837 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12838 if (shdr_status || shdr_add_status || rc) { 12839 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12840 "2500 EQ_CREATE mailbox failed with " 12841 "status x%x add_status x%x, mbx status x%x\n", 12842 shdr_status, shdr_add_status, rc); 12843 status = -ENXIO; 12844 } 12845 eq->type = LPFC_EQ; 12846 eq->subtype = LPFC_NONE; 12847 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); 12848 if (eq->queue_id == 0xFFFF) 12849 status = -ENXIO; 12850 eq->host_index = 0; 12851 eq->hba_index = 0; 12852 12853 mempool_free(mbox, phba->mbox_mem_pool); 12854 return status; 12855 } 12856 12857 /** 12858 * lpfc_cq_create - Create a Completion Queue on the HBA 12859 * @phba: HBA structure that indicates port to create a queue on. 12860 * @cq: The queue structure to use to create the completion queue. 12861 * @eq: The event queue to bind this completion queue to. 12862 * 12863 * This function creates a completion queue, as detailed in @wq, on a port, 12864 * described by @phba by sending a CQ_CREATE mailbox command to the HBA. 12865 * 12866 * The @phba struct is used to send mailbox command to HBA. The @cq struct 12867 * is used to get the entry count and entry size that are necessary to 12868 * determine the number of pages to allocate and use for this queue. The @eq 12869 * is used to indicate which event queue to bind this completion queue to. This 12870 * function will send the CQ_CREATE mailbox command to the HBA to setup the 12871 * completion queue. This function is asynchronous and will wait for the mailbox 12872 * command to finish before continuing. 12873 * 12874 * On success this function will return a zero. If unable to allocate enough 12875 * memory this function will return -ENOMEM. If the queue create mailbox command 12876 * fails this function will return -ENXIO. 
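 *
 * Illustrative call, assuming @cq and @eq were allocated with
 * lpfc_sli4_queue_alloc() beforehand (the type and subtype values shown
 * are examples):
 *
 *   rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP);
 *   if (rc)
 *           return rc;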
12877 **/ 12878 uint32_t 12879 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, 12880 struct lpfc_queue *eq, uint32_t type, uint32_t subtype) 12881 { 12882 struct lpfc_mbx_cq_create *cq_create; 12883 struct lpfc_dmabuf *dmabuf; 12884 LPFC_MBOXQ_t *mbox; 12885 int rc, length, status = 0; 12886 uint32_t shdr_status, shdr_add_status; 12887 union lpfc_sli4_cfg_shdr *shdr; 12888 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12889 12890 /* sanity check on queue memory */ 12891 if (!cq || !eq) 12892 return -ENODEV; 12893 if (!phba->sli4_hba.pc_sli4_params.supported) 12894 hw_page_size = SLI4_PAGE_SIZE; 12895 12896 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12897 if (!mbox) 12898 return -ENOMEM; 12899 length = (sizeof(struct lpfc_mbx_cq_create) - 12900 sizeof(struct lpfc_sli4_cfg_mhdr)); 12901 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12902 LPFC_MBOX_OPCODE_CQ_CREATE, 12903 length, LPFC_SLI4_MBX_EMBED); 12904 cq_create = &mbox->u.mqe.un.cq_create; 12905 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; 12906 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, 12907 cq->page_count); 12908 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); 12909 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); 12910 bf_set(lpfc_mbox_hdr_version, &shdr->request, 12911 phba->sli4_hba.pc_sli4_params.cqv); 12912 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { 12913 /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */ 12914 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1); 12915 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, 12916 eq->queue_id); 12917 } else { 12918 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, 12919 eq->queue_id); 12920 } 12921 switch (cq->entry_count) { 12922 default: 12923 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12924 "0361 Unsupported CQ count. (%d)\n", 12925 cq->entry_count); 12926 if (cq->entry_count < 256) { 12927 status = -EINVAL; 12928 goto out; 12929 } 12930 /* otherwise default to smallest count (drop through) */ 12931 case 256: 12932 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 12933 LPFC_CQ_CNT_256); 12934 break; 12935 case 512: 12936 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 12937 LPFC_CQ_CNT_512); 12938 break; 12939 case 1024: 12940 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 12941 LPFC_CQ_CNT_1024); 12942 break; 12943 } 12944 list_for_each_entry(dmabuf, &cq->page_list, list) { 12945 memset(dmabuf->virt, 0, hw_page_size); 12946 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 12947 putPaddrLow(dmabuf->phys); 12948 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 12949 putPaddrHigh(dmabuf->phys); 12950 } 12951 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12952 12953 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 12954 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12955 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12956 if (shdr_status || shdr_add_status || rc) { 12957 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12958 "2501 CQ_CREATE mailbox failed with " 12959 "status x%x add_status x%x, mbx status x%x\n", 12960 shdr_status, shdr_add_status, rc); 12961 status = -ENXIO; 12962 goto out; 12963 } 12964 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 12965 if (cq->queue_id == 0xFFFF) { 12966 status = -ENXIO; 12967 goto out; 12968 } 12969 /* link the cq onto the parent eq child list */ 12970 list_add_tail(&cq->list, &eq->child_list); 12971 /* Set up completion queue's type and subtype */ 12972 cq->type = type; 12973 cq->subtype = subtype; 12974 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 12975 cq->assoc_qid = eq->queue_id; 12976 cq->host_index = 0; 12977 cq->hba_index = 0; 12978 12979 out: 12980 mempool_free(mbox, phba->mbox_mem_pool); 12981 return status; 12982 } 12983 12984 /** 12985 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration 12986 * @phba: HBA structure that indicates port to create a queue on. 12987 * @mq: The queue structure to use to create the mailbox queue. 12988 * @mbox: An allocated pointer to type LPFC_MBOXQ_t 12989 * @cq: The completion queue to associate with this cq. 12990 * 12991 * This function provides failback (fb) functionality when the 12992 * mq_create_ext fails on older FW generations. It's purpose is identical 12993 * to mq_create_ext otherwise. 12994 * 12995 * This routine cannot fail as all attributes were previously accessed and 12996 * initialized in mq_create_ext. 12997 **/ 12998 static void 12999 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, 13000 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq) 13001 { 13002 struct lpfc_mbx_mq_create *mq_create; 13003 struct lpfc_dmabuf *dmabuf; 13004 int length; 13005 13006 length = (sizeof(struct lpfc_mbx_mq_create) - 13007 sizeof(struct lpfc_sli4_cfg_mhdr)); 13008 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 13009 LPFC_MBOX_OPCODE_MQ_CREATE, 13010 length, LPFC_SLI4_MBX_EMBED); 13011 mq_create = &mbox->u.mqe.un.mq_create; 13012 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, 13013 mq->page_count); 13014 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, 13015 cq->queue_id); 13016 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); 13017 switch (mq->entry_count) { 13018 case 16: 13019 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 13020 LPFC_MQ_RING_SIZE_16); 13021 break; 13022 case 32: 13023 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 13024 LPFC_MQ_RING_SIZE_32); 13025 break; 13026 case 64: 13027 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 13028 LPFC_MQ_RING_SIZE_64); 13029 break; 13030 case 128: 13031 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 13032 LPFC_MQ_RING_SIZE_128); 13033 break; 13034 } 13035 list_for_each_entry(dmabuf, &mq->page_list, list) { 13036 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 13037 putPaddrLow(dmabuf->phys); 13038 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 13039 putPaddrHigh(dmabuf->phys); 13040 } 13041 } 13042 13043 /** 13044 * lpfc_mq_create - Create a mailbox Queue on the HBA 13045 * @phba: HBA structure that indicates port to create a queue on. 13046 * @mq: The queue structure to use to create the mailbox queue. 
13047 * @cq: The completion queue to associate with this cq. 13048 * @subtype: The queue's subtype. 13049 * 13050 * This function creates a mailbox queue, as detailed in @mq, on a port, 13051 * described by @phba by sending a MQ_CREATE mailbox command to the HBA. 13052 * 13053 * The @phba struct is used to send mailbox command to HBA. The @cq struct 13054 * is used to get the entry count and entry size that are necessary to 13055 * determine the number of pages to allocate and use for this queue. This 13056 * function will send the MQ_CREATE mailbox command to the HBA to setup the 13057 * mailbox queue. This function is asynchronous and will wait for the mailbox 13058 * command to finish before continuing. 13059 * 13060 * On success this function will return a zero. If unable to allocate enough 13061 * memory this function will return -ENOMEM. If the queue create mailbox command 13062 * fails this function will return -ENXIO. 13063 **/ 13064 int32_t 13065 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, 13066 struct lpfc_queue *cq, uint32_t subtype) 13067 { 13068 struct lpfc_mbx_mq_create *mq_create; 13069 struct lpfc_mbx_mq_create_ext *mq_create_ext; 13070 struct lpfc_dmabuf *dmabuf; 13071 LPFC_MBOXQ_t *mbox; 13072 int rc, length, status = 0; 13073 uint32_t shdr_status, shdr_add_status; 13074 union lpfc_sli4_cfg_shdr *shdr; 13075 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 13076 13077 /* sanity check on queue memory */ 13078 if (!mq || !cq) 13079 return -ENODEV; 13080 if (!phba->sli4_hba.pc_sli4_params.supported) 13081 hw_page_size = SLI4_PAGE_SIZE; 13082 13083 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13084 if (!mbox) 13085 return -ENOMEM; 13086 length = (sizeof(struct lpfc_mbx_mq_create_ext) - 13087 sizeof(struct lpfc_sli4_cfg_mhdr)); 13088 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 13089 LPFC_MBOX_OPCODE_MQ_CREATE_EXT, 13090 length, LPFC_SLI4_MBX_EMBED); 13091 13092 mq_create_ext = &mbox->u.mqe.un.mq_create_ext; 13093 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; 13094 bf_set(lpfc_mbx_mq_create_ext_num_pages, 13095 &mq_create_ext->u.request, mq->page_count); 13096 bf_set(lpfc_mbx_mq_create_ext_async_evt_link, 13097 &mq_create_ext->u.request, 1); 13098 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip, 13099 &mq_create_ext->u.request, 1); 13100 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, 13101 &mq_create_ext->u.request, 1); 13102 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc, 13103 &mq_create_ext->u.request, 1); 13104 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, 13105 &mq_create_ext->u.request, 1); 13106 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); 13107 bf_set(lpfc_mbox_hdr_version, &shdr->request, 13108 phba->sli4_hba.pc_sli4_params.mqv); 13109 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) 13110 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, 13111 cq->queue_id); 13112 else 13113 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, 13114 cq->queue_id); 13115 switch (mq->entry_count) { 13116 default: 13117 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13118 "0362 Unsupported MQ count. 
(%d)\n", 13119 mq->entry_count); 13120 if (mq->entry_count < 16) { 13121 status = -EINVAL; 13122 goto out; 13123 } 13124 /* otherwise default to smallest count (drop through) */ 13125 case 16: 13126 bf_set(lpfc_mq_context_ring_size, 13127 &mq_create_ext->u.request.context, 13128 LPFC_MQ_RING_SIZE_16); 13129 break; 13130 case 32: 13131 bf_set(lpfc_mq_context_ring_size, 13132 &mq_create_ext->u.request.context, 13133 LPFC_MQ_RING_SIZE_32); 13134 break; 13135 case 64: 13136 bf_set(lpfc_mq_context_ring_size, 13137 &mq_create_ext->u.request.context, 13138 LPFC_MQ_RING_SIZE_64); 13139 break; 13140 case 128: 13141 bf_set(lpfc_mq_context_ring_size, 13142 &mq_create_ext->u.request.context, 13143 LPFC_MQ_RING_SIZE_128); 13144 break; 13145 } 13146 list_for_each_entry(dmabuf, &mq->page_list, list) { 13147 memset(dmabuf->virt, 0, hw_page_size); 13148 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = 13149 putPaddrLow(dmabuf->phys); 13150 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = 13151 putPaddrHigh(dmabuf->phys); 13152 } 13153 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13154 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 13155 &mq_create_ext->u.response); 13156 if (rc != MBX_SUCCESS) { 13157 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13158 "2795 MQ_CREATE_EXT failed with " 13159 "status x%x. Failback to MQ_CREATE.\n", 13160 rc); 13161 lpfc_mq_create_fb_init(phba, mq, mbox, cq); 13162 mq_create = &mbox->u.mqe.un.mq_create; 13163 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13164 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; 13165 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 13166 &mq_create->u.response); 13167 } 13168 13169 /* The IOCTL status is embedded in the mailbox subheader. */ 13170 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13171 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13172 if (shdr_status || shdr_add_status || rc) { 13173 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13174 "2502 MQ_CREATE mailbox failed with " 13175 "status x%x add_status x%x, mbx status x%x\n", 13176 shdr_status, shdr_add_status, rc); 13177 status = -ENXIO; 13178 goto out; 13179 } 13180 if (mq->queue_id == 0xFFFF) { 13181 status = -ENXIO; 13182 goto out; 13183 } 13184 mq->type = LPFC_MQ; 13185 mq->assoc_qid = cq->queue_id; 13186 mq->subtype = subtype; 13187 mq->host_index = 0; 13188 mq->hba_index = 0; 13189 13190 /* link the mq onto the parent cq child list */ 13191 list_add_tail(&mq->list, &cq->child_list); 13192 out: 13193 mempool_free(mbox, phba->mbox_mem_pool); 13194 return status; 13195 } 13196 13197 /** 13198 * lpfc_wq_create - Create a Work Queue on the HBA 13199 * @phba: HBA structure that indicates port to create a queue on. 13200 * @wq: The queue structure to use to create the work queue. 13201 * @cq: The completion queue to bind this work queue to. 13202 * @subtype: The subtype of the work queue indicating its functionality. 13203 * 13204 * This function creates a work queue, as detailed in @wq, on a port, described 13205 * by @phba by sending a WQ_CREATE mailbox command to the HBA. 13206 * 13207 * The @phba struct is used to send mailbox command to HBA. The @wq struct 13208 * is used to get the entry count and entry size that are necessary to 13209 * determine the number of pages to allocate and use for this queue. The @cq 13210 * is used to indicate which completion queue to bind this work queue to. This 13211 * function will send the WQ_CREATE mailbox command to the HBA to setup the 13212 * work queue. 
This function is asynchronous and will wait for the mailbox 13213 * command to finish before continuing. 13214 * 13215 * On success this function will return a zero. If unable to allocate enough 13216 * memory this function will return -ENOMEM. If the queue create mailbox command 13217 * fails this function will return -ENXIO. 13218 **/ 13219 uint32_t 13220 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, 13221 struct lpfc_queue *cq, uint32_t subtype) 13222 { 13223 struct lpfc_mbx_wq_create *wq_create; 13224 struct lpfc_dmabuf *dmabuf; 13225 LPFC_MBOXQ_t *mbox; 13226 int rc, length, status = 0; 13227 uint32_t shdr_status, shdr_add_status; 13228 union lpfc_sli4_cfg_shdr *shdr; 13229 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 13230 struct dma_address *page; 13231 void __iomem *bar_memmap_p; 13232 uint32_t db_offset; 13233 uint16_t pci_barset; 13234 13235 /* sanity check on queue memory */ 13236 if (!wq || !cq) 13237 return -ENODEV; 13238 if (!phba->sli4_hba.pc_sli4_params.supported) 13239 hw_page_size = SLI4_PAGE_SIZE; 13240 13241 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13242 if (!mbox) 13243 return -ENOMEM; 13244 length = (sizeof(struct lpfc_mbx_wq_create) - 13245 sizeof(struct lpfc_sli4_cfg_mhdr)); 13246 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 13247 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, 13248 length, LPFC_SLI4_MBX_EMBED); 13249 wq_create = &mbox->u.mqe.un.wq_create; 13250 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; 13251 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, 13252 wq->page_count); 13253 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, 13254 cq->queue_id); 13255 13256 /* wqv is the earliest version supported, NOT the latest */ 13257 bf_set(lpfc_mbox_hdr_version, &shdr->request, 13258 phba->sli4_hba.pc_sli4_params.wqv); 13259 13260 switch (phba->sli4_hba.pc_sli4_params.wqv) { 13261 case LPFC_Q_CREATE_VERSION_0: 13262 switch (wq->entry_size) { 13263 default: 13264 case 64: 13265 /* Nothing to do, version 0 ONLY supports 64 byte */ 13266 page = wq_create->u.request.page; 13267 break; 13268 case 128: 13269 if (!(phba->sli4_hba.pc_sli4_params.wqsize & 13270 LPFC_WQ_SZ128_SUPPORT)) { 13271 status = -ERANGE; 13272 goto out; 13273 } 13274 /* If we get here the HBA MUST also support V1 and 13275 * we MUST use it 13276 */ 13277 bf_set(lpfc_mbox_hdr_version, &shdr->request, 13278 LPFC_Q_CREATE_VERSION_1); 13279 13280 bf_set(lpfc_mbx_wq_create_wqe_count, 13281 &wq_create->u.request_1, wq->entry_count); 13282 bf_set(lpfc_mbx_wq_create_wqe_size, 13283 &wq_create->u.request_1, 13284 LPFC_WQ_WQE_SIZE_128); 13285 bf_set(lpfc_mbx_wq_create_page_size, 13286 &wq_create->u.request_1, 13287 (PAGE_SIZE/SLI4_PAGE_SIZE)); 13288 page = wq_create->u.request_1.page; 13289 break; 13290 } 13291 break; 13292 case LPFC_Q_CREATE_VERSION_1: 13293 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, 13294 wq->entry_count); 13295 switch (wq->entry_size) { 13296 default: 13297 case 64: 13298 bf_set(lpfc_mbx_wq_create_wqe_size, 13299 &wq_create->u.request_1, 13300 LPFC_WQ_WQE_SIZE_64); 13301 break; 13302 case 128: 13303 if (!(phba->sli4_hba.pc_sli4_params.wqsize & 13304 LPFC_WQ_SZ128_SUPPORT)) { 13305 status = -ERANGE; 13306 goto out; 13307 } 13308 bf_set(lpfc_mbx_wq_create_wqe_size, 13309 &wq_create->u.request_1, 13310 LPFC_WQ_WQE_SIZE_128); 13311 break; 13312 } 13313 bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, 13314 (PAGE_SIZE/SLI4_PAGE_SIZE)); 13315 page = wq_create->u.request_1.page; 13316 
break; 13317 default: 13318 status = -ERANGE; 13319 goto out; 13320 } 13321 13322 list_for_each_entry(dmabuf, &wq->page_list, list) { 13323 memset(dmabuf->virt, 0, hw_page_size); 13324 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); 13325 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); 13326 } 13327 13328 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 13329 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1); 13330 13331 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13332 /* The IOCTL status is embedded in the mailbox subheader. */ 13333 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13334 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13335 if (shdr_status || shdr_add_status || rc) { 13336 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13337 "2503 WQ_CREATE mailbox failed with " 13338 "status x%x add_status x%x, mbx status x%x\n", 13339 shdr_status, shdr_add_status, rc); 13340 status = -ENXIO; 13341 goto out; 13342 } 13343 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response); 13344 if (wq->queue_id == 0xFFFF) { 13345 status = -ENXIO; 13346 goto out; 13347 } 13348 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 13349 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format, 13350 &wq_create->u.response); 13351 if ((wq->db_format != LPFC_DB_LIST_FORMAT) && 13352 (wq->db_format != LPFC_DB_RING_FORMAT)) { 13353 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13354 "3265 WQ[%d] doorbell format not " 13355 "supported: x%x\n", wq->queue_id, 13356 wq->db_format); 13357 status = -EINVAL; 13358 goto out; 13359 } 13360 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set, 13361 &wq_create->u.response); 13362 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); 13363 if (!bar_memmap_p) { 13364 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13365 "3263 WQ[%d] failed to memmap pci " 13366 "barset:x%x\n", wq->queue_id, 13367 pci_barset); 13368 status = -ENOMEM; 13369 goto out; 13370 } 13371 db_offset = wq_create->u.response.doorbell_offset; 13372 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) && 13373 (db_offset != LPFC_ULP1_WQ_DOORBELL)) { 13374 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13375 "3252 WQ[%d] doorbell offset not " 13376 "supported: x%x\n", wq->queue_id, 13377 db_offset); 13378 status = -EINVAL; 13379 goto out; 13380 } 13381 wq->db_regaddr = bar_memmap_p + db_offset; 13382 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13383 "3264 WQ[%d]: barset:x%x, offset:x%x, " 13384 "format:x%x\n", wq->queue_id, pci_barset, 13385 db_offset, wq->db_format); 13386 } else { 13387 wq->db_format = LPFC_DB_LIST_FORMAT; 13388 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 13389 } 13390 wq->type = LPFC_WQ; 13391 wq->assoc_qid = cq->queue_id; 13392 wq->subtype = subtype; 13393 wq->host_index = 0; 13394 wq->hba_index = 0; 13395 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL; 13396 13397 /* link the wq onto the parent cq child list */ 13398 list_add_tail(&wq->list, &cq->child_list); 13399 out: 13400 mempool_free(mbox, phba->mbox_mem_pool); 13401 return status; 13402 } 13403 13404 /** 13405 * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ 13406 * @phba: HBA structure that indicates port to create a queue on. 13407 * @rq: The queue structure to use for the receive queue. 13408 * @qno: The associated HBQ number 13409 * 13410 * 13411 * For SLI4 we need to adjust the RQ repost value based on 13412 * the number of buffers that are initially posted to the RQ. 
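 *
 * For example (illustrative): an RQ whose HBQ profile initially posts 512
 * buffers ends up with entry_repost = 512 >> 3 = 64, and the computed
 * value is never allowed to drop below LPFC_QUEUE_MIN_REPOST.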
13413 */ 13414 void 13415 lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno) 13416 { 13417 uint32_t cnt; 13418 13419 /* sanity check on queue memory */ 13420 if (!rq) 13421 return; 13422 cnt = lpfc_hbq_defs[qno]->entry_count; 13423 13424 /* Recalc repost for RQs based on buffers initially posted */ 13425 cnt = (cnt >> 3); 13426 if (cnt < LPFC_QUEUE_MIN_REPOST) 13427 cnt = LPFC_QUEUE_MIN_REPOST; 13428 13429 rq->entry_repost = cnt; 13430 } 13431 13432 /** 13433 * lpfc_rq_create - Create a Receive Queue on the HBA 13434 * @phba: HBA structure that indicates port to create a queue on. 13435 * @hrq: The queue structure to use to create the header receive queue. 13436 * @drq: The queue structure to use to create the data receive queue. 13437 * @cq: The completion queue to bind this work queue to. 13438 * 13439 * This function creates a receive buffer queue pair , as detailed in @hrq and 13440 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 13441 * to the HBA. 13442 * 13443 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 13444 * struct is used to get the entry count that is necessary to determine the 13445 * number of pages to use for this queue. The @cq is used to indicate which 13446 * completion queue to bind received buffers that are posted to these queues to. 13447 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 13448 * receive queue pair. This function is asynchronous and will wait for the 13449 * mailbox command to finish before continuing. 13450 * 13451 * On success this function will return a zero. If unable to allocate enough 13452 * memory this function will return -ENOMEM. If the queue create mailbox command 13453 * fails this function will return -ENXIO. 
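 *
 * Illustrative call for a header/data RQ pair bound to @cq; subtype is
 * whatever queue subtype the caller uses for its receive queues, and hrq
 * and drq must have matching entry counts:
 *
 *   rc = lpfc_rq_create(phba, hrq, drq, cq, subtype);
 *   if (rc)
 *           return rc;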
13454 **/ 13455 uint32_t 13456 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, 13457 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) 13458 { 13459 struct lpfc_mbx_rq_create *rq_create; 13460 struct lpfc_dmabuf *dmabuf; 13461 LPFC_MBOXQ_t *mbox; 13462 int rc, length, status = 0; 13463 uint32_t shdr_status, shdr_add_status; 13464 union lpfc_sli4_cfg_shdr *shdr; 13465 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 13466 void __iomem *bar_memmap_p; 13467 uint32_t db_offset; 13468 uint16_t pci_barset; 13469 13470 /* sanity check on queue memory */ 13471 if (!hrq || !drq || !cq) 13472 return -ENODEV; 13473 if (!phba->sli4_hba.pc_sli4_params.supported) 13474 hw_page_size = SLI4_PAGE_SIZE; 13475 13476 if (hrq->entry_count != drq->entry_count) 13477 return -EINVAL; 13478 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13479 if (!mbox) 13480 return -ENOMEM; 13481 length = (sizeof(struct lpfc_mbx_rq_create) - 13482 sizeof(struct lpfc_sli4_cfg_mhdr)); 13483 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 13484 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 13485 length, LPFC_SLI4_MBX_EMBED); 13486 rq_create = &mbox->u.mqe.un.rq_create; 13487 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 13488 bf_set(lpfc_mbox_hdr_version, &shdr->request, 13489 phba->sli4_hba.pc_sli4_params.rqv); 13490 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 13491 bf_set(lpfc_rq_context_rqe_count_1, 13492 &rq_create->u.request.context, 13493 hrq->entry_count); 13494 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; 13495 bf_set(lpfc_rq_context_rqe_size, 13496 &rq_create->u.request.context, 13497 LPFC_RQE_SIZE_8); 13498 bf_set(lpfc_rq_context_page_size, 13499 &rq_create->u.request.context, 13500 (PAGE_SIZE/SLI4_PAGE_SIZE)); 13501 } else { 13502 switch (hrq->entry_count) { 13503 default: 13504 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13505 "2535 Unsupported RQ count. (%d)\n", 13506 hrq->entry_count); 13507 if (hrq->entry_count < 512) { 13508 status = -EINVAL; 13509 goto out; 13510 } 13511 /* otherwise default to smallest count (drop through) */ 13512 case 512: 13513 bf_set(lpfc_rq_context_rqe_count, 13514 &rq_create->u.request.context, 13515 LPFC_RQ_RING_SIZE_512); 13516 break; 13517 case 1024: 13518 bf_set(lpfc_rq_context_rqe_count, 13519 &rq_create->u.request.context, 13520 LPFC_RQ_RING_SIZE_1024); 13521 break; 13522 case 2048: 13523 bf_set(lpfc_rq_context_rqe_count, 13524 &rq_create->u.request.context, 13525 LPFC_RQ_RING_SIZE_2048); 13526 break; 13527 case 4096: 13528 bf_set(lpfc_rq_context_rqe_count, 13529 &rq_create->u.request.context, 13530 LPFC_RQ_RING_SIZE_4096); 13531 break; 13532 } 13533 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 13534 LPFC_HDR_BUF_SIZE); 13535 } 13536 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 13537 cq->queue_id); 13538 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 13539 hrq->page_count); 13540 list_for_each_entry(dmabuf, &hrq->page_list, list) { 13541 memset(dmabuf->virt, 0, hw_page_size); 13542 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 13543 putPaddrLow(dmabuf->phys); 13544 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 13545 putPaddrHigh(dmabuf->phys); 13546 } 13547 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 13548 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 13549 13550 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13551 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 13552 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13553 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13554 if (shdr_status || shdr_add_status || rc) { 13555 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13556 "2504 RQ_CREATE mailbox failed with " 13557 "status x%x add_status x%x, mbx status x%x\n", 13558 shdr_status, shdr_add_status, rc); 13559 status = -ENXIO; 13560 goto out; 13561 } 13562 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 13563 if (hrq->queue_id == 0xFFFF) { 13564 status = -ENXIO; 13565 goto out; 13566 } 13567 13568 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 13569 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format, 13570 &rq_create->u.response); 13571 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) && 13572 (hrq->db_format != LPFC_DB_RING_FORMAT)) { 13573 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13574 "3262 RQ [%d] doorbell format not " 13575 "supported: x%x\n", hrq->queue_id, 13576 hrq->db_format); 13577 status = -EINVAL; 13578 goto out; 13579 } 13580 13581 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set, 13582 &rq_create->u.response); 13583 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); 13584 if (!bar_memmap_p) { 13585 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13586 "3269 RQ[%d] failed to memmap pci " 13587 "barset:x%x\n", hrq->queue_id, 13588 pci_barset); 13589 status = -ENOMEM; 13590 goto out; 13591 } 13592 13593 db_offset = rq_create->u.response.doorbell_offset; 13594 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) && 13595 (db_offset != LPFC_ULP1_RQ_DOORBELL)) { 13596 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13597 "3270 RQ[%d] doorbell offset not " 13598 "supported: x%x\n", hrq->queue_id, 13599 db_offset); 13600 status = -EINVAL; 13601 goto out; 13602 } 13603 hrq->db_regaddr = bar_memmap_p + db_offset; 13604 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13605 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, " 13606 "format:x%x\n", hrq->queue_id, pci_barset, 13607 db_offset, hrq->db_format); 13608 } else { 13609 hrq->db_format = LPFC_DB_RING_FORMAT; 13610 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 13611 } 13612 hrq->type = LPFC_HRQ; 13613 hrq->assoc_qid = cq->queue_id; 13614 hrq->subtype = subtype; 13615 hrq->host_index = 0; 13616 hrq->hba_index = 0; 13617 13618 /* now create the data queue */ 13619 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 13620 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 13621 length, LPFC_SLI4_MBX_EMBED); 13622 bf_set(lpfc_mbox_hdr_version, &shdr->request, 13623 phba->sli4_hba.pc_sli4_params.rqv); 13624 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 13625 bf_set(lpfc_rq_context_rqe_count_1, 13626 &rq_create->u.request.context, hrq->entry_count); 13627 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE; 13628 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, 13629 LPFC_RQE_SIZE_8); 13630 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, 13631 (PAGE_SIZE/SLI4_PAGE_SIZE)); 13632 } else { 13633 switch (drq->entry_count) { 13634 default: 13635 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13636 "2536 Unsupported RQ count. 
(%d)\n", 13637 drq->entry_count); 13638 if (drq->entry_count < 512) { 13639 status = -EINVAL; 13640 goto out; 13641 } 13642 /* otherwise default to smallest count (drop through) */ 13643 case 512: 13644 bf_set(lpfc_rq_context_rqe_count, 13645 &rq_create->u.request.context, 13646 LPFC_RQ_RING_SIZE_512); 13647 break; 13648 case 1024: 13649 bf_set(lpfc_rq_context_rqe_count, 13650 &rq_create->u.request.context, 13651 LPFC_RQ_RING_SIZE_1024); 13652 break; 13653 case 2048: 13654 bf_set(lpfc_rq_context_rqe_count, 13655 &rq_create->u.request.context, 13656 LPFC_RQ_RING_SIZE_2048); 13657 break; 13658 case 4096: 13659 bf_set(lpfc_rq_context_rqe_count, 13660 &rq_create->u.request.context, 13661 LPFC_RQ_RING_SIZE_4096); 13662 break; 13663 } 13664 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 13665 LPFC_DATA_BUF_SIZE); 13666 } 13667 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 13668 cq->queue_id); 13669 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 13670 drq->page_count); 13671 list_for_each_entry(dmabuf, &drq->page_list, list) { 13672 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 13673 putPaddrLow(dmabuf->phys); 13674 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 13675 putPaddrHigh(dmabuf->phys); 13676 } 13677 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 13678 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 13679 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13680 /* The IOCTL status is embedded in the mailbox subheader. */ 13681 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 13682 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13683 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13684 if (shdr_status || shdr_add_status || rc) { 13685 status = -ENXIO; 13686 goto out; 13687 } 13688 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 13689 if (drq->queue_id == 0xFFFF) { 13690 status = -ENXIO; 13691 goto out; 13692 } 13693 drq->type = LPFC_DRQ; 13694 drq->assoc_qid = cq->queue_id; 13695 drq->subtype = subtype; 13696 drq->host_index = 0; 13697 drq->hba_index = 0; 13698 13699 /* link the header and data RQs onto the parent cq child list */ 13700 list_add_tail(&hrq->list, &cq->child_list); 13701 list_add_tail(&drq->list, &cq->child_list); 13702 13703 out: 13704 mempool_free(mbox, phba->mbox_mem_pool); 13705 return status; 13706 } 13707 13708 /** 13709 * lpfc_eq_destroy - Destroy an event Queue on the HBA 13710 * @eq: The queue structure associated with the queue to destroy. 13711 * 13712 * This function destroys a queue, as detailed in @eq by sending an mailbox 13713 * command, specific to the type of queue, to the HBA. 13714 * 13715 * The @eq struct is used to get the queue ID of the queue to destroy. 13716 * 13717 * On success this function will return a zero. If the queue destroy mailbox 13718 * command fails this function will return -ENXIO. 
13719 **/ 13720 uint32_t 13721 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) 13722 { 13723 LPFC_MBOXQ_t *mbox; 13724 int rc, length, status = 0; 13725 uint32_t shdr_status, shdr_add_status; 13726 union lpfc_sli4_cfg_shdr *shdr; 13727 13728 /* sanity check on queue memory */ 13729 if (!eq) 13730 return -ENODEV; 13731 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); 13732 if (!mbox) 13733 return -ENOMEM; 13734 length = (sizeof(struct lpfc_mbx_eq_destroy) - 13735 sizeof(struct lpfc_sli4_cfg_mhdr)); 13736 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 13737 LPFC_MBOX_OPCODE_EQ_DESTROY, 13738 length, LPFC_SLI4_MBX_EMBED); 13739 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, 13740 eq->queue_id); 13741 mbox->vport = eq->phba->pport; 13742 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 13743 13744 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); 13745 /* The IOCTL status is embedded in the mailbox subheader. */ 13746 shdr = (union lpfc_sli4_cfg_shdr *) 13747 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; 13748 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13749 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13750 if (shdr_status || shdr_add_status || rc) { 13751 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13752 "2505 EQ_DESTROY mailbox failed with " 13753 "status x%x add_status x%x, mbx status x%x\n", 13754 shdr_status, shdr_add_status, rc); 13755 status = -ENXIO; 13756 } 13757 13758 /* Remove eq from any list */ 13759 list_del_init(&eq->list); 13760 mempool_free(mbox, eq->phba->mbox_mem_pool); 13761 return status; 13762 } 13763 13764 /** 13765 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA 13766 * @cq: The queue structure associated with the queue to destroy. 13767 * 13768 * This function destroys a queue, as detailed in @cq by sending an mailbox 13769 * command, specific to the type of queue, to the HBA. 13770 * 13771 * The @cq struct is used to get the queue ID of the queue to destroy. 13772 * 13773 * On success this function will return a zero. If the queue destroy mailbox 13774 * command fails this function will return -ENXIO. 13775 **/ 13776 uint32_t 13777 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) 13778 { 13779 LPFC_MBOXQ_t *mbox; 13780 int rc, length, status = 0; 13781 uint32_t shdr_status, shdr_add_status; 13782 union lpfc_sli4_cfg_shdr *shdr; 13783 13784 /* sanity check on queue memory */ 13785 if (!cq) 13786 return -ENODEV; 13787 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); 13788 if (!mbox) 13789 return -ENOMEM; 13790 length = (sizeof(struct lpfc_mbx_cq_destroy) - 13791 sizeof(struct lpfc_sli4_cfg_mhdr)); 13792 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 13793 LPFC_MBOX_OPCODE_CQ_DESTROY, 13794 length, LPFC_SLI4_MBX_EMBED); 13795 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, 13796 cq->queue_id); 13797 mbox->vport = cq->phba->pport; 13798 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 13799 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); 13800 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 13801 shdr = (union lpfc_sli4_cfg_shdr *) 13802 &mbox->u.mqe.un.wq_create.header.cfg_shdr; 13803 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13804 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13805 if (shdr_status || shdr_add_status || rc) { 13806 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13807 "2506 CQ_DESTROY mailbox failed with " 13808 "status x%x add_status x%x, mbx status x%x\n", 13809 shdr_status, shdr_add_status, rc); 13810 status = -ENXIO; 13811 } 13812 /* Remove cq from any list */ 13813 list_del_init(&cq->list); 13814 mempool_free(mbox, cq->phba->mbox_mem_pool); 13815 return status; 13816 } 13817 13818 /** 13819 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA 13820 * @qm: The queue structure associated with the queue to destroy. 13821 * 13822 * This function destroys a queue, as detailed in @mq by sending an mailbox 13823 * command, specific to the type of queue, to the HBA. 13824 * 13825 * The @mq struct is used to get the queue ID of the queue to destroy. 13826 * 13827 * On success this function will return a zero. If the queue destroy mailbox 13828 * command fails this function will return -ENXIO. 13829 **/ 13830 uint32_t 13831 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) 13832 { 13833 LPFC_MBOXQ_t *mbox; 13834 int rc, length, status = 0; 13835 uint32_t shdr_status, shdr_add_status; 13836 union lpfc_sli4_cfg_shdr *shdr; 13837 13838 /* sanity check on queue memory */ 13839 if (!mq) 13840 return -ENODEV; 13841 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); 13842 if (!mbox) 13843 return -ENOMEM; 13844 length = (sizeof(struct lpfc_mbx_mq_destroy) - 13845 sizeof(struct lpfc_sli4_cfg_mhdr)); 13846 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 13847 LPFC_MBOX_OPCODE_MQ_DESTROY, 13848 length, LPFC_SLI4_MBX_EMBED); 13849 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, 13850 mq->queue_id); 13851 mbox->vport = mq->phba->pport; 13852 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 13853 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL); 13854 /* The IOCTL status is embedded in the mailbox subheader. */ 13855 shdr = (union lpfc_sli4_cfg_shdr *) 13856 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; 13857 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13858 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13859 if (shdr_status || shdr_add_status || rc) { 13860 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13861 "2507 MQ_DESTROY mailbox failed with " 13862 "status x%x add_status x%x, mbx status x%x\n", 13863 shdr_status, shdr_add_status, rc); 13864 status = -ENXIO; 13865 } 13866 /* Remove mq from any list */ 13867 list_del_init(&mq->list); 13868 mempool_free(mbox, mq->phba->mbox_mem_pool); 13869 return status; 13870 } 13871 13872 /** 13873 * lpfc_wq_destroy - Destroy a Work Queue on the HBA 13874 * @wq: The queue structure associated with the queue to destroy. 13875 * 13876 * This function destroys a queue, as detailed in @wq by sending an mailbox 13877 * command, specific to the type of queue, to the HBA. 13878 * 13879 * The @wq struct is used to get the queue ID of the queue to destroy. 13880 * 13881 * On success this function will return a zero. If the queue destroy mailbox 13882 * command fails this function will return -ENXIO. 
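 *
 * Illustrative sketch only: a work queue is destroyed before the
 * completion queue it was bound to at create time. For the ELS queues
 * the pattern is (queue member names shown as examples, return codes
 * ignored):
 *
 *	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
 *	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);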
13883 **/ 13884 uint32_t 13885 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) 13886 { 13887 LPFC_MBOXQ_t *mbox; 13888 int rc, length, status = 0; 13889 uint32_t shdr_status, shdr_add_status; 13890 union lpfc_sli4_cfg_shdr *shdr; 13891 13892 /* sanity check on queue memory */ 13893 if (!wq) 13894 return -ENODEV; 13895 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); 13896 if (!mbox) 13897 return -ENOMEM; 13898 length = (sizeof(struct lpfc_mbx_wq_destroy) - 13899 sizeof(struct lpfc_sli4_cfg_mhdr)); 13900 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 13901 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, 13902 length, LPFC_SLI4_MBX_EMBED); 13903 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, 13904 wq->queue_id); 13905 mbox->vport = wq->phba->pport; 13906 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 13907 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); 13908 shdr = (union lpfc_sli4_cfg_shdr *) 13909 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; 13910 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13911 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13912 if (shdr_status || shdr_add_status || rc) { 13913 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13914 "2508 WQ_DESTROY mailbox failed with " 13915 "status x%x add_status x%x, mbx status x%x\n", 13916 shdr_status, shdr_add_status, rc); 13917 status = -ENXIO; 13918 } 13919 /* Remove wq from any list */ 13920 list_del_init(&wq->list); 13921 mempool_free(mbox, wq->phba->mbox_mem_pool); 13922 return status; 13923 } 13924 13925 /** 13926 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA 13927 * @rq: The queue structure associated with the queue to destroy. 13928 * 13929 * This function destroys a queue, as detailed in @rq by sending an mailbox 13930 * command, specific to the type of queue, to the HBA. 13931 * 13932 * The @rq struct is used to get the queue ID of the queue to destroy. 13933 * 13934 * On success this function will return a zero. If the queue destroy mailbox 13935 * command fails this function will return -ENXIO. 13936 **/ 13937 uint32_t 13938 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, 13939 struct lpfc_queue *drq) 13940 { 13941 LPFC_MBOXQ_t *mbox; 13942 int rc, length, status = 0; 13943 uint32_t shdr_status, shdr_add_status; 13944 union lpfc_sli4_cfg_shdr *shdr; 13945 13946 /* sanity check on queue memory */ 13947 if (!hrq || !drq) 13948 return -ENODEV; 13949 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); 13950 if (!mbox) 13951 return -ENOMEM; 13952 length = (sizeof(struct lpfc_mbx_rq_destroy) - 13953 sizeof(struct lpfc_sli4_cfg_mhdr)); 13954 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 13955 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, 13956 length, LPFC_SLI4_MBX_EMBED); 13957 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 13958 hrq->queue_id); 13959 mbox->vport = hrq->phba->pport; 13960 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 13961 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); 13962 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 13963 shdr = (union lpfc_sli4_cfg_shdr *) 13964 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 13965 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13966 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13967 if (shdr_status || shdr_add_status || rc) { 13968 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13969 "2509 RQ_DESTROY mailbox failed with " 13970 "status x%x add_status x%x, mbx status x%x\n", 13971 shdr_status, shdr_add_status, rc); 13972 if (rc != MBX_TIMEOUT) 13973 mempool_free(mbox, hrq->phba->mbox_mem_pool); 13974 return -ENXIO; 13975 } 13976 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 13977 drq->queue_id); 13978 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL); 13979 shdr = (union lpfc_sli4_cfg_shdr *) 13980 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 13981 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13982 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13983 if (shdr_status || shdr_add_status || rc) { 13984 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13985 "2510 RQ_DESTROY mailbox failed with " 13986 "status x%x add_status x%x, mbx status x%x\n", 13987 shdr_status, shdr_add_status, rc); 13988 status = -ENXIO; 13989 } 13990 list_del_init(&hrq->list); 13991 list_del_init(&drq->list); 13992 mempool_free(mbox, hrq->phba->mbox_mem_pool); 13993 return status; 13994 } 13995 13996 /** 13997 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA 13998 * @phba: pointer to the lpfc hba data structure for the port executing this call. 13999 * @pdma_phys_addr0: Physical address of the 1st SGL page. 14000 * @pdma_phys_addr1: Physical address of the 2nd SGL page. 14001 * @xritag: the xritag that ties this io to the SGL pages. 14002 * 14003 * This routine will post the sgl pages for the IO that has the xritag 14004 * that is in the iocbq structure. The xritag is assigned during iocbq 14005 * creation and persists for as long as the driver is loaded. 14006 * If the caller has fewer than 256 scatter gather segments to map then 14007 * pdma_phys_addr1 should be 0. 14008 * If the caller needs to map more than 256 scatter gather segments then 14009 * pdma_phys_addr1 should be a valid physical address. 14010 * Physical addresses for SGLs must be 64-byte aligned. 14011 * If two SGL pages are mapped then the first one must have 256 entries and 14012 * the second sgl can have between 1 and 256 entries.
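 *
 * Illustrative sketch only: posting one SGL page for an already allocated
 * sglq entry. Fewer than 256 scatter gather entries are assumed, so the
 * second page address is 0; sglq is an example variable name.
 *
 *	rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
 *	if (rc)
 *		return rc;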
14013 * 14014 * Return codes: 14015 * 0 - Success 14016 * -ENXIO, -ENOMEM - Failure 14017 **/ 14018 int 14019 lpfc_sli4_post_sgl(struct lpfc_hba *phba, 14020 dma_addr_t pdma_phys_addr0, 14021 dma_addr_t pdma_phys_addr1, 14022 uint16_t xritag) 14023 { 14024 struct lpfc_mbx_post_sgl_pages *post_sgl_pages; 14025 LPFC_MBOXQ_t *mbox; 14026 int rc; 14027 uint32_t shdr_status, shdr_add_status; 14028 uint32_t mbox_tmo; 14029 union lpfc_sli4_cfg_shdr *shdr; 14030 14031 if (xritag == NO_XRI) { 14032 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14033 "0364 Invalid param:\n"); 14034 return -EINVAL; 14035 } 14036 14037 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14038 if (!mbox) 14039 return -ENOMEM; 14040 14041 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 14042 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 14043 sizeof(struct lpfc_mbx_post_sgl_pages) - 14044 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); 14045 14046 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *) 14047 &mbox->u.mqe.un.post_sgl_pages; 14048 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag); 14049 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1); 14050 14051 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo = 14052 cpu_to_le32(putPaddrLow(pdma_phys_addr0)); 14053 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi = 14054 cpu_to_le32(putPaddrHigh(pdma_phys_addr0)); 14055 14056 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo = 14057 cpu_to_le32(putPaddrLow(pdma_phys_addr1)); 14058 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi = 14059 cpu_to_le32(putPaddrHigh(pdma_phys_addr1)); 14060 if (!phba->sli4_hba.intr_enable) 14061 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14062 else { 14063 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 14064 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 14065 } 14066 /* The IOCTL status is embedded in the mailbox subheader. */ 14067 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr; 14068 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14069 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14070 if (rc != MBX_TIMEOUT) 14071 mempool_free(mbox, phba->mbox_mem_pool); 14072 if (shdr_status || shdr_add_status || rc) { 14073 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14074 "2511 POST_SGL mailbox failed with " 14075 "status x%x add_status x%x, mbx status x%x\n", 14076 shdr_status, shdr_add_status, rc); 14077 rc = -ENXIO; 14078 } 14079 return 0; 14080 } 14081 14082 /** 14083 * lpfc_sli4_alloc_xri - Get an available rpi in the device's range 14084 * @phba: pointer to lpfc hba data structure. 14085 * 14086 * This routine is invoked to post rpi header templates to the 14087 * HBA consistent with the SLI-4 interface spec. This routine 14088 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 14089 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 14090 * 14091 * Returns 14092 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 14093 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 14094 **/ 14095 uint16_t 14096 lpfc_sli4_alloc_xri(struct lpfc_hba *phba) 14097 { 14098 unsigned long xri; 14099 14100 /* 14101 * Fetch the next logical xri. Because this index is logical, 14102 * the driver starts at 0 each time. 
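	 * The xri_bmask bitmap below records which logical xri indexes are
	 * in use and xri_used counts them; both are protected by hbalock,
	 * so the find_next_zero_bit()/set_bit() pair forms an atomic
	 * allocate-if-available step.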
14103 */ 14104 spin_lock_irq(&phba->hbalock); 14105 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask, 14106 phba->sli4_hba.max_cfg_param.max_xri, 0); 14107 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) { 14108 spin_unlock_irq(&phba->hbalock); 14109 return NO_XRI; 14110 } else { 14111 set_bit(xri, phba->sli4_hba.xri_bmask); 14112 phba->sli4_hba.max_cfg_param.xri_used++; 14113 } 14114 spin_unlock_irq(&phba->hbalock); 14115 return xri; 14116 } 14117 14118 /** 14119 * lpfc_sli4_free_xri - Release an xri for reuse. 14120 * @phba: pointer to lpfc hba data structure. 14121 * 14122 * This routine is invoked to release an xri to the pool of 14123 * available rpis maintained by the driver. 14124 **/ 14125 void 14126 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) 14127 { 14128 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) { 14129 phba->sli4_hba.max_cfg_param.xri_used--; 14130 } 14131 } 14132 14133 /** 14134 * lpfc_sli4_free_xri - Release an xri for reuse. 14135 * @phba: pointer to lpfc hba data structure. 14136 * 14137 * This routine is invoked to release an xri to the pool of 14138 * available rpis maintained by the driver. 14139 **/ 14140 void 14141 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) 14142 { 14143 spin_lock_irq(&phba->hbalock); 14144 __lpfc_sli4_free_xri(phba, xri); 14145 spin_unlock_irq(&phba->hbalock); 14146 } 14147 14148 /** 14149 * lpfc_sli4_next_xritag - Get an xritag for the io 14150 * @phba: Pointer to HBA context object. 14151 * 14152 * This function gets an xritag for the iocb. If there is no unused xritag 14153 * it will return 0xffff. 14154 * The function returns the allocated xritag if successful, else returns zero. 14155 * Zero is not a valid xritag. 14156 * The caller is not required to hold any lock. 14157 **/ 14158 uint16_t 14159 lpfc_sli4_next_xritag(struct lpfc_hba *phba) 14160 { 14161 uint16_t xri_index; 14162 14163 xri_index = lpfc_sli4_alloc_xri(phba); 14164 if (xri_index == NO_XRI) 14165 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 14166 "2004 Failed to allocate XRI.last XRITAG is %d" 14167 " Max XRI is %d, Used XRI is %d\n", 14168 xri_index, 14169 phba->sli4_hba.max_cfg_param.max_xri, 14170 phba->sli4_hba.max_cfg_param.xri_used); 14171 return xri_index; 14172 } 14173 14174 /** 14175 * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port. 14176 * @phba: pointer to lpfc hba data structure. 14177 * @post_sgl_list: pointer to els sgl entry list. 14178 * @count: number of els sgl entries on the list. 14179 * 14180 * This routine is invoked to post a block of driver's sgl pages to the 14181 * HBA using non-embedded mailbox command. No Lock is held. This routine 14182 * is only called when the driver is loading and after all IO has been 14183 * stopped. 
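 *
 * Illustrative sketch only: during initialization the driver hands this
 * routine its ELS sgl list and the count of entries on it, roughly:
 *
 *	rc = lpfc_sli4_post_els_sgl_list(phba, &els_sgl_list,
 *					 phba->sli4_hba.els_xri_cnt);
 *
 * where els_sgl_list is an example name for the driver's list of
 * lpfc_sglq entries.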
14184 **/ 14185 static int 14186 lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba, 14187 struct list_head *post_sgl_list, 14188 int post_cnt) 14189 { 14190 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 14191 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 14192 struct sgl_page_pairs *sgl_pg_pairs; 14193 void *viraddr; 14194 LPFC_MBOXQ_t *mbox; 14195 uint32_t reqlen, alloclen, pg_pairs; 14196 uint32_t mbox_tmo; 14197 uint16_t xritag_start = 0; 14198 int rc = 0; 14199 uint32_t shdr_status, shdr_add_status; 14200 union lpfc_sli4_cfg_shdr *shdr; 14201 14202 reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) + 14203 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 14204 if (reqlen > SLI4_PAGE_SIZE) { 14205 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 14206 "2559 Block sgl registration required DMA " 14207 "size (%d) great than a page\n", reqlen); 14208 return -ENOMEM; 14209 } 14210 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14211 if (!mbox) 14212 return -ENOMEM; 14213 14214 /* Allocate DMA memory and set up the non-embedded mailbox command */ 14215 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 14216 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 14217 LPFC_SLI4_MBX_NEMBED); 14218 14219 if (alloclen < reqlen) { 14220 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14221 "0285 Allocated DMA memory size (%d) is " 14222 "less than the requested DMA memory " 14223 "size (%d)\n", alloclen, reqlen); 14224 lpfc_sli4_mbox_cmd_free(phba, mbox); 14225 return -ENOMEM; 14226 } 14227 /* Set up the SGL pages in the non-embedded DMA pages */ 14228 viraddr = mbox->sge_array->addr[0]; 14229 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 14230 sgl_pg_pairs = &sgl->sgl_pg_pairs; 14231 14232 pg_pairs = 0; 14233 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) { 14234 /* Set up the sge entry */ 14235 sgl_pg_pairs->sgl_pg0_addr_lo = 14236 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 14237 sgl_pg_pairs->sgl_pg0_addr_hi = 14238 cpu_to_le32(putPaddrHigh(sglq_entry->phys)); 14239 sgl_pg_pairs->sgl_pg1_addr_lo = 14240 cpu_to_le32(putPaddrLow(0)); 14241 sgl_pg_pairs->sgl_pg1_addr_hi = 14242 cpu_to_le32(putPaddrHigh(0)); 14243 14244 /* Keep the first xritag on the list */ 14245 if (pg_pairs == 0) 14246 xritag_start = sglq_entry->sli4_xritag; 14247 sgl_pg_pairs++; 14248 pg_pairs++; 14249 } 14250 14251 /* Complete initialization and perform endian conversion. 
*/ 14252 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 14253 bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt); 14254 sgl->word0 = cpu_to_le32(sgl->word0); 14255 if (!phba->sli4_hba.intr_enable) 14256 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14257 else { 14258 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 14259 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 14260 } 14261 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 14262 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14263 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14264 if (rc != MBX_TIMEOUT) 14265 lpfc_sli4_mbox_cmd_free(phba, mbox); 14266 if (shdr_status || shdr_add_status || rc) { 14267 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14268 "2513 POST_SGL_BLOCK mailbox command failed " 14269 "status x%x add_status x%x mbx status x%x\n", 14270 shdr_status, shdr_add_status, rc); 14271 rc = -ENXIO; 14272 } 14273 return rc; 14274 } 14275 14276 /** 14277 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware 14278 * @phba: pointer to lpfc hba data structure. 14279 * @sblist: pointer to scsi buffer list. 14280 * @count: number of scsi buffers on the list. 14281 * 14282 * This routine is invoked to post a block of @count scsi sgl pages from a 14283 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command. 14284 * No Lock is held. 14285 * 14286 **/ 14287 int 14288 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, 14289 struct list_head *sblist, 14290 int count) 14291 { 14292 struct lpfc_scsi_buf *psb; 14293 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 14294 struct sgl_page_pairs *sgl_pg_pairs; 14295 void *viraddr; 14296 LPFC_MBOXQ_t *mbox; 14297 uint32_t reqlen, alloclen, pg_pairs; 14298 uint32_t mbox_tmo; 14299 uint16_t xritag_start = 0; 14300 int rc = 0; 14301 uint32_t shdr_status, shdr_add_status; 14302 dma_addr_t pdma_phys_bpl1; 14303 union lpfc_sli4_cfg_shdr *shdr; 14304 14305 /* Calculate the requested length of the dma memory */ 14306 reqlen = count * sizeof(struct sgl_page_pairs) + 14307 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 14308 if (reqlen > SLI4_PAGE_SIZE) { 14309 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 14310 "0217 Block sgl registration required DMA " 14311 "size (%d) great than a page\n", reqlen); 14312 return -ENOMEM; 14313 } 14314 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14315 if (!mbox) { 14316 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14317 "0283 Failed to allocate mbox cmd memory\n"); 14318 return -ENOMEM; 14319 } 14320 14321 /* Allocate DMA memory and set up the non-embedded mailbox command */ 14322 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 14323 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 14324 LPFC_SLI4_MBX_NEMBED); 14325 14326 if (alloclen < reqlen) { 14327 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14328 "2561 Allocated DMA memory size (%d) is " 14329 "less than the requested DMA memory " 14330 "size (%d)\n", alloclen, reqlen); 14331 lpfc_sli4_mbox_cmd_free(phba, mbox); 14332 return -ENOMEM; 14333 } 14334 14335 /* Get the first SGE entry from the non-embedded DMA memory */ 14336 viraddr = mbox->sge_array->addr[0]; 14337 14338 /* Set up the SGL pages in the non-embedded DMA pages */ 14339 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 14340 sgl_pg_pairs = &sgl->sgl_pg_pairs; 14341 14342 pg_pairs = 0; 14343 list_for_each_entry(psb, sblist, list) { 14344 /* Set up the sge entry */ 14345 sgl_pg_pairs->sgl_pg0_addr_lo = 14346 
cpu_to_le32(putPaddrLow(psb->dma_phys_bpl)); 14347 sgl_pg_pairs->sgl_pg0_addr_hi = 14348 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl)); 14349 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 14350 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE; 14351 else 14352 pdma_phys_bpl1 = 0; 14353 sgl_pg_pairs->sgl_pg1_addr_lo = 14354 cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 14355 sgl_pg_pairs->sgl_pg1_addr_hi = 14356 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 14357 /* Keep the first xritag on the list */ 14358 if (pg_pairs == 0) 14359 xritag_start = psb->cur_iocbq.sli4_xritag; 14360 sgl_pg_pairs++; 14361 pg_pairs++; 14362 } 14363 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 14364 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 14365 /* Perform endian conversion if necessary */ 14366 sgl->word0 = cpu_to_le32(sgl->word0); 14367 14368 if (!phba->sli4_hba.intr_enable) 14369 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14370 else { 14371 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 14372 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 14373 } 14374 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 14375 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14376 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14377 if (rc != MBX_TIMEOUT) 14378 lpfc_sli4_mbox_cmd_free(phba, mbox); 14379 if (shdr_status || shdr_add_status || rc) { 14380 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14381 "2564 POST_SGL_BLOCK mailbox command failed " 14382 "status x%x add_status x%x mbx status x%x\n", 14383 shdr_status, shdr_add_status, rc); 14384 rc = -ENXIO; 14385 } 14386 return rc; 14387 } 14388 14389 /** 14390 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 14391 * @phba: pointer to lpfc_hba struct that the frame was received on 14392 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 14393 * 14394 * This function checks the fields in the @fc_hdr to see if the FC frame is a 14395 * valid type of frame that the LPFC driver will handle. This function will 14396 * return a zero if the frame is a valid frame or a non zero value when the 14397 * frame does not pass the check. 
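 *
 * Illustrative sketch only: the unsolicited receive path validates the
 * header before any other processing and frees the buffer when the check
 * fails (dmabuf stands in for a received hbq_dmabuf):
 *
 *	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
 *	if (lpfc_fc_frame_check(phba, fc_hdr)) {
 *		lpfc_in_buf_free(phba, &dmabuf->dbuf);
 *		return;
 *	}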
14398 **/ 14399 static int 14400 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) 14401 { 14402 /* make rctl_names static to save stack space */ 14403 static char *rctl_names[] = FC_RCTL_NAMES_INIT; 14404 char *type_names[] = FC_TYPE_NAMES_INIT; 14405 struct fc_vft_header *fc_vft_hdr; 14406 uint32_t *header = (uint32_t *) fc_hdr; 14407 14408 switch (fc_hdr->fh_r_ctl) { 14409 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 14410 case FC_RCTL_DD_SOL_DATA: /* solicited data */ 14411 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ 14412 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ 14413 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ 14414 case FC_RCTL_DD_DATA_DESC: /* data descriptor */ 14415 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ 14416 case FC_RCTL_DD_CMD_STATUS: /* command status */ 14417 case FC_RCTL_ELS_REQ: /* extended link services request */ 14418 case FC_RCTL_ELS_REP: /* extended link services reply */ 14419 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ 14420 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ 14421 case FC_RCTL_BA_NOP: /* basic link service NOP */ 14422 case FC_RCTL_BA_ABTS: /* basic link service abort */ 14423 case FC_RCTL_BA_RMC: /* remove connection */ 14424 case FC_RCTL_BA_ACC: /* basic accept */ 14425 case FC_RCTL_BA_RJT: /* basic reject */ 14426 case FC_RCTL_BA_PRMT: 14427 case FC_RCTL_ACK_1: /* acknowledge_1 */ 14428 case FC_RCTL_ACK_0: /* acknowledge_0 */ 14429 case FC_RCTL_P_RJT: /* port reject */ 14430 case FC_RCTL_F_RJT: /* fabric reject */ 14431 case FC_RCTL_P_BSY: /* port busy */ 14432 case FC_RCTL_F_BSY: /* fabric busy to data frame */ 14433 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ 14434 case FC_RCTL_LCR: /* link credit reset */ 14435 case FC_RCTL_END: /* end */ 14436 break; 14437 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ 14438 fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 14439 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; 14440 return lpfc_fc_frame_check(phba, fc_hdr); 14441 default: 14442 goto drop; 14443 } 14444 switch (fc_hdr->fh_type) { 14445 case FC_TYPE_BLS: 14446 case FC_TYPE_ELS: 14447 case FC_TYPE_FCP: 14448 case FC_TYPE_CT: 14449 break; 14450 case FC_TYPE_IP: 14451 case FC_TYPE_ILS: 14452 default: 14453 goto drop; 14454 } 14455 14456 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 14457 "2538 Received frame rctl:%s (x%x), type:%s (x%x), " 14458 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", 14459 rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl, 14460 type_names[fc_hdr->fh_type], fc_hdr->fh_type, 14461 be32_to_cpu(header[0]), be32_to_cpu(header[1]), 14462 be32_to_cpu(header[2]), be32_to_cpu(header[3]), 14463 be32_to_cpu(header[4]), be32_to_cpu(header[5]), 14464 be32_to_cpu(header[6])); 14465 return 0; 14466 drop: 14467 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 14468 "2539 Dropped frame rctl:%s type:%s\n", 14469 rctl_names[fc_hdr->fh_r_ctl], 14470 type_names[fc_hdr->fh_type]); 14471 return 1; 14472 } 14473 14474 /** 14475 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame 14476 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 14477 * 14478 * This function processes the FC header to retrieve the VFI from the VF 14479 * header, if one exists. This function will return the VFI if one exists 14480 * or 0 if no VSAN Header exists. 
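 *
 * Illustrative sketch only: the VFI returned here is compared against a
 * vport's vfi when matching a received frame to a vport, for example:
 *
 *	if (vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
 *	    vports[i]->fc_myDID == did)
 *		vport = vports[i];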
14481 **/ 14482 static uint32_t 14483 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) 14484 { 14485 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 14486 14487 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) 14488 return 0; 14489 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); 14490 } 14491 14492 /** 14493 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to 14494 * @phba: Pointer to the HBA structure to search for the vport on 14495 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 14496 * @fcfi: The FC Fabric ID that the frame came from 14497 * 14498 * This function searches the @phba for a vport that matches the content of the 14499 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the 14500 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function 14501 * returns the matching vport pointer or NULL if unable to match frame to a 14502 * vport. 14503 **/ 14504 static struct lpfc_vport * 14505 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, 14506 uint16_t fcfi) 14507 { 14508 struct lpfc_vport **vports; 14509 struct lpfc_vport *vport = NULL; 14510 int i; 14511 uint32_t did = (fc_hdr->fh_d_id[0] << 16 | 14512 fc_hdr->fh_d_id[1] << 8 | 14513 fc_hdr->fh_d_id[2]); 14514 14515 if (did == Fabric_DID) 14516 return phba->pport; 14517 if ((phba->pport->fc_flag & FC_PT2PT) && 14518 !(phba->link_state == LPFC_HBA_READY)) 14519 return phba->pport; 14520 14521 vports = lpfc_create_vport_work_array(phba); 14522 if (vports != NULL) 14523 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 14524 if (phba->fcf.fcfi == fcfi && 14525 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && 14526 vports[i]->fc_myDID == did) { 14527 vport = vports[i]; 14528 break; 14529 } 14530 } 14531 lpfc_destroy_vport_work_array(phba, vports); 14532 return vport; 14533 } 14534 14535 /** 14536 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp 14537 * @vport: The vport to work on. 14538 * 14539 * This function updates the receive sequence time stamp for this vport. The 14540 * receive sequence time stamp indicates the time that the last frame of the 14541 * the sequence that has been idle for the longest amount of time was received. 14542 * the driver uses this time stamp to indicate if any received sequences have 14543 * timed out. 14544 **/ 14545 void 14546 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) 14547 { 14548 struct lpfc_dmabuf *h_buf; 14549 struct hbq_dmabuf *dmabuf = NULL; 14550 14551 /* get the oldest sequence on the rcv list */ 14552 h_buf = list_get_first(&vport->rcv_buffer_list, 14553 struct lpfc_dmabuf, list); 14554 if (!h_buf) 14555 return; 14556 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 14557 vport->rcv_buffer_time_stamp = dmabuf->time_stamp; 14558 } 14559 14560 /** 14561 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. 14562 * @vport: The vport that the received sequences were sent to. 14563 * 14564 * This function cleans up all outstanding received sequences. This is called 14565 * by the driver when a link event or user action invalidates all the received 14566 * sequences. 
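 *
 * Illustrative sketch only: a handler reacting to such an event would
 * typically walk the active vports and flush each one, e.g.:
 *
 *	vports = lpfc_create_vport_work_array(phba);
 *	if (vports)
 *		for (i = 0; i <= phba->max_vpi && vports[i]; i++)
 *			lpfc_cleanup_rcv_buffers(vports[i]);
 *	lpfc_destroy_vport_work_array(phba, vports);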
14567 **/ 14568 void 14569 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) 14570 { 14571 struct lpfc_dmabuf *h_buf, *hnext; 14572 struct lpfc_dmabuf *d_buf, *dnext; 14573 struct hbq_dmabuf *dmabuf = NULL; 14574 14575 /* start with the oldest sequence on the rcv list */ 14576 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 14577 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 14578 list_del_init(&dmabuf->hbuf.list); 14579 list_for_each_entry_safe(d_buf, dnext, 14580 &dmabuf->dbuf.list, list) { 14581 list_del_init(&d_buf->list); 14582 lpfc_in_buf_free(vport->phba, d_buf); 14583 } 14584 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 14585 } 14586 } 14587 14588 /** 14589 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. 14590 * @vport: The vport that the received sequences were sent to. 14591 * 14592 * This function determines whether any received sequences have timed out by 14593 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp 14594 * indicates that there is at least one timed out sequence this routine will 14595 * go through the received sequences one at a time from most inactive to most 14596 * active to determine which ones need to be cleaned up. Once it has determined 14597 * that a sequence needs to be cleaned up it will simply free up the resources 14598 * without sending an abort. 14599 **/ 14600 void 14601 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) 14602 { 14603 struct lpfc_dmabuf *h_buf, *hnext; 14604 struct lpfc_dmabuf *d_buf, *dnext; 14605 struct hbq_dmabuf *dmabuf = NULL; 14606 unsigned long timeout; 14607 int abort_count = 0; 14608 14609 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 14610 vport->rcv_buffer_time_stamp); 14611 if (list_empty(&vport->rcv_buffer_list) || 14612 time_before(jiffies, timeout)) 14613 return; 14614 /* start with the oldest sequence on the rcv list */ 14615 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 14616 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 14617 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 14618 dmabuf->time_stamp); 14619 if (time_before(jiffies, timeout)) 14620 break; 14621 abort_count++; 14622 list_del_init(&dmabuf->hbuf.list); 14623 list_for_each_entry_safe(d_buf, dnext, 14624 &dmabuf->dbuf.list, list) { 14625 list_del_init(&d_buf->list); 14626 lpfc_in_buf_free(vport->phba, d_buf); 14627 } 14628 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 14629 } 14630 if (abort_count) 14631 lpfc_update_rcv_time_stamp(vport); 14632 } 14633 14634 /** 14635 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences 14636 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame 14637 * 14638 * This function searches through the existing incomplete sequences that have 14639 * been sent to this @vport. If the frame matches one of the incomplete 14640 * sequences then the dbuf in the @dmabuf is added to the list of frames that 14641 * make up that sequence. If no sequence is found that matches this frame then 14642 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list 14643 * This function returns a pointer to the first dmabuf in the sequence list that 14644 * the frame was linked to. 
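 *
 * Illustrative sketch only: the receive handler links each new frame into
 * its sequence and passes the sequence up once it is complete; the names
 * follow the pattern used elsewhere in this file:
 *
 *	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
 *	if (!seq_dmabuf) {
 *		lpfc_in_buf_free(phba, &dmabuf->dbuf);
 *		return;
 *	}
 *	if (lpfc_seq_complete(seq_dmabuf))
 *		lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);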
14645 **/ 14646 static struct hbq_dmabuf * 14647 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 14648 { 14649 struct fc_frame_header *new_hdr; 14650 struct fc_frame_header *temp_hdr; 14651 struct lpfc_dmabuf *d_buf; 14652 struct lpfc_dmabuf *h_buf; 14653 struct hbq_dmabuf *seq_dmabuf = NULL; 14654 struct hbq_dmabuf *temp_dmabuf = NULL; 14655 14656 INIT_LIST_HEAD(&dmabuf->dbuf.list); 14657 dmabuf->time_stamp = jiffies; 14658 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 14659 /* Use the hdr_buf to find the sequence that this frame belongs to */ 14660 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 14661 temp_hdr = (struct fc_frame_header *)h_buf->virt; 14662 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 14663 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 14664 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 14665 continue; 14666 /* found a pending sequence that matches this frame */ 14667 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 14668 break; 14669 } 14670 if (!seq_dmabuf) { 14671 /* 14672 * This indicates first frame received for this sequence. 14673 * Queue the buffer on the vport's rcv_buffer_list. 14674 */ 14675 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 14676 lpfc_update_rcv_time_stamp(vport); 14677 return dmabuf; 14678 } 14679 temp_hdr = seq_dmabuf->hbuf.virt; 14680 if (be16_to_cpu(new_hdr->fh_seq_cnt) < 14681 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 14682 list_del_init(&seq_dmabuf->hbuf.list); 14683 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 14684 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 14685 lpfc_update_rcv_time_stamp(vport); 14686 return dmabuf; 14687 } 14688 /* move this sequence to the tail to indicate a young sequence */ 14689 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list); 14690 seq_dmabuf->time_stamp = jiffies; 14691 lpfc_update_rcv_time_stamp(vport); 14692 if (list_empty(&seq_dmabuf->dbuf.list)) { 14693 temp_hdr = dmabuf->hbuf.virt; 14694 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 14695 return seq_dmabuf; 14696 } 14697 /* find the correct place in the sequence to insert this frame */ 14698 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { 14699 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 14700 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt; 14701 /* 14702 * If the frame's sequence count is greater than the frame on 14703 * the list then insert the frame right after this frame 14704 */ 14705 if (be16_to_cpu(new_hdr->fh_seq_cnt) > 14706 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 14707 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); 14708 return seq_dmabuf; 14709 } 14710 } 14711 return NULL; 14712 } 14713 14714 /** 14715 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence 14716 * @vport: pointer to a vitural port 14717 * @dmabuf: pointer to a dmabuf that describes the FC sequence 14718 * 14719 * This function tries to abort from the partially assembed sequence, described 14720 * by the information from basic abbort @dmabuf. It checks to see whether such 14721 * partially assembled sequence held by the driver. If so, it shall free up all 14722 * the frames from the partially assembled sequence. 
14723 * 14724 * Return 14725 * true -- if there is a matching partially assembled sequence present and all 14726 * the frames are freed with the sequence; 14727 * false -- if there is no matching partially assembled sequence present so 14728 * nothing got aborted in the lower layer driver 14729 **/ 14730 static bool 14731 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, 14732 struct hbq_dmabuf *dmabuf) 14733 { 14734 struct fc_frame_header *new_hdr; 14735 struct fc_frame_header *temp_hdr; 14736 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf; 14737 struct hbq_dmabuf *seq_dmabuf = NULL; 14738 14739 /* Use the hdr_buf to find the sequence that matches this frame */ 14740 INIT_LIST_HEAD(&dmabuf->dbuf.list); 14741 INIT_LIST_HEAD(&dmabuf->hbuf.list); 14742 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 14743 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 14744 temp_hdr = (struct fc_frame_header *)h_buf->virt; 14745 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 14746 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 14747 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 14748 continue; 14749 /* found a pending sequence that matches this frame */ 14750 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 14751 break; 14752 } 14753 14754 /* Free up all the frames from the partially assembled sequence */ 14755 if (seq_dmabuf) { 14756 list_for_each_entry_safe(d_buf, n_buf, 14757 &seq_dmabuf->dbuf.list, list) { 14758 list_del_init(&d_buf->list); 14759 lpfc_in_buf_free(vport->phba, d_buf); 14760 } 14761 return true; 14762 } 14763 return false; 14764 } 14765 14766 /** 14767 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp 14768 * @vport: pointer to a virtual port 14769 * @dmabuf: pointer to a dmabuf that describes the FC sequence 14770 * 14771 * This function tries to abort the assembled sequence from the upper level 14772 * protocol, described by the information from the basic abort @dmabuf. It 14773 * checks to see whether such a pending context exists at the upper level protocol. 14774 * If so, it shall clean up the pending context. 14775 * 14776 * Return 14777 * true -- if there is a matching pending context of the sequence cleaned 14778 * at ulp; 14779 * false -- if there is no matching pending context of the sequence present 14780 * at ulp. 14781 **/ 14782 static bool 14783 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 14784 { 14785 struct lpfc_hba *phba = vport->phba; 14786 int handled; 14787 14788 /* Accepting abort at ulp with SLI4 only */ 14789 if (phba->sli_rev < LPFC_SLI_REV4) 14790 return false; 14791 14792 /* Register all caring upper level protocols to attend abort */ 14793 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf); 14794 if (handled) 14795 return true; 14796 14797 return false; 14798 } 14799 14800 /** 14801 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler 14802 * @phba: Pointer to HBA context object. 14803 * @cmd_iocbq: pointer to the command iocbq structure. 14804 * @rsp_iocbq: pointer to the response iocbq structure. 14805 * 14806 * This function handles the sequence abort response iocb command complete 14807 * event. It properly releases the memory allocated to the sequence abort 14808 * accept iocb.
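 *
 * Illustrative sketch only: the BA_ACC/BA_RJT response iocb is pointed at
 * this handler before being issued, as the response routine below does:
 *
 *	ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);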
14809 **/ 14810 static void 14811 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, 14812 struct lpfc_iocbq *cmd_iocbq, 14813 struct lpfc_iocbq *rsp_iocbq) 14814 { 14815 struct lpfc_nodelist *ndlp; 14816 14817 if (cmd_iocbq) { 14818 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1; 14819 lpfc_nlp_put(ndlp); 14820 lpfc_nlp_not_used(ndlp); 14821 lpfc_sli_release_iocbq(phba, cmd_iocbq); 14822 } 14823 14824 /* Failure means BLS ABORT RSP did not get delivered to remote node*/ 14825 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus) 14826 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14827 "3154 BLS ABORT RSP failed, data: x%x/x%x\n", 14828 rsp_iocbq->iocb.ulpStatus, 14829 rsp_iocbq->iocb.un.ulpWord[4]); 14830 } 14831 14832 /** 14833 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver. 14834 * @phba: Pointer to HBA context object. 14835 * @xri: xri id in transaction. 14836 * 14837 * This function validates the xri maps to the known range of XRIs allocated an 14838 * used by the driver. 14839 **/ 14840 uint16_t 14841 lpfc_sli4_xri_inrange(struct lpfc_hba *phba, 14842 uint16_t xri) 14843 { 14844 int i; 14845 14846 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) { 14847 if (xri == phba->sli4_hba.xri_ids[i]) 14848 return i; 14849 } 14850 return NO_XRI; 14851 } 14852 14853 /** 14854 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort 14855 * @phba: Pointer to HBA context object. 14856 * @fc_hdr: pointer to a FC frame header. 14857 * 14858 * This function sends a basic response to a previous unsol sequence abort 14859 * event after aborting the sequence handling. 14860 **/ 14861 static void 14862 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, 14863 struct fc_frame_header *fc_hdr, bool aborted) 14864 { 14865 struct lpfc_hba *phba = vport->phba; 14866 struct lpfc_iocbq *ctiocb = NULL; 14867 struct lpfc_nodelist *ndlp; 14868 uint16_t oxid, rxid, xri, lxri; 14869 uint32_t sid, fctl; 14870 IOCB_t *icmd; 14871 int rc; 14872 14873 if (!lpfc_is_link_up(phba)) 14874 return; 14875 14876 sid = sli4_sid_from_fc_hdr(fc_hdr); 14877 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 14878 rxid = be16_to_cpu(fc_hdr->fh_rx_id); 14879 14880 ndlp = lpfc_findnode_did(vport, sid); 14881 if (!ndlp) { 14882 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 14883 if (!ndlp) { 14884 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 14885 "1268 Failed to allocate ndlp for " 14886 "oxid:x%x SID:x%x\n", oxid, sid); 14887 return; 14888 } 14889 lpfc_nlp_init(vport, ndlp, sid); 14890 /* Put ndlp onto pport node list */ 14891 lpfc_enqueue_node(vport, ndlp); 14892 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 14893 /* re-setup ndlp without removing from node list */ 14894 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 14895 if (!ndlp) { 14896 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 14897 "3275 Failed to active ndlp found " 14898 "for oxid:x%x SID:x%x\n", oxid, sid); 14899 return; 14900 } 14901 } 14902 14903 /* Allocate buffer for rsp iocb */ 14904 ctiocb = lpfc_sli_get_iocbq(phba); 14905 if (!ctiocb) 14906 return; 14907 14908 /* Extract the F_CTL field from FC_HDR */ 14909 fctl = sli4_fctl_from_fc_hdr(fc_hdr); 14910 14911 icmd = &ctiocb->iocb; 14912 icmd->un.xseq64.bdl.bdeSize = 0; 14913 icmd->un.xseq64.bdl.ulpIoTag32 = 0; 14914 icmd->un.xseq64.w5.hcsw.Dfctl = 0; 14915 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC; 14916 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS; 14917 14918 /* Fill in the rest of iocb fields */ 14919 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX; 14920 icmd->ulpBdeCount = 0; 14921 icmd->ulpLe 
= 1; 14922 icmd->ulpClass = CLASS3; 14923 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 14924 ctiocb->context1 = lpfc_nlp_get(ndlp); 14925 14926 ctiocb->iocb_cmpl = NULL; 14927 ctiocb->vport = phba->pport; 14928 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; 14929 ctiocb->sli4_lxritag = NO_XRI; 14930 ctiocb->sli4_xritag = NO_XRI; 14931 14932 if (fctl & FC_FC_EX_CTX) 14933 /* Exchange responder sent the abort so we 14934 * own the oxid. 14935 */ 14936 xri = oxid; 14937 else 14938 xri = rxid; 14939 lxri = lpfc_sli4_xri_inrange(phba, xri); 14940 if (lxri != NO_XRI) 14941 lpfc_set_rrq_active(phba, ndlp, lxri, 14942 (xri == oxid) ? rxid : oxid, 0); 14943 /* For BA_ABTS from exchange responder, if the logical xri with 14944 * the oxid maps to the FCP XRI range, the port no longer has 14945 * that exchange context, send a BLS_RJT. Override the IOCB for 14946 * a BA_RJT. 14947 */ 14948 if ((fctl & FC_FC_EX_CTX) && 14949 (lxri > lpfc_sli4_get_els_iocb_cnt(phba))) { 14950 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 14951 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 14952 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 14953 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 14954 } 14955 14956 /* If BA_ABTS failed to abort a partially assembled receive sequence, 14957 * the driver no longer has that exchange, send a BLS_RJT. Override 14958 * the IOCB for a BA_RJT. 14959 */ 14960 if (aborted == false) { 14961 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 14962 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 14963 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 14964 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 14965 } 14966 14967 if (fctl & FC_FC_EX_CTX) { 14968 /* ABTS sent by responder to CT exchange, construction 14969 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG 14970 * field and RX_ID from ABTS for RX_ID field. 14971 */ 14972 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); 14973 } else { 14974 /* ABTS sent by initiator to CT exchange, construction 14975 * of BA_ACC will need to allocate a new XRI as for the 14976 * XRI_TAG field. 14977 */ 14978 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); 14979 } 14980 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); 14981 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); 14982 14983 /* Xmit CT abts response on exchange <xid> */ 14984 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 14985 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", 14986 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); 14987 14988 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 14989 if (rc == IOCB_ERROR) { 14990 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 14991 "2925 Failed to issue CT ABTS RSP x%x on " 14992 "xri x%x, Data x%x\n", 14993 icmd->un.xseq64.w5.hcsw.Rctl, oxid, 14994 phba->link_state); 14995 lpfc_nlp_put(ndlp); 14996 ctiocb->context1 = NULL; 14997 lpfc_sli_release_iocbq(phba, ctiocb); 14998 } 14999 } 15000 15001 /** 15002 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event 15003 * @vport: Pointer to the vport on which this sequence was received 15004 * @dmabuf: pointer to a dmabuf that describes the FC sequence 15005 * 15006 * This function handles an SLI-4 unsolicited abort event. If the unsolicited 15007 * receive sequence is only partially assembed by the driver, it shall abort 15008 * the partially assembled frames for the sequence. 
Otherwise, if the 15009 * unsolicited receive sequence has been completely assembled and passed to 15010 * the Upper Layer Protocol (UPL), it then mark the per oxid status for the 15011 * unsolicited sequence has been aborted. After that, it will issue a basic 15012 * accept to accept the abort. 15013 **/ 15014 void 15015 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, 15016 struct hbq_dmabuf *dmabuf) 15017 { 15018 struct lpfc_hba *phba = vport->phba; 15019 struct fc_frame_header fc_hdr; 15020 uint32_t fctl; 15021 bool aborted; 15022 15023 /* Make a copy of fc_hdr before the dmabuf being released */ 15024 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); 15025 fctl = sli4_fctl_from_fc_hdr(&fc_hdr); 15026 15027 if (fctl & FC_FC_EX_CTX) { 15028 /* ABTS by responder to exchange, no cleanup needed */ 15029 aborted = true; 15030 } else { 15031 /* ABTS by initiator to exchange, need to do cleanup */ 15032 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf); 15033 if (aborted == false) 15034 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf); 15035 } 15036 lpfc_in_buf_free(phba, &dmabuf->dbuf); 15037 15038 /* Respond with BA_ACC or BA_RJT accordingly */ 15039 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted); 15040 } 15041 15042 /** 15043 * lpfc_seq_complete - Indicates if a sequence is complete 15044 * @dmabuf: pointer to a dmabuf that describes the FC sequence 15045 * 15046 * This function checks the sequence, starting with the frame described by 15047 * @dmabuf, to see if all the frames associated with this sequence are present. 15048 * the frames associated with this sequence are linked to the @dmabuf using the 15049 * dbuf list. This function looks for two major things. 1) That the first frame 15050 * has a sequence count of zero. 2) There is a frame with last frame of sequence 15051 * set. 3) That there are no holes in the sequence count. The function will 15052 * return 1 when the sequence is complete, otherwise it will return 0. 15053 **/ 15054 static int 15055 lpfc_seq_complete(struct hbq_dmabuf *dmabuf) 15056 { 15057 struct fc_frame_header *hdr; 15058 struct lpfc_dmabuf *d_buf; 15059 struct hbq_dmabuf *seq_dmabuf; 15060 uint32_t fctl; 15061 int seq_count = 0; 15062 15063 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 15064 /* make sure first fame of sequence has a sequence count of zero */ 15065 if (hdr->fh_seq_cnt != seq_count) 15066 return 0; 15067 fctl = (hdr->fh_f_ctl[0] << 16 | 15068 hdr->fh_f_ctl[1] << 8 | 15069 hdr->fh_f_ctl[2]); 15070 /* If last frame of sequence we can return success. */ 15071 if (fctl & FC_FC_END_SEQ) 15072 return 1; 15073 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) { 15074 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 15075 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 15076 /* If there is a hole in the sequence count then fail. */ 15077 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt)) 15078 return 0; 15079 fctl = (hdr->fh_f_ctl[0] << 16 | 15080 hdr->fh_f_ctl[1] << 8 | 15081 hdr->fh_f_ctl[2]); 15082 /* If last frame of sequence we can return success. */ 15083 if (fctl & FC_FC_END_SEQ) 15084 return 1; 15085 } 15086 return 0; 15087 } 15088 15089 /** 15090 * lpfc_prep_seq - Prep sequence for ULP processing 15091 * @vport: Pointer to the vport on which this sequence was received 15092 * @dmabuf: pointer to a dmabuf that describes the FC sequence 15093 * 15094 * This function takes a sequence, described by a list of frames, and creates 15095 * a list of iocbq structures to describe the sequence. 
 * This iocbq list will then be issued to the generic unsolicited sequence
 * handler. This routine returns a pointer to the first iocbq in the list. If
 * the function is unable to allocate an iocbq then it throws out the received
 * frames that could not be described and returns a pointer to the first
 * iocbq. If unable to allocate any iocbqs (including the first) this function
 * will return NULL.
 **/
static struct lpfc_iocbq *
lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
{
	struct hbq_dmabuf *hbq_buf;
	struct lpfc_dmabuf *d_buf, *n_buf;
	struct lpfc_iocbq *first_iocbq, *iocbq;
	struct fc_frame_header *fc_hdr;
	uint32_t sid;
	uint32_t len, tot_len;
	struct ulp_bde64 *pbde;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	/* remove from receive buffer list */
	list_del_init(&seq_dmabuf->hbuf.list);
	lpfc_update_rcv_time_stamp(vport);
	/* get the Remote Port's SID */
	sid = sli4_sid_from_fc_hdr(fc_hdr);
	tot_len = 0;
	/* Get an iocbq struct to fill in. */
	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
	if (first_iocbq) {
		/* Initialize the first IOCB. */
		first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
		first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;

		/* Check FC Header to see what TYPE of frame we are rcv'ing */
		if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
			first_iocbq->iocb.un.rcvels.parmRo =
				sli4_did_from_fc_hdr(fc_hdr);
			first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
		} else
			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
		first_iocbq->iocb.ulpContext = NO_XRI;
		first_iocbq->iocb.unsli3.rcvsli3.ox_id =
			be16_to_cpu(fc_hdr->fh_ox_id);
		/* iocbq is prepped for internal consumption. Physical vpi.
*/ 15139 first_iocbq->iocb.unsli3.rcvsli3.vpi = 15140 vport->phba->vpi_ids[vport->vpi]; 15141 /* put the first buffer into the first IOCBq */ 15142 tot_len = bf_get(lpfc_rcqe_length, 15143 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 15144 15145 first_iocbq->context2 = &seq_dmabuf->dbuf; 15146 first_iocbq->context3 = NULL; 15147 first_iocbq->iocb.ulpBdeCount = 1; 15148 if (tot_len > LPFC_DATA_BUF_SIZE) 15149 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 15150 LPFC_DATA_BUF_SIZE; 15151 else 15152 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len; 15153 15154 first_iocbq->iocb.un.rcvels.remoteID = sid; 15155 15156 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 15157 } 15158 iocbq = first_iocbq; 15159 /* 15160 * Each IOCBq can have two Buffers assigned, so go through the list 15161 * of buffers for this sequence and save two buffers in each IOCBq 15162 */ 15163 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { 15164 if (!iocbq) { 15165 lpfc_in_buf_free(vport->phba, d_buf); 15166 continue; 15167 } 15168 if (!iocbq->context3) { 15169 iocbq->context3 = d_buf; 15170 iocbq->iocb.ulpBdeCount++; 15171 /* We need to get the size out of the right CQE */ 15172 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 15173 len = bf_get(lpfc_rcqe_length, 15174 &hbq_buf->cq_event.cqe.rcqe_cmpl); 15175 pbde = (struct ulp_bde64 *) 15176 &iocbq->iocb.unsli3.sli3Words[4]; 15177 if (len > LPFC_DATA_BUF_SIZE) 15178 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; 15179 else 15180 pbde->tus.f.bdeSize = len; 15181 15182 iocbq->iocb.unsli3.rcvsli3.acc_len += len; 15183 tot_len += len; 15184 } else { 15185 iocbq = lpfc_sli_get_iocbq(vport->phba); 15186 if (!iocbq) { 15187 if (first_iocbq) { 15188 first_iocbq->iocb.ulpStatus = 15189 IOSTAT_FCP_RSP_ERROR; 15190 first_iocbq->iocb.un.ulpWord[4] = 15191 IOERR_NO_RESOURCES; 15192 } 15193 lpfc_in_buf_free(vport->phba, d_buf); 15194 continue; 15195 } 15196 /* We need to get the size out of the right CQE */ 15197 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 15198 len = bf_get(lpfc_rcqe_length, 15199 &hbq_buf->cq_event.cqe.rcqe_cmpl); 15200 iocbq->context2 = d_buf; 15201 iocbq->context3 = NULL; 15202 iocbq->iocb.ulpBdeCount = 1; 15203 if (len > LPFC_DATA_BUF_SIZE) 15204 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 15205 LPFC_DATA_BUF_SIZE; 15206 else 15207 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len; 15208 15209 tot_len += len; 15210 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 15211 15212 iocbq->iocb.un.rcvels.remoteID = sid; 15213 list_add_tail(&iocbq->list, &first_iocbq->list); 15214 } 15215 } 15216 return first_iocbq; 15217 } 15218 15219 static void 15220 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, 15221 struct hbq_dmabuf *seq_dmabuf) 15222 { 15223 struct fc_frame_header *fc_hdr; 15224 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb; 15225 struct lpfc_hba *phba = vport->phba; 15226 15227 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 15228 iocbq = lpfc_prep_seq(vport, seq_dmabuf); 15229 if (!iocbq) { 15230 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15231 "2707 Ring %d handler: Failed to allocate " 15232 "iocb Rctl x%x Type x%x received\n", 15233 LPFC_ELS_RING, 15234 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 15235 return; 15236 } 15237 if (!lpfc_complete_unsol_iocb(phba, 15238 &phba->sli.ring[LPFC_ELS_RING], 15239 iocbq, fc_hdr->fh_r_ctl, 15240 fc_hdr->fh_type)) 15241 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15242 "2540 Ring %d handler: unexpected Rctl " 15243 "x%x Type x%x received\n", 15244 LPFC_ELS_RING, 15245 fc_hdr->fh_r_ctl, 
fc_hdr->fh_type); 15246 15247 /* Free iocb created in lpfc_prep_seq */ 15248 list_for_each_entry_safe(curr_iocb, next_iocb, 15249 &iocbq->list, list) { 15250 list_del_init(&curr_iocb->list); 15251 lpfc_sli_release_iocbq(phba, curr_iocb); 15252 } 15253 lpfc_sli_release_iocbq(phba, iocbq); 15254 } 15255 15256 /** 15257 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware 15258 * @phba: Pointer to HBA context object. 15259 * 15260 * This function is called with no lock held. This function processes all 15261 * the received buffers and gives it to upper layers when a received buffer 15262 * indicates that it is the final frame in the sequence. The interrupt 15263 * service routine processes received buffers at interrupt contexts and adds 15264 * received dma buffers to the rb_pend_list queue and signals the worker thread. 15265 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the 15266 * appropriate receive function when the final frame in a sequence is received. 15267 **/ 15268 void 15269 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, 15270 struct hbq_dmabuf *dmabuf) 15271 { 15272 struct hbq_dmabuf *seq_dmabuf; 15273 struct fc_frame_header *fc_hdr; 15274 struct lpfc_vport *vport; 15275 uint32_t fcfi; 15276 uint32_t did; 15277 15278 /* Process each received buffer */ 15279 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 15280 /* check to see if this a valid type of frame */ 15281 if (lpfc_fc_frame_check(phba, fc_hdr)) { 15282 lpfc_in_buf_free(phba, &dmabuf->dbuf); 15283 return; 15284 } 15285 if ((bf_get(lpfc_cqe_code, 15286 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)) 15287 fcfi = bf_get(lpfc_rcqe_fcf_id_v1, 15288 &dmabuf->cq_event.cqe.rcqe_cmpl); 15289 else 15290 fcfi = bf_get(lpfc_rcqe_fcf_id, 15291 &dmabuf->cq_event.cqe.rcqe_cmpl); 15292 15293 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); 15294 if (!vport) { 15295 /* throw out the frame */ 15296 lpfc_in_buf_free(phba, &dmabuf->dbuf); 15297 return; 15298 } 15299 15300 /* d_id this frame is directed to */ 15301 did = sli4_did_from_fc_hdr(fc_hdr); 15302 15303 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */ 15304 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) && 15305 (did != Fabric_DID)) { 15306 /* 15307 * Throw out the frame if we are not pt2pt. 15308 * The pt2pt protocol allows for discovery frames 15309 * to be received without a registered VPI. 15310 */ 15311 if (!(vport->fc_flag & FC_PT2PT) || 15312 (phba->link_state == LPFC_HBA_READY)) { 15313 lpfc_in_buf_free(phba, &dmabuf->dbuf); 15314 return; 15315 } 15316 } 15317 15318 /* Handle the basic abort sequence (BA_ABTS) event */ 15319 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { 15320 lpfc_sli4_handle_unsol_abort(vport, dmabuf); 15321 return; 15322 } 15323 15324 /* Link this frame */ 15325 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); 15326 if (!seq_dmabuf) { 15327 /* unable to add frame to vport - throw it out */ 15328 lpfc_in_buf_free(phba, &dmabuf->dbuf); 15329 return; 15330 } 15331 /* If not last frame in sequence continue processing frames. */ 15332 if (!lpfc_seq_complete(seq_dmabuf)) 15333 return; 15334 15335 /* Send the complete sequence to the upper layer protocol */ 15336 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); 15337 } 15338 15339 /** 15340 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port 15341 * @phba: pointer to lpfc hba data structure. 
15342 * 15343 * This routine is invoked to post rpi header templates to the 15344 * HBA consistent with the SLI-4 interface spec. This routine 15345 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 15346 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 15347 * 15348 * This routine does not require any locks. It's usage is expected 15349 * to be driver load or reset recovery when the driver is 15350 * sequential. 15351 * 15352 * Return codes 15353 * 0 - successful 15354 * -EIO - The mailbox failed to complete successfully. 15355 * When this error occurs, the driver is not guaranteed 15356 * to have any rpi regions posted to the device and 15357 * must either attempt to repost the regions or take a 15358 * fatal error. 15359 **/ 15360 int 15361 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) 15362 { 15363 struct lpfc_rpi_hdr *rpi_page; 15364 uint32_t rc = 0; 15365 uint16_t lrpi = 0; 15366 15367 /* SLI4 ports that support extents do not require RPI headers. */ 15368 if (!phba->sli4_hba.rpi_hdrs_in_use) 15369 goto exit; 15370 if (phba->sli4_hba.extents_in_use) 15371 return -EIO; 15372 15373 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 15374 /* 15375 * Assign the rpi headers a physical rpi only if the driver 15376 * has not initialized those resources. A port reset only 15377 * needs the headers posted. 15378 */ 15379 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) != 15380 LPFC_RPI_RSRC_RDY) 15381 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 15382 15383 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); 15384 if (rc != MBX_SUCCESS) { 15385 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15386 "2008 Error %d posting all rpi " 15387 "headers\n", rc); 15388 rc = -EIO; 15389 break; 15390 } 15391 } 15392 15393 exit: 15394 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 15395 LPFC_RPI_RSRC_RDY); 15396 return rc; 15397 } 15398 15399 /** 15400 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port 15401 * @phba: pointer to lpfc hba data structure. 15402 * @rpi_page: pointer to the rpi memory region. 15403 * 15404 * This routine is invoked to post a single rpi header to the 15405 * HBA consistent with the SLI-4 interface spec. This memory region 15406 * maps up to 64 rpi context regions. 15407 * 15408 * Return codes 15409 * 0 - successful 15410 * -ENOMEM - No available memory 15411 * -EIO - The mailbox failed to complete successfully. 15412 **/ 15413 int 15414 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) 15415 { 15416 LPFC_MBOXQ_t *mboxq; 15417 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; 15418 uint32_t rc = 0; 15419 uint32_t shdr_status, shdr_add_status; 15420 union lpfc_sli4_cfg_shdr *shdr; 15421 15422 /* SLI4 ports that support extents do not require RPI headers. */ 15423 if (!phba->sli4_hba.rpi_hdrs_in_use) 15424 return rc; 15425 if (phba->sli4_hba.extents_in_use) 15426 return -EIO; 15427 15428 /* The port is notified of the header region via a mailbox command. */ 15429 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15430 if (!mboxq) { 15431 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15432 "2001 Unable to allocate memory for issuing " 15433 "SLI_CONFIG_SPECIAL mailbox command\n"); 15434 return -ENOMEM; 15435 } 15436 15437 /* Post all rpi memory regions to the port. 
*/ 15438 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl; 15439 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 15440 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, 15441 sizeof(struct lpfc_mbx_post_hdr_tmpl) - 15442 sizeof(struct lpfc_sli4_cfg_mhdr), 15443 LPFC_SLI4_MBX_EMBED); 15444 15445 15446 /* Post the physical rpi to the port for this rpi header. */ 15447 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, 15448 rpi_page->start_rpi); 15449 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, 15450 hdr_tmpl, rpi_page->page_count); 15451 15452 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); 15453 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); 15454 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 15455 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; 15456 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15457 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15458 if (rc != MBX_TIMEOUT) 15459 mempool_free(mboxq, phba->mbox_mem_pool); 15460 if (shdr_status || shdr_add_status || rc) { 15461 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15462 "2514 POST_RPI_HDR mailbox failed with " 15463 "status x%x add_status x%x, mbx status x%x\n", 15464 shdr_status, shdr_add_status, rc); 15465 rc = -ENXIO; 15466 } 15467 return rc; 15468 } 15469 15470 /** 15471 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range 15472 * @phba: pointer to lpfc hba data structure. 15473 * 15474 * This routine is invoked to post rpi header templates to the 15475 * HBA consistent with the SLI-4 interface spec. This routine 15476 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 15477 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 15478 * 15479 * Returns 15480 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 15481 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 15482 **/ 15483 int 15484 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) 15485 { 15486 unsigned long rpi; 15487 uint16_t max_rpi, rpi_limit; 15488 uint16_t rpi_remaining, lrpi = 0; 15489 struct lpfc_rpi_hdr *rpi_hdr; 15490 unsigned long iflag; 15491 15492 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 15493 rpi_limit = phba->sli4_hba.next_rpi; 15494 15495 /* 15496 * Fetch the next logical rpi. Because this index is logical, 15497 * the driver starts at 0 each time. 15498 */ 15499 spin_lock_irqsave(&phba->hbalock, iflag); 15500 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0); 15501 if (rpi >= rpi_limit) 15502 rpi = LPFC_RPI_ALLOC_ERROR; 15503 else { 15504 set_bit(rpi, phba->sli4_hba.rpi_bmask); 15505 phba->sli4_hba.max_cfg_param.rpi_used++; 15506 phba->sli4_hba.rpi_count++; 15507 } 15508 15509 /* 15510 * Don't try to allocate more rpi header regions if the device limit 15511 * has been exhausted. 15512 */ 15513 if ((rpi == LPFC_RPI_ALLOC_ERROR) && 15514 (phba->sli4_hba.rpi_count >= max_rpi)) { 15515 spin_unlock_irqrestore(&phba->hbalock, iflag); 15516 return rpi; 15517 } 15518 15519 /* 15520 * RPI header postings are not required for SLI4 ports capable of 15521 * extents. 15522 */ 15523 if (!phba->sli4_hba.rpi_hdrs_in_use) { 15524 spin_unlock_irqrestore(&phba->hbalock, iflag); 15525 return rpi; 15526 } 15527 15528 /* 15529 * If the driver is running low on rpi resources, allocate another 15530 * page now. Note that the next_rpi value is used because 15531 * it represents how many are actually in use whereas max_rpi notes 15532 * how many are supported max by the device. 
 */
	rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
		if (!rpi_hdr) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2002 Error Could not grow rpi "
					"count\n");
		} else {
			lrpi = rpi_hdr->start_rpi;
			rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
		}
	}

	return rpi;
}

/**
 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to release.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver. The caller is expected to
 * hold the hbalock; lpfc_sli4_free_rpi() below is the locking wrapper.
 **/
void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
		phba->sli4_hba.rpi_count--;
		phba->sli4_hba.max_cfg_param.rpi_used--;
	}
}

/**
 * lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to release.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.
 **/
void
lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_rpi(phba, rpi);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove the memory region that
 * provided rpi via a bitmask.
 **/
void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.rpi_bmask);
	kfree(phba->sli4_hba.rpi_ids);
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
}

/**
 * lpfc_sli4_resume_rpi - Resume an rpi on the port
 * @ndlp: pointer to the node whose rpi is to be resumed.
 * @cmpl: optional completion handler for the RESUME_RPI mailbox command.
 * @arg: argument passed to @cmpl through the mailbox context1 field.
 *
 * This routine is invoked to issue a RESUME_RPI mailbox command to the port
 * for the rpi associated with @ndlp. If no completion handler is supplied,
 * the default mailbox completion handler is used.
 **/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
	void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_hba *phba = ndlp->phba;
	int rc;

	/* The port is notified of the rpi resumption via a mailbox command. */
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Construct the RESUME_RPI mailbox command for this node's rpi. */
	lpfc_resume_rpi(mboxq, ndlp);
	if (cmpl) {
		mboxq->mbox_cmpl = cmpl;
		mboxq->context1 = arg;
		mboxq->context2 = ndlp;
	} else
		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mboxq->vport = ndlp->vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2010 Resume RPI Mailbox failed "
				"status %d, mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}

/**
 * lpfc_sli4_init_vpi - Initialize a vpi with the port
 * @vport: Pointer to the vport for which the vpi is being initialized
 *
 * This routine is invoked to activate a vpi with the port.
15644 * 15645 * Returns: 15646 * 0 success 15647 * -Evalue otherwise 15648 **/ 15649 int 15650 lpfc_sli4_init_vpi(struct lpfc_vport *vport) 15651 { 15652 LPFC_MBOXQ_t *mboxq; 15653 int rc = 0; 15654 int retval = MBX_SUCCESS; 15655 uint32_t mbox_tmo; 15656 struct lpfc_hba *phba = vport->phba; 15657 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15658 if (!mboxq) 15659 return -ENOMEM; 15660 lpfc_init_vpi(phba, mboxq, vport->vpi); 15661 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 15662 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 15663 if (rc != MBX_SUCCESS) { 15664 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 15665 "2022 INIT VPI Mailbox failed " 15666 "status %d, mbxStatus x%x\n", rc, 15667 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 15668 retval = -EIO; 15669 } 15670 if (rc != MBX_TIMEOUT) 15671 mempool_free(mboxq, vport->phba->mbox_mem_pool); 15672 15673 return retval; 15674 } 15675 15676 /** 15677 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. 15678 * @phba: pointer to lpfc hba data structure. 15679 * @mboxq: Pointer to mailbox object. 15680 * 15681 * This routine is invoked to manually add a single FCF record. The caller 15682 * must pass a completely initialized FCF_Record. This routine takes 15683 * care of the nonembedded mailbox operations. 15684 **/ 15685 static void 15686 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 15687 { 15688 void *virt_addr; 15689 union lpfc_sli4_cfg_shdr *shdr; 15690 uint32_t shdr_status, shdr_add_status; 15691 15692 virt_addr = mboxq->sge_array->addr[0]; 15693 /* The IOCTL status is embedded in the mailbox subheader. */ 15694 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; 15695 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15696 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15697 15698 if ((shdr_status || shdr_add_status) && 15699 (shdr_status != STATUS_FCF_IN_USE)) 15700 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15701 "2558 ADD_FCF_RECORD mailbox failed with " 15702 "status x%x add_status x%x\n", 15703 shdr_status, shdr_add_status); 15704 15705 lpfc_sli4_mbox_cmd_free(phba, mboxq); 15706 } 15707 15708 /** 15709 * lpfc_sli4_add_fcf_record - Manually add an FCF Record. 15710 * @phba: pointer to lpfc hba data structure. 15711 * @fcf_record: pointer to the initialized fcf record to add. 15712 * 15713 * This routine is invoked to manually add a single FCF record. The caller 15714 * must pass a completely initialized FCF_Record. This routine takes 15715 * care of the nonembedded mailbox operations. 
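 *
 * Illustrative call sequence (a sketch only, not the driver's actual call
 * site): a record built by lpfc_sli4_build_dflt_fcf_record() below can be
 * handed directly to this routine.
 *
 *	struct fcf_record fcf_record;
 *
 *	lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, 0);
 *	rc = lpfc_sli4_add_fcf_record(phba, &fcf_record);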
15716 **/ 15717 int 15718 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) 15719 { 15720 int rc = 0; 15721 LPFC_MBOXQ_t *mboxq; 15722 uint8_t *bytep; 15723 void *virt_addr; 15724 dma_addr_t phys_addr; 15725 struct lpfc_mbx_sge sge; 15726 uint32_t alloc_len, req_len; 15727 uint32_t fcfindex; 15728 15729 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15730 if (!mboxq) { 15731 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15732 "2009 Failed to allocate mbox for ADD_FCF cmd\n"); 15733 return -ENOMEM; 15734 } 15735 15736 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + 15737 sizeof(uint32_t); 15738 15739 /* Allocate DMA memory and set up the non-embedded mailbox command */ 15740 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 15741 LPFC_MBOX_OPCODE_FCOE_ADD_FCF, 15742 req_len, LPFC_SLI4_MBX_NEMBED); 15743 if (alloc_len < req_len) { 15744 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15745 "2523 Allocated DMA memory size (x%x) is " 15746 "less than the requested DMA memory " 15747 "size (x%x)\n", alloc_len, req_len); 15748 lpfc_sli4_mbox_cmd_free(phba, mboxq); 15749 return -ENOMEM; 15750 } 15751 15752 /* 15753 * Get the first SGE entry from the non-embedded DMA memory. This 15754 * routine only uses a single SGE. 15755 */ 15756 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 15757 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); 15758 virt_addr = mboxq->sge_array->addr[0]; 15759 /* 15760 * Configure the FCF record for FCFI 0. This is the driver's 15761 * hardcoded default and gets used in nonFIP mode. 15762 */ 15763 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); 15764 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 15765 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); 15766 15767 /* 15768 * Copy the fcf_index and the FCF Record Data. The data starts after 15769 * the FCoE header plus word10. The data copy needs to be endian 15770 * correct. 15771 */ 15772 bytep += sizeof(uint32_t); 15773 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); 15774 mboxq->vport = phba->pport; 15775 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; 15776 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 15777 if (rc == MBX_NOT_FINISHED) { 15778 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15779 "2515 ADD_FCF_RECORD mailbox failed with " 15780 "status 0x%x\n", rc); 15781 lpfc_sli4_mbox_cmd_free(phba, mboxq); 15782 rc = -EIO; 15783 } else 15784 rc = 0; 15785 15786 return rc; 15787 } 15788 15789 /** 15790 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. 15791 * @phba: pointer to lpfc hba data structure. 15792 * @fcf_record: pointer to the fcf record to write the default data. 15793 * @fcf_index: FCF table entry index. 15794 * 15795 * This routine is invoked to build the driver's default FCF record. The 15796 * values used are hardcoded. This routine handles memory initialization. 
15797 * 15798 **/ 15799 void 15800 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, 15801 struct fcf_record *fcf_record, 15802 uint16_t fcf_index) 15803 { 15804 memset(fcf_record, 0, sizeof(struct fcf_record)); 15805 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE; 15806 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER; 15807 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY; 15808 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]); 15809 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]); 15810 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]); 15811 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3); 15812 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4); 15813 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5); 15814 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]); 15815 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); 15816 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); 15817 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); 15818 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1); 15819 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); 15820 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, 15821 LPFC_FCF_FPMA | LPFC_FCF_SPMA); 15822 /* Set the VLAN bit map */ 15823 if (phba->valid_vlan) { 15824 fcf_record->vlan_bitmap[phba->vlan_id / 8] 15825 = 1 << (phba->vlan_id % 8); 15826 } 15827 } 15828 15829 /** 15830 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan. 15831 * @phba: pointer to lpfc hba data structure. 15832 * @fcf_index: FCF table entry offset. 15833 * 15834 * This routine is invoked to scan the entire FCF table by reading FCF 15835 * record and processing it one at a time starting from the @fcf_index 15836 * for initial FCF discovery or fast FCF failover rediscovery. 15837 * 15838 * Return 0 if the mailbox command is submitted successfully, none 0 15839 * otherwise. 
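 *
 * A full scan of the FCF table is typically started with the special index
 * LPFC_FCOE_FCF_GET_FIRST (sketch only; the FIP discovery code is the real
 * caller):
 *
 *	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);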
15840 **/ 15841 int 15842 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 15843 { 15844 int rc = 0, error; 15845 LPFC_MBOXQ_t *mboxq; 15846 15847 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; 15848 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; 15849 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15850 if (!mboxq) { 15851 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15852 "2000 Failed to allocate mbox for " 15853 "READ_FCF cmd\n"); 15854 error = -ENOMEM; 15855 goto fail_fcf_scan; 15856 } 15857 /* Construct the read FCF record mailbox command */ 15858 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 15859 if (rc) { 15860 error = -EINVAL; 15861 goto fail_fcf_scan; 15862 } 15863 /* Issue the mailbox command asynchronously */ 15864 mboxq->vport = phba->pport; 15865 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 15866 15867 spin_lock_irq(&phba->hbalock); 15868 phba->hba_flag |= FCF_TS_INPROG; 15869 spin_unlock_irq(&phba->hbalock); 15870 15871 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 15872 if (rc == MBX_NOT_FINISHED) 15873 error = -EIO; 15874 else { 15875 /* Reset eligible FCF count for new scan */ 15876 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 15877 phba->fcf.eligible_fcf_cnt = 0; 15878 error = 0; 15879 } 15880 fail_fcf_scan: 15881 if (error) { 15882 if (mboxq) 15883 lpfc_sli4_mbox_cmd_free(phba, mboxq); 15884 /* FCF scan failed, clear FCF_TS_INPROG flag */ 15885 spin_lock_irq(&phba->hbalock); 15886 phba->hba_flag &= ~FCF_TS_INPROG; 15887 spin_unlock_irq(&phba->hbalock); 15888 } 15889 return error; 15890 } 15891 15892 /** 15893 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. 15894 * @phba: pointer to lpfc hba data structure. 15895 * @fcf_index: FCF table entry offset. 15896 * 15897 * This routine is invoked to read an FCF record indicated by @fcf_index 15898 * and to use it for FLOGI roundrobin FCF failover. 15899 * 15900 * Return 0 if the mailbox command is submitted successfully, none 0 15901 * otherwise. 15902 **/ 15903 int 15904 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 15905 { 15906 int rc = 0, error; 15907 LPFC_MBOXQ_t *mboxq; 15908 15909 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15910 if (!mboxq) { 15911 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 15912 "2763 Failed to allocate mbox for " 15913 "READ_FCF cmd\n"); 15914 error = -ENOMEM; 15915 goto fail_fcf_read; 15916 } 15917 /* Construct the read FCF record mailbox command */ 15918 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 15919 if (rc) { 15920 error = -EINVAL; 15921 goto fail_fcf_read; 15922 } 15923 /* Issue the mailbox command asynchronously */ 15924 mboxq->vport = phba->pport; 15925 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; 15926 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 15927 if (rc == MBX_NOT_FINISHED) 15928 error = -EIO; 15929 else 15930 error = 0; 15931 15932 fail_fcf_read: 15933 if (error && mboxq) 15934 lpfc_sli4_mbox_cmd_free(phba, mboxq); 15935 return error; 15936 } 15937 15938 /** 15939 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. 15940 * @phba: pointer to lpfc hba data structure. 15941 * @fcf_index: FCF table entry offset. 15942 * 15943 * This routine is invoked to read an FCF record indicated by @fcf_index to 15944 * determine whether it's eligible for FLOGI roundrobin failover list. 15945 * 15946 * Return 0 if the mailbox command is submitted successfully, none 0 15947 * otherwise. 
15948 **/ 15949 int 15950 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 15951 { 15952 int rc = 0, error; 15953 LPFC_MBOXQ_t *mboxq; 15954 15955 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15956 if (!mboxq) { 15957 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 15958 "2758 Failed to allocate mbox for " 15959 "READ_FCF cmd\n"); 15960 error = -ENOMEM; 15961 goto fail_fcf_read; 15962 } 15963 /* Construct the read FCF record mailbox command */ 15964 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 15965 if (rc) { 15966 error = -EINVAL; 15967 goto fail_fcf_read; 15968 } 15969 /* Issue the mailbox command asynchronously */ 15970 mboxq->vport = phba->pport; 15971 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec; 15972 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 15973 if (rc == MBX_NOT_FINISHED) 15974 error = -EIO; 15975 else 15976 error = 0; 15977 15978 fail_fcf_read: 15979 if (error && mboxq) 15980 lpfc_sli4_mbox_cmd_free(phba, mboxq); 15981 return error; 15982 } 15983 15984 /** 15985 * lpfc_check_next_fcf_pri 15986 * phba pointer to the lpfc_hba struct for this port. 15987 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get 15988 * routine when the rr_bmask is empty. The FCF indecies are put into the 15989 * rr_bmask based on their priority level. Starting from the highest priority 15990 * to the lowest. The most likely FCF candidate will be in the highest 15991 * priority group. When this routine is called it searches the fcf_pri list for 15992 * next lowest priority group and repopulates the rr_bmask with only those 15993 * fcf_indexes. 15994 * returns: 15995 * 1=success 0=failure 15996 **/ 15997 int 15998 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba) 15999 { 16000 uint16_t next_fcf_pri; 16001 uint16_t last_index; 16002 struct lpfc_fcf_pri *fcf_pri; 16003 int rc; 16004 int ret = 0; 16005 16006 last_index = find_first_bit(phba->fcf.fcf_rr_bmask, 16007 LPFC_SLI4_FCF_TBL_INDX_MAX); 16008 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 16009 "3060 Last IDX %d\n", last_index); 16010 16011 /* Verify the priority list has 2 or more entries */ 16012 spin_lock_irq(&phba->hbalock); 16013 if (list_empty(&phba->fcf.fcf_pri_list) || 16014 list_is_singular(&phba->fcf.fcf_pri_list)) { 16015 spin_unlock_irq(&phba->hbalock); 16016 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 16017 "3061 Last IDX %d\n", last_index); 16018 return 0; /* Empty rr list */ 16019 } 16020 spin_unlock_irq(&phba->hbalock); 16021 16022 next_fcf_pri = 0; 16023 /* 16024 * Clear the rr_bmask and set all of the bits that are at this 16025 * priority. 16026 */ 16027 memset(phba->fcf.fcf_rr_bmask, 0, 16028 sizeof(*phba->fcf.fcf_rr_bmask)); 16029 spin_lock_irq(&phba->hbalock); 16030 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 16031 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED) 16032 continue; 16033 /* 16034 * the 1st priority that has not FLOGI failed 16035 * will be the highest. 16036 */ 16037 if (!next_fcf_pri) 16038 next_fcf_pri = fcf_pri->fcf_rec.priority; 16039 spin_unlock_irq(&phba->hbalock); 16040 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 16041 rc = lpfc_sli4_fcf_rr_index_set(phba, 16042 fcf_pri->fcf_rec.fcf_index); 16043 if (rc) 16044 return 0; 16045 } 16046 spin_lock_irq(&phba->hbalock); 16047 } 16048 /* 16049 * if next_fcf_pri was not set above and the list is not empty then 16050 * we have failed flogis on all of them. So reset flogi failed 16051 * and start at the beginning. 
16052 */ 16053 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) { 16054 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 16055 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED; 16056 /* 16057 * the 1st priority that has not FLOGI failed 16058 * will be the highest. 16059 */ 16060 if (!next_fcf_pri) 16061 next_fcf_pri = fcf_pri->fcf_rec.priority; 16062 spin_unlock_irq(&phba->hbalock); 16063 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 16064 rc = lpfc_sli4_fcf_rr_index_set(phba, 16065 fcf_pri->fcf_rec.fcf_index); 16066 if (rc) 16067 return 0; 16068 } 16069 spin_lock_irq(&phba->hbalock); 16070 } 16071 } else 16072 ret = 1; 16073 spin_unlock_irq(&phba->hbalock); 16074 16075 return ret; 16076 } 16077 /** 16078 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index 16079 * @phba: pointer to lpfc hba data structure. 16080 * 16081 * This routine is to get the next eligible FCF record index in a round 16082 * robin fashion. If the next eligible FCF record index equals to the 16083 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 16084 * shall be returned, otherwise, the next eligible FCF record's index 16085 * shall be returned. 16086 **/ 16087 uint16_t 16088 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) 16089 { 16090 uint16_t next_fcf_index; 16091 16092 initial_priority: 16093 /* Search start from next bit of currently registered FCF index */ 16094 next_fcf_index = phba->fcf.current_rec.fcf_indx; 16095 16096 next_priority: 16097 /* Determine the next fcf index to check */ 16098 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX; 16099 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 16100 LPFC_SLI4_FCF_TBL_INDX_MAX, 16101 next_fcf_index); 16102 16103 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 16104 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 16105 /* 16106 * If we have wrapped then we need to clear the bits that 16107 * have been tested so that we can detect when we should 16108 * change the priority level. 16109 */ 16110 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 16111 LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 16112 } 16113 16114 16115 /* Check roundrobin failover list empty condition */ 16116 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX || 16117 next_fcf_index == phba->fcf.current_rec.fcf_indx) { 16118 /* 16119 * If next fcf index is not found check if there are lower 16120 * Priority level fcf's in the fcf_priority list. 16121 * Set up the rr_bmask with all of the avaiable fcf bits 16122 * at that level and continue the selection process. 
16123 */ 16124 if (lpfc_check_next_fcf_pri_level(phba)) 16125 goto initial_priority; 16126 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 16127 "2844 No roundrobin failover FCF available\n"); 16128 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) 16129 return LPFC_FCOE_FCF_NEXT_NONE; 16130 else { 16131 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 16132 "3063 Only FCF available idx %d, flag %x\n", 16133 next_fcf_index, 16134 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag); 16135 return next_fcf_index; 16136 } 16137 } 16138 16139 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX && 16140 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag & 16141 LPFC_FCF_FLOGI_FAILED) 16142 goto next_priority; 16143 16144 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 16145 "2845 Get next roundrobin failover FCF (x%x)\n", 16146 next_fcf_index); 16147 16148 return next_fcf_index; 16149 } 16150 16151 /** 16152 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index 16153 * @phba: pointer to lpfc hba data structure. 16154 * 16155 * This routine sets the FCF record index in to the eligible bmask for 16156 * roundrobin failover search. It checks to make sure that the index 16157 * does not go beyond the range of the driver allocated bmask dimension 16158 * before setting the bit. 16159 * 16160 * Returns 0 if the index bit successfully set, otherwise, it returns 16161 * -EINVAL. 16162 **/ 16163 int 16164 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) 16165 { 16166 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 16167 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 16168 "2610 FCF (x%x) reached driver's book " 16169 "keeping dimension:x%x\n", 16170 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 16171 return -EINVAL; 16172 } 16173 /* Set the eligible FCF record index bmask */ 16174 set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 16175 16176 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 16177 "2790 Set FCF (x%x) to roundrobin FCF failover " 16178 "bmask\n", fcf_index); 16179 16180 return 0; 16181 } 16182 16183 /** 16184 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index 16185 * @phba: pointer to lpfc hba data structure. 16186 * 16187 * This routine clears the FCF record index from the eligible bmask for 16188 * roundrobin failover search. It checks to make sure that the index 16189 * does not go beyond the range of the driver allocated bmask dimension 16190 * before clearing the bit. 16191 **/ 16192 void 16193 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 16194 { 16195 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next; 16196 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 16197 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 16198 "2762 FCF (x%x) reached driver's book " 16199 "keeping dimension:x%x\n", 16200 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 16201 return; 16202 } 16203 /* Clear the eligible FCF record index bmask */ 16204 spin_lock_irq(&phba->hbalock); 16205 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list, 16206 list) { 16207 if (fcf_pri->fcf_rec.fcf_index == fcf_index) { 16208 list_del_init(&fcf_pri->list); 16209 break; 16210 } 16211 } 16212 spin_unlock_irq(&phba->hbalock); 16213 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 16214 16215 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 16216 "2791 Clear FCF (x%x) from roundrobin failover " 16217 "bmask\n", fcf_index); 16218 } 16219 16220 /** 16221 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table 16222 * @phba: pointer to lpfc hba data structure. 
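 * @mbox: Pointer to mailbox object.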
16223 * 16224 * This routine is the completion routine for the rediscover FCF table mailbox 16225 * command. If the mailbox command returned failure, it will try to stop the 16226 * FCF rediscover wait timer. 16227 **/ 16228 void 16229 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 16230 { 16231 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 16232 uint32_t shdr_status, shdr_add_status; 16233 16234 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 16235 16236 shdr_status = bf_get(lpfc_mbox_hdr_status, 16237 &redisc_fcf->header.cfg_shdr.response); 16238 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 16239 &redisc_fcf->header.cfg_shdr.response); 16240 if (shdr_status || shdr_add_status) { 16241 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 16242 "2746 Requesting for FCF rediscovery failed " 16243 "status x%x add_status x%x\n", 16244 shdr_status, shdr_add_status); 16245 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { 16246 spin_lock_irq(&phba->hbalock); 16247 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 16248 spin_unlock_irq(&phba->hbalock); 16249 /* 16250 * CVL event triggered FCF rediscover request failed, 16251 * last resort to re-try current registered FCF entry. 16252 */ 16253 lpfc_retry_pport_discovery(phba); 16254 } else { 16255 spin_lock_irq(&phba->hbalock); 16256 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 16257 spin_unlock_irq(&phba->hbalock); 16258 /* 16259 * DEAD FCF event triggered FCF rediscover request 16260 * failed, last resort to fail over as a link down 16261 * to FCF registration. 16262 */ 16263 lpfc_sli4_fcf_dead_failthrough(phba); 16264 } 16265 } else { 16266 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 16267 "2775 Start FCF rediscover quiescent timer\n"); 16268 /* 16269 * Start FCF rediscovery wait timer for pending FCF 16270 * before rescan FCF record table. 16271 */ 16272 lpfc_fcf_redisc_wait_start_timer(phba); 16273 } 16274 16275 mempool_free(mbox, phba->mbox_mem_pool); 16276 } 16277 16278 /** 16279 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port. 16280 * @phba: pointer to lpfc hba data structure. 16281 * 16282 * This routine is invoked to request for rediscovery of the entire FCF table 16283 * by the port. 
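 *
 * On success the request completes asynchronously in
 * lpfc_mbx_cmpl_redisc_fcf_table() above, which starts the FCF rediscovery
 * wait timer before the FCF table is rescanned.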
16284 **/ 16285 int 16286 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) 16287 { 16288 LPFC_MBOXQ_t *mbox; 16289 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 16290 int rc, length; 16291 16292 /* Cancel retry delay timers to all vports before FCF rediscover */ 16293 lpfc_cancel_all_vport_retry_delay_timer(phba); 16294 16295 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16296 if (!mbox) { 16297 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16298 "2745 Failed to allocate mbox for " 16299 "requesting FCF rediscover.\n"); 16300 return -ENOMEM; 16301 } 16302 16303 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) - 16304 sizeof(struct lpfc_sli4_cfg_mhdr)); 16305 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16306 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF, 16307 length, LPFC_SLI4_MBX_EMBED); 16308 16309 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 16310 /* Set count to 0 for invalidating the entire FCF database */ 16311 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0); 16312 16313 /* Issue the mailbox command asynchronously */ 16314 mbox->vport = phba->pport; 16315 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table; 16316 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 16317 16318 if (rc == MBX_NOT_FINISHED) { 16319 mempool_free(mbox, phba->mbox_mem_pool); 16320 return -EIO; 16321 } 16322 return 0; 16323 } 16324 16325 /** 16326 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event 16327 * @phba: pointer to lpfc hba data structure. 16328 * 16329 * This function is the failover routine as a last resort to the FCF DEAD 16330 * event when driver failed to perform fast FCF failover. 16331 **/ 16332 void 16333 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) 16334 { 16335 uint32_t link_state; 16336 16337 /* 16338 * Last resort as FCF DEAD event failover will treat this as 16339 * a link down, but save the link state because we don't want 16340 * it to be changed to Link Down unless it is already down. 16341 */ 16342 link_state = phba->link_state; 16343 lpfc_linkdown(phba); 16344 phba->link_state = link_state; 16345 16346 /* Unregister FCF if no devices connected to it */ 16347 lpfc_unregister_unused_fcf(phba); 16348 } 16349 16350 /** 16351 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data. 16352 * @phba: pointer to lpfc hba data structure. 16353 * @rgn23_data: pointer to configure region 23 data. 16354 * 16355 * This function gets SLI3 port configure region 23 data through memory dump 16356 * mailbox command. When it successfully retrieves data, the size of the data 16357 * will be returned, otherwise, 0 will be returned. 
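 *
 * Typical use (a sketch mirroring lpfc_sli_read_link_ste() below):
 *
 *	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
 *	if (rgn23_data)
 *		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);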
16358 **/ 16359 static uint32_t 16360 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 16361 { 16362 LPFC_MBOXQ_t *pmb = NULL; 16363 MAILBOX_t *mb; 16364 uint32_t offset = 0; 16365 int rc; 16366 16367 if (!rgn23_data) 16368 return 0; 16369 16370 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16371 if (!pmb) { 16372 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16373 "2600 failed to allocate mailbox memory\n"); 16374 return 0; 16375 } 16376 mb = &pmb->u.mb; 16377 16378 do { 16379 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23); 16380 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 16381 16382 if (rc != MBX_SUCCESS) { 16383 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 16384 "2601 failed to read config " 16385 "region 23, rc 0x%x Status 0x%x\n", 16386 rc, mb->mbxStatus); 16387 mb->un.varDmp.word_cnt = 0; 16388 } 16389 /* 16390 * dump mem may return a zero when finished or we got a 16391 * mailbox error, either way we are done. 16392 */ 16393 if (mb->un.varDmp.word_cnt == 0) 16394 break; 16395 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset) 16396 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset; 16397 16398 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 16399 rgn23_data + offset, 16400 mb->un.varDmp.word_cnt); 16401 offset += mb->un.varDmp.word_cnt; 16402 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE); 16403 16404 mempool_free(pmb, phba->mbox_mem_pool); 16405 return offset; 16406 } 16407 16408 /** 16409 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data. 16410 * @phba: pointer to lpfc hba data structure. 16411 * @rgn23_data: pointer to configure region 23 data. 16412 * 16413 * This function gets SLI4 port configure region 23 data through memory dump 16414 * mailbox command. When it successfully retrieves data, the size of the data 16415 * will be returned, otherwise, 0 will be returned. 16416 **/ 16417 static uint32_t 16418 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 16419 { 16420 LPFC_MBOXQ_t *mboxq = NULL; 16421 struct lpfc_dmabuf *mp = NULL; 16422 struct lpfc_mqe *mqe; 16423 uint32_t data_length = 0; 16424 int rc; 16425 16426 if (!rgn23_data) 16427 return 0; 16428 16429 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16430 if (!mboxq) { 16431 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16432 "3105 failed to allocate mailbox memory\n"); 16433 return 0; 16434 } 16435 16436 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) 16437 goto out; 16438 mqe = &mboxq->u.mqe; 16439 mp = (struct lpfc_dmabuf *) mboxq->context1; 16440 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 16441 if (rc) 16442 goto out; 16443 data_length = mqe->un.mb_words[5]; 16444 if (data_length == 0) 16445 goto out; 16446 if (data_length > DMP_RGN23_SIZE) { 16447 data_length = 0; 16448 goto out; 16449 } 16450 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length); 16451 out: 16452 mempool_free(mboxq, phba->mbox_mem_pool); 16453 if (mp) { 16454 lpfc_mbuf_free(phba, mp->virt, mp->phys); 16455 kfree(mp); 16456 } 16457 return data_length; 16458 } 16459 16460 /** 16461 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. 16462 * @phba: pointer to lpfc hba data structure. 16463 * 16464 * This function read region 23 and parse TLV for port status to 16465 * decide if the user disaled the port. If the TLV indicates the 16466 * port is disabled, the hba_flag is set accordingly. 
16467 **/ 16468 void 16469 lpfc_sli_read_link_ste(struct lpfc_hba *phba) 16470 { 16471 uint8_t *rgn23_data = NULL; 16472 uint32_t if_type, data_size, sub_tlv_len, tlv_offset; 16473 uint32_t offset = 0; 16474 16475 /* Get adapter Region 23 data */ 16476 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL); 16477 if (!rgn23_data) 16478 goto out; 16479 16480 if (phba->sli_rev < LPFC_SLI_REV4) 16481 data_size = lpfc_sli_get_config_region23(phba, rgn23_data); 16482 else { 16483 if_type = bf_get(lpfc_sli_intf_if_type, 16484 &phba->sli4_hba.sli_intf); 16485 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) 16486 goto out; 16487 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data); 16488 } 16489 16490 if (!data_size) 16491 goto out; 16492 16493 /* Check the region signature first */ 16494 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) { 16495 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16496 "2619 Config region 23 has bad signature\n"); 16497 goto out; 16498 } 16499 offset += 4; 16500 16501 /* Check the data structure version */ 16502 if (rgn23_data[offset] != LPFC_REGION23_VERSION) { 16503 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16504 "2620 Config region 23 has bad version\n"); 16505 goto out; 16506 } 16507 offset += 4; 16508 16509 /* Parse TLV entries in the region */ 16510 while (offset < data_size) { 16511 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) 16512 break; 16513 /* 16514 * If the TLV is not driver specific TLV or driver id is 16515 * not linux driver id, skip the record. 16516 */ 16517 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) || 16518 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) || 16519 (rgn23_data[offset + 3] != 0)) { 16520 offset += rgn23_data[offset + 1] * 4 + 4; 16521 continue; 16522 } 16523 16524 /* Driver found a driver specific TLV in the config region */ 16525 sub_tlv_len = rgn23_data[offset + 1] * 4; 16526 offset += 4; 16527 tlv_offset = 0; 16528 16529 /* 16530 * Search for configured port state sub-TLV. 16531 */ 16532 while ((offset < data_size) && 16533 (tlv_offset < sub_tlv_len)) { 16534 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) { 16535 offset += 4; 16536 tlv_offset += 4; 16537 break; 16538 } 16539 if (rgn23_data[offset] != PORT_STE_TYPE) { 16540 offset += rgn23_data[offset + 1] * 4 + 4; 16541 tlv_offset += rgn23_data[offset + 1] * 4 + 4; 16542 continue; 16543 } 16544 16545 /* This HBA contains PORT_STE configured */ 16546 if (!rgn23_data[offset + 2]) 16547 phba->hba_flag |= LINK_DISABLED; 16548 16549 goto out; 16550 } 16551 } 16552 16553 out: 16554 kfree(rgn23_data); 16555 return; 16556 } 16557 16558 /** 16559 * lpfc_wr_object - write an object to the firmware 16560 * @phba: HBA structure that indicates port to create a queue on. 16561 * @dmabuf_list: list of dmabufs to write to the port. 16562 * @size: the total byte value of the objects to write to the port. 16563 * @offset: the current offset to be used to start the transfer. 16564 * 16565 * This routine will create a wr_object mailbox command to send to the port. 16566 * the mailbox command will be constructed using the dma buffers described in 16567 * @dmabuf_list to create a list of BDEs. This routine will fill in as many 16568 * BDEs that the imbedded mailbox can support. The @offset variable will be 16569 * used to indicate the starting offset of the transfer and will also return 16570 * the offset after the write object mailbox has completed. @size is used to 16571 * determine the end of the object and whether the eof bit should be set. 
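 *
 * A minimal usage sketch (illustrative only; the firmware download path is
 * the real caller and manages @dmabuf_list itself):
 *
 *	uint32_t offset = 0;
 *
 *	while (offset < size) {
 *		rc = lpfc_wr_object(phba, &dmabuf_list, size, &offset);
 *		if (rc)
 *			break;
 *	}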
16572 * 16573 * Return 0 is successful and offset will contain the the new offset to use 16574 * for the next write. 16575 * Return negative value for error cases. 16576 **/ 16577 int 16578 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, 16579 uint32_t size, uint32_t *offset) 16580 { 16581 struct lpfc_mbx_wr_object *wr_object; 16582 LPFC_MBOXQ_t *mbox; 16583 int rc = 0, i = 0; 16584 uint32_t shdr_status, shdr_add_status; 16585 uint32_t mbox_tmo; 16586 union lpfc_sli4_cfg_shdr *shdr; 16587 struct lpfc_dmabuf *dmabuf; 16588 uint32_t written = 0; 16589 16590 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16591 if (!mbox) 16592 return -ENOMEM; 16593 16594 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 16595 LPFC_MBOX_OPCODE_WRITE_OBJECT, 16596 sizeof(struct lpfc_mbx_wr_object) - 16597 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); 16598 16599 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object; 16600 wr_object->u.request.write_offset = *offset; 16601 sprintf((uint8_t *)wr_object->u.request.object_name, "/"); 16602 wr_object->u.request.object_name[0] = 16603 cpu_to_le32(wr_object->u.request.object_name[0]); 16604 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0); 16605 list_for_each_entry(dmabuf, dmabuf_list, list) { 16606 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size) 16607 break; 16608 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys); 16609 wr_object->u.request.bde[i].addrHigh = 16610 putPaddrHigh(dmabuf->phys); 16611 if (written + SLI4_PAGE_SIZE >= size) { 16612 wr_object->u.request.bde[i].tus.f.bdeSize = 16613 (size - written); 16614 written += (size - written); 16615 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1); 16616 } else { 16617 wr_object->u.request.bde[i].tus.f.bdeSize = 16618 SLI4_PAGE_SIZE; 16619 written += SLI4_PAGE_SIZE; 16620 } 16621 i++; 16622 } 16623 wr_object->u.request.bde_count = i; 16624 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written); 16625 if (!phba->sli4_hba.intr_enable) 16626 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16627 else { 16628 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16629 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16630 } 16631 /* The IOCTL status is embedded in the mailbox subheader. */ 16632 shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr; 16633 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16634 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16635 if (rc != MBX_TIMEOUT) 16636 mempool_free(mbox, phba->mbox_mem_pool); 16637 if (shdr_status || shdr_add_status || rc) { 16638 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16639 "3025 Write Object mailbox failed with " 16640 "status x%x add_status x%x, mbx status x%x\n", 16641 shdr_status, shdr_add_status, rc); 16642 rc = -ENXIO; 16643 } else 16644 *offset += wr_object->u.response.actual_write_length; 16645 return rc; 16646 } 16647 16648 /** 16649 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands. 16650 * @vport: pointer to vport data structure. 16651 * 16652 * This function iterate through the mailboxq and clean up all REG_LOGIN 16653 * and REG_VPI mailbox commands associated with the vport. This function 16654 * is called when driver want to restart discovery of the vport due to 16655 * a Clear Virtual Link event. 
16656 **/ 16657 void 16658 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) 16659 { 16660 struct lpfc_hba *phba = vport->phba; 16661 LPFC_MBOXQ_t *mb, *nextmb; 16662 struct lpfc_dmabuf *mp; 16663 struct lpfc_nodelist *ndlp; 16664 struct lpfc_nodelist *act_mbx_ndlp = NULL; 16665 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 16666 LIST_HEAD(mbox_cmd_list); 16667 uint8_t restart_loop; 16668 16669 /* Clean up internally queued mailbox commands with the vport */ 16670 spin_lock_irq(&phba->hbalock); 16671 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 16672 if (mb->vport != vport) 16673 continue; 16674 16675 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 16676 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 16677 continue; 16678 16679 list_del(&mb->list); 16680 list_add_tail(&mb->list, &mbox_cmd_list); 16681 } 16682 /* Clean up active mailbox command with the vport */ 16683 mb = phba->sli.mbox_active; 16684 if (mb && (mb->vport == vport)) { 16685 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || 16686 (mb->u.mb.mbxCommand == MBX_REG_VPI)) 16687 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16688 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 16689 act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2; 16690 /* Put reference count for delayed processing */ 16691 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp); 16692 /* Unregister the RPI when mailbox complete */ 16693 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 16694 } 16695 } 16696 /* Cleanup any mailbox completions which are not yet processed */ 16697 do { 16698 restart_loop = 0; 16699 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { 16700 /* 16701 * If this mailox is already processed or it is 16702 * for another vport ignore it. 16703 */ 16704 if ((mb->vport != vport) || 16705 (mb->mbox_flag & LPFC_MBX_IMED_UNREG)) 16706 continue; 16707 16708 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 16709 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 16710 continue; 16711 16712 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16713 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 16714 ndlp = (struct lpfc_nodelist *)mb->context2; 16715 /* Unregister the RPI when mailbox complete */ 16716 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 16717 restart_loop = 1; 16718 spin_unlock_irq(&phba->hbalock); 16719 spin_lock(shost->host_lock); 16720 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 16721 spin_unlock(shost->host_lock); 16722 spin_lock_irq(&phba->hbalock); 16723 break; 16724 } 16725 } 16726 } while (restart_loop); 16727 16728 spin_unlock_irq(&phba->hbalock); 16729 16730 /* Release the cleaned-up mailbox commands */ 16731 while (!list_empty(&mbox_cmd_list)) { 16732 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list); 16733 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 16734 mp = (struct lpfc_dmabuf *) (mb->context1); 16735 if (mp) { 16736 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 16737 kfree(mp); 16738 } 16739 ndlp = (struct lpfc_nodelist *) mb->context2; 16740 mb->context2 = NULL; 16741 if (ndlp) { 16742 spin_lock(shost->host_lock); 16743 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 16744 spin_unlock(shost->host_lock); 16745 lpfc_nlp_put(ndlp); 16746 } 16747 } 16748 mempool_free(mb, phba->mbox_mem_pool); 16749 } 16750 16751 /* Release the ndlp with the cleaned-up active mailbox command */ 16752 if (act_mbx_ndlp) { 16753 spin_lock(shost->host_lock); 16754 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 16755 spin_unlock(shost->host_lock); 16756 lpfc_nlp_put(act_mbx_ndlp); 16757 } 16758 } 16759 16760 /** 16761 * lpfc_drain_txq - Drain the txq 16762 * @phba: Pointer to HBA context object. 
16763 * 16764 * This function attempt to submit IOCBs on the txq 16765 * to the adapter. For SLI4 adapters, the txq contains 16766 * ELS IOCBs that have been deferred because the there 16767 * are no SGLs. This congestion can occur with large 16768 * vport counts during node discovery. 16769 **/ 16770 16771 uint32_t 16772 lpfc_drain_txq(struct lpfc_hba *phba) 16773 { 16774 LIST_HEAD(completions); 16775 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 16776 struct lpfc_iocbq *piocbq = NULL; 16777 unsigned long iflags = 0; 16778 char *fail_msg = NULL; 16779 struct lpfc_sglq *sglq; 16780 union lpfc_wqe wqe; 16781 int txq_cnt = 0; 16782 16783 spin_lock_irqsave(&pring->ring_lock, iflags); 16784 list_for_each_entry(piocbq, &pring->txq, list) { 16785 txq_cnt++; 16786 } 16787 16788 if (txq_cnt > pring->txq_max) 16789 pring->txq_max = txq_cnt; 16790 16791 spin_unlock_irqrestore(&pring->ring_lock, iflags); 16792 16793 while (!list_empty(&pring->txq)) { 16794 spin_lock_irqsave(&pring->ring_lock, iflags); 16795 16796 piocbq = lpfc_sli_ringtx_get(phba, pring); 16797 if (!piocbq) { 16798 spin_unlock_irqrestore(&pring->ring_lock, iflags); 16799 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16800 "2823 txq empty and txq_cnt is %d\n ", 16801 txq_cnt); 16802 break; 16803 } 16804 sglq = __lpfc_sli_get_sglq(phba, piocbq); 16805 if (!sglq) { 16806 __lpfc_sli_ringtx_put(phba, pring, piocbq); 16807 spin_unlock_irqrestore(&pring->ring_lock, iflags); 16808 break; 16809 } 16810 txq_cnt--; 16811 16812 /* The xri and iocb resources secured, 16813 * attempt to issue request 16814 */ 16815 piocbq->sli4_lxritag = sglq->sli4_lxritag; 16816 piocbq->sli4_xritag = sglq->sli4_xritag; 16817 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq)) 16818 fail_msg = "to convert bpl to sgl"; 16819 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe)) 16820 fail_msg = "to convert iocb to wqe"; 16821 else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) 16822 fail_msg = " - Wq is full"; 16823 else 16824 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq); 16825 16826 if (fail_msg) { 16827 /* Failed means we can't issue and need to cancel */ 16828 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16829 "2822 IOCB failed %s iotag 0x%x " 16830 "xri 0x%x\n", 16831 fail_msg, 16832 piocbq->iotag, piocbq->sli4_xritag); 16833 list_add_tail(&piocbq->list, &completions); 16834 } 16835 spin_unlock_irqrestore(&pring->ring_lock, iflags); 16836 } 16837 16838 /* Cancel all the IOCBs that cannot be issued */ 16839 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 16840 IOERR_SLI_ABORTED); 16841 16842 return txq_cnt; 16843 } 16844