/********************************************************************
 * This file is part of the Emulex Linux Device Driver for          *
 * Fibre Channel Host Bus Adapters.                                 *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term  *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.      *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.             *
 * EMULEX and SLI are trademarks of Emulex.                         *
 * www.broadcom.com                                                 *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig               *
 *                                                                  *
 * This program is free software; you can redistribute it and/or    *
 * modify it under the terms of version 2 of the GNU General        *
 * Public License as published by the Free Software Foundation.     *
 * This program is distributed in the hope that it will be useful.  *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND           *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,   *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE       *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD  *
 * TO BE LEGALLY INVALID. See the GNU General Public License for    *
 * more details, a copy of which can be found in the file COPYING   *
 * included with this package.                                      *
 ********************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module.
 */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
				   struct lpfc_cqe *);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_eqe *eqe, uint32_t qidx);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring,
				   struct lpfc_iocbq *cmdiocb);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy. Must be a multiple of sizeof(uint64_t).
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * locks held.
 **/
void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If the queue is full of entries the HBA has not yet processed,
 * this function will return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
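 *
 * Illustrative usage sketch only (not a verbatim caller from this file; the
 * work queue pointer wq and a fully built wqe are assumed to exist at the
 * call site, with the hbalock already held):
 *
 *	rc = lpfc_sli4_wq_put(wq, &wqe);
 *	if (rc == -EBUSY)
 *		goto wq_full;	the WQ is full, caller retries or fails the I/O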
138 **/ 139 static int 140 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe) 141 { 142 union lpfc_wqe *temp_wqe; 143 struct lpfc_register doorbell; 144 uint32_t host_index; 145 uint32_t idx; 146 uint32_t i = 0; 147 uint8_t *tmp; 148 149 /* sanity check on queue memory */ 150 if (unlikely(!q)) 151 return -ENOMEM; 152 temp_wqe = q->qe[q->host_index].wqe; 153 154 /* If the host has not yet processed the next entry then we are done */ 155 idx = ((q->host_index + 1) % q->entry_count); 156 if (idx == q->hba_index) { 157 q->WQ_overflow++; 158 return -EBUSY; 159 } 160 q->WQ_posted++; 161 /* set consumption flag every once in a while */ 162 if (!((q->host_index + 1) % q->entry_repost)) 163 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); 164 else 165 bf_set(wqe_wqec, &wqe->generic.wqe_com, 0); 166 if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED) 167 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id); 168 lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size); 169 if (q->dpp_enable && q->phba->cfg_enable_dpp) { 170 /* write to DPP aperture taking advatage of Combined Writes */ 171 tmp = (uint8_t *)temp_wqe; 172 #ifdef __raw_writeq 173 for (i = 0; i < q->entry_size; i += sizeof(uint64_t)) 174 __raw_writeq(*((uint64_t *)(tmp + i)), 175 q->dpp_regaddr + i); 176 #else 177 for (i = 0; i < q->entry_size; i += sizeof(uint32_t)) 178 __raw_writel(*((uint32_t *)(tmp + i)), 179 q->dpp_regaddr + i); 180 #endif 181 } 182 /* ensure WQE bcopy and DPP flushed before doorbell write */ 183 wmb(); 184 185 /* Update the host index before invoking device */ 186 host_index = q->host_index; 187 188 q->host_index = idx; 189 190 /* Ring Doorbell */ 191 doorbell.word0 = 0; 192 if (q->db_format == LPFC_DB_LIST_FORMAT) { 193 if (q->dpp_enable && q->phba->cfg_enable_dpp) { 194 bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1); 195 bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1); 196 bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell, 197 q->dpp_id); 198 bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell, 199 q->queue_id); 200 } else { 201 bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1); 202 bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index); 203 bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id); 204 } 205 } else if (q->db_format == LPFC_DB_RING_FORMAT) { 206 bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1); 207 bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id); 208 } else { 209 return -EINVAL; 210 } 211 writel(doorbell.word0, q->db_regaddr); 212 213 return 0; 214 } 215 216 /** 217 * lpfc_sli4_wq_release - Updates internal hba index for WQ 218 * @q: The Work Queue to operate on. 219 * @index: The index to advance the hba index to. 220 * 221 * This routine will update the HBA index of a queue to reflect consumption of 222 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed 223 * an entry the host calls this function to update the queue's internal 224 * pointers. This routine returns the number of entries that were consumed by 225 * the HBA. 226 **/ 227 static uint32_t 228 lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index) 229 { 230 uint32_t released = 0; 231 232 /* sanity check on queue memory */ 233 if (unlikely(!q)) 234 return 0; 235 236 if (q->hba_index == index) 237 return 0; 238 do { 239 q->hba_index = ((q->hba_index + 1) % q->entry_count); 240 released++; 241 } while (q->hba_index != index); 242 return released; 243 } 244 245 /** 246 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on an Mailbox Queue 247 * @q: The Mailbox Queue to operate on. 
248 * @wqe: The Mailbox Queue Entry to put on the Work queue. 249 * 250 * This routine will copy the contents of @mqe to the next available entry on 251 * the @q. This function will then ring the Work Queue Doorbell to signal the 252 * HBA to start processing the Work Queue Entry. This function returns 0 if 253 * successful. If no entries are available on @q then this function will return 254 * -ENOMEM. 255 * The caller is expected to hold the hbalock when calling this routine. 256 **/ 257 static uint32_t 258 lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe) 259 { 260 struct lpfc_mqe *temp_mqe; 261 struct lpfc_register doorbell; 262 263 /* sanity check on queue memory */ 264 if (unlikely(!q)) 265 return -ENOMEM; 266 temp_mqe = q->qe[q->host_index].mqe; 267 268 /* If the host has not yet processed the next entry then we are done */ 269 if (((q->host_index + 1) % q->entry_count) == q->hba_index) 270 return -ENOMEM; 271 lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size); 272 /* Save off the mailbox pointer for completion */ 273 q->phba->mbox = (MAILBOX_t *)temp_mqe; 274 275 /* Update the host index before invoking device */ 276 q->host_index = ((q->host_index + 1) % q->entry_count); 277 278 /* Ring Doorbell */ 279 doorbell.word0 = 0; 280 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1); 281 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id); 282 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr); 283 return 0; 284 } 285 286 /** 287 * lpfc_sli4_mq_release - Updates internal hba index for MQ 288 * @q: The Mailbox Queue to operate on. 289 * 290 * This routine will update the HBA index of a queue to reflect consumption of 291 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed 292 * an entry the host calls this function to update the queue's internal 293 * pointers. This routine returns the number of entries that were consumed by 294 * the HBA. 295 **/ 296 static uint32_t 297 lpfc_sli4_mq_release(struct lpfc_queue *q) 298 { 299 /* sanity check on queue memory */ 300 if (unlikely(!q)) 301 return 0; 302 303 /* Clear the mailbox pointer for completion */ 304 q->phba->mbox = NULL; 305 q->hba_index = ((q->hba_index + 1) % q->entry_count); 306 return 1; 307 } 308 309 /** 310 * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ 311 * @q: The Event Queue to get the first valid EQE from 312 * 313 * This routine will get the first valid Event Queue Entry from @q, update 314 * the queue's internal hba index, and return the EQE. If no valid EQEs are in 315 * the Queue (no more work to do), or the Queue is full of EQEs that have been 316 * processed, but not popped back to the HBA then this routine will return NULL. 317 **/ 318 static struct lpfc_eqe * 319 lpfc_sli4_eq_get(struct lpfc_queue *q) 320 { 321 struct lpfc_hba *phba; 322 struct lpfc_eqe *eqe; 323 uint32_t idx; 324 325 /* sanity check on queue memory */ 326 if (unlikely(!q)) 327 return NULL; 328 phba = q->phba; 329 eqe = q->qe[q->hba_index].eqe; 330 331 /* If the next EQE is not valid then we are done */ 332 if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid) 333 return NULL; 334 /* If the host has not yet processed the next entry then we are done */ 335 idx = ((q->hba_index + 1) % q->entry_count); 336 if (idx == q->host_index) 337 return NULL; 338 339 q->hba_index = idx; 340 /* if the index wrapped around, toggle the valid bit */ 341 if (phba->sli4_hba.pc_sli4_params.eqav && !q->hba_index) 342 q->qe_valid = (q->qe_valid) ? 
						0 : 1;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts on.
 *
 **/
inline void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts on.
 *
 **/
inline void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each event queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
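 *
 * Illustrative sketch of the expected calling pattern (this roughly mirrors
 * how an EQ is drained on the fast path; eq, phba and qidx are assumed to be
 * in scope and are not verbatim from any single caller):
 *
 *	while ((eqe = lpfc_sli4_eq_get(eq)) != NULL)
 *		lpfc_sli4_hba_handle_eqe(phba, eqe, qidx);
 *	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);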
410 **/ 411 uint32_t 412 lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm) 413 { 414 uint32_t released = 0; 415 struct lpfc_hba *phba; 416 struct lpfc_eqe *temp_eqe; 417 struct lpfc_register doorbell; 418 419 /* sanity check on queue memory */ 420 if (unlikely(!q)) 421 return 0; 422 phba = q->phba; 423 424 /* while there are valid entries */ 425 while (q->hba_index != q->host_index) { 426 if (!phba->sli4_hba.pc_sli4_params.eqav) { 427 temp_eqe = q->qe[q->host_index].eqe; 428 bf_set_le32(lpfc_eqe_valid, temp_eqe, 0); 429 } 430 released++; 431 q->host_index = ((q->host_index + 1) % q->entry_count); 432 } 433 if (unlikely(released == 0 && !arm)) 434 return 0; 435 436 /* ring doorbell for number popped */ 437 doorbell.word0 = 0; 438 if (arm) { 439 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); 440 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); 441 } 442 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released); 443 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); 444 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell, 445 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); 446 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); 447 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); 448 /* PCI read to flush PCI pipeline on re-arming for INTx mode */ 449 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) 450 readl(q->phba->sli4_hba.EQDBregaddr); 451 return released; 452 } 453 454 /** 455 * lpfc_sli4_if6_eq_release - Indicates the host has finished processing an EQ 456 * @q: The Event Queue that the host has completed processing for. 457 * @arm: Indicates whether the host wants to arms this CQ. 458 * 459 * This routine will mark all Event Queue Entries on @q, from the last 460 * known completed entry to the last entry that was processed, as completed 461 * by clearing the valid bit for each completion queue entry. Then it will 462 * notify the HBA, by ringing the doorbell, that the EQEs have been processed. 463 * The internal host index in the @q will be updated by this routine to indicate 464 * that the host has finished processing the entries. The @arm parameter 465 * indicates that the queue should be rearmed when ringing the doorbell. 466 * 467 * This function will return the number of EQEs that were popped. 
468 **/ 469 uint32_t 470 lpfc_sli4_if6_eq_release(struct lpfc_queue *q, bool arm) 471 { 472 uint32_t released = 0; 473 struct lpfc_hba *phba; 474 struct lpfc_eqe *temp_eqe; 475 struct lpfc_register doorbell; 476 477 /* sanity check on queue memory */ 478 if (unlikely(!q)) 479 return 0; 480 phba = q->phba; 481 482 /* while there are valid entries */ 483 while (q->hba_index != q->host_index) { 484 if (!phba->sli4_hba.pc_sli4_params.eqav) { 485 temp_eqe = q->qe[q->host_index].eqe; 486 bf_set_le32(lpfc_eqe_valid, temp_eqe, 0); 487 } 488 released++; 489 q->host_index = ((q->host_index + 1) % q->entry_count); 490 } 491 if (unlikely(released == 0 && !arm)) 492 return 0; 493 494 /* ring doorbell for number popped */ 495 doorbell.word0 = 0; 496 if (arm) 497 bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1); 498 bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, released); 499 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id); 500 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); 501 /* PCI read to flush PCI pipeline on re-arming for INTx mode */ 502 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) 503 readl(q->phba->sli4_hba.EQDBregaddr); 504 return released; 505 } 506 507 /** 508 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ 509 * @q: The Completion Queue to get the first valid CQE from 510 * 511 * This routine will get the first valid Completion Queue Entry from @q, update 512 * the queue's internal hba index, and return the CQE. If no valid CQEs are in 513 * the Queue (no more work to do), or the Queue is full of CQEs that have been 514 * processed, but not popped back to the HBA then this routine will return NULL. 515 **/ 516 static struct lpfc_cqe * 517 lpfc_sli4_cq_get(struct lpfc_queue *q) 518 { 519 struct lpfc_hba *phba; 520 struct lpfc_cqe *cqe; 521 uint32_t idx; 522 523 /* sanity check on queue memory */ 524 if (unlikely(!q)) 525 return NULL; 526 phba = q->phba; 527 cqe = q->qe[q->hba_index].cqe; 528 529 /* If the next CQE is not valid then we are done */ 530 if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid) 531 return NULL; 532 /* If the host has not yet processed the next entry then we are done */ 533 idx = ((q->hba_index + 1) % q->entry_count); 534 if (idx == q->host_index) 535 return NULL; 536 537 q->hba_index = idx; 538 /* if the index wrapped around, toggle the valid bit */ 539 if (phba->sli4_hba.pc_sli4_params.cqav && !q->hba_index) 540 q->qe_valid = (q->qe_valid) ? 0 : 1; 541 542 /* 543 * insert barrier for instruction interlock : data from the hardware 544 * must have the valid bit checked before it can be copied and acted 545 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative 546 * instructions allowing action on content before valid bit checked, 547 * add barrier here as well. May not be needed as "content" is a 548 * single 32-bit entity here (vs multi word structure for cq's). 549 */ 550 mb(); 551 return cqe; 552 } 553 554 /** 555 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ 556 * @q: The Completion Queue that the host has completed processing for. 557 * @arm: Indicates whether the host wants to arms this CQ. 558 * 559 * This routine will mark all Completion queue entries on @q, from the last 560 * known completed entry to the last entry that was processed, as completed 561 * by clearing the valid bit for each completion queue entry. Then it will 562 * notify the HBA, by ringing the doorbell, that the CQEs have been processed. 
563 * The internal host index in the @q will be updated by this routine to indicate 564 * that the host has finished processing the entries. The @arm parameter 565 * indicates that the queue should be rearmed when ringing the doorbell. 566 * 567 * This function will return the number of CQEs that were released. 568 **/ 569 uint32_t 570 lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm) 571 { 572 uint32_t released = 0; 573 struct lpfc_hba *phba; 574 struct lpfc_cqe *temp_qe; 575 struct lpfc_register doorbell; 576 577 /* sanity check on queue memory */ 578 if (unlikely(!q)) 579 return 0; 580 phba = q->phba; 581 582 /* while there are valid entries */ 583 while (q->hba_index != q->host_index) { 584 if (!phba->sli4_hba.pc_sli4_params.cqav) { 585 temp_qe = q->qe[q->host_index].cqe; 586 bf_set_le32(lpfc_cqe_valid, temp_qe, 0); 587 } 588 released++; 589 q->host_index = ((q->host_index + 1) % q->entry_count); 590 } 591 if (unlikely(released == 0 && !arm)) 592 return 0; 593 594 /* ring doorbell for number popped */ 595 doorbell.word0 = 0; 596 if (arm) 597 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); 598 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released); 599 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION); 600 bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell, 601 (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT)); 602 bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id); 603 writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr); 604 return released; 605 } 606 607 /** 608 * lpfc_sli4_if6_cq_release - Indicates the host has finished processing a CQ 609 * @q: The Completion Queue that the host has completed processing for. 610 * @arm: Indicates whether the host wants to arms this CQ. 611 * 612 * This routine will mark all Completion queue entries on @q, from the last 613 * known completed entry to the last entry that was processed, as completed 614 * by clearing the valid bit for each completion queue entry. Then it will 615 * notify the HBA, by ringing the doorbell, that the CQEs have been processed. 616 * The internal host index in the @q will be updated by this routine to indicate 617 * that the host has finished processing the entries. The @arm parameter 618 * indicates that the queue should be rearmed when ringing the doorbell. 619 * 620 * This function will return the number of CQEs that were released. 621 **/ 622 uint32_t 623 lpfc_sli4_if6_cq_release(struct lpfc_queue *q, bool arm) 624 { 625 uint32_t released = 0; 626 struct lpfc_hba *phba; 627 struct lpfc_cqe *temp_qe; 628 struct lpfc_register doorbell; 629 630 /* sanity check on queue memory */ 631 if (unlikely(!q)) 632 return 0; 633 phba = q->phba; 634 635 /* while there are valid entries */ 636 while (q->hba_index != q->host_index) { 637 if (!phba->sli4_hba.pc_sli4_params.cqav) { 638 temp_qe = q->qe[q->host_index].cqe; 639 bf_set_le32(lpfc_cqe_valid, temp_qe, 0); 640 } 641 released++; 642 q->host_index = ((q->host_index + 1) % q->entry_count); 643 } 644 if (unlikely(released == 0 && !arm)) 645 return 0; 646 647 /* ring doorbell for number popped */ 648 doorbell.word0 = 0; 649 if (arm) 650 bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1); 651 bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, released); 652 bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id); 653 writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr); 654 return released; 655 } 656 657 /** 658 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue 659 * @q: The Header Receive Queue to operate on. 
660 * @wqe: The Receive Queue Entry to put on the Receive queue. 661 * 662 * This routine will copy the contents of @wqe to the next available entry on 663 * the @q. This function will then ring the Receive Queue Doorbell to signal the 664 * HBA to start processing the Receive Queue Entry. This function returns the 665 * index that the rqe was copied to if successful. If no entries are available 666 * on @q then this function will return -ENOMEM. 667 * The caller is expected to hold the hbalock when calling this routine. 668 **/ 669 int 670 lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, 671 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe) 672 { 673 struct lpfc_rqe *temp_hrqe; 674 struct lpfc_rqe *temp_drqe; 675 struct lpfc_register doorbell; 676 int hq_put_index; 677 int dq_put_index; 678 679 /* sanity check on queue memory */ 680 if (unlikely(!hq) || unlikely(!dq)) 681 return -ENOMEM; 682 hq_put_index = hq->host_index; 683 dq_put_index = dq->host_index; 684 temp_hrqe = hq->qe[hq_put_index].rqe; 685 temp_drqe = dq->qe[dq_put_index].rqe; 686 687 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ) 688 return -EINVAL; 689 if (hq_put_index != dq_put_index) 690 return -EINVAL; 691 /* If the host has not yet processed the next entry then we are done */ 692 if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index) 693 return -EBUSY; 694 lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size); 695 lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size); 696 697 /* Update the host index to point to the next slot */ 698 hq->host_index = ((hq_put_index + 1) % hq->entry_count); 699 dq->host_index = ((dq_put_index + 1) % dq->entry_count); 700 hq->RQ_buf_posted++; 701 702 /* Ring The Header Receive Queue Doorbell */ 703 if (!(hq->host_index % hq->entry_repost)) { 704 doorbell.word0 = 0; 705 if (hq->db_format == LPFC_DB_RING_FORMAT) { 706 bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell, 707 hq->entry_repost); 708 bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id); 709 } else if (hq->db_format == LPFC_DB_LIST_FORMAT) { 710 bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell, 711 hq->entry_repost); 712 bf_set(lpfc_rq_db_list_fm_index, &doorbell, 713 hq->host_index); 714 bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id); 715 } else { 716 return -EINVAL; 717 } 718 writel(doorbell.word0, hq->db_regaddr); 719 } 720 return hq_put_index; 721 } 722 723 /** 724 * lpfc_sli4_rq_release - Updates internal hba index for RQ 725 * @q: The Header Receive Queue to operate on. 726 * 727 * This routine will update the HBA index of a queue to reflect consumption of 728 * one Receive Queue Entry by the HBA. When the HBA indicates that it has 729 * consumed an entry the host calls this function to update the queue's 730 * internal pointers. This routine returns the number of entries that were 731 * consumed by the HBA. 732 **/ 733 static uint32_t 734 lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq) 735 { 736 /* sanity check on queue memory */ 737 if (unlikely(!hq) || unlikely(!dq)) 738 return 0; 739 740 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ)) 741 return 0; 742 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count); 743 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count); 744 return 1; 745 } 746 747 /** 748 * lpfc_cmd_iocb - Get next command iocb entry in the ring 749 * @phba: Pointer to HBA context object. 750 * @pring: Pointer to driver SLI ring object. 751 * 752 * This function returns pointer to next command iocb entry 753 * in the command ring. 
The caller must hold hbalock to prevent 754 * other threads consume the next command iocb. 755 * SLI-2/SLI-3 provide different sized iocbs. 756 **/ 757 static inline IOCB_t * 758 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 759 { 760 return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) + 761 pring->sli.sli3.cmdidx * phba->iocb_cmd_size); 762 } 763 764 /** 765 * lpfc_resp_iocb - Get next response iocb entry in the ring 766 * @phba: Pointer to HBA context object. 767 * @pring: Pointer to driver SLI ring object. 768 * 769 * This function returns pointer to next response iocb entry 770 * in the response ring. The caller must hold hbalock to make sure 771 * that no other thread consume the next response iocb. 772 * SLI-2/SLI-3 provide different sized iocbs. 773 **/ 774 static inline IOCB_t * 775 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 776 { 777 return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) + 778 pring->sli.sli3.rspidx * phba->iocb_rsp_size); 779 } 780 781 /** 782 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool 783 * @phba: Pointer to HBA context object. 784 * 785 * This function is called with hbalock held. This function 786 * allocates a new driver iocb object from the iocb pool. If the 787 * allocation is successful, it returns pointer to the newly 788 * allocated iocb object else it returns NULL. 789 **/ 790 struct lpfc_iocbq * 791 __lpfc_sli_get_iocbq(struct lpfc_hba *phba) 792 { 793 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list; 794 struct lpfc_iocbq * iocbq = NULL; 795 796 lockdep_assert_held(&phba->hbalock); 797 798 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list); 799 if (iocbq) 800 phba->iocb_cnt++; 801 if (phba->iocb_cnt > phba->iocb_max) 802 phba->iocb_max = phba->iocb_cnt; 803 return iocbq; 804 } 805 806 /** 807 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI. 808 * @phba: Pointer to HBA context object. 809 * @xritag: XRI value. 810 * 811 * This function clears the sglq pointer from the array of acive 812 * sglq's. The xritag that is passed in is used to index into the 813 * array. Before the xritag can be used it needs to be adjusted 814 * by subtracting the xribase. 815 * 816 * Returns sglq ponter = success, NULL = Failure. 817 **/ 818 struct lpfc_sglq * 819 __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag) 820 { 821 struct lpfc_sglq *sglq; 822 823 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag]; 824 phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL; 825 return sglq; 826 } 827 828 /** 829 * __lpfc_get_active_sglq - Get the active sglq for this XRI. 830 * @phba: Pointer to HBA context object. 831 * @xritag: XRI value. 832 * 833 * This function returns the sglq pointer from the array of acive 834 * sglq's. The xritag that is passed in is used to index into the 835 * array. Before the xritag can be used it needs to be adjusted 836 * by subtracting the xribase. 837 * 838 * Returns sglq ponter = success, NULL = Failure. 839 **/ 840 struct lpfc_sglq * 841 __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) 842 { 843 struct lpfc_sglq *sglq; 844 845 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag]; 846 return sglq; 847 } 848 849 /** 850 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap. 851 * @phba: Pointer to HBA context object. 852 * @xritag: xri used in this exchange. 853 * @rrq: The RRQ to be cleared. 
854 * 855 **/ 856 void 857 lpfc_clr_rrq_active(struct lpfc_hba *phba, 858 uint16_t xritag, 859 struct lpfc_node_rrq *rrq) 860 { 861 struct lpfc_nodelist *ndlp = NULL; 862 863 if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp)) 864 ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID); 865 866 /* The target DID could have been swapped (cable swap) 867 * we should use the ndlp from the findnode if it is 868 * available. 869 */ 870 if ((!ndlp) && rrq->ndlp) 871 ndlp = rrq->ndlp; 872 873 if (!ndlp) 874 goto out; 875 876 if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) { 877 rrq->send_rrq = 0; 878 rrq->xritag = 0; 879 rrq->rrq_stop_time = 0; 880 } 881 out: 882 mempool_free(rrq, phba->rrq_pool); 883 } 884 885 /** 886 * lpfc_handle_rrq_active - Checks if RRQ has waithed RATOV. 887 * @phba: Pointer to HBA context object. 888 * 889 * This function is called with hbalock held. This function 890 * Checks if stop_time (ratov from setting rrq active) has 891 * been reached, if it has and the send_rrq flag is set then 892 * it will call lpfc_send_rrq. If the send_rrq flag is not set 893 * then it will just call the routine to clear the rrq and 894 * free the rrq resource. 895 * The timer is set to the next rrq that is going to expire before 896 * leaving the routine. 897 * 898 **/ 899 void 900 lpfc_handle_rrq_active(struct lpfc_hba *phba) 901 { 902 struct lpfc_node_rrq *rrq; 903 struct lpfc_node_rrq *nextrrq; 904 unsigned long next_time; 905 unsigned long iflags; 906 LIST_HEAD(send_rrq); 907 908 spin_lock_irqsave(&phba->hbalock, iflags); 909 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 910 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); 911 list_for_each_entry_safe(rrq, nextrrq, 912 &phba->active_rrq_list, list) { 913 if (time_after(jiffies, rrq->rrq_stop_time)) 914 list_move(&rrq->list, &send_rrq); 915 else if (time_before(rrq->rrq_stop_time, next_time)) 916 next_time = rrq->rrq_stop_time; 917 } 918 spin_unlock_irqrestore(&phba->hbalock, iflags); 919 if ((!list_empty(&phba->active_rrq_list)) && 920 (!(phba->pport->load_flag & FC_UNLOADING))) 921 mod_timer(&phba->rrq_tmr, next_time); 922 list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) { 923 list_del(&rrq->list); 924 if (!rrq->send_rrq) 925 /* this call will free the rrq */ 926 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 927 else if (lpfc_send_rrq(phba, rrq)) { 928 /* if we send the rrq then the completion handler 929 * will clear the bit in the xribitmap. 930 */ 931 lpfc_clr_rrq_active(phba, rrq->xritag, 932 rrq); 933 } 934 } 935 } 936 937 /** 938 * lpfc_get_active_rrq - Get the active RRQ for this exchange. 939 * @vport: Pointer to vport context object. 940 * @xri: The xri used in the exchange. 941 * @did: The targets DID for this exchange. 942 * 943 * returns NULL = rrq not found in the phba->active_rrq_list. 944 * rrq = rrq for this xri and target. 
945 **/ 946 struct lpfc_node_rrq * 947 lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did) 948 { 949 struct lpfc_hba *phba = vport->phba; 950 struct lpfc_node_rrq *rrq; 951 struct lpfc_node_rrq *nextrrq; 952 unsigned long iflags; 953 954 if (phba->sli_rev != LPFC_SLI_REV4) 955 return NULL; 956 spin_lock_irqsave(&phba->hbalock, iflags); 957 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { 958 if (rrq->vport == vport && rrq->xritag == xri && 959 rrq->nlp_DID == did){ 960 list_del(&rrq->list); 961 spin_unlock_irqrestore(&phba->hbalock, iflags); 962 return rrq; 963 } 964 } 965 spin_unlock_irqrestore(&phba->hbalock, iflags); 966 return NULL; 967 } 968 969 /** 970 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport. 971 * @vport: Pointer to vport context object. 972 * @ndlp: Pointer to the lpfc_node_list structure. 973 * If ndlp is NULL Remove all active RRQs for this vport from the 974 * phba->active_rrq_list and clear the rrq. 975 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp. 976 **/ 977 void 978 lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 979 980 { 981 struct lpfc_hba *phba = vport->phba; 982 struct lpfc_node_rrq *rrq; 983 struct lpfc_node_rrq *nextrrq; 984 unsigned long iflags; 985 LIST_HEAD(rrq_list); 986 987 if (phba->sli_rev != LPFC_SLI_REV4) 988 return; 989 if (!ndlp) { 990 lpfc_sli4_vport_delete_els_xri_aborted(vport); 991 lpfc_sli4_vport_delete_fcp_xri_aborted(vport); 992 } 993 spin_lock_irqsave(&phba->hbalock, iflags); 994 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) 995 if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp)) 996 list_move(&rrq->list, &rrq_list); 997 spin_unlock_irqrestore(&phba->hbalock, iflags); 998 999 list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) { 1000 list_del(&rrq->list); 1001 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 1002 } 1003 } 1004 1005 /** 1006 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap. 1007 * @phba: Pointer to HBA context object. 1008 * @ndlp: Targets nodelist pointer for this exchange. 1009 * @xritag the xri in the bitmap to test. 1010 * 1011 * This function is called with hbalock held. This function 1012 * returns 0 = rrq not active for this xri 1013 * 1 = rrq is valid for this xri. 1014 **/ 1015 int 1016 lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 1017 uint16_t xritag) 1018 { 1019 lockdep_assert_held(&phba->hbalock); 1020 if (!ndlp) 1021 return 0; 1022 if (!ndlp->active_rrqs_xri_bitmap) 1023 return 0; 1024 if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap)) 1025 return 1; 1026 else 1027 return 0; 1028 } 1029 1030 /** 1031 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap. 1032 * @phba: Pointer to HBA context object. 1033 * @ndlp: nodelist pointer for this target. 1034 * @xritag: xri used in this exchange. 1035 * @rxid: Remote Exchange ID. 1036 * @send_rrq: Flag used to determine if we should send rrq els cmd. 1037 * 1038 * This function takes the hbalock. 1039 * The active bit is always set in the active rrq xri_bitmap even 1040 * if there is no slot avaiable for the other rrq information. 1041 * 1042 * returns 0 rrq actived for this xri 1043 * < 0 No memory or invalid ndlp. 
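 *
 * Illustrative usage sketch only (ndlp, xritag and rxid describe the
 * exchange being retired; no lock is held by the caller):
 *
 *	if (lpfc_set_rrq_active(phba, ndlp, xritag, rxid, 1))
 *		goto rrq_fail;	no rrq was queued, though the xri bit in the
 *				bitmap may already have been set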
1044 **/ 1045 int 1046 lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 1047 uint16_t xritag, uint16_t rxid, uint16_t send_rrq) 1048 { 1049 unsigned long iflags; 1050 struct lpfc_node_rrq *rrq; 1051 int empty; 1052 1053 if (!ndlp) 1054 return -EINVAL; 1055 1056 if (!phba->cfg_enable_rrq) 1057 return -EINVAL; 1058 1059 spin_lock_irqsave(&phba->hbalock, iflags); 1060 if (phba->pport->load_flag & FC_UNLOADING) { 1061 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 1062 goto out; 1063 } 1064 1065 /* 1066 * set the active bit even if there is no mem available. 1067 */ 1068 if (NLP_CHK_FREE_REQ(ndlp)) 1069 goto out; 1070 1071 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING)) 1072 goto out; 1073 1074 if (!ndlp->active_rrqs_xri_bitmap) 1075 goto out; 1076 1077 if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap)) 1078 goto out; 1079 1080 spin_unlock_irqrestore(&phba->hbalock, iflags); 1081 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); 1082 if (!rrq) { 1083 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1084 "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x" 1085 " DID:0x%x Send:%d\n", 1086 xritag, rxid, ndlp->nlp_DID, send_rrq); 1087 return -EINVAL; 1088 } 1089 if (phba->cfg_enable_rrq == 1) 1090 rrq->send_rrq = send_rrq; 1091 else 1092 rrq->send_rrq = 0; 1093 rrq->xritag = xritag; 1094 rrq->rrq_stop_time = jiffies + 1095 msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); 1096 rrq->ndlp = ndlp; 1097 rrq->nlp_DID = ndlp->nlp_DID; 1098 rrq->vport = ndlp->vport; 1099 rrq->rxid = rxid; 1100 spin_lock_irqsave(&phba->hbalock, iflags); 1101 empty = list_empty(&phba->active_rrq_list); 1102 list_add_tail(&rrq->list, &phba->active_rrq_list); 1103 phba->hba_flag |= HBA_RRQ_ACTIVE; 1104 if (empty) 1105 lpfc_worker_wake_up(phba); 1106 spin_unlock_irqrestore(&phba->hbalock, iflags); 1107 return 0; 1108 out: 1109 spin_unlock_irqrestore(&phba->hbalock, iflags); 1110 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1111 "2921 Can't set rrq active xri:0x%x rxid:0x%x" 1112 " DID:0x%x Send:%d\n", 1113 xritag, rxid, ndlp->nlp_DID, send_rrq); 1114 return -EINVAL; 1115 } 1116 1117 /** 1118 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool 1119 * @phba: Pointer to HBA context object. 1120 * @piocb: Pointer to the iocbq. 1121 * 1122 * This function is called with the ring lock held. This function 1123 * gets a new driver sglq object from the sglq list. If the 1124 * list is not empty then it is successful, it returns pointer to the newly 1125 * allocated sglq object else it returns NULL. 
1126 **/ 1127 static struct lpfc_sglq * 1128 __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) 1129 { 1130 struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list; 1131 struct lpfc_sglq *sglq = NULL; 1132 struct lpfc_sglq *start_sglq = NULL; 1133 struct lpfc_scsi_buf *lpfc_cmd; 1134 struct lpfc_nodelist *ndlp; 1135 int found = 0; 1136 1137 lockdep_assert_held(&phba->hbalock); 1138 1139 if (piocbq->iocb_flag & LPFC_IO_FCP) { 1140 lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1; 1141 ndlp = lpfc_cmd->rdata->pnode; 1142 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && 1143 !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) { 1144 ndlp = piocbq->context_un.ndlp; 1145 } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) { 1146 if (piocbq->iocb_flag & LPFC_IO_LOOPBACK) 1147 ndlp = NULL; 1148 else 1149 ndlp = piocbq->context_un.ndlp; 1150 } else { 1151 ndlp = piocbq->context1; 1152 } 1153 1154 spin_lock(&phba->sli4_hba.sgl_list_lock); 1155 list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list); 1156 start_sglq = sglq; 1157 while (!found) { 1158 if (!sglq) 1159 break; 1160 if (ndlp && ndlp->active_rrqs_xri_bitmap && 1161 test_bit(sglq->sli4_lxritag, 1162 ndlp->active_rrqs_xri_bitmap)) { 1163 /* This xri has an rrq outstanding for this DID. 1164 * put it back in the list and get another xri. 1165 */ 1166 list_add_tail(&sglq->list, lpfc_els_sgl_list); 1167 sglq = NULL; 1168 list_remove_head(lpfc_els_sgl_list, sglq, 1169 struct lpfc_sglq, list); 1170 if (sglq == start_sglq) { 1171 list_add_tail(&sglq->list, lpfc_els_sgl_list); 1172 sglq = NULL; 1173 break; 1174 } else 1175 continue; 1176 } 1177 sglq->ndlp = ndlp; 1178 found = 1; 1179 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq; 1180 sglq->state = SGL_ALLOCATED; 1181 } 1182 spin_unlock(&phba->sli4_hba.sgl_list_lock); 1183 return sglq; 1184 } 1185 1186 /** 1187 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool 1188 * @phba: Pointer to HBA context object. 1189 * @piocb: Pointer to the iocbq. 1190 * 1191 * This function is called with the sgl_list lock held. This function 1192 * gets a new driver sglq object from the sglq list. If the 1193 * list is not empty then it is successful, it returns pointer to the newly 1194 * allocated sglq object else it returns NULL. 1195 **/ 1196 struct lpfc_sglq * 1197 __lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) 1198 { 1199 struct list_head *lpfc_nvmet_sgl_list; 1200 struct lpfc_sglq *sglq = NULL; 1201 1202 lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list; 1203 1204 lockdep_assert_held(&phba->sli4_hba.sgl_list_lock); 1205 1206 list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list); 1207 if (!sglq) 1208 return NULL; 1209 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq; 1210 sglq->state = SGL_ALLOCATED; 1211 return sglq; 1212 } 1213 1214 /** 1215 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool 1216 * @phba: Pointer to HBA context object. 1217 * 1218 * This function is called with no lock held. This function 1219 * allocates a new driver iocb object from the iocb pool. If the 1220 * allocation is successful, it returns pointer to the newly 1221 * allocated iocb object else it returns NULL. 
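 *
 * Illustrative usage sketch only (error handling reduced to the NULL check):
 *
 *	iocbq = lpfc_sli_get_iocbq(phba);
 *	if (!iocbq)
 *		goto out_busy;	the pool is empty, fail or retry the request
 *	build and issue the iocb, then return it with
 *	lpfc_sli_release_iocbq() once it is no longer needed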
1222 **/ 1223 struct lpfc_iocbq * 1224 lpfc_sli_get_iocbq(struct lpfc_hba *phba) 1225 { 1226 struct lpfc_iocbq * iocbq = NULL; 1227 unsigned long iflags; 1228 1229 spin_lock_irqsave(&phba->hbalock, iflags); 1230 iocbq = __lpfc_sli_get_iocbq(phba); 1231 spin_unlock_irqrestore(&phba->hbalock, iflags); 1232 return iocbq; 1233 } 1234 1235 /** 1236 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool 1237 * @phba: Pointer to HBA context object. 1238 * @iocbq: Pointer to driver iocb object. 1239 * 1240 * This function is called with hbalock held to release driver 1241 * iocb object to the iocb pool. The iotag in the iocb object 1242 * does not change for each use of the iocb object. This function 1243 * clears all other fields of the iocb object when it is freed. 1244 * The sqlq structure that holds the xritag and phys and virtual 1245 * mappings for the scatter gather list is retrieved from the 1246 * active array of sglq. The get of the sglq pointer also clears 1247 * the entry in the array. If the status of the IO indiactes that 1248 * this IO was aborted then the sglq entry it put on the 1249 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the 1250 * IO has good status or fails for any other reason then the sglq 1251 * entry is added to the free list (lpfc_els_sgl_list). 1252 **/ 1253 static void 1254 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1255 { 1256 struct lpfc_sglq *sglq; 1257 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 1258 unsigned long iflag = 0; 1259 struct lpfc_sli_ring *pring; 1260 1261 lockdep_assert_held(&phba->hbalock); 1262 1263 if (iocbq->sli4_xritag == NO_XRI) 1264 sglq = NULL; 1265 else 1266 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag); 1267 1268 1269 if (sglq) { 1270 if (iocbq->iocb_flag & LPFC_IO_NVMET) { 1271 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, 1272 iflag); 1273 sglq->state = SGL_FREED; 1274 sglq->ndlp = NULL; 1275 list_add_tail(&sglq->list, 1276 &phba->sli4_hba.lpfc_nvmet_sgl_list); 1277 spin_unlock_irqrestore( 1278 &phba->sli4_hba.sgl_list_lock, iflag); 1279 goto out; 1280 } 1281 1282 pring = phba->sli4_hba.els_wq->pring; 1283 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && 1284 (sglq->state != SGL_XRI_ABORTED)) { 1285 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, 1286 iflag); 1287 list_add(&sglq->list, 1288 &phba->sli4_hba.lpfc_abts_els_sgl_list); 1289 spin_unlock_irqrestore( 1290 &phba->sli4_hba.sgl_list_lock, iflag); 1291 } else { 1292 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, 1293 iflag); 1294 sglq->state = SGL_FREED; 1295 sglq->ndlp = NULL; 1296 list_add_tail(&sglq->list, 1297 &phba->sli4_hba.lpfc_els_sgl_list); 1298 spin_unlock_irqrestore( 1299 &phba->sli4_hba.sgl_list_lock, iflag); 1300 1301 /* Check if TXQ queue needs to be serviced */ 1302 if (!list_empty(&pring->txq)) 1303 lpfc_worker_wake_up(phba); 1304 } 1305 } 1306 1307 out: 1308 /* 1309 * Clean all volatile data fields, preserve iotag and node struct. 1310 */ 1311 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 1312 iocbq->sli4_lxritag = NO_XRI; 1313 iocbq->sli4_xritag = NO_XRI; 1314 iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | 1315 LPFC_IO_NVME_LS); 1316 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 1317 } 1318 1319 1320 /** 1321 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool 1322 * @phba: Pointer to HBA context object. 1323 * @iocbq: Pointer to driver iocb object. 
1324 * 1325 * This function is called with hbalock held to release driver 1326 * iocb object to the iocb pool. The iotag in the iocb object 1327 * does not change for each use of the iocb object. This function 1328 * clears all other fields of the iocb object when it is freed. 1329 **/ 1330 static void 1331 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1332 { 1333 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 1334 1335 lockdep_assert_held(&phba->hbalock); 1336 1337 /* 1338 * Clean all volatile data fields, preserve iotag and node struct. 1339 */ 1340 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 1341 iocbq->sli4_xritag = NO_XRI; 1342 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 1343 } 1344 1345 /** 1346 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool 1347 * @phba: Pointer to HBA context object. 1348 * @iocbq: Pointer to driver iocb object. 1349 * 1350 * This function is called with hbalock held to release driver 1351 * iocb object to the iocb pool. The iotag in the iocb object 1352 * does not change for each use of the iocb object. This function 1353 * clears all other fields of the iocb object when it is freed. 1354 **/ 1355 static void 1356 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1357 { 1358 lockdep_assert_held(&phba->hbalock); 1359 1360 phba->__lpfc_sli_release_iocbq(phba, iocbq); 1361 phba->iocb_cnt--; 1362 } 1363 1364 /** 1365 * lpfc_sli_release_iocbq - Release iocb to the iocb pool 1366 * @phba: Pointer to HBA context object. 1367 * @iocbq: Pointer to driver iocb object. 1368 * 1369 * This function is called with no lock held to release the iocb to 1370 * iocb pool. 1371 **/ 1372 void 1373 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1374 { 1375 unsigned long iflags; 1376 1377 /* 1378 * Clean all volatile data fields, preserve iotag and node struct. 1379 */ 1380 spin_lock_irqsave(&phba->hbalock, iflags); 1381 __lpfc_sli_release_iocbq(phba, iocbq); 1382 spin_unlock_irqrestore(&phba->hbalock, iflags); 1383 } 1384 1385 /** 1386 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list. 1387 * @phba: Pointer to HBA context object. 1388 * @iocblist: List of IOCBs. 1389 * @ulpstatus: ULP status in IOCB command field. 1390 * @ulpWord4: ULP word-4 in IOCB command field. 1391 * 1392 * This function is called with a list of IOCBs to cancel. It cancels the IOCB 1393 * on the list by invoking the complete callback function associated with the 1394 * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond 1395 * fields. 1396 **/ 1397 void 1398 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist, 1399 uint32_t ulpstatus, uint32_t ulpWord4) 1400 { 1401 struct lpfc_iocbq *piocb; 1402 1403 while (!list_empty(iocblist)) { 1404 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list); 1405 if (!piocb->iocb_cmpl) 1406 lpfc_sli_release_iocbq(phba, piocb); 1407 else { 1408 piocb->iocb.ulpStatus = ulpstatus; 1409 piocb->iocb.un.ulpWord[4] = ulpWord4; 1410 (piocb->iocb_cmpl) (phba, piocb, piocb); 1411 } 1412 } 1413 return; 1414 } 1415 1416 /** 1417 * lpfc_sli_iocb_cmd_type - Get the iocb type 1418 * @iocb_cmnd: iocb command code. 1419 * 1420 * This function is called by ring event handler function to get the iocb type. 1421 * This function translates the iocb command to an iocb command type used to 1422 * decide the final disposition of each completed IOCB. 
1423 * The function returns 1424 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb 1425 * LPFC_SOL_IOCB if it is a solicited iocb completion 1426 * LPFC_ABORT_IOCB if it is an abort iocb 1427 * LPFC_UNSOL_IOCB if it is an unsolicited iocb 1428 * 1429 * The caller is not required to hold any lock. 1430 **/ 1431 static lpfc_iocb_type 1432 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) 1433 { 1434 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB; 1435 1436 if (iocb_cmnd > CMD_MAX_IOCB_CMD) 1437 return 0; 1438 1439 switch (iocb_cmnd) { 1440 case CMD_XMIT_SEQUENCE_CR: 1441 case CMD_XMIT_SEQUENCE_CX: 1442 case CMD_XMIT_BCAST_CN: 1443 case CMD_XMIT_BCAST_CX: 1444 case CMD_ELS_REQUEST_CR: 1445 case CMD_ELS_REQUEST_CX: 1446 case CMD_CREATE_XRI_CR: 1447 case CMD_CREATE_XRI_CX: 1448 case CMD_GET_RPI_CN: 1449 case CMD_XMIT_ELS_RSP_CX: 1450 case CMD_GET_RPI_CR: 1451 case CMD_FCP_IWRITE_CR: 1452 case CMD_FCP_IWRITE_CX: 1453 case CMD_FCP_IREAD_CR: 1454 case CMD_FCP_IREAD_CX: 1455 case CMD_FCP_ICMND_CR: 1456 case CMD_FCP_ICMND_CX: 1457 case CMD_FCP_TSEND_CX: 1458 case CMD_FCP_TRSP_CX: 1459 case CMD_FCP_TRECEIVE_CX: 1460 case CMD_FCP_AUTO_TRSP_CX: 1461 case CMD_ADAPTER_MSG: 1462 case CMD_ADAPTER_DUMP: 1463 case CMD_XMIT_SEQUENCE64_CR: 1464 case CMD_XMIT_SEQUENCE64_CX: 1465 case CMD_XMIT_BCAST64_CN: 1466 case CMD_XMIT_BCAST64_CX: 1467 case CMD_ELS_REQUEST64_CR: 1468 case CMD_ELS_REQUEST64_CX: 1469 case CMD_FCP_IWRITE64_CR: 1470 case CMD_FCP_IWRITE64_CX: 1471 case CMD_FCP_IREAD64_CR: 1472 case CMD_FCP_IREAD64_CX: 1473 case CMD_FCP_ICMND64_CR: 1474 case CMD_FCP_ICMND64_CX: 1475 case CMD_FCP_TSEND64_CX: 1476 case CMD_FCP_TRSP64_CX: 1477 case CMD_FCP_TRECEIVE64_CX: 1478 case CMD_GEN_REQUEST64_CR: 1479 case CMD_GEN_REQUEST64_CX: 1480 case CMD_XMIT_ELS_RSP64_CX: 1481 case DSSCMD_IWRITE64_CR: 1482 case DSSCMD_IWRITE64_CX: 1483 case DSSCMD_IREAD64_CR: 1484 case DSSCMD_IREAD64_CX: 1485 type = LPFC_SOL_IOCB; 1486 break; 1487 case CMD_ABORT_XRI_CN: 1488 case CMD_ABORT_XRI_CX: 1489 case CMD_CLOSE_XRI_CN: 1490 case CMD_CLOSE_XRI_CX: 1491 case CMD_XRI_ABORTED_CX: 1492 case CMD_ABORT_MXRI64_CN: 1493 case CMD_XMIT_BLS_RSP64_CX: 1494 type = LPFC_ABORT_IOCB; 1495 break; 1496 case CMD_RCV_SEQUENCE_CX: 1497 case CMD_RCV_ELS_REQ_CX: 1498 case CMD_RCV_SEQUENCE64_CX: 1499 case CMD_RCV_ELS_REQ64_CX: 1500 case CMD_ASYNC_STATUS: 1501 case CMD_IOCB_RCV_SEQ64_CX: 1502 case CMD_IOCB_RCV_ELS64_CX: 1503 case CMD_IOCB_RCV_CONT64_CX: 1504 case CMD_IOCB_RET_XRI64_CX: 1505 type = LPFC_UNSOL_IOCB; 1506 break; 1507 case CMD_IOCB_XMIT_MSEQ64_CR: 1508 case CMD_IOCB_XMIT_MSEQ64_CX: 1509 case CMD_IOCB_RCV_SEQ_LIST64_CX: 1510 case CMD_IOCB_RCV_ELS_LIST64_CX: 1511 case CMD_IOCB_CLOSE_EXTENDED_CN: 1512 case CMD_IOCB_ABORT_EXTENDED_CN: 1513 case CMD_IOCB_RET_HBQE64_CN: 1514 case CMD_IOCB_FCP_IBIDIR64_CR: 1515 case CMD_IOCB_FCP_IBIDIR64_CX: 1516 case CMD_IOCB_FCP_ITASKMGT64_CX: 1517 case CMD_IOCB_LOGENTRY_CN: 1518 case CMD_IOCB_LOGENTRY_ASYNC_CN: 1519 printk("%s - Unhandled SLI-3 Command x%x\n", 1520 __func__, iocb_cmnd); 1521 type = LPFC_UNKNOWN_IOCB; 1522 break; 1523 default: 1524 type = LPFC_UNKNOWN_IOCB; 1525 break; 1526 } 1527 1528 return type; 1529 } 1530 1531 /** 1532 * lpfc_sli_ring_map - Issue config_ring mbox for all rings 1533 * @phba: Pointer to HBA context object. 1534 * 1535 * This function is called from SLI initialization code 1536 * to configure every ring of the HBA's SLI interface. The 1537 * caller is not required to hold any lock. This function issues 1538 * a config_ring mailbox command for each ring. 
1539 * This function returns zero if successful else returns a negative 1540 * error code. 1541 **/ 1542 static int 1543 lpfc_sli_ring_map(struct lpfc_hba *phba) 1544 { 1545 struct lpfc_sli *psli = &phba->sli; 1546 LPFC_MBOXQ_t *pmb; 1547 MAILBOX_t *pmbox; 1548 int i, rc, ret = 0; 1549 1550 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1551 if (!pmb) 1552 return -ENOMEM; 1553 pmbox = &pmb->u.mb; 1554 phba->link_state = LPFC_INIT_MBX_CMDS; 1555 for (i = 0; i < psli->num_rings; i++) { 1556 lpfc_config_ring(phba, i, pmb); 1557 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 1558 if (rc != MBX_SUCCESS) { 1559 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1560 "0446 Adapter failed to init (%d), " 1561 "mbxCmd x%x CFG_RING, mbxStatus x%x, " 1562 "ring %d\n", 1563 rc, pmbox->mbxCommand, 1564 pmbox->mbxStatus, i); 1565 phba->link_state = LPFC_HBA_ERROR; 1566 ret = -ENXIO; 1567 break; 1568 } 1569 } 1570 mempool_free(pmb, phba->mbox_mem_pool); 1571 return ret; 1572 } 1573 1574 /** 1575 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq 1576 * @phba: Pointer to HBA context object. 1577 * @pring: Pointer to driver SLI ring object. 1578 * @piocb: Pointer to the driver iocb object. 1579 * 1580 * This function is called with hbalock held. The function adds the 1581 * new iocb to txcmplq of the given ring. This function always returns 1582 * 0. If this function is called for ELS ring, this function checks if 1583 * there is a vport associated with the ELS command. This function also 1584 * starts els_tmofunc timer if this is an ELS command. 1585 **/ 1586 static int 1587 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1588 struct lpfc_iocbq *piocb) 1589 { 1590 lockdep_assert_held(&phba->hbalock); 1591 1592 BUG_ON(!piocb); 1593 1594 list_add_tail(&piocb->list, &pring->txcmplq); 1595 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ; 1596 1597 if ((unlikely(pring->ringno == LPFC_ELS_RING)) && 1598 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 1599 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 1600 BUG_ON(!piocb->vport); 1601 if (!(piocb->vport->load_flag & FC_UNLOADING)) 1602 mod_timer(&piocb->vport->els_tmofunc, 1603 jiffies + 1604 msecs_to_jiffies(1000 * (phba->fc_ratov << 1))); 1605 } 1606 1607 return 0; 1608 } 1609 1610 /** 1611 * lpfc_sli_ringtx_get - Get first element of the txq 1612 * @phba: Pointer to HBA context object. 1613 * @pring: Pointer to driver SLI ring object. 1614 * 1615 * This function is called with hbalock held to get next 1616 * iocb in txq of the given ring. If there is any iocb in 1617 * the txq, the function returns first iocb in the list after 1618 * removing the iocb from the list, else it returns NULL. 1619 **/ 1620 struct lpfc_iocbq * 1621 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1622 { 1623 struct lpfc_iocbq *cmd_iocb; 1624 1625 lockdep_assert_held(&phba->hbalock); 1626 1627 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); 1628 return cmd_iocb; 1629 } 1630 1631 /** 1632 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring 1633 * @phba: Pointer to HBA context object. 1634 * @pring: Pointer to driver SLI ring object. 1635 * 1636 * This function is called with hbalock held and the caller must post the 1637 * iocb without releasing the lock. If the caller releases the lock, 1638 * iocb slot returned by the function is not guaranteed to be available. 
1639 * The function returns pointer to the next available iocb slot if there 1640 * is available slot in the ring, else it returns NULL. 1641 * If the get index of the ring is ahead of the put index, the function 1642 * will post an error attention event to the worker thread to take the 1643 * HBA to offline state. 1644 **/ 1645 static IOCB_t * 1646 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1647 { 1648 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 1649 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb; 1650 1651 lockdep_assert_held(&phba->hbalock); 1652 1653 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) && 1654 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx)) 1655 pring->sli.sli3.next_cmdidx = 0; 1656 1657 if (unlikely(pring->sli.sli3.local_getidx == 1658 pring->sli.sli3.next_cmdidx)) { 1659 1660 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 1661 1662 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) { 1663 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1664 "0315 Ring %d issue: portCmdGet %d " 1665 "is bigger than cmd ring %d\n", 1666 pring->ringno, 1667 pring->sli.sli3.local_getidx, 1668 max_cmd_idx); 1669 1670 phba->link_state = LPFC_HBA_ERROR; 1671 /* 1672 * All error attention handlers are posted to 1673 * worker thread 1674 */ 1675 phba->work_ha |= HA_ERATT; 1676 phba->work_hs = HS_FFER3; 1677 1678 lpfc_worker_wake_up(phba); 1679 1680 return NULL; 1681 } 1682 1683 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx) 1684 return NULL; 1685 } 1686 1687 return lpfc_cmd_iocb(phba, pring); 1688 } 1689 1690 /** 1691 * lpfc_sli_next_iotag - Get an iotag for the iocb 1692 * @phba: Pointer to HBA context object. 1693 * @iocbq: Pointer to driver iocb object. 1694 * 1695 * This function gets an iotag for the iocb. If there is no unused iotag and 1696 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup 1697 * array and assigns a new iotag. 1698 * The function returns the allocated iotag if successful, else returns zero. 1699 * Zero is not a valid iotag. 1700 * The caller is not required to hold any lock. 
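 *
 * Illustrative usage sketch only (a freshly allocated iocbq is assumed):
 *
 *	iotag = lpfc_sli_next_iotag(phba, iocbq);
 *	if (!iotag)
 *		goto out_fail;	the lookup array could not be grown; zero is
 *				never a valid iotag, so do not issue the iocb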
1701 **/ 1702 uint16_t 1703 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1704 { 1705 struct lpfc_iocbq **new_arr; 1706 struct lpfc_iocbq **old_arr; 1707 size_t new_len; 1708 struct lpfc_sli *psli = &phba->sli; 1709 uint16_t iotag; 1710 1711 spin_lock_irq(&phba->hbalock); 1712 iotag = psli->last_iotag; 1713 if(++iotag < psli->iocbq_lookup_len) { 1714 psli->last_iotag = iotag; 1715 psli->iocbq_lookup[iotag] = iocbq; 1716 spin_unlock_irq(&phba->hbalock); 1717 iocbq->iotag = iotag; 1718 return iotag; 1719 } else if (psli->iocbq_lookup_len < (0xffff 1720 - LPFC_IOCBQ_LOOKUP_INCREMENT)) { 1721 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; 1722 spin_unlock_irq(&phba->hbalock); 1723 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *), 1724 GFP_KERNEL); 1725 if (new_arr) { 1726 spin_lock_irq(&phba->hbalock); 1727 old_arr = psli->iocbq_lookup; 1728 if (new_len <= psli->iocbq_lookup_len) { 1729 /* highly unprobable case */ 1730 kfree(new_arr); 1731 iotag = psli->last_iotag; 1732 if(++iotag < psli->iocbq_lookup_len) { 1733 psli->last_iotag = iotag; 1734 psli->iocbq_lookup[iotag] = iocbq; 1735 spin_unlock_irq(&phba->hbalock); 1736 iocbq->iotag = iotag; 1737 return iotag; 1738 } 1739 spin_unlock_irq(&phba->hbalock); 1740 return 0; 1741 } 1742 if (psli->iocbq_lookup) 1743 memcpy(new_arr, old_arr, 1744 ((psli->last_iotag + 1) * 1745 sizeof (struct lpfc_iocbq *))); 1746 psli->iocbq_lookup = new_arr; 1747 psli->iocbq_lookup_len = new_len; 1748 psli->last_iotag = iotag; 1749 psli->iocbq_lookup[iotag] = iocbq; 1750 spin_unlock_irq(&phba->hbalock); 1751 iocbq->iotag = iotag; 1752 kfree(old_arr); 1753 return iotag; 1754 } 1755 } else 1756 spin_unlock_irq(&phba->hbalock); 1757 1758 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1759 "0318 Failed to allocate IOTAG.last IOTAG is %d\n", 1760 psli->last_iotag); 1761 1762 return 0; 1763 } 1764 1765 /** 1766 * lpfc_sli_submit_iocb - Submit an iocb to the firmware 1767 * @phba: Pointer to HBA context object. 1768 * @pring: Pointer to driver SLI ring object. 1769 * @iocb: Pointer to iocb slot in the ring. 1770 * @nextiocb: Pointer to driver iocb object which need to be 1771 * posted to firmware. 1772 * 1773 * This function is called with hbalock held to post a new iocb to 1774 * the firmware. This function copies the new iocb to ring iocb slot and 1775 * updates the ring pointers. It adds the new iocb to txcmplq if there is 1776 * a completion call back for this iocb else the function will free the 1777 * iocb object. 1778 **/ 1779 static void 1780 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1781 IOCB_t *iocb, struct lpfc_iocbq *nextiocb) 1782 { 1783 lockdep_assert_held(&phba->hbalock); 1784 /* 1785 * Set up an iotag 1786 */ 1787 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0; 1788 1789 1790 if (pring->ringno == LPFC_ELS_RING) { 1791 lpfc_debugfs_slow_ring_trc(phba, 1792 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x", 1793 *(((uint32_t *) &nextiocb->iocb) + 4), 1794 *(((uint32_t *) &nextiocb->iocb) + 6), 1795 *(((uint32_t *) &nextiocb->iocb) + 7)); 1796 } 1797 1798 /* 1799 * Issue iocb command to adapter 1800 */ 1801 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size); 1802 wmb(); 1803 pring->stats.iocb_cmd++; 1804 1805 /* 1806 * If there is no completion routine to call, we can release the 1807 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF, 1808 * that have no rsp ring completion, iocb_cmpl MUST be NULL. 
1809 */ 1810 if (nextiocb->iocb_cmpl) 1811 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 1812 else 1813 __lpfc_sli_release_iocbq(phba, nextiocb); 1814 1815 /* 1816 * Let the HBA know what IOCB slot will be the next one the 1817 * driver will put a command into. 1818 */ 1819 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx; 1820 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); 1821 } 1822 1823 /** 1824 * lpfc_sli_update_full_ring - Update the chip attention register 1825 * @phba: Pointer to HBA context object. 1826 * @pring: Pointer to driver SLI ring object. 1827 * 1828 * The caller is not required to hold any lock for calling this function. 1829 * This function updates the chip attention bits for the ring to inform firmware 1830 * that there are pending work to be done for this ring and requests an 1831 * interrupt when there is space available in the ring. This function is 1832 * called when the driver is unable to post more iocbs to the ring due 1833 * to unavailability of space in the ring. 1834 **/ 1835 static void 1836 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1837 { 1838 int ringno = pring->ringno; 1839 1840 pring->flag |= LPFC_CALL_RING_AVAILABLE; 1841 1842 wmb(); 1843 1844 /* 1845 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register. 1846 * The HBA will tell us when an IOCB entry is available. 1847 */ 1848 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr); 1849 readl(phba->CAregaddr); /* flush */ 1850 1851 pring->stats.iocb_cmd_full++; 1852 } 1853 1854 /** 1855 * lpfc_sli_update_ring - Update chip attention register 1856 * @phba: Pointer to HBA context object. 1857 * @pring: Pointer to driver SLI ring object. 1858 * 1859 * This function updates the chip attention register bit for the 1860 * given ring to inform HBA that there is more work to be done 1861 * in this ring. The caller is not required to hold any lock. 1862 **/ 1863 static void 1864 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1865 { 1866 int ringno = pring->ringno; 1867 1868 /* 1869 * Tell the HBA that there is work to do in this ring. 1870 */ 1871 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) { 1872 wmb(); 1873 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); 1874 readl(phba->CAregaddr); /* flush */ 1875 } 1876 } 1877 1878 /** 1879 * lpfc_sli_resume_iocb - Process iocbs in the txq 1880 * @phba: Pointer to HBA context object. 1881 * @pring: Pointer to driver SLI ring object. 1882 * 1883 * This function is called with hbalock held to post pending iocbs 1884 * in the txq to the firmware. This function is called when driver 1885 * detects space available in the ring. 1886 **/ 1887 static void 1888 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1889 { 1890 IOCB_t *iocb; 1891 struct lpfc_iocbq *nextiocb; 1892 1893 lockdep_assert_held(&phba->hbalock); 1894 1895 /* 1896 * Check to see if: 1897 * (a) there is anything on the txq to send 1898 * (b) link is up 1899 * (c) link attention events can be processed (fcp ring only) 1900 * (d) IOCB processing is not blocked by the outstanding mbox command. 
1901 */ 1902 1903 if (lpfc_is_link_up(phba) && 1904 (!list_empty(&pring->txq)) && 1905 (pring->ringno != LPFC_FCP_RING || 1906 phba->sli.sli_flag & LPFC_PROCESS_LA)) { 1907 1908 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 1909 (nextiocb = lpfc_sli_ringtx_get(phba, pring))) 1910 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 1911 1912 if (iocb) 1913 lpfc_sli_update_ring(phba, pring); 1914 else 1915 lpfc_sli_update_full_ring(phba, pring); 1916 } 1917 1918 return; 1919 } 1920 1921 /** 1922 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ 1923 * @phba: Pointer to HBA context object. 1924 * @hbqno: HBQ number. 1925 * 1926 * This function is called with hbalock held to get the next 1927 * available slot for the given HBQ. If there is free slot 1928 * available for the HBQ it will return pointer to the next available 1929 * HBQ entry else it will return NULL. 1930 **/ 1931 static struct lpfc_hbq_entry * 1932 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) 1933 { 1934 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1935 1936 lockdep_assert_held(&phba->hbalock); 1937 1938 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && 1939 ++hbqp->next_hbqPutIdx >= hbqp->entry_count) 1940 hbqp->next_hbqPutIdx = 0; 1941 1942 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { 1943 uint32_t raw_index = phba->hbq_get[hbqno]; 1944 uint32_t getidx = le32_to_cpu(raw_index); 1945 1946 hbqp->local_hbqGetIdx = getidx; 1947 1948 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { 1949 lpfc_printf_log(phba, KERN_ERR, 1950 LOG_SLI | LOG_VPORT, 1951 "1802 HBQ %d: local_hbqGetIdx " 1952 "%u is > than hbqp->entry_count %u\n", 1953 hbqno, hbqp->local_hbqGetIdx, 1954 hbqp->entry_count); 1955 1956 phba->link_state = LPFC_HBA_ERROR; 1957 return NULL; 1958 } 1959 1960 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) 1961 return NULL; 1962 } 1963 1964 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt + 1965 hbqp->hbqPutIdx; 1966 } 1967 1968 /** 1969 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers 1970 * @phba: Pointer to HBA context object. 1971 * 1972 * This function is called with no lock held to free all the 1973 * hbq buffers while uninitializing the SLI interface. It also 1974 * frees the HBQ buffers returned by the firmware but not yet 1975 * processed by the upper layers. 1976 **/ 1977 void 1978 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) 1979 { 1980 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 1981 struct hbq_dmabuf *hbq_buf; 1982 unsigned long flags; 1983 int i, hbq_count; 1984 1985 hbq_count = lpfc_sli_hbq_count(); 1986 /* Return all memory used by all HBQs */ 1987 spin_lock_irqsave(&phba->hbalock, flags); 1988 for (i = 0; i < hbq_count; ++i) { 1989 list_for_each_entry_safe(dmabuf, next_dmabuf, 1990 &phba->hbqs[i].hbq_buffer_list, list) { 1991 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1992 list_del(&hbq_buf->dbuf.list); 1993 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); 1994 } 1995 phba->hbqs[i].buffer_count = 0; 1996 } 1997 1998 /* Mark the HBQs not in use */ 1999 phba->hbq_in_use = 0; 2000 spin_unlock_irqrestore(&phba->hbalock, flags); 2001 } 2002 2003 /** 2004 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware 2005 * @phba: Pointer to HBA context object. 2006 * @hbqno: HBQ number. 2007 * @hbq_buf: Pointer to HBQ buffer. 2008 * 2009 * This function is called with the hbalock held to post a 2010 * hbq buffer to the firmware. If the function finds an empty 2011 * slot in the HBQ, it will post the buffer. 
The function returns
2012 * zero if it successfully posts the buffer, otherwise it returns
2013 * an error code.
2014 **/
2015 static int
2016 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2017 			 struct hbq_dmabuf *hbq_buf)
2018 {
2019 	lockdep_assert_held(&phba->hbalock);
2020 	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2021 }
2022 
2023 /**
2024 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2025 * @phba: Pointer to HBA context object.
2026 * @hbqno: HBQ number.
2027 * @hbq_buf: Pointer to HBQ buffer.
2028 *
2029 * This function is called with the hbalock held to post a hbq buffer to the
2030 * firmware. If the function finds an empty slot in the HBQ, it will post the
2031 * buffer and place it on the hbq_buffer_list. The function will return zero if
2032 * it successfully posts the buffer, else it will return an error.
2033 **/
2034 static int
2035 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2036 			    struct hbq_dmabuf *hbq_buf)
2037 {
2038 	struct lpfc_hbq_entry *hbqe;
2039 	dma_addr_t physaddr = hbq_buf->dbuf.phys;
2040 
2041 	lockdep_assert_held(&phba->hbalock);
2042 	/* Get next HBQ entry slot to use */
2043 	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2044 	if (hbqe) {
2045 		struct hbq_s *hbqp = &phba->hbqs[hbqno];
2046 
2047 		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2048 		hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
2049 		hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2050 		hbqe->bde.tus.f.bdeFlags = 0;
2051 		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2052 		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2053 		/* Sync SLIM */
2054 		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2055 		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2056 		/* flush */
2057 		readl(phba->hbq_put + hbqno);
2058 		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2059 		return 0;
2060 	} else
2061 		return -ENOMEM;
2062 }
2063 
2064 /**
2065 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2066 * @phba: Pointer to HBA context object.
2067 * @hbqno: HBQ number.
2068 * @hbq_buf: Pointer to HBQ buffer.
2069 *
2070 * This function is called with the hbalock held to post an RQE to the SLI4
2071 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2072 * the hbq_buffer_list and return zero, otherwise it will return an error.
2073 **/
2074 static int
2075 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2076 			    struct hbq_dmabuf *hbq_buf)
2077 {
2078 	int rc;
2079 	struct lpfc_rqe hrqe;
2080 	struct lpfc_rqe drqe;
2081 	struct lpfc_queue *hrq;
2082 	struct lpfc_queue *drq;
2083 
2084 	if (hbqno != LPFC_ELS_HBQ)
2085 		return 1;
2086 	hrq = phba->sli4_hba.hdr_rq;
2087 	drq = phba->sli4_hba.dat_rq;
2088 
2089 	lockdep_assert_held(&phba->hbalock);
2090 	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2091 	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2092 	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2093 	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2094 	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2095 	if (rc < 0)
2096 		return rc;
2097 	hbq_buf->tag = (rc | (hbqno << 16));
2098 	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2099 	return 0;
2100 }
2101 
2102 /* HBQ for ELS and CT traffic.
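 * Only a single HBQ is defined below (lpfc_els_hbq): 256 entries, with 40
 * buffers posted initially and 40 more added per refill.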
*/ 2103 static struct lpfc_hbq_init lpfc_els_hbq = { 2104 .rn = 1, 2105 .entry_count = 256, 2106 .mask_count = 0, 2107 .profile = 0, 2108 .ring_mask = (1 << LPFC_ELS_RING), 2109 .buffer_count = 0, 2110 .init_count = 40, 2111 .add_count = 40, 2112 }; 2113 2114 /* Array of HBQs */ 2115 struct lpfc_hbq_init *lpfc_hbq_defs[] = { 2116 &lpfc_els_hbq, 2117 }; 2118 2119 /** 2120 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ 2121 * @phba: Pointer to HBA context object. 2122 * @hbqno: HBQ number. 2123 * @count: Number of HBQ buffers to be posted. 2124 * 2125 * This function is called with no lock held to post more hbq buffers to the 2126 * given HBQ. The function returns the number of HBQ buffers successfully 2127 * posted. 2128 **/ 2129 static int 2130 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) 2131 { 2132 uint32_t i, posted = 0; 2133 unsigned long flags; 2134 struct hbq_dmabuf *hbq_buffer; 2135 LIST_HEAD(hbq_buf_list); 2136 if (!phba->hbqs[hbqno].hbq_alloc_buffer) 2137 return 0; 2138 2139 if ((phba->hbqs[hbqno].buffer_count + count) > 2140 lpfc_hbq_defs[hbqno]->entry_count) 2141 count = lpfc_hbq_defs[hbqno]->entry_count - 2142 phba->hbqs[hbqno].buffer_count; 2143 if (!count) 2144 return 0; 2145 /* Allocate HBQ entries */ 2146 for (i = 0; i < count; i++) { 2147 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 2148 if (!hbq_buffer) 2149 break; 2150 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list); 2151 } 2152 /* Check whether HBQ is still in use */ 2153 spin_lock_irqsave(&phba->hbalock, flags); 2154 if (!phba->hbq_in_use) 2155 goto err; 2156 while (!list_empty(&hbq_buf_list)) { 2157 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 2158 dbuf.list); 2159 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 2160 (hbqno << 16)); 2161 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 2162 phba->hbqs[hbqno].buffer_count++; 2163 posted++; 2164 } else 2165 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2166 } 2167 spin_unlock_irqrestore(&phba->hbalock, flags); 2168 return posted; 2169 err: 2170 spin_unlock_irqrestore(&phba->hbalock, flags); 2171 while (!list_empty(&hbq_buf_list)) { 2172 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 2173 dbuf.list); 2174 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2175 } 2176 return 0; 2177 } 2178 2179 /** 2180 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware 2181 * @phba: Pointer to HBA context object. 2182 * @qno: HBQ number. 2183 * 2184 * This function posts more buffers to the HBQ. This function 2185 * is called with no lock held. The function returns the number of HBQ entries 2186 * successfully allocated. 2187 **/ 2188 int 2189 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 2190 { 2191 if (phba->sli_rev == LPFC_SLI_REV4) 2192 return 0; 2193 else 2194 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 2195 lpfc_hbq_defs[qno]->add_count); 2196 } 2197 2198 /** 2199 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ 2200 * @phba: Pointer to HBA context object. 2201 * @qno: HBQ queue number. 2202 * 2203 * This function is called from SLI initialization code path with 2204 * no lock held to post initial HBQ buffers to firmware. The 2205 * function returns the number of HBQ entries successfully allocated. 
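 * For SLI4 ports the HBQ is filled up to its full entry_count; SLI3 ports
 * post init_count buffers (see lpfc_hbq_defs[] above).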
2206 **/
2207 static int
2208 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2209 {
2210 	if (phba->sli_rev == LPFC_SLI_REV4)
2211 		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2212 					lpfc_hbq_defs[qno]->entry_count);
2213 	else
2214 		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2215 					lpfc_hbq_defs[qno]->init_count);
2216 }
2217 
2218 /**
2219 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2220 * @rb_list: Pointer to the driver hbq buffer list from which the
2221 * first buffer is removed.
2222 *
2223 * This function removes the first hbq buffer on an hbq list and returns a
2224 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2225 **/
2226 static struct hbq_dmabuf *
2227 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2228 {
2229 	struct lpfc_dmabuf *d_buf;
2230 
2231 	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2232 	if (!d_buf)
2233 		return NULL;
2234 	return container_of(d_buf, struct hbq_dmabuf, dbuf);
2235 }
2236 
2237 /**
2238 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2239 * @phba: Pointer to HBA context object.
2240 * @hrq: Pointer to the header receive queue.
2241 *
2242 * This function removes the first RQ buffer on an RQ buffer list and returns a
2243 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2244 **/
2245 static struct rqb_dmabuf *
2246 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2247 {
2248 	struct lpfc_dmabuf *h_buf;
2249 	struct lpfc_rqb *rqbp;
2250 
2251 	rqbp = hrq->rqbp;
2252 	list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2253 			 struct lpfc_dmabuf, list);
2254 	if (!h_buf)
2255 		return NULL;
2256 	rqbp->buffer_count--;
2257 	return container_of(h_buf, struct rqb_dmabuf, hbuf);
2258 }
2259 
2260 /**
2261 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2262 * @phba: Pointer to HBA context object.
2263 * @tag: Tag of the hbq buffer.
2264 *
2265 * This function searches for the hbq buffer associated with the given tag in
2266 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2267 * otherwise it returns NULL.
2268 **/
2269 static struct hbq_dmabuf *
2270 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2271 {
2272 	struct lpfc_dmabuf *d_buf;
2273 	struct hbq_dmabuf *hbq_buf;
2274 	uint32_t hbqno;
2275 
2276 	hbqno = tag >> 16;
2277 	if (hbqno >= LPFC_MAX_HBQS)
2278 		return NULL;
2279 
2280 	spin_lock_irq(&phba->hbalock);
2281 	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2282 		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2283 		if (hbq_buf->tag == tag) {
2284 			spin_unlock_irq(&phba->hbalock);
2285 			return hbq_buf;
2286 		}
2287 	}
2288 	spin_unlock_irq(&phba->hbalock);
2289 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
2290 			"1803 Bad hbq tag. Data: x%x x%x\n",
2291 			tag, phba->hbqs[tag >> 16].buffer_count);
2292 	return NULL;
2293 }
2294 
2295 /**
2296 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2297 * @phba: Pointer to HBA context object.
2298 * @hbq_buffer: Pointer to HBQ buffer.
2299 *
2300 * This function is called with the hbalock held. This function gives back
2301 * the hbq buffer to firmware. If the HBQ does not have space to
2302 * post the buffer, it will free the buffer.
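 * The HBQ number is recovered from the upper 16 bits of the buffer tag
 * that was assigned when the buffer was posted.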
2303 **/ 2304 void 2305 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) 2306 { 2307 uint32_t hbqno; 2308 2309 if (hbq_buffer) { 2310 hbqno = hbq_buffer->tag >> 16; 2311 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) 2312 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2313 } 2314 } 2315 2316 /** 2317 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox 2318 * @mbxCommand: mailbox command code. 2319 * 2320 * This function is called by the mailbox event handler function to verify 2321 * that the completed mailbox command is a legitimate mailbox command. If the 2322 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN 2323 * and the mailbox event handler will take the HBA offline. 2324 **/ 2325 static int 2326 lpfc_sli_chk_mbx_command(uint8_t mbxCommand) 2327 { 2328 uint8_t ret; 2329 2330 switch (mbxCommand) { 2331 case MBX_LOAD_SM: 2332 case MBX_READ_NV: 2333 case MBX_WRITE_NV: 2334 case MBX_WRITE_VPARMS: 2335 case MBX_RUN_BIU_DIAG: 2336 case MBX_INIT_LINK: 2337 case MBX_DOWN_LINK: 2338 case MBX_CONFIG_LINK: 2339 case MBX_CONFIG_RING: 2340 case MBX_RESET_RING: 2341 case MBX_READ_CONFIG: 2342 case MBX_READ_RCONFIG: 2343 case MBX_READ_SPARM: 2344 case MBX_READ_STATUS: 2345 case MBX_READ_RPI: 2346 case MBX_READ_XRI: 2347 case MBX_READ_REV: 2348 case MBX_READ_LNK_STAT: 2349 case MBX_REG_LOGIN: 2350 case MBX_UNREG_LOGIN: 2351 case MBX_CLEAR_LA: 2352 case MBX_DUMP_MEMORY: 2353 case MBX_DUMP_CONTEXT: 2354 case MBX_RUN_DIAGS: 2355 case MBX_RESTART: 2356 case MBX_UPDATE_CFG: 2357 case MBX_DOWN_LOAD: 2358 case MBX_DEL_LD_ENTRY: 2359 case MBX_RUN_PROGRAM: 2360 case MBX_SET_MASK: 2361 case MBX_SET_VARIABLE: 2362 case MBX_UNREG_D_ID: 2363 case MBX_KILL_BOARD: 2364 case MBX_CONFIG_FARP: 2365 case MBX_BEACON: 2366 case MBX_LOAD_AREA: 2367 case MBX_RUN_BIU_DIAG64: 2368 case MBX_CONFIG_PORT: 2369 case MBX_READ_SPARM64: 2370 case MBX_READ_RPI64: 2371 case MBX_REG_LOGIN64: 2372 case MBX_READ_TOPOLOGY: 2373 case MBX_WRITE_WWN: 2374 case MBX_SET_DEBUG: 2375 case MBX_LOAD_EXP_ROM: 2376 case MBX_ASYNCEVT_ENABLE: 2377 case MBX_REG_VPI: 2378 case MBX_UNREG_VPI: 2379 case MBX_HEARTBEAT: 2380 case MBX_PORT_CAPABILITIES: 2381 case MBX_PORT_IOV_CONTROL: 2382 case MBX_SLI4_CONFIG: 2383 case MBX_SLI4_REQ_FTRS: 2384 case MBX_REG_FCFI: 2385 case MBX_UNREG_FCFI: 2386 case MBX_REG_VFI: 2387 case MBX_UNREG_VFI: 2388 case MBX_INIT_VPI: 2389 case MBX_INIT_VFI: 2390 case MBX_RESUME_RPI: 2391 case MBX_READ_EVENT_LOG_STATUS: 2392 case MBX_READ_EVENT_LOG: 2393 case MBX_SECURITY_MGMT: 2394 case MBX_AUTH_PORT: 2395 case MBX_ACCESS_VDATA: 2396 ret = mbxCommand; 2397 break; 2398 default: 2399 ret = MBX_SHUTDOWN; 2400 break; 2401 } 2402 return ret; 2403 } 2404 2405 /** 2406 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler 2407 * @phba: Pointer to HBA context object. 2408 * @pmboxq: Pointer to mailbox command. 2409 * 2410 * This is completion handler function for mailbox commands issued from 2411 * lpfc_sli_issue_mbox_wait function. This function is called by the 2412 * mailbox event handler function with no lock held. This function 2413 * will wake up thread waiting on the wait queue pointed by context1 2414 * of the mailbox. 2415 **/ 2416 void 2417 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 2418 { 2419 unsigned long drvr_flag; 2420 struct completion *pmbox_done; 2421 2422 /* 2423 * If pmbox_done is empty, the driver thread gave up waiting and 2424 * continued running. 
2425 	 */
2426 	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2427 	spin_lock_irqsave(&phba->hbalock, drvr_flag);
2428 	pmbox_done = (struct completion *)pmboxq->context3;
2429 	if (pmbox_done)
2430 		complete(pmbox_done);
2431 	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2432 	return;
2433 }
2434 
2435 
2436 /**
2437 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2438 * @phba: Pointer to HBA context object.
2439 * @pmb: Pointer to mailbox object.
2440 *
2441 * This function is the default mailbox completion handler. It
2442 * frees the memory resources associated with the completed mailbox
2443 * command. If the completed command is a REG_LOGIN mailbox command,
2444 * this function will issue an UNREG_LOGIN to reclaim the RPI.
2445 **/
2446 void
2447 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2448 {
2449 	struct lpfc_vport *vport = pmb->vport;
2450 	struct lpfc_dmabuf *mp;
2451 	struct lpfc_nodelist *ndlp;
2452 	struct Scsi_Host *shost;
2453 	uint16_t rpi, vpi;
2454 	int rc;
2455 
2456 	mp = (struct lpfc_dmabuf *) (pmb->context1);
2457 
2458 	if (mp) {
2459 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
2460 		kfree(mp);
2461 	}
2462 
2463 	/*
2464 	 * If a REG_LOGIN succeeded after the node was destroyed or the node
2465 	 * is being re-discovered, the driver needs to clean up the RPI.
2466 	 */
2467 	if (!(phba->pport->load_flag & FC_UNLOADING) &&
2468 	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2469 	    !pmb->u.mb.mbxStatus) {
2470 		rpi = pmb->u.mb.un.varWords[0];
2471 		vpi = pmb->u.mb.un.varRegLogin.vpi;
2472 		lpfc_unreg_login(phba, vpi, rpi, pmb);
2473 		pmb->vport = vport;
2474 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2475 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2476 		if (rc != MBX_NOT_FINISHED)
2477 			return;
2478 	}
2479 
2480 	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2481 	    !(phba->pport->load_flag & FC_UNLOADING) &&
2482 	    !pmb->u.mb.mbxStatus) {
2483 		shost = lpfc_shost_from_vport(vport);
2484 		spin_lock_irq(shost->host_lock);
2485 		vport->vpi_state |= LPFC_VPI_REGISTERED;
2486 		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2487 		spin_unlock_irq(shost->host_lock);
2488 	}
2489 
2490 	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2491 		ndlp = (struct lpfc_nodelist *)pmb->context2;
2492 		lpfc_nlp_put(ndlp);
2493 		pmb->context2 = NULL;
2494 	}
2495 
2496 	/* Check security permission status on INIT_LINK mailbox command */
2497 	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2498 	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2499 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2500 				"2860 SLI authentication is required "
2501 				"for INIT_LINK but has not done yet\n");
2502 
2503 	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2504 		lpfc_sli4_mbox_cmd_free(phba, pmb);
2505 	else
2506 		mempool_free(pmb, phba->mbox_mem_pool);
2507 }
2508 /**
2509 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2510 * @phba: Pointer to HBA context object.
2511 * @pmb: Pointer to mailbox object.
2512 *
2513 * This function is the unreg rpi mailbox completion handler. It
2514 * frees the memory resources associated with the completed mailbox
2515 * command. An additional reference is taken on the ndlp to prevent
2516 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2517 * the unreg mailbox command completes; this routine puts that
2518 * reference back.
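 * Only MBX_UNREG_LOGIN completions on SLI4 ports with interface type 2 or
 * later are handled this way; the extra reference is dropped with
 * lpfc_nlp_put().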
2519 *
2520 **/
2521 void
2522 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2523 {
2524 	struct lpfc_vport *vport = pmb->vport;
2525 	struct lpfc_nodelist *ndlp;
2526 
2527 	ndlp = pmb->context1;
2528 	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2529 		if (phba->sli_rev == LPFC_SLI_REV4 &&
2530 		    (bf_get(lpfc_sli_intf_if_type,
2531 		     &phba->sli4_hba.sli_intf) >=
2532 		     LPFC_SLI_INTF_IF_TYPE_2)) {
2533 			if (ndlp) {
2534 				lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
2535 						 "0010 UNREG_LOGIN vpi:%x "
2536 						 "rpi:%x DID:%x map:%x %p\n",
2537 						 vport->vpi, ndlp->nlp_rpi,
2538 						 ndlp->nlp_DID,
2539 						 ndlp->nlp_usg_map, ndlp);
2540 				ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2541 				lpfc_nlp_put(ndlp);
2542 			}
2543 		}
2544 	}
2545 
2546 	mempool_free(pmb, phba->mbox_mem_pool);
2547 }
2548 
2549 /**
2550 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2551 * @phba: Pointer to HBA context object.
2552 *
2553 * This function is called with no lock held. This function processes all
2554 * the completed mailbox commands and gives them to the upper layers. The
2555 * interrupt service routine processes mailbox completion interrupts and
2556 * adds completed mailbox commands to the mboxq_cmpl queue and signals the
2557 * worker thread. The worker thread calls lpfc_sli_handle_mb_event, which
2558 * returns the completed mailbox commands in the mboxq_cmpl queue to the
2559 * upper layers. This function returns the mailbox commands to the upper
2560 * layer by calling the completion handler function of each mailbox.
2561 **/
2562 int
2563 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2564 {
2565 	MAILBOX_t *pmbox;
2566 	LPFC_MBOXQ_t *pmb;
2567 	int rc;
2568 	LIST_HEAD(cmplq);
2569 
2570 	phba->sli.slistat.mbox_event++;
2571 
2572 	/* Get all completed mailbox buffers into the cmplq */
2573 	spin_lock_irq(&phba->hbalock);
2574 	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2575 	spin_unlock_irq(&phba->hbalock);
2576 
2577 	/* Get a Mailbox buffer to setup mailbox commands for callback */
2578 	do {
2579 		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2580 		if (pmb == NULL)
2581 			break;
2582 
2583 		pmbox = &pmb->u.mb;
2584 
2585 		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2586 			if (pmb->vport) {
2587 				lpfc_debugfs_disc_trc(pmb->vport,
2588 					LPFC_DISC_TRC_MBOX_VPORT,
2589 					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2590 					(uint32_t)pmbox->mbxCommand,
2591 					pmbox->un.varWords[0],
2592 					pmbox->un.varWords[1]);
2593 			}
2594 			else {
2595 				lpfc_debugfs_disc_trc(phba->pport,
2596 					LPFC_DISC_TRC_MBOX,
2597 					"MBOX cmpl: cmd:x%x mb:x%x x%x",
2598 					(uint32_t)pmbox->mbxCommand,
2599 					pmbox->un.varWords[0],
2600 					pmbox->un.varWords[1]);
2601 			}
2602 		}
2603 
2604 		/*
2605 		 * It is a fatal error if an unknown mailbox command completes.
2606 		 */
2607 		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2608 		    MBX_SHUTDOWN) {
2609 			/* Unknown mailbox command compl */
2610 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2611 					"(%d):0323 Unknown Mailbox command "
2612 					"x%x (x%x/x%x) Cmpl\n",
2613 					pmb->vport ?
pmb->vport->vpi : 0, 2614 pmbox->mbxCommand, 2615 lpfc_sli_config_mbox_subsys_get(phba, 2616 pmb), 2617 lpfc_sli_config_mbox_opcode_get(phba, 2618 pmb)); 2619 phba->link_state = LPFC_HBA_ERROR; 2620 phba->work_hs = HS_FFER3; 2621 lpfc_handle_eratt(phba); 2622 continue; 2623 } 2624 2625 if (pmbox->mbxStatus) { 2626 phba->sli.slistat.mbox_stat_err++; 2627 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 2628 /* Mbox cmd cmpl error - RETRYing */ 2629 lpfc_printf_log(phba, KERN_INFO, 2630 LOG_MBOX | LOG_SLI, 2631 "(%d):0305 Mbox cmd cmpl " 2632 "error - RETRYing Data: x%x " 2633 "(x%x/x%x) x%x x%x x%x\n", 2634 pmb->vport ? pmb->vport->vpi : 0, 2635 pmbox->mbxCommand, 2636 lpfc_sli_config_mbox_subsys_get(phba, 2637 pmb), 2638 lpfc_sli_config_mbox_opcode_get(phba, 2639 pmb), 2640 pmbox->mbxStatus, 2641 pmbox->un.varWords[0], 2642 pmb->vport->port_state); 2643 pmbox->mbxStatus = 0; 2644 pmbox->mbxOwner = OWN_HOST; 2645 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2646 if (rc != MBX_NOT_FINISHED) 2647 continue; 2648 } 2649 } 2650 2651 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 2652 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2653 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " 2654 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 2655 "x%x x%x x%x\n", 2656 pmb->vport ? pmb->vport->vpi : 0, 2657 pmbox->mbxCommand, 2658 lpfc_sli_config_mbox_subsys_get(phba, pmb), 2659 lpfc_sli_config_mbox_opcode_get(phba, pmb), 2660 pmb->mbox_cmpl, 2661 *((uint32_t *) pmbox), 2662 pmbox->un.varWords[0], 2663 pmbox->un.varWords[1], 2664 pmbox->un.varWords[2], 2665 pmbox->un.varWords[3], 2666 pmbox->un.varWords[4], 2667 pmbox->un.varWords[5], 2668 pmbox->un.varWords[6], 2669 pmbox->un.varWords[7], 2670 pmbox->un.varWords[8], 2671 pmbox->un.varWords[9], 2672 pmbox->un.varWords[10]); 2673 2674 if (pmb->mbox_cmpl) 2675 pmb->mbox_cmpl(phba,pmb); 2676 } while (1); 2677 return 0; 2678 } 2679 2680 /** 2681 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag 2682 * @phba: Pointer to HBA context object. 2683 * @pring: Pointer to driver SLI ring object. 2684 * @tag: buffer tag. 2685 * 2686 * This function is called with no lock held. When QUE_BUFTAG_BIT bit 2687 * is set in the tag the buffer is posted for a particular exchange, 2688 * the function will return the buffer without replacing the buffer. 2689 * If the buffer is for unsolicited ELS or CT traffic, this function 2690 * returns the buffer and also posts another buffer to the firmware. 2691 **/ 2692 static struct lpfc_dmabuf * 2693 lpfc_sli_get_buff(struct lpfc_hba *phba, 2694 struct lpfc_sli_ring *pring, 2695 uint32_t tag) 2696 { 2697 struct hbq_dmabuf *hbq_entry; 2698 2699 if (tag & QUE_BUFTAG_BIT) 2700 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); 2701 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); 2702 if (!hbq_entry) 2703 return NULL; 2704 return &hbq_entry->dbuf; 2705 } 2706 2707 /** 2708 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence 2709 * @phba: Pointer to HBA context object. 2710 * @pring: Pointer to driver SLI ring object. 2711 * @saveq: Pointer to the iocbq struct representing the sequence starting frame. 2712 * @fch_r_ctl: the r_ctl for the first frame of the sequence. 2713 * @fch_type: the type for the first frame of the sequence. 2714 * 2715 * This function is called with no lock held. This function uses the r_ctl and 2716 * type of the received sequence to find the correct callback function to call 2717 * to process the sequence. 
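 * NVME LS sequences (fch_type FC_TYPE_NVME) are handed directly to
 * lpfc_nvmet_unsol_ls_event(); other sequences are handed to the ring's
 * profile handler if one is registered, or matched against the rctl/type
 * mask table (pring->prt[]) otherwise.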
2718 **/ 2719 static int 2720 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2721 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, 2722 uint32_t fch_type) 2723 { 2724 int i; 2725 2726 switch (fch_type) { 2727 case FC_TYPE_NVME: 2728 lpfc_nvmet_unsol_ls_event(phba, pring, saveq); 2729 return 1; 2730 default: 2731 break; 2732 } 2733 2734 /* unSolicited Responses */ 2735 if (pring->prt[0].profile) { 2736 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 2737 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, 2738 saveq); 2739 return 1; 2740 } 2741 /* We must search, based on rctl / type 2742 for the right routine */ 2743 for (i = 0; i < pring->num_mask; i++) { 2744 if ((pring->prt[i].rctl == fch_r_ctl) && 2745 (pring->prt[i].type == fch_type)) { 2746 if (pring->prt[i].lpfc_sli_rcv_unsol_event) 2747 (pring->prt[i].lpfc_sli_rcv_unsol_event) 2748 (phba, pring, saveq); 2749 return 1; 2750 } 2751 } 2752 return 0; 2753 } 2754 2755 /** 2756 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 2757 * @phba: Pointer to HBA context object. 2758 * @pring: Pointer to driver SLI ring object. 2759 * @saveq: Pointer to the unsolicited iocb. 2760 * 2761 * This function is called with no lock held by the ring event handler 2762 * when there is an unsolicited iocb posted to the response ring by the 2763 * firmware. This function gets the buffer associated with the iocbs 2764 * and calls the event handler for the ring. This function handles both 2765 * qring buffers and hbq buffers. 2766 * When the function returns 1 the caller can free the iocb object otherwise 2767 * upper layer functions will free the iocb objects. 2768 **/ 2769 static int 2770 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2771 struct lpfc_iocbq *saveq) 2772 { 2773 IOCB_t * irsp; 2774 WORD5 * w5p; 2775 uint32_t Rctl, Type; 2776 struct lpfc_iocbq *iocbq; 2777 struct lpfc_dmabuf *dmzbuf; 2778 2779 irsp = &(saveq->iocb); 2780 2781 if (irsp->ulpCommand == CMD_ASYNC_STATUS) { 2782 if (pring->lpfc_sli_rcv_async_status) 2783 pring->lpfc_sli_rcv_async_status(phba, pring, saveq); 2784 else 2785 lpfc_printf_log(phba, 2786 KERN_WARNING, 2787 LOG_SLI, 2788 "0316 Ring %d handler: unexpected " 2789 "ASYNC_STATUS iocb received evt_code " 2790 "0x%x\n", 2791 pring->ringno, 2792 irsp->un.asyncstat.evt_code); 2793 return 1; 2794 } 2795 2796 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && 2797 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { 2798 if (irsp->ulpBdeCount > 0) { 2799 dmzbuf = lpfc_sli_get_buff(phba, pring, 2800 irsp->un.ulpWord[3]); 2801 lpfc_in_buf_free(phba, dmzbuf); 2802 } 2803 2804 if (irsp->ulpBdeCount > 1) { 2805 dmzbuf = lpfc_sli_get_buff(phba, pring, 2806 irsp->unsli3.sli3Words[3]); 2807 lpfc_in_buf_free(phba, dmzbuf); 2808 } 2809 2810 if (irsp->ulpBdeCount > 2) { 2811 dmzbuf = lpfc_sli_get_buff(phba, pring, 2812 irsp->unsli3.sli3Words[7]); 2813 lpfc_in_buf_free(phba, dmzbuf); 2814 } 2815 2816 return 1; 2817 } 2818 2819 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2820 if (irsp->ulpBdeCount != 0) { 2821 saveq->context2 = lpfc_sli_get_buff(phba, pring, 2822 irsp->un.ulpWord[3]); 2823 if (!saveq->context2) 2824 lpfc_printf_log(phba, 2825 KERN_ERR, 2826 LOG_SLI, 2827 "0341 Ring %d Cannot find buffer for " 2828 "an unsolicited iocb. 
tag 0x%x\n", 2829 pring->ringno, 2830 irsp->un.ulpWord[3]); 2831 } 2832 if (irsp->ulpBdeCount == 2) { 2833 saveq->context3 = lpfc_sli_get_buff(phba, pring, 2834 irsp->unsli3.sli3Words[7]); 2835 if (!saveq->context3) 2836 lpfc_printf_log(phba, 2837 KERN_ERR, 2838 LOG_SLI, 2839 "0342 Ring %d Cannot find buffer for an" 2840 " unsolicited iocb. tag 0x%x\n", 2841 pring->ringno, 2842 irsp->unsli3.sli3Words[7]); 2843 } 2844 list_for_each_entry(iocbq, &saveq->list, list) { 2845 irsp = &(iocbq->iocb); 2846 if (irsp->ulpBdeCount != 0) { 2847 iocbq->context2 = lpfc_sli_get_buff(phba, pring, 2848 irsp->un.ulpWord[3]); 2849 if (!iocbq->context2) 2850 lpfc_printf_log(phba, 2851 KERN_ERR, 2852 LOG_SLI, 2853 "0343 Ring %d Cannot find " 2854 "buffer for an unsolicited iocb" 2855 ". tag 0x%x\n", pring->ringno, 2856 irsp->un.ulpWord[3]); 2857 } 2858 if (irsp->ulpBdeCount == 2) { 2859 iocbq->context3 = lpfc_sli_get_buff(phba, pring, 2860 irsp->unsli3.sli3Words[7]); 2861 if (!iocbq->context3) 2862 lpfc_printf_log(phba, 2863 KERN_ERR, 2864 LOG_SLI, 2865 "0344 Ring %d Cannot find " 2866 "buffer for an unsolicited " 2867 "iocb. tag 0x%x\n", 2868 pring->ringno, 2869 irsp->unsli3.sli3Words[7]); 2870 } 2871 } 2872 } 2873 if (irsp->ulpBdeCount != 0 && 2874 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || 2875 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { 2876 int found = 0; 2877 2878 /* search continue save q for same XRI */ 2879 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { 2880 if (iocbq->iocb.unsli3.rcvsli3.ox_id == 2881 saveq->iocb.unsli3.rcvsli3.ox_id) { 2882 list_add_tail(&saveq->list, &iocbq->list); 2883 found = 1; 2884 break; 2885 } 2886 } 2887 if (!found) 2888 list_add_tail(&saveq->clist, 2889 &pring->iocb_continue_saveq); 2890 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { 2891 list_del_init(&iocbq->clist); 2892 saveq = iocbq; 2893 irsp = &(saveq->iocb); 2894 } else 2895 return 0; 2896 } 2897 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || 2898 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || 2899 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { 2900 Rctl = FC_RCTL_ELS_REQ; 2901 Type = FC_TYPE_ELS; 2902 } else { 2903 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); 2904 Rctl = w5p->hcsw.Rctl; 2905 Type = w5p->hcsw.Type; 2906 2907 /* Firmware Workaround */ 2908 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 2909 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 2910 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 2911 Rctl = FC_RCTL_ELS_REQ; 2912 Type = FC_TYPE_ELS; 2913 w5p->hcsw.Rctl = Rctl; 2914 w5p->hcsw.Type = Type; 2915 } 2916 } 2917 2918 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) 2919 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2920 "0313 Ring %d handler: unexpected Rctl x%x " 2921 "Type x%x received\n", 2922 pring->ringno, Rctl, Type); 2923 2924 return 1; 2925 } 2926 2927 /** 2928 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb 2929 * @phba: Pointer to HBA context object. 2930 * @pring: Pointer to driver SLI ring object. 2931 * @prspiocb: Pointer to response iocb object. 2932 * 2933 * This function looks up the iocb_lookup table to get the command iocb 2934 * corresponding to the given response iocb using the iotag of the 2935 * response iocb. This function is called with the hbalock held 2936 * for sli3 devices or the ring_lock for sli4 devices. 2937 * This function returns the command iocb object if it finds the command 2938 * iocb else returns NULL. 
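 * A matching command iocb is also removed from the txcmplq and its
 * LPFC_IO_ON_TXCMPLQ flag is cleared before it is returned.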
2939 **/ 2940 static struct lpfc_iocbq * 2941 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, 2942 struct lpfc_sli_ring *pring, 2943 struct lpfc_iocbq *prspiocb) 2944 { 2945 struct lpfc_iocbq *cmd_iocb = NULL; 2946 uint16_t iotag; 2947 lockdep_assert_held(&phba->hbalock); 2948 2949 iotag = prspiocb->iocb.ulpIoTag; 2950 2951 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2952 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2953 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 2954 /* remove from txcmpl queue list */ 2955 list_del_init(&cmd_iocb->list); 2956 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 2957 return cmd_iocb; 2958 } 2959 } 2960 2961 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2962 "0317 iotag x%x is out of " 2963 "range: max iotag x%x wd0 x%x\n", 2964 iotag, phba->sli.last_iotag, 2965 *(((uint32_t *) &prspiocb->iocb) + 7)); 2966 return NULL; 2967 } 2968 2969 /** 2970 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag 2971 * @phba: Pointer to HBA context object. 2972 * @pring: Pointer to driver SLI ring object. 2973 * @iotag: IOCB tag. 2974 * 2975 * This function looks up the iocb_lookup table to get the command iocb 2976 * corresponding to the given iotag. This function is called with the 2977 * hbalock held. 2978 * This function returns the command iocb object if it finds the command 2979 * iocb else returns NULL. 2980 **/ 2981 static struct lpfc_iocbq * 2982 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, 2983 struct lpfc_sli_ring *pring, uint16_t iotag) 2984 { 2985 struct lpfc_iocbq *cmd_iocb = NULL; 2986 2987 lockdep_assert_held(&phba->hbalock); 2988 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2989 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2990 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 2991 /* remove from txcmpl queue list */ 2992 list_del_init(&cmd_iocb->list); 2993 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 2994 return cmd_iocb; 2995 } 2996 } 2997 2998 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2999 "0372 iotag x%x lookup error: max iotag (x%x) " 3000 "iocb_flag x%x\n", 3001 iotag, phba->sli.last_iotag, 3002 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff); 3003 return NULL; 3004 } 3005 3006 /** 3007 * lpfc_sli_process_sol_iocb - process solicited iocb completion 3008 * @phba: Pointer to HBA context object. 3009 * @pring: Pointer to driver SLI ring object. 3010 * @saveq: Pointer to the response iocb to be processed. 3011 * 3012 * This function is called by the ring event handler for non-fcp 3013 * rings when there is a new response iocb in the response ring. 3014 * The caller is not required to hold any locks. This function 3015 * gets the command iocb associated with the response iocb and 3016 * calls the completion handler for the command iocb. If there 3017 * is no completion handler, the function will free the resources 3018 * associated with command iocb. If the response iocb is for 3019 * an already aborted command iocb, the status of the completion 3020 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. 3021 * This function always returns 1. 
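 * For ELS commands aborted by the driver, the completion status is rewritten
 * to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED before the command's completion
 * handler is invoked.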
3022 **/ 3023 static int 3024 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3025 struct lpfc_iocbq *saveq) 3026 { 3027 struct lpfc_iocbq *cmdiocbp; 3028 int rc = 1; 3029 unsigned long iflag; 3030 3031 /* Based on the iotag field, get the cmd IOCB from the txcmplq */ 3032 if (phba->sli_rev == LPFC_SLI_REV4) 3033 spin_lock_irqsave(&pring->ring_lock, iflag); 3034 else 3035 spin_lock_irqsave(&phba->hbalock, iflag); 3036 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 3037 if (phba->sli_rev == LPFC_SLI_REV4) 3038 spin_unlock_irqrestore(&pring->ring_lock, iflag); 3039 else 3040 spin_unlock_irqrestore(&phba->hbalock, iflag); 3041 3042 if (cmdiocbp) { 3043 if (cmdiocbp->iocb_cmpl) { 3044 /* 3045 * If an ELS command failed send an event to mgmt 3046 * application. 3047 */ 3048 if (saveq->iocb.ulpStatus && 3049 (pring->ringno == LPFC_ELS_RING) && 3050 (cmdiocbp->iocb.ulpCommand == 3051 CMD_ELS_REQUEST64_CR)) 3052 lpfc_send_els_failure_event(phba, 3053 cmdiocbp, saveq); 3054 3055 /* 3056 * Post all ELS completions to the worker thread. 3057 * All other are passed to the completion callback. 3058 */ 3059 if (pring->ringno == LPFC_ELS_RING) { 3060 if ((phba->sli_rev < LPFC_SLI_REV4) && 3061 (cmdiocbp->iocb_flag & 3062 LPFC_DRIVER_ABORTED)) { 3063 spin_lock_irqsave(&phba->hbalock, 3064 iflag); 3065 cmdiocbp->iocb_flag &= 3066 ~LPFC_DRIVER_ABORTED; 3067 spin_unlock_irqrestore(&phba->hbalock, 3068 iflag); 3069 saveq->iocb.ulpStatus = 3070 IOSTAT_LOCAL_REJECT; 3071 saveq->iocb.un.ulpWord[4] = 3072 IOERR_SLI_ABORTED; 3073 3074 /* Firmware could still be in progress 3075 * of DMAing payload, so don't free data 3076 * buffer till after a hbeat. 3077 */ 3078 spin_lock_irqsave(&phba->hbalock, 3079 iflag); 3080 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 3081 spin_unlock_irqrestore(&phba->hbalock, 3082 iflag); 3083 } 3084 if (phba->sli_rev == LPFC_SLI_REV4) { 3085 if (saveq->iocb_flag & 3086 LPFC_EXCHANGE_BUSY) { 3087 /* Set cmdiocb flag for the 3088 * exchange busy so sgl (xri) 3089 * will not be released until 3090 * the abort xri is received 3091 * from hba. 3092 */ 3093 spin_lock_irqsave( 3094 &phba->hbalock, iflag); 3095 cmdiocbp->iocb_flag |= 3096 LPFC_EXCHANGE_BUSY; 3097 spin_unlock_irqrestore( 3098 &phba->hbalock, iflag); 3099 } 3100 if (cmdiocbp->iocb_flag & 3101 LPFC_DRIVER_ABORTED) { 3102 /* 3103 * Clear LPFC_DRIVER_ABORTED 3104 * bit in case it was driver 3105 * initiated abort. 3106 */ 3107 spin_lock_irqsave( 3108 &phba->hbalock, iflag); 3109 cmdiocbp->iocb_flag &= 3110 ~LPFC_DRIVER_ABORTED; 3111 spin_unlock_irqrestore( 3112 &phba->hbalock, iflag); 3113 cmdiocbp->iocb.ulpStatus = 3114 IOSTAT_LOCAL_REJECT; 3115 cmdiocbp->iocb.un.ulpWord[4] = 3116 IOERR_ABORT_REQUESTED; 3117 /* 3118 * For SLI4, irsiocb contains 3119 * NO_XRI in sli_xritag, it 3120 * shall not affect releasing 3121 * sgl (xri) process. 3122 */ 3123 saveq->iocb.ulpStatus = 3124 IOSTAT_LOCAL_REJECT; 3125 saveq->iocb.un.ulpWord[4] = 3126 IOERR_SLI_ABORTED; 3127 spin_lock_irqsave( 3128 &phba->hbalock, iflag); 3129 saveq->iocb_flag |= 3130 LPFC_DELAY_MEM_FREE; 3131 spin_unlock_irqrestore( 3132 &phba->hbalock, iflag); 3133 } 3134 } 3135 } 3136 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 3137 } else 3138 lpfc_sli_release_iocbq(phba, cmdiocbp); 3139 } else { 3140 /* 3141 * Unknown initiating command based on the response iotag. 3142 * This could be the case on the ELS ring because of 3143 * lpfc_els_abort(). 
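		 * Such completions are dropped silently; only non-ELS rings
		 * log the unexpected IoTag below.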
3144 	 */
3145 	if (pring->ringno != LPFC_ELS_RING) {
3146 		/*
3147 		 * Ring <ringno> handler: unexpected completion IoTag
3148 		 * <IoTag>
3149 		 */
3150 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3151 				"0322 Ring %d handler: "
3152 				"unexpected completion IoTag x%x "
3153 				"Data: x%x x%x x%x x%x\n",
3154 				pring->ringno,
3155 				saveq->iocb.ulpIoTag,
3156 				saveq->iocb.ulpStatus,
3157 				saveq->iocb.un.ulpWord[4],
3158 				saveq->iocb.ulpCommand,
3159 				saveq->iocb.ulpContext);
3160 		}
3161 	}
3162 
3163 	return rc;
3164 }
3165 
3166 /**
3167 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3168 * @phba: Pointer to HBA context object.
3169 * @pring: Pointer to driver SLI ring object.
3170 *
3171 * This function is called from the iocb ring event handlers when the
3172 * put pointer is ahead of the get pointer for a ring. This function signals
3173 * an error attention condition to the worker thread, and the worker
3174 * thread will transition the HBA to the offline state.
3175 **/
3176 static void
3177 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3178 {
3179 	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3180 	/*
3181 	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3182 	 * rsp ring <portRspMax>
3183 	 */
3184 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3185 			"0312 Ring %d handler: portRspPut %d "
3186 			"is bigger than rsp ring %d\n",
3187 			pring->ringno, le32_to_cpu(pgp->rspPutInx),
3188 			pring->sli.sli3.numRiocb);
3189 
3190 	phba->link_state = LPFC_HBA_ERROR;
3191 
3192 	/*
3193 	 * All error attention handlers are posted to
3194 	 * worker thread
3195 	 */
3196 	phba->work_ha |= HA_ERATT;
3197 	phba->work_hs = HS_FFER3;
3198 
3199 	lpfc_worker_wake_up(phba);
3200 
3201 	return;
3202 }
3203 
3204 /**
3205 * lpfc_poll_eratt - Error attention polling timer timeout handler
3206 * @t: Pointer to the eratt_poll timer embedded in the HBA context object.
3207 *
3208 * This function is invoked by the Error Attention polling timer when the
3209 * timer times out. It will check the SLI Error Attention register for
3210 * possible attention events. If so, it will post an Error Attention event
3211 * and wake up the worker thread to process it. Otherwise, it will set up the
3212 * Error Attention polling timer for the next poll.
3213 **/
3214 void lpfc_poll_eratt(struct timer_list *t)
3215 {
3216 	struct lpfc_hba *phba;
3217 	uint32_t eratt = 0;
3218 	uint64_t sli_intr, cnt;
3219 
3220 	phba = from_timer(phba, t, eratt_poll);
3221 
3222 	/* Here we will also keep track of interrupts per sec of the hba */
3223 	sli_intr = phba->sli.slistat.sli_intr;
3224 
3225 	if (phba->sli.slistat.sli_prev_intr > sli_intr)
3226 		cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3227 			sli_intr);
3228 	else
3229 		cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3230 
3231 	/* 64-bit integer division not supported on 32-bit x86 - use do_div */
3232 	do_div(cnt, phba->eratt_poll_interval);
3233 	phba->sli.slistat.sli_ips = cnt;
3234 
3235 	phba->sli.slistat.sli_prev_intr = sli_intr;
3236 
3237 	/* Check chip HA register for error event */
3238 	eratt = lpfc_sli_check_eratt(phba);
3239 
3240 	if (eratt)
3241 		/* Tell the worker thread there is work to do */
3242 		lpfc_worker_wake_up(phba);
3243 	else
3244 		/* Restart the timer for next eratt poll */
3245 		mod_timer(&phba->eratt_poll,
3246 			  jiffies +
3247 			  msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3248 	return;
3249 }
3250 
3251 
3252 /**
3253 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3254 * @phba: Pointer to HBA context object.
3255 * @pring: Pointer to driver SLI ring object. 3256 * @mask: Host attention register mask for this ring. 3257 * 3258 * This function is called from the interrupt context when there is a ring 3259 * event for the fcp ring. The caller does not hold any lock. 3260 * The function processes each response iocb in the response ring until it 3261 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with 3262 * LE bit set. The function will call the completion handler of the command iocb 3263 * if the response iocb indicates a completion for a command iocb or it is 3264 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 3265 * function if this is an unsolicited iocb. 3266 * This routine presumes LPFC_FCP_RING handling and doesn't bother 3267 * to check it explicitly. 3268 */ 3269 int 3270 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 3271 struct lpfc_sli_ring *pring, uint32_t mask) 3272 { 3273 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 3274 IOCB_t *irsp = NULL; 3275 IOCB_t *entry = NULL; 3276 struct lpfc_iocbq *cmdiocbq = NULL; 3277 struct lpfc_iocbq rspiocbq; 3278 uint32_t status; 3279 uint32_t portRspPut, portRspMax; 3280 int rc = 1; 3281 lpfc_iocb_type type; 3282 unsigned long iflag; 3283 uint32_t rsp_cmpl = 0; 3284 3285 spin_lock_irqsave(&phba->hbalock, iflag); 3286 pring->stats.iocb_event++; 3287 3288 /* 3289 * The next available response entry should never exceed the maximum 3290 * entries. If it does, treat it as an adapter hardware error. 3291 */ 3292 portRspMax = pring->sli.sli3.numRiocb; 3293 portRspPut = le32_to_cpu(pgp->rspPutInx); 3294 if (unlikely(portRspPut >= portRspMax)) { 3295 lpfc_sli_rsp_pointers_error(phba, pring); 3296 spin_unlock_irqrestore(&phba->hbalock, iflag); 3297 return 1; 3298 } 3299 if (phba->fcp_ring_in_use) { 3300 spin_unlock_irqrestore(&phba->hbalock, iflag); 3301 return 1; 3302 } else 3303 phba->fcp_ring_in_use = 1; 3304 3305 rmb(); 3306 while (pring->sli.sli3.rspidx != portRspPut) { 3307 /* 3308 * Fetch an entry off the ring and copy it into a local data 3309 * structure. The copy involves a byte-swap since the 3310 * network byte order and pci byte orders are different. 3311 */ 3312 entry = lpfc_resp_iocb(phba, pring); 3313 phba->last_completion_time = jiffies; 3314 3315 if (++pring->sli.sli3.rspidx >= portRspMax) 3316 pring->sli.sli3.rspidx = 0; 3317 3318 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 3319 (uint32_t *) &rspiocbq.iocb, 3320 phba->iocb_rsp_size); 3321 INIT_LIST_HEAD(&(rspiocbq.list)); 3322 irsp = &rspiocbq.iocb; 3323 3324 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 3325 pring->stats.iocb_rsp++; 3326 rsp_cmpl++; 3327 3328 if (unlikely(irsp->ulpStatus)) { 3329 /* 3330 * If resource errors reported from HBA, reduce 3331 * queuedepths of the SCSI device. 
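			 * (IOSTAT_LOCAL_REJECT with IOERR_NO_RESOURCES is
			 * treated as a resource error and triggers
			 * phba->lpfc_rampdown_queue_depth.)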
3332 */ 3333 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3334 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3335 IOERR_NO_RESOURCES)) { 3336 spin_unlock_irqrestore(&phba->hbalock, iflag); 3337 phba->lpfc_rampdown_queue_depth(phba); 3338 spin_lock_irqsave(&phba->hbalock, iflag); 3339 } 3340 3341 /* Rsp ring <ringno> error: IOCB */ 3342 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3343 "0336 Rsp Ring %d error: IOCB Data: " 3344 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 3345 pring->ringno, 3346 irsp->un.ulpWord[0], 3347 irsp->un.ulpWord[1], 3348 irsp->un.ulpWord[2], 3349 irsp->un.ulpWord[3], 3350 irsp->un.ulpWord[4], 3351 irsp->un.ulpWord[5], 3352 *(uint32_t *)&irsp->un1, 3353 *((uint32_t *)&irsp->un1 + 1)); 3354 } 3355 3356 switch (type) { 3357 case LPFC_ABORT_IOCB: 3358 case LPFC_SOL_IOCB: 3359 /* 3360 * Idle exchange closed via ABTS from port. No iocb 3361 * resources need to be recovered. 3362 */ 3363 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 3364 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3365 "0333 IOCB cmd 0x%x" 3366 " processed. Skipping" 3367 " completion\n", 3368 irsp->ulpCommand); 3369 break; 3370 } 3371 3372 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 3373 &rspiocbq); 3374 if (unlikely(!cmdiocbq)) 3375 break; 3376 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) 3377 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 3378 if (cmdiocbq->iocb_cmpl) { 3379 spin_unlock_irqrestore(&phba->hbalock, iflag); 3380 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 3381 &rspiocbq); 3382 spin_lock_irqsave(&phba->hbalock, iflag); 3383 } 3384 break; 3385 case LPFC_UNSOL_IOCB: 3386 spin_unlock_irqrestore(&phba->hbalock, iflag); 3387 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 3388 spin_lock_irqsave(&phba->hbalock, iflag); 3389 break; 3390 default: 3391 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3392 char adaptermsg[LPFC_MAX_ADPTMSG]; 3393 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3394 memcpy(&adaptermsg[0], (uint8_t *) irsp, 3395 MAX_MSG_DATA); 3396 dev_warn(&((phba->pcidev)->dev), 3397 "lpfc%d: %s\n", 3398 phba->brd_no, adaptermsg); 3399 } else { 3400 /* Unknown IOCB command */ 3401 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3402 "0334 Unknown IOCB command " 3403 "Data: x%x, x%x x%x x%x x%x\n", 3404 type, irsp->ulpCommand, 3405 irsp->ulpStatus, 3406 irsp->ulpIoTag, 3407 irsp->ulpContext); 3408 } 3409 break; 3410 } 3411 3412 /* 3413 * The response IOCB has been processed. Update the ring 3414 * pointer in SLIM. If the port response put pointer has not 3415 * been updated, sync the pgp->rspPutInx and fetch the new port 3416 * response put pointer. 
3417 		 */
3418 		writel(pring->sli.sli3.rspidx,
3419 		       &phba->host_gp[pring->ringno].rspGetInx);
3420 
3421 		if (pring->sli.sli3.rspidx == portRspPut)
3422 			portRspPut = le32_to_cpu(pgp->rspPutInx);
3423 	}
3424 
3425 	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3426 		pring->stats.iocb_rsp_full++;
3427 		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3428 		writel(status, phba->CAregaddr);
3429 		readl(phba->CAregaddr);
3430 	}
3431 	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3432 		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3433 		pring->stats.iocb_cmd_empty++;
3434 
3435 		/* Force update of the local copy of cmdGetInx */
3436 		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3437 		lpfc_sli_resume_iocb(phba, pring);
3438 
3439 		if ((pring->lpfc_sli_cmd_available))
3440 			(pring->lpfc_sli_cmd_available) (phba, pring);
3441 
3442 	}
3443 
3444 	phba->fcp_ring_in_use = 0;
3445 	spin_unlock_irqrestore(&phba->hbalock, iflag);
3446 	return rc;
3447 }
3448 
3449 /**
3450 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3451 * @phba: Pointer to HBA context object.
3452 * @pring: Pointer to driver SLI ring object.
3453 * @rspiocbp: Pointer to driver response IOCB object.
3454 *
3455 * This function is called from the worker thread when there is a slow-path
3456 * response IOCB to process. This function chains all the response iocbs until
3457 * seeing the iocb with the LE bit set. The function will call
3458 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3459 * completion of a command iocb. The function will call the
3460 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3461 * The function frees the resources or calls the completion handler if this
3462 * iocb is an abort completion. The function returns NULL when the response
3463 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3464 * this function shall chain the iocb on to the iocb_continueq and return the
3465 * response iocb passed in.
3466 **/
3467 static struct lpfc_iocbq *
3468 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3469 			   struct lpfc_iocbq *rspiocbp)
3470 {
3471 	struct lpfc_iocbq *saveq;
3472 	struct lpfc_iocbq *cmdiocbp;
3473 	struct lpfc_iocbq *next_iocb;
3474 	IOCB_t *irsp = NULL;
3475 	uint32_t free_saveq;
3476 	uint8_t iocb_cmd_type;
3477 	lpfc_iocb_type type;
3478 	unsigned long iflag;
3479 	int rc;
3480 
3481 	spin_lock_irqsave(&phba->hbalock, iflag);
3482 	/* First add the response iocb to the iocb_continueq list */
3483 	list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3484 	pring->iocb_continueq_cnt++;
3485 
3486 	/* Now, determine whether the list is completed for processing */
3487 	irsp = &rspiocbp->iocb;
3488 	if (irsp->ulpLe) {
3489 		/*
3490 		 * By default, the driver expects to free all resources
3491 		 * associated with this iocb completion.
3492 		 */
3493 		free_saveq = 1;
3494 		saveq = list_get_first(&pring->iocb_continueq,
3495 				       struct lpfc_iocbq, list);
3496 		irsp = &(saveq->iocb);
3497 		list_del_init(&pring->iocb_continueq);
3498 		pring->iocb_continueq_cnt = 0;
3499 
3500 		pring->stats.iocb_rsp++;
3501 
3502 		/*
3503 		 * If resource errors reported from HBA, reduce
3504 		 * queuedepths of the SCSI device.
3505 */ 3506 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3507 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3508 IOERR_NO_RESOURCES)) { 3509 spin_unlock_irqrestore(&phba->hbalock, iflag); 3510 phba->lpfc_rampdown_queue_depth(phba); 3511 spin_lock_irqsave(&phba->hbalock, iflag); 3512 } 3513 3514 if (irsp->ulpStatus) { 3515 /* Rsp ring <ringno> error: IOCB */ 3516 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3517 "0328 Rsp Ring %d error: " 3518 "IOCB Data: " 3519 "x%x x%x x%x x%x " 3520 "x%x x%x x%x x%x " 3521 "x%x x%x x%x x%x " 3522 "x%x x%x x%x x%x\n", 3523 pring->ringno, 3524 irsp->un.ulpWord[0], 3525 irsp->un.ulpWord[1], 3526 irsp->un.ulpWord[2], 3527 irsp->un.ulpWord[3], 3528 irsp->un.ulpWord[4], 3529 irsp->un.ulpWord[5], 3530 *(((uint32_t *) irsp) + 6), 3531 *(((uint32_t *) irsp) + 7), 3532 *(((uint32_t *) irsp) + 8), 3533 *(((uint32_t *) irsp) + 9), 3534 *(((uint32_t *) irsp) + 10), 3535 *(((uint32_t *) irsp) + 11), 3536 *(((uint32_t *) irsp) + 12), 3537 *(((uint32_t *) irsp) + 13), 3538 *(((uint32_t *) irsp) + 14), 3539 *(((uint32_t *) irsp) + 15)); 3540 } 3541 3542 /* 3543 * Fetch the IOCB command type and call the correct completion 3544 * routine. Solicited and Unsolicited IOCBs on the ELS ring 3545 * get freed back to the lpfc_iocb_list by the discovery 3546 * kernel thread. 3547 */ 3548 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 3549 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 3550 switch (type) { 3551 case LPFC_SOL_IOCB: 3552 spin_unlock_irqrestore(&phba->hbalock, iflag); 3553 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); 3554 spin_lock_irqsave(&phba->hbalock, iflag); 3555 break; 3556 3557 case LPFC_UNSOL_IOCB: 3558 spin_unlock_irqrestore(&phba->hbalock, iflag); 3559 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); 3560 spin_lock_irqsave(&phba->hbalock, iflag); 3561 if (!rc) 3562 free_saveq = 0; 3563 break; 3564 3565 case LPFC_ABORT_IOCB: 3566 cmdiocbp = NULL; 3567 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) 3568 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, 3569 saveq); 3570 if (cmdiocbp) { 3571 /* Call the specified completion routine */ 3572 if (cmdiocbp->iocb_cmpl) { 3573 spin_unlock_irqrestore(&phba->hbalock, 3574 iflag); 3575 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, 3576 saveq); 3577 spin_lock_irqsave(&phba->hbalock, 3578 iflag); 3579 } else 3580 __lpfc_sli_release_iocbq(phba, 3581 cmdiocbp); 3582 } 3583 break; 3584 3585 case LPFC_UNKNOWN_IOCB: 3586 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3587 char adaptermsg[LPFC_MAX_ADPTMSG]; 3588 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3589 memcpy(&adaptermsg[0], (uint8_t *)irsp, 3590 MAX_MSG_DATA); 3591 dev_warn(&((phba->pcidev)->dev), 3592 "lpfc%d: %s\n", 3593 phba->brd_no, adaptermsg); 3594 } else { 3595 /* Unknown IOCB command */ 3596 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3597 "0335 Unknown IOCB " 3598 "command Data: x%x " 3599 "x%x x%x x%x\n", 3600 irsp->ulpCommand, 3601 irsp->ulpStatus, 3602 irsp->ulpIoTag, 3603 irsp->ulpContext); 3604 } 3605 break; 3606 } 3607 3608 if (free_saveq) { 3609 list_for_each_entry_safe(rspiocbp, next_iocb, 3610 &saveq->list, list) { 3611 list_del_init(&rspiocbp->list); 3612 __lpfc_sli_release_iocbq(phba, rspiocbp); 3613 } 3614 __lpfc_sli_release_iocbq(phba, saveq); 3615 } 3616 rspiocbp = NULL; 3617 } 3618 spin_unlock_irqrestore(&phba->hbalock, iflag); 3619 return rspiocbp; 3620 } 3621 3622 /** 3623 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs 3624 * @phba: Pointer to HBA context object. 3625 * @pring: Pointer to driver SLI ring object. 
3626 * @mask: Host attention register mask for this ring. 3627 * 3628 * This routine wraps the actual slow_ring event process routine from the 3629 * API jump table function pointer from the lpfc_hba struct. 3630 **/ 3631 void 3632 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 3633 struct lpfc_sli_ring *pring, uint32_t mask) 3634 { 3635 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); 3636 } 3637 3638 /** 3639 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings 3640 * @phba: Pointer to HBA context object. 3641 * @pring: Pointer to driver SLI ring object. 3642 * @mask: Host attention register mask for this ring. 3643 * 3644 * This function is called from the worker thread when there is a ring event 3645 * for non-fcp rings. The caller does not hold any lock. The function will 3646 * remove each response iocb in the response ring and calls the handle 3647 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 3648 **/ 3649 static void 3650 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, 3651 struct lpfc_sli_ring *pring, uint32_t mask) 3652 { 3653 struct lpfc_pgp *pgp; 3654 IOCB_t *entry; 3655 IOCB_t *irsp = NULL; 3656 struct lpfc_iocbq *rspiocbp = NULL; 3657 uint32_t portRspPut, portRspMax; 3658 unsigned long iflag; 3659 uint32_t status; 3660 3661 pgp = &phba->port_gp[pring->ringno]; 3662 spin_lock_irqsave(&phba->hbalock, iflag); 3663 pring->stats.iocb_event++; 3664 3665 /* 3666 * The next available response entry should never exceed the maximum 3667 * entries. If it does, treat it as an adapter hardware error. 3668 */ 3669 portRspMax = pring->sli.sli3.numRiocb; 3670 portRspPut = le32_to_cpu(pgp->rspPutInx); 3671 if (portRspPut >= portRspMax) { 3672 /* 3673 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3674 * rsp ring <portRspMax> 3675 */ 3676 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3677 "0303 Ring %d handler: portRspPut %d " 3678 "is bigger than rsp ring %d\n", 3679 pring->ringno, portRspPut, portRspMax); 3680 3681 phba->link_state = LPFC_HBA_ERROR; 3682 spin_unlock_irqrestore(&phba->hbalock, iflag); 3683 3684 phba->work_hs = HS_FFER3; 3685 lpfc_handle_eratt(phba); 3686 3687 return; 3688 } 3689 3690 rmb(); 3691 while (pring->sli.sli3.rspidx != portRspPut) { 3692 /* 3693 * Build a completion list and call the appropriate handler. 3694 * The process is to get the next available response iocb, get 3695 * a free iocb from the list, copy the response data into the 3696 * free iocb, insert to the continuation list, and update the 3697 * next response index to slim. This process makes response 3698 * iocb's in the ring available to DMA as fast as possible but 3699 * pays a penalty for a copy operation. Since the iocb is 3700 * only 32 bytes, this penalty is considered small relative to 3701 * the PCI reads for register values and a slim write. When 3702 * the ulpLe field is set, the entire Command has been 3703 * received. 3704 */ 3705 entry = lpfc_resp_iocb(phba, pring); 3706 3707 phba->last_completion_time = jiffies; 3708 rspiocbp = __lpfc_sli_get_iocbq(phba); 3709 if (rspiocbp == NULL) { 3710 printk(KERN_ERR "%s: out of buffers! 
Failing " 3711 "completion.\n", __func__); 3712 break; 3713 } 3714 3715 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, 3716 phba->iocb_rsp_size); 3717 irsp = &rspiocbp->iocb; 3718 3719 if (++pring->sli.sli3.rspidx >= portRspMax) 3720 pring->sli.sli3.rspidx = 0; 3721 3722 if (pring->ringno == LPFC_ELS_RING) { 3723 lpfc_debugfs_slow_ring_trc(phba, 3724 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x", 3725 *(((uint32_t *) irsp) + 4), 3726 *(((uint32_t *) irsp) + 6), 3727 *(((uint32_t *) irsp) + 7)); 3728 } 3729 3730 writel(pring->sli.sli3.rspidx, 3731 &phba->host_gp[pring->ringno].rspGetInx); 3732 3733 spin_unlock_irqrestore(&phba->hbalock, iflag); 3734 /* Handle the response IOCB */ 3735 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); 3736 spin_lock_irqsave(&phba->hbalock, iflag); 3737 3738 /* 3739 * If the port response put pointer has not been updated, sync 3740 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port 3741 * response put pointer. 3742 */ 3743 if (pring->sli.sli3.rspidx == portRspPut) { 3744 portRspPut = le32_to_cpu(pgp->rspPutInx); 3745 } 3746 } /* while (pring->sli.sli3.rspidx != portRspPut) */ 3747 3748 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { 3749 /* At least one response entry has been freed */ 3750 pring->stats.iocb_rsp_full++; 3751 /* SET RxRE_RSP in Chip Att register */ 3752 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3753 writel(status, phba->CAregaddr); 3754 readl(phba->CAregaddr); /* flush */ 3755 } 3756 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3757 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3758 pring->stats.iocb_cmd_empty++; 3759 3760 /* Force update of the local copy of cmdGetInx */ 3761 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 3762 lpfc_sli_resume_iocb(phba, pring); 3763 3764 if ((pring->lpfc_sli_cmd_available)) 3765 (pring->lpfc_sli_cmd_available) (phba, pring); 3766 3767 } 3768 3769 spin_unlock_irqrestore(&phba->hbalock, iflag); 3770 return; 3771 } 3772 3773 /** 3774 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events 3775 * @phba: Pointer to HBA context object. 3776 * @pring: Pointer to driver SLI ring object. 3777 * @mask: Host attention register mask for this ring. 3778 * 3779 * This function is called from the worker thread when there is a pending 3780 * ELS response iocb on the driver internal slow-path response iocb worker 3781 * queue. The caller does not hold any lock. The function will remove each 3782 * response iocb from the response worker queue and calls the handle 3783 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 
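 * The routine first clears the HBA_SP_QUEUE_EVT flag and then drains the
 * sli4_hba.sp_queue_event list, handling both WQE completions and received
 * buffers.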
3784 **/ 3785 static void 3786 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, 3787 struct lpfc_sli_ring *pring, uint32_t mask) 3788 { 3789 struct lpfc_iocbq *irspiocbq; 3790 struct hbq_dmabuf *dmabuf; 3791 struct lpfc_cq_event *cq_event; 3792 unsigned long iflag; 3793 3794 spin_lock_irqsave(&phba->hbalock, iflag); 3795 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 3796 spin_unlock_irqrestore(&phba->hbalock, iflag); 3797 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 3798 /* Get the response iocb from the head of work queue */ 3799 spin_lock_irqsave(&phba->hbalock, iflag); 3800 list_remove_head(&phba->sli4_hba.sp_queue_event, 3801 cq_event, struct lpfc_cq_event, list); 3802 spin_unlock_irqrestore(&phba->hbalock, iflag); 3803 3804 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 3805 case CQE_CODE_COMPL_WQE: 3806 irspiocbq = container_of(cq_event, struct lpfc_iocbq, 3807 cq_event); 3808 /* Translate ELS WCQE to response IOCBQ */ 3809 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, 3810 irspiocbq); 3811 if (irspiocbq) 3812 lpfc_sli_sp_handle_rspiocb(phba, pring, 3813 irspiocbq); 3814 break; 3815 case CQE_CODE_RECEIVE: 3816 case CQE_CODE_RECEIVE_V1: 3817 dmabuf = container_of(cq_event, struct hbq_dmabuf, 3818 cq_event); 3819 lpfc_sli4_handle_received_buffer(phba, dmabuf); 3820 break; 3821 default: 3822 break; 3823 } 3824 } 3825 } 3826 3827 /** 3828 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring 3829 * @phba: Pointer to HBA context object. 3830 * @pring: Pointer to driver SLI ring object. 3831 * 3832 * This function aborts all iocbs in the given ring and frees all the iocb 3833 * objects in txq. This function issues an abort iocb for all the iocb commands 3834 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3835 * the return of this function. The caller is not required to hold any locks. 3836 **/ 3837 void 3838 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3839 { 3840 LIST_HEAD(completions); 3841 struct lpfc_iocbq *iocb, *next_iocb; 3842 3843 if (pring->ringno == LPFC_ELS_RING) { 3844 lpfc_fabric_abort_hba(phba); 3845 } 3846 3847 /* Error everything on txq and txcmplq 3848 * First do the txq. 3849 */ 3850 if (phba->sli_rev >= LPFC_SLI_REV4) { 3851 spin_lock_irq(&pring->ring_lock); 3852 list_splice_init(&pring->txq, &completions); 3853 pring->txq_cnt = 0; 3854 spin_unlock_irq(&pring->ring_lock); 3855 3856 spin_lock_irq(&phba->hbalock); 3857 /* Next issue ABTS for everything on the txcmplq */ 3858 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3859 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3860 spin_unlock_irq(&phba->hbalock); 3861 } else { 3862 spin_lock_irq(&phba->hbalock); 3863 list_splice_init(&pring->txq, &completions); 3864 pring->txq_cnt = 0; 3865 3866 /* Next issue ABTS for everything on the txcmplq */ 3867 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3868 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3869 spin_unlock_irq(&phba->hbalock); 3870 } 3871 3872 /* Cancel all the IOCBs from the completions list */ 3873 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 3874 IOERR_SLI_ABORTED); 3875 } 3876 3877 /** 3878 * lpfc_sli_abort_wqe_ring - Abort all iocbs in the ring 3879 * @phba: Pointer to HBA context object. 3880 * @pring: Pointer to driver SLI ring object. 3881 * 3882 * This function aborts all iocbs in the given ring and frees all the iocb 3883 * objects in txq. 
This function issues an abort iocb for all the iocb commands
3884 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3885 * the return of this function. The caller is not required to hold any locks.
3886 **/
3887 void
3888 lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3889 {
3890 LIST_HEAD(completions);
3891 struct lpfc_iocbq *iocb, *next_iocb;
3892 
3893 if (pring->ringno == LPFC_ELS_RING)
3894 lpfc_fabric_abort_hba(phba);
3895 
3896 spin_lock_irq(&phba->hbalock);
3897 /* Next issue ABTS for everything on the txcmplq */
3898 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3899 lpfc_sli4_abort_nvme_io(phba, pring, iocb);
3900 spin_unlock_irq(&phba->hbalock);
3901 }
3902 
3903 
3904 /**
3905 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3906 * @phba: Pointer to HBA context object.
3908 *
3909 * This function aborts all iocbs in FCP rings and frees all the iocb
3910 * objects in txq. This function issues an abort iocb for all the iocb commands
3911 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3912 * the return of this function. The caller is not required to hold any locks.
3913 **/
3914 void
3915 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3916 {
3917 struct lpfc_sli *psli = &phba->sli;
3918 struct lpfc_sli_ring *pring;
3919 uint32_t i;
3920 
3921 /* Look on all the FCP Rings for the iotag */
3922 if (phba->sli_rev >= LPFC_SLI_REV4) {
3923 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3924 pring = phba->sli4_hba.fcp_wq[i]->pring;
3925 lpfc_sli_abort_iocb_ring(phba, pring);
3926 }
3927 } else {
3928 pring = &psli->sli3_ring[LPFC_FCP_RING];
3929 lpfc_sli_abort_iocb_ring(phba, pring);
3930 }
3931 }
3932 
3933 /**
3934 * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings
3935 * @phba: Pointer to HBA context object.
3936 *
3937 * This function aborts all wqes in NVME rings. This function issues an
3938 * abort wqe for all the outstanding IO commands in txcmplq. The iocbs in
3939 * the txcmplq are not guaranteed to complete before the return of this
3940 * function. The caller is not required to hold any locks.
3941 **/
3942 void
3943 lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
3944 {
3945 struct lpfc_sli_ring *pring;
3946 uint32_t i;
3947 
3948 if (phba->sli_rev < LPFC_SLI_REV4)
3949 return;
3950 
3951 /* Abort all IO on each NVME ring. */
3952 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
3953 pring = phba->sli4_hba.nvme_wq[i]->pring;
3954 lpfc_sli_abort_wqe_ring(phba, pring);
3955 }
3956 }
3957 
3958 
3959 /**
3960 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
3961 * @phba: Pointer to HBA context object.
3962 *
3963 * This function flushes all iocbs in the fcp ring and frees all the iocb
3964 * objects in txq and txcmplq. This function does not issue abort iocbs
3965 * for the iocb commands in txcmplq; they are just returned with
3966 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
3967 * slot has been permanently disabled.
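 * The HBA_FCP_IOQ_FLUSH flag is set first so that other code paths can tell
 * a flush is in progress.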
3968 **/ 3969 void 3970 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) 3971 { 3972 LIST_HEAD(txq); 3973 LIST_HEAD(txcmplq); 3974 struct lpfc_sli *psli = &phba->sli; 3975 struct lpfc_sli_ring *pring; 3976 uint32_t i; 3977 struct lpfc_iocbq *piocb, *next_iocb; 3978 3979 spin_lock_irq(&phba->hbalock); 3980 /* Indicate the I/O queues are flushed */ 3981 phba->hba_flag |= HBA_FCP_IOQ_FLUSH; 3982 spin_unlock_irq(&phba->hbalock); 3983 3984 /* Look on all the FCP Rings for the iotag */ 3985 if (phba->sli_rev >= LPFC_SLI_REV4) { 3986 for (i = 0; i < phba->cfg_fcp_io_channel; i++) { 3987 pring = phba->sli4_hba.fcp_wq[i]->pring; 3988 3989 spin_lock_irq(&pring->ring_lock); 3990 /* Retrieve everything on txq */ 3991 list_splice_init(&pring->txq, &txq); 3992 list_for_each_entry_safe(piocb, next_iocb, 3993 &pring->txcmplq, list) 3994 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 3995 /* Retrieve everything on the txcmplq */ 3996 list_splice_init(&pring->txcmplq, &txcmplq); 3997 pring->txq_cnt = 0; 3998 pring->txcmplq_cnt = 0; 3999 spin_unlock_irq(&pring->ring_lock); 4000 4001 /* Flush the txq */ 4002 lpfc_sli_cancel_iocbs(phba, &txq, 4003 IOSTAT_LOCAL_REJECT, 4004 IOERR_SLI_DOWN); 4005 /* Flush the txcmpq */ 4006 lpfc_sli_cancel_iocbs(phba, &txcmplq, 4007 IOSTAT_LOCAL_REJECT, 4008 IOERR_SLI_DOWN); 4009 } 4010 } else { 4011 pring = &psli->sli3_ring[LPFC_FCP_RING]; 4012 4013 spin_lock_irq(&phba->hbalock); 4014 /* Retrieve everything on txq */ 4015 list_splice_init(&pring->txq, &txq); 4016 list_for_each_entry_safe(piocb, next_iocb, 4017 &pring->txcmplq, list) 4018 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 4019 /* Retrieve everything on the txcmplq */ 4020 list_splice_init(&pring->txcmplq, &txcmplq); 4021 pring->txq_cnt = 0; 4022 pring->txcmplq_cnt = 0; 4023 spin_unlock_irq(&phba->hbalock); 4024 4025 /* Flush the txq */ 4026 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, 4027 IOERR_SLI_DOWN); 4028 /* Flush the txcmpq */ 4029 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, 4030 IOERR_SLI_DOWN); 4031 } 4032 } 4033 4034 /** 4035 * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings 4036 * @phba: Pointer to HBA context object. 4037 * 4038 * This function flushes all wqes in the nvme rings and frees all resources 4039 * in the txcmplq. This function does not issue abort wqes for the IO 4040 * commands in txcmplq, they will just be returned with 4041 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI 4042 * slot has been permanently disabled. 4043 **/ 4044 void 4045 lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba) 4046 { 4047 LIST_HEAD(txcmplq); 4048 struct lpfc_sli_ring *pring; 4049 uint32_t i; 4050 struct lpfc_iocbq *piocb, *next_iocb; 4051 4052 if (phba->sli_rev < LPFC_SLI_REV4) 4053 return; 4054 4055 /* Hint to other driver operations that a flush is in progress. */ 4056 spin_lock_irq(&phba->hbalock); 4057 phba->hba_flag |= HBA_NVME_IOQ_FLUSH; 4058 spin_unlock_irq(&phba->hbalock); 4059 4060 /* Cycle through all NVME rings and complete each IO with 4061 * a local driver reason code. This is a flush so no 4062 * abort exchange to FW. 
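 * Each ring's txcmplq is spliced onto a local list under the ring lock and
 * then cancelled with IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN.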
4063 */
4064 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
4065 pring = phba->sli4_hba.nvme_wq[i]->pring;
4066 
4067 spin_lock_irq(&pring->ring_lock);
4068 list_for_each_entry_safe(piocb, next_iocb,
4069 &pring->txcmplq, list)
4070 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4071 /* Retrieve everything on the txcmplq */
4072 list_splice_init(&pring->txcmplq, &txcmplq);
4073 pring->txcmplq_cnt = 0;
4074 spin_unlock_irq(&pring->ring_lock);
4075 
4076 /* Flush the txcmplq */
4077 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4078 IOSTAT_LOCAL_REJECT,
4079 IOERR_SLI_DOWN);
4080 }
4081 }
4082 
4083 /**
4084 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4085 * @phba: Pointer to HBA context object.
4086 * @mask: Bit mask to be checked.
4087 *
4088 * This function reads the host status register and compares
4089 * with the provided bit mask to check if the HBA completed
4090 * the restart. This function will wait in a loop for the
4091 * HBA to complete restart. If the HBA does not restart within
4092 * 15 iterations, the function will reset the HBA again. The
4093 * function returns 1 when the HBA fails to restart, otherwise it returns
4094 * zero.
4095 **/
4096 static int
4097 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4098 {
4099 uint32_t status;
4100 int i = 0;
4101 int retval = 0;
4102 
4103 /* Read the HBA Host Status Register */
4104 if (lpfc_readl(phba->HSregaddr, &status))
4105 return 1;
4106 
4107 /*
4108 * Check the status register every 10ms for 5 retries, then every
4109 * 500ms for 5, then every 2.5 sec for 5, then reset the board and
4110 * keep checking every 2.5 sec.
4111 * Break out of the loop if errors occurred during init.
4112 */
4113 while (((status & mask) != mask) &&
4114 !(status & HS_FFERM) &&
4115 i++ < 20) {
4116 
4117 if (i <= 5)
4118 msleep(10);
4119 else if (i <= 10)
4120 msleep(500);
4121 else
4122 msleep(2500);
4123 
4124 if (i == 15) {
4125 /* Do post */
4126 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4127 lpfc_sli_brdrestart(phba);
4128 }
4129 /* Read the HBA Host Status Register */
4130 if (lpfc_readl(phba->HSregaddr, &status)) {
4131 retval = 1;
4132 break;
4133 }
4134 }
4135 
4136 /* Check to see if any errors occurred during init */
4137 if ((status & HS_FFERM) || (i >= 20)) {
4138 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4139 "2751 Adapter failed to restart, "
4140 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4141 status,
4142 readl(phba->MBslimaddr + 0xa8),
4143 readl(phba->MBslimaddr + 0xac));
4144 phba->link_state = LPFC_HBA_ERROR;
4145 retval = 1;
4146 }
4147 
4148 return retval;
4149 }
4150 
4151 /**
4152 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4153 * @phba: Pointer to HBA context object.
4154 * @mask: Bit mask to be checked.
4155 *
4156 * This function checks the host status register to see if the HBA is
4157 * ready. If the HBA is not ready, the function resets the HBA PCI
4158 * function and checks again. The function returns 1 when the HBA
4159 * fails to become ready, otherwise it returns
4160 * zero.
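 * Readiness is determined via lpfc_sli4_post_status_check rather than by
 * polling the SLI3 host status register.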
4161 **/
4162 static int
4163 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4164 {
4165 uint32_t status;
4166 int retval = 0;
4167 
4168 /* Read the HBA Host Status Register */
4169 status = lpfc_sli4_post_status_check(phba);
4170 
4171 if (status) {
4172 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4173 lpfc_sli_brdrestart(phba);
4174 status = lpfc_sli4_post_status_check(phba);
4175 }
4176 
4177 /* Check to see if any errors occurred during init */
4178 if (status) {
4179 phba->link_state = LPFC_HBA_ERROR;
4180 retval = 1;
4181 } else
4182 phba->sli4_hba.intr_enable = 0;
4183 
4184 return retval;
4185 }
4186 
4187 /**
4188 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4189 * @phba: Pointer to HBA context object.
4190 * @mask: Bit mask to be checked.
4191 *
4192 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
4193 * from the API jump table function pointer from the lpfc_hba struct.
4194 **/
4195 int
4196 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4197 {
4198 return phba->lpfc_sli_brdready(phba, mask);
4199 }
4200 
4201 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4202 
4203 /**
4204 * lpfc_reset_barrier - Make HBA ready for HBA reset
4205 * @phba: Pointer to HBA context object.
4206 *
4207 * This function is called before resetting an HBA. It is called with the
4208 * hbalock held and requests the HBA to quiesce DMAs before a reset.
4209 **/
4210 void lpfc_reset_barrier(struct lpfc_hba *phba)
4211 {
4212 uint32_t __iomem *resp_buf;
4213 uint32_t __iomem *mbox_buf;
4214 volatile uint32_t mbox;
4215 uint32_t hc_copy, ha_copy, resp_data;
4216 int i;
4217 uint8_t hdrtype;
4218 
4219 lockdep_assert_held(&phba->hbalock);
4220 
4221 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4222 if (hdrtype != 0x80 ||
4223 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4224 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4225 return;
4226 
4227 /*
4228 * Tell the other part of the chip to temporarily suspend all
4229 * of its DMA activity.
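 * This is done by posting a KILL_BOARD mailbox word directly to SLIM and
 * polling for the chip to complement the BARRIER_TEST_PATTERN, which
 * indicates the chip has quiesced.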
4230 */ 4231 resp_buf = phba->MBslimaddr; 4232 4233 /* Disable the error attention */ 4234 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 4235 return; 4236 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 4237 readl(phba->HCregaddr); /* flush */ 4238 phba->link_flag |= LS_IGNORE_ERATT; 4239 4240 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4241 return; 4242 if (ha_copy & HA_ERATT) { 4243 /* Clear Chip error bit */ 4244 writel(HA_ERATT, phba->HAregaddr); 4245 phba->pport->stopped = 1; 4246 } 4247 4248 mbox = 0; 4249 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 4250 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 4251 4252 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 4253 mbox_buf = phba->MBslimaddr; 4254 writel(mbox, mbox_buf); 4255 4256 for (i = 0; i < 50; i++) { 4257 if (lpfc_readl((resp_buf + 1), &resp_data)) 4258 return; 4259 if (resp_data != ~(BARRIER_TEST_PATTERN)) 4260 mdelay(1); 4261 else 4262 break; 4263 } 4264 resp_data = 0; 4265 if (lpfc_readl((resp_buf + 1), &resp_data)) 4266 return; 4267 if (resp_data != ~(BARRIER_TEST_PATTERN)) { 4268 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || 4269 phba->pport->stopped) 4270 goto restore_hc; 4271 else 4272 goto clear_errat; 4273 } 4274 4275 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 4276 resp_data = 0; 4277 for (i = 0; i < 500; i++) { 4278 if (lpfc_readl(resp_buf, &resp_data)) 4279 return; 4280 if (resp_data != mbox) 4281 mdelay(1); 4282 else 4283 break; 4284 } 4285 4286 clear_errat: 4287 4288 while (++i < 500) { 4289 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4290 return; 4291 if (!(ha_copy & HA_ERATT)) 4292 mdelay(1); 4293 else 4294 break; 4295 } 4296 4297 if (readl(phba->HAregaddr) & HA_ERATT) { 4298 writel(HA_ERATT, phba->HAregaddr); 4299 phba->pport->stopped = 1; 4300 } 4301 4302 restore_hc: 4303 phba->link_flag &= ~LS_IGNORE_ERATT; 4304 writel(hc_copy, phba->HCregaddr); 4305 readl(phba->HCregaddr); /* flush */ 4306 } 4307 4308 /** 4309 * lpfc_sli_brdkill - Issue a kill_board mailbox command 4310 * @phba: Pointer to HBA context object. 4311 * 4312 * This function issues a kill_board mailbox command and waits for 4313 * the error attention interrupt. This function is called for stopping 4314 * the firmware processing. The caller is not required to hold any 4315 * locks. This function calls lpfc_hba_down_post function to free 4316 * any pending commands after the kill. The function will return 1 when it 4317 * fails to kill the board else will return 0. 
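 * Note that KILL_BOARD has no mailbox completion; success is inferred from
 * the ERATT bit in the host attention register.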
4318 **/ 4319 int 4320 lpfc_sli_brdkill(struct lpfc_hba *phba) 4321 { 4322 struct lpfc_sli *psli; 4323 LPFC_MBOXQ_t *pmb; 4324 uint32_t status; 4325 uint32_t ha_copy; 4326 int retval; 4327 int i = 0; 4328 4329 psli = &phba->sli; 4330 4331 /* Kill HBA */ 4332 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4333 "0329 Kill HBA Data: x%x x%x\n", 4334 phba->pport->port_state, psli->sli_flag); 4335 4336 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4337 if (!pmb) 4338 return 1; 4339 4340 /* Disable the error attention */ 4341 spin_lock_irq(&phba->hbalock); 4342 if (lpfc_readl(phba->HCregaddr, &status)) { 4343 spin_unlock_irq(&phba->hbalock); 4344 mempool_free(pmb, phba->mbox_mem_pool); 4345 return 1; 4346 } 4347 status &= ~HC_ERINT_ENA; 4348 writel(status, phba->HCregaddr); 4349 readl(phba->HCregaddr); /* flush */ 4350 phba->link_flag |= LS_IGNORE_ERATT; 4351 spin_unlock_irq(&phba->hbalock); 4352 4353 lpfc_kill_board(phba, pmb); 4354 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4355 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4356 4357 if (retval != MBX_SUCCESS) { 4358 if (retval != MBX_BUSY) 4359 mempool_free(pmb, phba->mbox_mem_pool); 4360 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4361 "2752 KILL_BOARD command failed retval %d\n", 4362 retval); 4363 spin_lock_irq(&phba->hbalock); 4364 phba->link_flag &= ~LS_IGNORE_ERATT; 4365 spin_unlock_irq(&phba->hbalock); 4366 return 1; 4367 } 4368 4369 spin_lock_irq(&phba->hbalock); 4370 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 4371 spin_unlock_irq(&phba->hbalock); 4372 4373 mempool_free(pmb, phba->mbox_mem_pool); 4374 4375 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 4376 * attention every 100ms for 3 seconds. If we don't get ERATT after 4377 * 3 seconds we still set HBA_ERROR state because the status of the 4378 * board is now undefined. 4379 */ 4380 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4381 return 1; 4382 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 4383 mdelay(100); 4384 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4385 return 1; 4386 } 4387 4388 del_timer_sync(&psli->mbox_tmo); 4389 if (ha_copy & HA_ERATT) { 4390 writel(HA_ERATT, phba->HAregaddr); 4391 phba->pport->stopped = 1; 4392 } 4393 spin_lock_irq(&phba->hbalock); 4394 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4395 psli->mbox_active = NULL; 4396 phba->link_flag &= ~LS_IGNORE_ERATT; 4397 spin_unlock_irq(&phba->hbalock); 4398 4399 lpfc_hba_down_post(phba); 4400 phba->link_state = LPFC_HBA_ERROR; 4401 4402 return ha_copy & HA_ERATT ? 0 : 1; 4403 } 4404 4405 /** 4406 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA 4407 * @phba: Pointer to HBA context object. 4408 * 4409 * This function resets the HBA by writing HC_INITFF to the control 4410 * register. After the HBA resets, this function resets all the iocb ring 4411 * indices. This function disables PCI layer parity checking during 4412 * the reset. 4413 * This function returns 0 always. 4414 * The caller is not required to hold any locks. 4415 **/ 4416 int 4417 lpfc_sli_brdreset(struct lpfc_hba *phba) 4418 { 4419 struct lpfc_sli *psli; 4420 struct lpfc_sli_ring *pring; 4421 uint16_t cfg_value; 4422 int i; 4423 4424 psli = &phba->sli; 4425 4426 /* Reset HBA */ 4427 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4428 "0325 Reset HBA Data: x%x x%x\n", 4429 (phba->pport) ? 
phba->pport->port_state : 0, 4430 psli->sli_flag); 4431 4432 /* perform board reset */ 4433 phba->fc_eventTag = 0; 4434 phba->link_events = 0; 4435 if (phba->pport) { 4436 phba->pport->fc_myDID = 0; 4437 phba->pport->fc_prevDID = 0; 4438 } 4439 4440 /* Turn off parity checking and serr during the physical reset */ 4441 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 4442 pci_write_config_word(phba->pcidev, PCI_COMMAND, 4443 (cfg_value & 4444 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 4445 4446 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA); 4447 4448 /* Now toggle INITFF bit in the Host Control Register */ 4449 writel(HC_INITFF, phba->HCregaddr); 4450 mdelay(1); 4451 readl(phba->HCregaddr); /* flush */ 4452 writel(0, phba->HCregaddr); 4453 readl(phba->HCregaddr); /* flush */ 4454 4455 /* Restore PCI cmd register */ 4456 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 4457 4458 /* Initialize relevant SLI info */ 4459 for (i = 0; i < psli->num_rings; i++) { 4460 pring = &psli->sli3_ring[i]; 4461 pring->flag = 0; 4462 pring->sli.sli3.rspidx = 0; 4463 pring->sli.sli3.next_cmdidx = 0; 4464 pring->sli.sli3.local_getidx = 0; 4465 pring->sli.sli3.cmdidx = 0; 4466 pring->missbufcnt = 0; 4467 } 4468 4469 phba->link_state = LPFC_WARM_START; 4470 return 0; 4471 } 4472 4473 /** 4474 * lpfc_sli4_brdreset - Reset a sli-4 HBA 4475 * @phba: Pointer to HBA context object. 4476 * 4477 * This function resets a SLI4 HBA. This function disables PCI layer parity 4478 * checking during resets the device. The caller is not required to hold 4479 * any locks. 4480 * 4481 * This function returns 0 always. 4482 **/ 4483 int 4484 lpfc_sli4_brdreset(struct lpfc_hba *phba) 4485 { 4486 struct lpfc_sli *psli = &phba->sli; 4487 uint16_t cfg_value; 4488 int rc = 0; 4489 4490 /* Reset HBA */ 4491 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4492 "0295 Reset HBA Data: x%x x%x x%x\n", 4493 phba->pport->port_state, psli->sli_flag, 4494 phba->hba_flag); 4495 4496 /* perform board reset */ 4497 phba->fc_eventTag = 0; 4498 phba->link_events = 0; 4499 phba->pport->fc_myDID = 0; 4500 phba->pport->fc_prevDID = 0; 4501 4502 spin_lock_irq(&phba->hbalock); 4503 psli->sli_flag &= ~(LPFC_PROCESS_LA); 4504 phba->fcf.fcf_flag = 0; 4505 spin_unlock_irq(&phba->hbalock); 4506 4507 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */ 4508 if (phba->hba_flag & HBA_FW_DUMP_OP) { 4509 phba->hba_flag &= ~HBA_FW_DUMP_OP; 4510 return rc; 4511 } 4512 4513 /* Now physically reset the device */ 4514 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4515 "0389 Performing PCI function reset!\n"); 4516 4517 /* Turn off parity checking and serr during the physical reset */ 4518 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 4519 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & 4520 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 4521 4522 /* Perform FCoE PCI function reset before freeing queue memory */ 4523 rc = lpfc_pci_function_reset(phba); 4524 4525 /* Restore PCI cmd register */ 4526 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 4527 4528 return rc; 4529 } 4530 4531 /** 4532 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba 4533 * @phba: Pointer to HBA context object. 4534 * 4535 * This function is called in the SLI initialization code path to 4536 * restart the HBA. The caller is not required to hold any lock. 4537 * This function writes MBX_RESTART mailbox command to the SLIM and 4538 * resets the HBA. 
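 * A reset barrier (lpfc_reset_barrier) is issued first so that DMA is
 * quiesced before the restart command is written.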
At the end of the function, it calls lpfc_hba_down_post 4539 * function to free any pending commands. The function enables 4540 * POST only during the first initialization. The function returns zero. 4541 * The function does not guarantee completion of MBX_RESTART mailbox 4542 * command before the return of this function. 4543 **/ 4544 static int 4545 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) 4546 { 4547 MAILBOX_t *mb; 4548 struct lpfc_sli *psli; 4549 volatile uint32_t word0; 4550 void __iomem *to_slim; 4551 uint32_t hba_aer_enabled; 4552 4553 spin_lock_irq(&phba->hbalock); 4554 4555 /* Take PCIe device Advanced Error Reporting (AER) state */ 4556 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 4557 4558 psli = &phba->sli; 4559 4560 /* Restart HBA */ 4561 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4562 "0337 Restart HBA Data: x%x x%x\n", 4563 (phba->pport) ? phba->pport->port_state : 0, 4564 psli->sli_flag); 4565 4566 word0 = 0; 4567 mb = (MAILBOX_t *) &word0; 4568 mb->mbxCommand = MBX_RESTART; 4569 mb->mbxHc = 1; 4570 4571 lpfc_reset_barrier(phba); 4572 4573 to_slim = phba->MBslimaddr; 4574 writel(*(uint32_t *) mb, to_slim); 4575 readl(to_slim); /* flush */ 4576 4577 /* Only skip post after fc_ffinit is completed */ 4578 if (phba->pport && phba->pport->port_state) 4579 word0 = 1; /* This is really setting up word1 */ 4580 else 4581 word0 = 0; /* This is really setting up word1 */ 4582 to_slim = phba->MBslimaddr + sizeof (uint32_t); 4583 writel(*(uint32_t *) mb, to_slim); 4584 readl(to_slim); /* flush */ 4585 4586 lpfc_sli_brdreset(phba); 4587 if (phba->pport) 4588 phba->pport->stopped = 0; 4589 phba->link_state = LPFC_INIT_START; 4590 phba->hba_flag = 0; 4591 spin_unlock_irq(&phba->hbalock); 4592 4593 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 4594 psli->stats_start = get_seconds(); 4595 4596 /* Give the INITFF and Post time to settle. */ 4597 mdelay(100); 4598 4599 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 4600 if (hba_aer_enabled) 4601 pci_disable_pcie_error_reporting(phba->pcidev); 4602 4603 lpfc_hba_down_post(phba); 4604 4605 return 0; 4606 } 4607 4608 /** 4609 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba 4610 * @phba: Pointer to HBA context object. 4611 * 4612 * This function is called in the SLI initialization code path to restart 4613 * a SLI4 HBA. The caller is not required to hold any lock. 4614 * At the end of the function, it calls lpfc_hba_down_post function to 4615 * free any pending commands. 
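 * It also calls lpfc_sli4_queue_destroy so that the SLI4 queues are torn
 * down before they are re-created by the subsequent setup.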
4616 **/
4617 static int
4618 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4619 {
4620 struct lpfc_sli *psli = &phba->sli;
4621 uint32_t hba_aer_enabled;
4622 int rc;
4623 
4624 /* Restart HBA */
4625 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4626 "0296 Restart HBA Data: x%x x%x\n",
4627 phba->pport->port_state, psli->sli_flag);
4628 
4629 /* Take PCIe device Advanced Error Reporting (AER) state */
4630 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4631 
4632 rc = lpfc_sli4_brdreset(phba);
4633 
4634 spin_lock_irq(&phba->hbalock);
4635 phba->pport->stopped = 0;
4636 phba->link_state = LPFC_INIT_START;
4637 phba->hba_flag = 0;
4638 spin_unlock_irq(&phba->hbalock);
4639 
4640 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4641 psli->stats_start = get_seconds();
4642 
4643 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4644 if (hba_aer_enabled)
4645 pci_disable_pcie_error_reporting(phba->pcidev);
4646 
4647 lpfc_hba_down_post(phba);
4648 lpfc_sli4_queue_destroy(phba);
4649 
4650 return rc;
4651 }
4652 
4653 /**
4654 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4655 * @phba: Pointer to HBA context object.
4656 *
4657 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4658 * API jump table function pointer from the lpfc_hba struct.
4659 **/
4660 int
4661 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4662 {
4663 return phba->lpfc_sli_brdrestart(phba);
4664 }
4665 
4666 /**
4667 * lpfc_sli_chipset_init - Wait for the HBA to become ready after a restart
4668 * @phba: Pointer to HBA context object.
4669 *
4670 * This function is called after an HBA restart to wait for successful
4671 * restart of the HBA. Successful restart of the HBA is indicated by the
4672 * HS_FFRDY and HS_MBRDY bits. If the HBA has not become ready after
4673 * roughly 60 seconds of polling, the function will restart the HBA again.
4674 * The function returns zero if the HBA successfully restarts, else it
4675 * returns a negative error code.
4675 **/
4676 int
4677 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4678 {
4679 uint32_t status, i = 0;
4680 
4681 /* Read the HBA Host Status Register */
4682 if (lpfc_readl(phba->HSregaddr, &status))
4683 return -EIO;
4684 
4685 /* Check status register to see what current state is */
4686 i = 0;
4687 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4688 
4689 /* Check every 10ms for 10 retries, then every 100ms for 90
4690 * retries, then every 1 sec for 50 retries, for a total of
4691 * ~60 seconds before resetting the board again and checking
4692 * every 1 sec for 50 more retries. The wait of up to 60 seconds
4693 * before the board is ready is required for the Falcon FIPS
4694 * zeroization to complete; any board reset in between would
4695 * restart the zeroization and further delay board readiness.
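 * (With this schedule the 200-iteration cap allows a total wait of roughly
 * 110 seconds: about 60 seconds before the restart at iteration 150 and up
 * to another 50 seconds after it.)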
4696 */ 4697 if (i++ >= 200) { 4698 /* Adapter failed to init, timeout, status reg 4699 <status> */ 4700 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4701 "0436 Adapter failed to init, " 4702 "timeout, status reg x%x, " 4703 "FW Data: A8 x%x AC x%x\n", status, 4704 readl(phba->MBslimaddr + 0xa8), 4705 readl(phba->MBslimaddr + 0xac)); 4706 phba->link_state = LPFC_HBA_ERROR; 4707 return -ETIMEDOUT; 4708 } 4709 4710 /* Check to see if any errors occurred during init */ 4711 if (status & HS_FFERM) { 4712 /* ERROR: During chipset initialization */ 4713 /* Adapter failed to init, chipset, status reg 4714 <status> */ 4715 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4716 "0437 Adapter failed to init, " 4717 "chipset, status reg x%x, " 4718 "FW Data: A8 x%x AC x%x\n", status, 4719 readl(phba->MBslimaddr + 0xa8), 4720 readl(phba->MBslimaddr + 0xac)); 4721 phba->link_state = LPFC_HBA_ERROR; 4722 return -EIO; 4723 } 4724 4725 if (i <= 10) 4726 msleep(10); 4727 else if (i <= 100) 4728 msleep(100); 4729 else 4730 msleep(1000); 4731 4732 if (i == 150) { 4733 /* Do post */ 4734 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4735 lpfc_sli_brdrestart(phba); 4736 } 4737 /* Read the HBA Host Status Register */ 4738 if (lpfc_readl(phba->HSregaddr, &status)) 4739 return -EIO; 4740 } 4741 4742 /* Check to see if any errors occurred during init */ 4743 if (status & HS_FFERM) { 4744 /* ERROR: During chipset initialization */ 4745 /* Adapter failed to init, chipset, status reg <status> */ 4746 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4747 "0438 Adapter failed to init, chipset, " 4748 "status reg x%x, " 4749 "FW Data: A8 x%x AC x%x\n", status, 4750 readl(phba->MBslimaddr + 0xa8), 4751 readl(phba->MBslimaddr + 0xac)); 4752 phba->link_state = LPFC_HBA_ERROR; 4753 return -EIO; 4754 } 4755 4756 /* Clear all interrupt enable conditions */ 4757 writel(0, phba->HCregaddr); 4758 readl(phba->HCregaddr); /* flush */ 4759 4760 /* setup host attn register */ 4761 writel(0xffffffff, phba->HAregaddr); 4762 readl(phba->HAregaddr); /* flush */ 4763 return 0; 4764 } 4765 4766 /** 4767 * lpfc_sli_hbq_count - Get the number of HBQs to be configured 4768 * 4769 * This function calculates and returns the number of HBQs required to be 4770 * configured. 4771 **/ 4772 int 4773 lpfc_sli_hbq_count(void) 4774 { 4775 return ARRAY_SIZE(lpfc_hbq_defs); 4776 } 4777 4778 /** 4779 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries 4780 * 4781 * This function adds the number of hbq entries in every HBQ to get 4782 * the total number of hbq entries required for the HBA and returns 4783 * the total count. 4784 **/ 4785 static int 4786 lpfc_sli_hbq_entry_count(void) 4787 { 4788 int hbq_count = lpfc_sli_hbq_count(); 4789 int count = 0; 4790 int i; 4791 4792 for (i = 0; i < hbq_count; ++i) 4793 count += lpfc_hbq_defs[i]->entry_count; 4794 return count; 4795 } 4796 4797 /** 4798 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries 4799 * 4800 * This function calculates amount of memory required for all hbq entries 4801 * to be configured and returns the total memory required. 4802 **/ 4803 int 4804 lpfc_sli_hbq_size(void) 4805 { 4806 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 4807 } 4808 4809 /** 4810 * lpfc_sli_hbq_setup - configure and initialize HBQs 4811 * @phba: Pointer to HBA context object. 4812 * 4813 * This function is called during the SLI initialization to configure 4814 * all the HBQs and post buffers to the HBQ. The caller is not 4815 * required to hold any locks. 
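 * Each HBQ is configured with a mailbox command issued in polled mode, and
 * its buffers are posted afterwards by lpfc_sli_hbqbuf_init_hbqs.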
This function will return zero if successful 4816 * else it will return negative error code. 4817 **/ 4818 static int 4819 lpfc_sli_hbq_setup(struct lpfc_hba *phba) 4820 { 4821 int hbq_count = lpfc_sli_hbq_count(); 4822 LPFC_MBOXQ_t *pmb; 4823 MAILBOX_t *pmbox; 4824 uint32_t hbqno; 4825 uint32_t hbq_entry_index; 4826 4827 /* Get a Mailbox buffer to setup mailbox 4828 * commands for HBA initialization 4829 */ 4830 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4831 4832 if (!pmb) 4833 return -ENOMEM; 4834 4835 pmbox = &pmb->u.mb; 4836 4837 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 4838 phba->link_state = LPFC_INIT_MBX_CMDS; 4839 phba->hbq_in_use = 1; 4840 4841 hbq_entry_index = 0; 4842 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 4843 phba->hbqs[hbqno].next_hbqPutIdx = 0; 4844 phba->hbqs[hbqno].hbqPutIdx = 0; 4845 phba->hbqs[hbqno].local_hbqGetIdx = 0; 4846 phba->hbqs[hbqno].entry_count = 4847 lpfc_hbq_defs[hbqno]->entry_count; 4848 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], 4849 hbq_entry_index, pmb); 4850 hbq_entry_index += phba->hbqs[hbqno].entry_count; 4851 4852 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 4853 /* Adapter failed to init, mbxCmd <cmd> CFG_RING, 4854 mbxStatus <status>, ring <num> */ 4855 4856 lpfc_printf_log(phba, KERN_ERR, 4857 LOG_SLI | LOG_VPORT, 4858 "1805 Adapter failed to init. " 4859 "Data: x%x x%x x%x\n", 4860 pmbox->mbxCommand, 4861 pmbox->mbxStatus, hbqno); 4862 4863 phba->link_state = LPFC_HBA_ERROR; 4864 mempool_free(pmb, phba->mbox_mem_pool); 4865 return -ENXIO; 4866 } 4867 } 4868 phba->hbq_count = hbq_count; 4869 4870 mempool_free(pmb, phba->mbox_mem_pool); 4871 4872 /* Initially populate or replenish the HBQs */ 4873 for (hbqno = 0; hbqno < hbq_count; ++hbqno) 4874 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno); 4875 return 0; 4876 } 4877 4878 /** 4879 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA 4880 * @phba: Pointer to HBA context object. 4881 * 4882 * This function is called during the SLI initialization to configure 4883 * all the HBQs and post buffers to the HBQ. The caller is not 4884 * required to hold any locks. This function will return zero if successful 4885 * else it will return negative error code. 4886 **/ 4887 static int 4888 lpfc_sli4_rb_setup(struct lpfc_hba *phba) 4889 { 4890 phba->hbq_in_use = 1; 4891 phba->hbqs[LPFC_ELS_HBQ].entry_count = 4892 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count; 4893 phba->hbq_count = 1; 4894 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ); 4895 /* Initially populate or replenish the HBQs */ 4896 return 0; 4897 } 4898 4899 /** 4900 * lpfc_sli_config_port - Issue config port mailbox command 4901 * @phba: Pointer to HBA context object. 4902 * @sli_mode: sli mode - 2/3 4903 * 4904 * This function is called by the sli initialization code path 4905 * to issue config_port mailbox command. This function restarts the 4906 * HBA firmware and issues a config_port mailbox command to configure 4907 * the SLI interface in the sli mode specified by sli_mode 4908 * variable. The caller is not required to hold any locks. 4909 * The function returns 0 if successful, else returns negative error 4910 * code. 
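 * If the CONFIG_PORT sequence fails, the restart and configuration steps
 * may be retried once (resetcount < 2) before giving up.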
4911 **/ 4912 int 4913 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) 4914 { 4915 LPFC_MBOXQ_t *pmb; 4916 uint32_t resetcount = 0, rc = 0, done = 0; 4917 4918 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4919 if (!pmb) { 4920 phba->link_state = LPFC_HBA_ERROR; 4921 return -ENOMEM; 4922 } 4923 4924 phba->sli_rev = sli_mode; 4925 while (resetcount < 2 && !done) { 4926 spin_lock_irq(&phba->hbalock); 4927 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 4928 spin_unlock_irq(&phba->hbalock); 4929 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4930 lpfc_sli_brdrestart(phba); 4931 rc = lpfc_sli_chipset_init(phba); 4932 if (rc) 4933 break; 4934 4935 spin_lock_irq(&phba->hbalock); 4936 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4937 spin_unlock_irq(&phba->hbalock); 4938 resetcount++; 4939 4940 /* Call pre CONFIG_PORT mailbox command initialization. A 4941 * value of 0 means the call was successful. Any other 4942 * nonzero value is a failure, but if ERESTART is returned, 4943 * the driver may reset the HBA and try again. 4944 */ 4945 rc = lpfc_config_port_prep(phba); 4946 if (rc == -ERESTART) { 4947 phba->link_state = LPFC_LINK_UNKNOWN; 4948 continue; 4949 } else if (rc) 4950 break; 4951 4952 phba->link_state = LPFC_INIT_MBX_CMDS; 4953 lpfc_config_port(phba, pmb); 4954 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 4955 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 4956 LPFC_SLI3_HBQ_ENABLED | 4957 LPFC_SLI3_CRP_ENABLED | 4958 LPFC_SLI3_BG_ENABLED | 4959 LPFC_SLI3_DSS_ENABLED); 4960 if (rc != MBX_SUCCESS) { 4961 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4962 "0442 Adapter failed to init, mbxCmd x%x " 4963 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 4964 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); 4965 spin_lock_irq(&phba->hbalock); 4966 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 4967 spin_unlock_irq(&phba->hbalock); 4968 rc = -ENXIO; 4969 } else { 4970 /* Allow asynchronous mailbox command to go through */ 4971 spin_lock_irq(&phba->hbalock); 4972 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 4973 spin_unlock_irq(&phba->hbalock); 4974 done = 1; 4975 4976 if ((pmb->u.mb.un.varCfgPort.casabt == 1) && 4977 (pmb->u.mb.un.varCfgPort.gasabt == 0)) 4978 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4979 "3110 Port did not grant ASABT\n"); 4980 } 4981 } 4982 if (!done) { 4983 rc = -EINVAL; 4984 goto do_prep_failed; 4985 } 4986 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { 4987 if (!pmb->u.mb.un.varCfgPort.cMA) { 4988 rc = -ENXIO; 4989 goto do_prep_failed; 4990 } 4991 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { 4992 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 4993 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; 4994 phba->max_vports = (phba->max_vpi > phba->max_vports) ? 4995 phba->max_vpi : phba->max_vports; 4996 4997 } else 4998 phba->max_vpi = 0; 4999 phba->fips_level = 0; 5000 phba->fips_spec_rev = 0; 5001 if (pmb->u.mb.un.varCfgPort.gdss) { 5002 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; 5003 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level; 5004 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev; 5005 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5006 "2850 Security Crypto Active. 
FIPS x%d " 5007 "(Spec Rev: x%d)", 5008 phba->fips_level, phba->fips_spec_rev); 5009 } 5010 if (pmb->u.mb.un.varCfgPort.sec_err) { 5011 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5012 "2856 Config Port Security Crypto " 5013 "Error: x%x ", 5014 pmb->u.mb.un.varCfgPort.sec_err); 5015 } 5016 if (pmb->u.mb.un.varCfgPort.gerbm) 5017 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 5018 if (pmb->u.mb.un.varCfgPort.gcrp) 5019 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 5020 5021 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; 5022 phba->port_gp = phba->mbox->us.s3_pgp.port; 5023 5024 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 5025 if (pmb->u.mb.un.varCfgPort.gbg == 0) { 5026 phba->cfg_enable_bg = 0; 5027 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 5028 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5029 "0443 Adapter did not grant " 5030 "BlockGuard\n"); 5031 } 5032 } 5033 } else { 5034 phba->hbq_get = NULL; 5035 phba->port_gp = phba->mbox->us.s2.port; 5036 phba->max_vpi = 0; 5037 } 5038 do_prep_failed: 5039 mempool_free(pmb, phba->mbox_mem_pool); 5040 return rc; 5041 } 5042 5043 5044 /** 5045 * lpfc_sli_hba_setup - SLI initialization function 5046 * @phba: Pointer to HBA context object. 5047 * 5048 * This function is the main SLI initialization function. This function 5049 * is called by the HBA initialization code, HBA reset code and HBA 5050 * error attention handler code. Caller is not required to hold any 5051 * locks. This function issues config_port mailbox command to configure 5052 * the SLI, setup iocb rings and HBQ rings. In the end the function 5053 * calls the config_port_post function to issue init_link mailbox 5054 * command and to start the discovery. The function will return zero 5055 * if successful, else it will return negative error code. 5056 **/ 5057 int 5058 lpfc_sli_hba_setup(struct lpfc_hba *phba) 5059 { 5060 uint32_t rc; 5061 int mode = 3, i; 5062 int longs; 5063 5064 switch (phba->cfg_sli_mode) { 5065 case 2: 5066 if (phba->cfg_enable_npiv) { 5067 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 5068 "1824 NPIV enabled: Override sli_mode " 5069 "parameter (%d) to auto (0).\n", 5070 phba->cfg_sli_mode); 5071 break; 5072 } 5073 mode = 2; 5074 break; 5075 case 0: 5076 case 3: 5077 break; 5078 default: 5079 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 5080 "1819 Unrecognized sli_mode parameter: %d.\n", 5081 phba->cfg_sli_mode); 5082 5083 break; 5084 } 5085 phba->fcp_embed_io = 0; /* SLI4 FC support only */ 5086 5087 rc = lpfc_sli_config_port(phba, mode); 5088 5089 if (rc && phba->cfg_sli_mode == 3) 5090 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 5091 "1820 Unable to select SLI-3. 
" 5092 "Not supported by adapter.\n"); 5093 if (rc && mode != 2) 5094 rc = lpfc_sli_config_port(phba, 2); 5095 else if (rc && mode == 2) 5096 rc = lpfc_sli_config_port(phba, 3); 5097 if (rc) 5098 goto lpfc_sli_hba_setup_error; 5099 5100 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 5101 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 5102 rc = pci_enable_pcie_error_reporting(phba->pcidev); 5103 if (!rc) { 5104 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5105 "2709 This device supports " 5106 "Advanced Error Reporting (AER)\n"); 5107 spin_lock_irq(&phba->hbalock); 5108 phba->hba_flag |= HBA_AER_ENABLED; 5109 spin_unlock_irq(&phba->hbalock); 5110 } else { 5111 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5112 "2708 This device does not support " 5113 "Advanced Error Reporting (AER): %d\n", 5114 rc); 5115 phba->cfg_aer_support = 0; 5116 } 5117 } 5118 5119 if (phba->sli_rev == 3) { 5120 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 5121 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 5122 } else { 5123 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 5124 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 5125 phba->sli3_options = 0; 5126 } 5127 5128 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5129 "0444 Firmware in SLI %x mode. Max_vpi %d\n", 5130 phba->sli_rev, phba->max_vpi); 5131 rc = lpfc_sli_ring_map(phba); 5132 5133 if (rc) 5134 goto lpfc_sli_hba_setup_error; 5135 5136 /* Initialize VPIs. */ 5137 if (phba->sli_rev == LPFC_SLI_REV3) { 5138 /* 5139 * The VPI bitmask and physical ID array are allocated 5140 * and initialized once only - at driver load. A port 5141 * reset doesn't need to reinitialize this memory. 5142 */ 5143 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) { 5144 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG; 5145 phba->vpi_bmask = kcalloc(longs, 5146 sizeof(unsigned long), 5147 GFP_KERNEL); 5148 if (!phba->vpi_bmask) { 5149 rc = -ENOMEM; 5150 goto lpfc_sli_hba_setup_error; 5151 } 5152 5153 phba->vpi_ids = kcalloc(phba->max_vpi + 1, 5154 sizeof(uint16_t), 5155 GFP_KERNEL); 5156 if (!phba->vpi_ids) { 5157 kfree(phba->vpi_bmask); 5158 rc = -ENOMEM; 5159 goto lpfc_sli_hba_setup_error; 5160 } 5161 for (i = 0; i < phba->max_vpi; i++) 5162 phba->vpi_ids[i] = i; 5163 } 5164 } 5165 5166 /* Init HBQs */ 5167 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 5168 rc = lpfc_sli_hbq_setup(phba); 5169 if (rc) 5170 goto lpfc_sli_hba_setup_error; 5171 } 5172 spin_lock_irq(&phba->hbalock); 5173 phba->sli.sli_flag |= LPFC_PROCESS_LA; 5174 spin_unlock_irq(&phba->hbalock); 5175 5176 rc = lpfc_config_port_post(phba); 5177 if (rc) 5178 goto lpfc_sli_hba_setup_error; 5179 5180 return rc; 5181 5182 lpfc_sli_hba_setup_error: 5183 phba->link_state = LPFC_HBA_ERROR; 5184 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5185 "0445 Firmware initialization failed\n"); 5186 return rc; 5187 } 5188 5189 /** 5190 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region 5191 * @phba: Pointer to HBA context object. 5192 * @mboxq: mailbox pointer. 5193 * This function issue a dump mailbox command to read config region 5194 * 23 and parse the records in the region and populate driver 5195 * data structure. 
5196 **/ 5197 static int 5198 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba) 5199 { 5200 LPFC_MBOXQ_t *mboxq; 5201 struct lpfc_dmabuf *mp; 5202 struct lpfc_mqe *mqe; 5203 uint32_t data_length; 5204 int rc; 5205 5206 /* Program the default value of vlan_id and fc_map */ 5207 phba->valid_vlan = 0; 5208 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 5209 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 5210 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 5211 5212 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5213 if (!mboxq) 5214 return -ENOMEM; 5215 5216 mqe = &mboxq->u.mqe; 5217 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) { 5218 rc = -ENOMEM; 5219 goto out_free_mboxq; 5220 } 5221 5222 mp = (struct lpfc_dmabuf *) mboxq->context1; 5223 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5224 5225 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5226 "(%d):2571 Mailbox cmd x%x Status x%x " 5227 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 5228 "x%x x%x x%x x%x x%x x%x x%x x%x x%x " 5229 "CQ: x%x x%x x%x x%x\n", 5230 mboxq->vport ? mboxq->vport->vpi : 0, 5231 bf_get(lpfc_mqe_command, mqe), 5232 bf_get(lpfc_mqe_status, mqe), 5233 mqe->un.mb_words[0], mqe->un.mb_words[1], 5234 mqe->un.mb_words[2], mqe->un.mb_words[3], 5235 mqe->un.mb_words[4], mqe->un.mb_words[5], 5236 mqe->un.mb_words[6], mqe->un.mb_words[7], 5237 mqe->un.mb_words[8], mqe->un.mb_words[9], 5238 mqe->un.mb_words[10], mqe->un.mb_words[11], 5239 mqe->un.mb_words[12], mqe->un.mb_words[13], 5240 mqe->un.mb_words[14], mqe->un.mb_words[15], 5241 mqe->un.mb_words[16], mqe->un.mb_words[50], 5242 mboxq->mcqe.word0, 5243 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 5244 mboxq->mcqe.trailer); 5245 5246 if (rc) { 5247 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5248 kfree(mp); 5249 rc = -EIO; 5250 goto out_free_mboxq; 5251 } 5252 data_length = mqe->un.mb_words[5]; 5253 if (data_length > DMP_RGN23_SIZE) { 5254 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5255 kfree(mp); 5256 rc = -EIO; 5257 goto out_free_mboxq; 5258 } 5259 5260 lpfc_parse_fcoe_conf(phba, mp->virt, data_length); 5261 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5262 kfree(mp); 5263 rc = 0; 5264 5265 out_free_mboxq: 5266 mempool_free(mboxq, phba->mbox_mem_pool); 5267 return rc; 5268 } 5269 5270 /** 5271 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data 5272 * @phba: pointer to lpfc hba data structure. 5273 * @mboxq: pointer to the LPFC_MBOXQ_t structure. 5274 * @vpd: pointer to the memory to hold resulting port vpd data. 5275 * @vpd_size: On input, the number of bytes allocated to @vpd. 5276 * On output, the number of data bytes in @vpd. 5277 * 5278 * This routine executes a READ_REV SLI4 mailbox command. In 5279 * addition, this routine gets the port vpd data. 5280 * 5281 * Return codes 5282 * 0 - successful 5283 * -ENOMEM - could not allocated memory. 5284 **/ 5285 static int 5286 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 5287 uint8_t *vpd, uint32_t *vpd_size) 5288 { 5289 int rc = 0; 5290 uint32_t dma_size; 5291 struct lpfc_dmabuf *dmabuf; 5292 struct lpfc_mqe *mqe; 5293 5294 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5295 if (!dmabuf) 5296 return -ENOMEM; 5297 5298 /* 5299 * Get a DMA buffer for the vpd data resulting from the READ_REV 5300 * mailbox command. 
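 * The buffer is allocated coherently and its physical address is handed to
 * the port in the READ_REV request.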
5301 */ 5302 dma_size = *vpd_size; 5303 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size, 5304 &dmabuf->phys, GFP_KERNEL); 5305 if (!dmabuf->virt) { 5306 kfree(dmabuf); 5307 return -ENOMEM; 5308 } 5309 5310 /* 5311 * The SLI4 implementation of READ_REV conflicts at word1, 5312 * bits 31:16 and SLI4 adds vpd functionality not present 5313 * in SLI3. This code corrects the conflicts. 5314 */ 5315 lpfc_read_rev(phba, mboxq); 5316 mqe = &mboxq->u.mqe; 5317 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); 5318 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); 5319 mqe->un.read_rev.word1 &= 0x0000FFFF; 5320 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); 5321 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); 5322 5323 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5324 if (rc) { 5325 dma_free_coherent(&phba->pcidev->dev, dma_size, 5326 dmabuf->virt, dmabuf->phys); 5327 kfree(dmabuf); 5328 return -EIO; 5329 } 5330 5331 /* 5332 * The available vpd length cannot be bigger than the 5333 * DMA buffer passed to the port. Catch the less than 5334 * case and update the caller's size. 5335 */ 5336 if (mqe->un.read_rev.avail_vpd_len < *vpd_size) 5337 *vpd_size = mqe->un.read_rev.avail_vpd_len; 5338 5339 memcpy(vpd, dmabuf->virt, *vpd_size); 5340 5341 dma_free_coherent(&phba->pcidev->dev, dma_size, 5342 dmabuf->virt, dmabuf->phys); 5343 kfree(dmabuf); 5344 return 0; 5345 } 5346 5347 /** 5348 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name 5349 * @phba: pointer to lpfc hba data structure. 5350 * 5351 * This routine retrieves SLI4 device physical port name this PCI function 5352 * is attached to. 5353 * 5354 * Return codes 5355 * 0 - successful 5356 * otherwise - failed to retrieve physical port name 5357 **/ 5358 static int 5359 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba) 5360 { 5361 LPFC_MBOXQ_t *mboxq; 5362 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr; 5363 struct lpfc_controller_attribute *cntl_attr; 5364 struct lpfc_mbx_get_port_name *get_port_name; 5365 void *virtaddr = NULL; 5366 uint32_t alloclen, reqlen; 5367 uint32_t shdr_status, shdr_add_status; 5368 union lpfc_sli4_cfg_shdr *shdr; 5369 char cport_name = 0; 5370 int rc; 5371 5372 /* We assume nothing at this point */ 5373 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 5374 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON; 5375 5376 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5377 if (!mboxq) 5378 return -ENOMEM; 5379 /* obtain link type and link number via READ_CONFIG */ 5380 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 5381 lpfc_sli4_read_config(phba); 5382 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) 5383 goto retrieve_ppname; 5384 5385 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */ 5386 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes); 5387 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5388 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen, 5389 LPFC_SLI4_MBX_NEMBED); 5390 if (alloclen < reqlen) { 5391 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5392 "3084 Allocated DMA memory size (%d) is " 5393 "less than the requested DMA memory size " 5394 "(%d)\n", alloclen, reqlen); 5395 rc = -ENOMEM; 5396 goto out_free_mboxq; 5397 } 5398 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5399 virtaddr = mboxq->sge_array->addr[0]; 5400 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr; 5401 shdr = &mbx_cntl_attr->cfg_shdr; 5402 shdr_status = 
bf_get(lpfc_mbox_hdr_status, &shdr->response); 5403 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5404 if (shdr_status || shdr_add_status || rc) { 5405 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5406 "3085 Mailbox x%x (x%x/x%x) failed, " 5407 "rc:x%x, status:x%x, add_status:x%x\n", 5408 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 5409 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 5410 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 5411 rc, shdr_status, shdr_add_status); 5412 rc = -ENXIO; 5413 goto out_free_mboxq; 5414 } 5415 cntl_attr = &mbx_cntl_attr->cntl_attr; 5416 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 5417 phba->sli4_hba.lnk_info.lnk_tp = 5418 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr); 5419 phba->sli4_hba.lnk_info.lnk_no = 5420 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr); 5421 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5422 "3086 lnk_type:%d, lnk_numb:%d\n", 5423 phba->sli4_hba.lnk_info.lnk_tp, 5424 phba->sli4_hba.lnk_info.lnk_no); 5425 5426 retrieve_ppname: 5427 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5428 LPFC_MBOX_OPCODE_GET_PORT_NAME, 5429 sizeof(struct lpfc_mbx_get_port_name) - 5430 sizeof(struct lpfc_sli4_cfg_mhdr), 5431 LPFC_SLI4_MBX_EMBED); 5432 get_port_name = &mboxq->u.mqe.un.get_port_name; 5433 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr; 5434 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1); 5435 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request, 5436 phba->sli4_hba.lnk_info.lnk_tp); 5437 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5438 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 5439 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5440 if (shdr_status || shdr_add_status || rc) { 5441 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5442 "3087 Mailbox x%x (x%x/x%x) failed: " 5443 "rc:x%x, status:x%x, add_status:x%x\n", 5444 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 5445 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 5446 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 5447 rc, shdr_status, shdr_add_status); 5448 rc = -ENXIO; 5449 goto out_free_mboxq; 5450 } 5451 switch (phba->sli4_hba.lnk_info.lnk_no) { 5452 case LPFC_LINK_NUMBER_0: 5453 cport_name = bf_get(lpfc_mbx_get_port_name_name0, 5454 &get_port_name->u.response); 5455 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5456 break; 5457 case LPFC_LINK_NUMBER_1: 5458 cport_name = bf_get(lpfc_mbx_get_port_name_name1, 5459 &get_port_name->u.response); 5460 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5461 break; 5462 case LPFC_LINK_NUMBER_2: 5463 cport_name = bf_get(lpfc_mbx_get_port_name_name2, 5464 &get_port_name->u.response); 5465 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5466 break; 5467 case LPFC_LINK_NUMBER_3: 5468 cport_name = bf_get(lpfc_mbx_get_port_name_name3, 5469 &get_port_name->u.response); 5470 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5471 break; 5472 default: 5473 break; 5474 } 5475 5476 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) { 5477 phba->Port[0] = cport_name; 5478 phba->Port[1] = '\0'; 5479 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5480 "3091 SLI get port name: %s\n", phba->Port); 5481 } 5482 5483 out_free_mboxq: 5484 if (rc != MBX_TIMEOUT) { 5485 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) 5486 lpfc_sli4_mbox_cmd_free(phba, mboxq); 5487 else 5488 mempool_free(mboxq, phba->mbox_mem_pool); 5489 } 5490 return rc; 5491 } 5492 5493 /** 5494 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device 
completion and event queues 5495 * @phba: pointer to lpfc hba data structure. 5496 * 5497 * This routine is called to explicitly arm the SLI4 device's completion and 5498 * event queues 5499 **/ 5500 static void 5501 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) 5502 { 5503 int qidx; 5504 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; 5505 5506 sli4_hba->sli4_cq_release(sli4_hba->mbx_cq, LPFC_QUEUE_REARM); 5507 sli4_hba->sli4_cq_release(sli4_hba->els_cq, LPFC_QUEUE_REARM); 5508 if (sli4_hba->nvmels_cq) 5509 sli4_hba->sli4_cq_release(sli4_hba->nvmels_cq, 5510 LPFC_QUEUE_REARM); 5511 5512 if (sli4_hba->fcp_cq) 5513 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) 5514 sli4_hba->sli4_cq_release(sli4_hba->fcp_cq[qidx], 5515 LPFC_QUEUE_REARM); 5516 5517 if (sli4_hba->nvme_cq) 5518 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) 5519 sli4_hba->sli4_cq_release(sli4_hba->nvme_cq[qidx], 5520 LPFC_QUEUE_REARM); 5521 5522 if (phba->cfg_fof) 5523 sli4_hba->sli4_cq_release(sli4_hba->oas_cq, LPFC_QUEUE_REARM); 5524 5525 if (sli4_hba->hba_eq) 5526 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) 5527 sli4_hba->sli4_eq_release(sli4_hba->hba_eq[qidx], 5528 LPFC_QUEUE_REARM); 5529 5530 if (phba->nvmet_support) { 5531 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) { 5532 sli4_hba->sli4_cq_release( 5533 sli4_hba->nvmet_cqset[qidx], 5534 LPFC_QUEUE_REARM); 5535 } 5536 } 5537 5538 if (phba->cfg_fof) 5539 sli4_hba->sli4_eq_release(sli4_hba->fof_eq, LPFC_QUEUE_REARM); 5540 } 5541 5542 /** 5543 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count. 5544 * @phba: Pointer to HBA context object. 5545 * @type: The resource extent type. 5546 * @extnt_count: buffer to hold port available extent count. 5547 * @extnt_size: buffer to hold element count per extent. 5548 * 5549 * This function calls the port and retrievs the number of available 5550 * extents and their size for a particular extent type. 5551 * 5552 * Returns: 0 if successful. Nonzero otherwise. 5553 **/ 5554 int 5555 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type, 5556 uint16_t *extnt_count, uint16_t *extnt_size) 5557 { 5558 int rc = 0; 5559 uint32_t length; 5560 uint32_t mbox_tmo; 5561 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info; 5562 LPFC_MBOXQ_t *mbox; 5563 5564 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5565 if (!mbox) 5566 return -ENOMEM; 5567 5568 /* Find out how many extents are available for this resource type */ 5569 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) - 5570 sizeof(struct lpfc_sli4_cfg_mhdr)); 5571 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5572 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO, 5573 length, LPFC_SLI4_MBX_EMBED); 5574 5575 /* Send an extents count of 0 - the GET doesn't use it. 
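Only the resource type field is meaningful for this query.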
*/ 5576 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 5577 LPFC_SLI4_MBX_EMBED); 5578 if (unlikely(rc)) { 5579 rc = -EIO; 5580 goto err_exit; 5581 } 5582 5583 if (!phba->sli4_hba.intr_enable) 5584 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5585 else { 5586 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5587 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5588 } 5589 if (unlikely(rc)) { 5590 rc = -EIO; 5591 goto err_exit; 5592 } 5593 5594 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info; 5595 if (bf_get(lpfc_mbox_hdr_status, 5596 &rsrc_info->header.cfg_shdr.response)) { 5597 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5598 "2930 Failed to get resource extents " 5599 "Status 0x%x Add'l Status 0x%x\n", 5600 bf_get(lpfc_mbox_hdr_status, 5601 &rsrc_info->header.cfg_shdr.response), 5602 bf_get(lpfc_mbox_hdr_add_status, 5603 &rsrc_info->header.cfg_shdr.response)); 5604 rc = -EIO; 5605 goto err_exit; 5606 } 5607 5608 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt, 5609 &rsrc_info->u.rsp); 5610 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size, 5611 &rsrc_info->u.rsp); 5612 5613 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5614 "3162 Retrieved extents type-%d from port: count:%d, " 5615 "size:%d\n", type, *extnt_count, *extnt_size); 5616 5617 err_exit: 5618 mempool_free(mbox, phba->mbox_mem_pool); 5619 return rc; 5620 } 5621 5622 /** 5623 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents. 5624 * @phba: Pointer to HBA context object. 5625 * @type: The extent type to check. 5626 * 5627 * This function reads the current available extents from the port and checks 5628 * if the extent count or extent size has changed since the last access. 5629 * Callers use this routine post port reset to understand if there is a 5630 * extent reprovisioning requirement. 5631 * 5632 * Returns: 5633 * -Error: error indicates problem. 5634 * 1: Extent count or size has changed. 5635 * 0: No changes. 5636 **/ 5637 static int 5638 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type) 5639 { 5640 uint16_t curr_ext_cnt, rsrc_ext_cnt; 5641 uint16_t size_diff, rsrc_ext_size; 5642 int rc = 0; 5643 struct lpfc_rsrc_blks *rsrc_entry; 5644 struct list_head *rsrc_blk_list = NULL; 5645 5646 size_diff = 0; 5647 curr_ext_cnt = 0; 5648 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5649 &rsrc_ext_cnt, 5650 &rsrc_ext_size); 5651 if (unlikely(rc)) 5652 return -EIO; 5653 5654 switch (type) { 5655 case LPFC_RSC_TYPE_FCOE_RPI: 5656 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5657 break; 5658 case LPFC_RSC_TYPE_FCOE_VPI: 5659 rsrc_blk_list = &phba->lpfc_vpi_blk_list; 5660 break; 5661 case LPFC_RSC_TYPE_FCOE_XRI: 5662 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5663 break; 5664 case LPFC_RSC_TYPE_FCOE_VFI: 5665 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5666 break; 5667 default: 5668 break; 5669 } 5670 5671 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) { 5672 curr_ext_cnt++; 5673 if (rsrc_entry->rsrc_size != rsrc_ext_size) 5674 size_diff++; 5675 } 5676 5677 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0) 5678 rc = 1; 5679 5680 return rc; 5681 } 5682 5683 /** 5684 * lpfc_sli4_cfg_post_extnts - 5685 * @phba: Pointer to HBA context object. 5686 * @extnt_cnt - number of available extents. 5687 * @type - the extent type (rpi, xri, vfi, vpi). 5688 * @emb - buffer to hold either MBX_EMBED or MBX_NEMBED operation. 5689 * @mbox - pointer to the caller's allocated mailbox structure. 
5690 * 5691 * This function executes the extents allocation request. It also 5692 * takes care of the amount of memory needed to allocate or get the 5693 * allocated extents. It is the caller's responsibility to evaluate 5694 * the response. 5695 * 5696 * Returns: 5697 * -Error: Error value describes the condition found. 5698 * 0: if successful 5699 **/ 5700 static int 5701 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt, 5702 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox) 5703 { 5704 int rc = 0; 5705 uint32_t req_len; 5706 uint32_t emb_len; 5707 uint32_t alloc_len, mbox_tmo; 5708 5709 /* Calculate the total requested length of the dma memory */ 5710 req_len = extnt_cnt * sizeof(uint16_t); 5711 5712 /* 5713 * Calculate the size of an embedded mailbox. The uint32_t 5714 * accounts for extents-specific word. 5715 */ 5716 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 5717 sizeof(uint32_t); 5718 5719 /* 5720 * Presume the allocation and response will fit into an embedded 5721 * mailbox. If not true, reconfigure to a non-embedded mailbox. 5722 */ 5723 *emb = LPFC_SLI4_MBX_EMBED; 5724 if (req_len > emb_len) { 5725 req_len = extnt_cnt * sizeof(uint16_t) + 5726 sizeof(union lpfc_sli4_cfg_shdr) + 5727 sizeof(uint32_t); 5728 *emb = LPFC_SLI4_MBX_NEMBED; 5729 } 5730 5731 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5732 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT, 5733 req_len, *emb); 5734 if (alloc_len < req_len) { 5735 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5736 "2982 Allocated DMA memory size (x%x) is " 5737 "less than the requested DMA memory " 5738 "size (x%x)\n", alloc_len, req_len); 5739 return -ENOMEM; 5740 } 5741 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb); 5742 if (unlikely(rc)) 5743 return -EIO; 5744 5745 if (!phba->sli4_hba.intr_enable) 5746 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5747 else { 5748 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5749 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5750 } 5751 5752 if (unlikely(rc)) 5753 rc = -EIO; 5754 return rc; 5755 } 5756 5757 /** 5758 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent. 5759 * @phba: Pointer to HBA context object. 5760 * @type: The resource extent type to allocate. 5761 * 5762 * This function allocates the number of elements for the specified 5763 * resource type. 
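 *
 * Return codes
 * 0 - successful
 * -ENOMEM - no extents available or a bitmask/id array allocation failed
 * -EIO - a mailbox command failed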
5764 **/ 5765 static int 5766 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) 5767 { 5768 bool emb = false; 5769 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size; 5770 uint16_t rsrc_id, rsrc_start, j, k; 5771 uint16_t *ids; 5772 int i, rc; 5773 unsigned long longs; 5774 unsigned long *bmask; 5775 struct lpfc_rsrc_blks *rsrc_blks; 5776 LPFC_MBOXQ_t *mbox; 5777 uint32_t length; 5778 struct lpfc_id_range *id_array = NULL; 5779 void *virtaddr = NULL; 5780 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 5781 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 5782 struct list_head *ext_blk_list; 5783 5784 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5785 &rsrc_cnt, 5786 &rsrc_size); 5787 if (unlikely(rc)) 5788 return -EIO; 5789 5790 if ((rsrc_cnt == 0) || (rsrc_size == 0)) { 5791 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5792 "3009 No available Resource Extents " 5793 "for resource type 0x%x: Count: 0x%x, " 5794 "Size 0x%x\n", type, rsrc_cnt, 5795 rsrc_size); 5796 return -ENOMEM; 5797 } 5798 5799 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI, 5800 "2903 Post resource extents type-0x%x: " 5801 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size); 5802 5803 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5804 if (!mbox) 5805 return -ENOMEM; 5806 5807 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox); 5808 if (unlikely(rc)) { 5809 rc = -EIO; 5810 goto err_exit; 5811 } 5812 5813 /* 5814 * Figure out where the response is located. Then get local pointers 5815 * to the response data. The port does not guarantee to respond to 5816 * all extents counts request so update the local variable with the 5817 * allocated count from the port. 5818 */ 5819 if (emb == LPFC_SLI4_MBX_EMBED) { 5820 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 5821 id_array = &rsrc_ext->u.rsp.id[0]; 5822 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 5823 } else { 5824 virtaddr = mbox->sge_array->addr[0]; 5825 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 5826 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 5827 id_array = &n_rsrc->id; 5828 } 5829 5830 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG; 5831 rsrc_id_cnt = rsrc_cnt * rsrc_size; 5832 5833 /* 5834 * Based on the resource size and count, correct the base and max 5835 * resource values. 5836 */ 5837 length = sizeof(struct lpfc_rsrc_blks); 5838 switch (type) { 5839 case LPFC_RSC_TYPE_FCOE_RPI: 5840 phba->sli4_hba.rpi_bmask = kcalloc(longs, 5841 sizeof(unsigned long), 5842 GFP_KERNEL); 5843 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 5844 rc = -ENOMEM; 5845 goto err_exit; 5846 } 5847 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt, 5848 sizeof(uint16_t), 5849 GFP_KERNEL); 5850 if (unlikely(!phba->sli4_hba.rpi_ids)) { 5851 kfree(phba->sli4_hba.rpi_bmask); 5852 rc = -ENOMEM; 5853 goto err_exit; 5854 } 5855 5856 /* 5857 * The next_rpi was initialized with the maximum available 5858 * count but the port may allocate a smaller number. Catch 5859 * that case and update the next_rpi. 5860 */ 5861 phba->sli4_hba.next_rpi = rsrc_id_cnt; 5862 5863 /* Initialize local ptrs for common extent processing later. 
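bmask, ids and ext_blk_list are the per-type aliases consumed by the common id-assignment loop that follows the switch.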
*/ 5864 bmask = phba->sli4_hba.rpi_bmask; 5865 ids = phba->sli4_hba.rpi_ids; 5866 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5867 break; 5868 case LPFC_RSC_TYPE_FCOE_VPI: 5869 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), 5870 GFP_KERNEL); 5871 if (unlikely(!phba->vpi_bmask)) { 5872 rc = -ENOMEM; 5873 goto err_exit; 5874 } 5875 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t), 5876 GFP_KERNEL); 5877 if (unlikely(!phba->vpi_ids)) { 5878 kfree(phba->vpi_bmask); 5879 rc = -ENOMEM; 5880 goto err_exit; 5881 } 5882 5883 /* Initialize local ptrs for common extent processing later. */ 5884 bmask = phba->vpi_bmask; 5885 ids = phba->vpi_ids; 5886 ext_blk_list = &phba->lpfc_vpi_blk_list; 5887 break; 5888 case LPFC_RSC_TYPE_FCOE_XRI: 5889 phba->sli4_hba.xri_bmask = kcalloc(longs, 5890 sizeof(unsigned long), 5891 GFP_KERNEL); 5892 if (unlikely(!phba->sli4_hba.xri_bmask)) { 5893 rc = -ENOMEM; 5894 goto err_exit; 5895 } 5896 phba->sli4_hba.max_cfg_param.xri_used = 0; 5897 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt, 5898 sizeof(uint16_t), 5899 GFP_KERNEL); 5900 if (unlikely(!phba->sli4_hba.xri_ids)) { 5901 kfree(phba->sli4_hba.xri_bmask); 5902 rc = -ENOMEM; 5903 goto err_exit; 5904 } 5905 5906 /* Initialize local ptrs for common extent processing later. */ 5907 bmask = phba->sli4_hba.xri_bmask; 5908 ids = phba->sli4_hba.xri_ids; 5909 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5910 break; 5911 case LPFC_RSC_TYPE_FCOE_VFI: 5912 phba->sli4_hba.vfi_bmask = kcalloc(longs, 5913 sizeof(unsigned long), 5914 GFP_KERNEL); 5915 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 5916 rc = -ENOMEM; 5917 goto err_exit; 5918 } 5919 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt, 5920 sizeof(uint16_t), 5921 GFP_KERNEL); 5922 if (unlikely(!phba->sli4_hba.vfi_ids)) { 5923 kfree(phba->sli4_hba.vfi_bmask); 5924 rc = -ENOMEM; 5925 goto err_exit; 5926 } 5927 5928 /* Initialize local ptrs for common extent processing later. */ 5929 bmask = phba->sli4_hba.vfi_bmask; 5930 ids = phba->sli4_hba.vfi_ids; 5931 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5932 break; 5933 default: 5934 /* Unsupported Opcode. Fail call. */ 5935 id_array = NULL; 5936 bmask = NULL; 5937 ids = NULL; 5938 ext_blk_list = NULL; 5939 goto err_exit; 5940 } 5941 5942 /* 5943 * Complete initializing the extent configuration with the 5944 * allocated ids assigned to this function. The bitmask serves 5945 * as an index into the array and manages the available ids. The 5946 * array just stores the ids communicated to the port via the wqes. 5947 */ 5948 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) { 5949 if ((i % 2) == 0) 5950 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0, 5951 &id_array[k]); 5952 else 5953 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1, 5954 &id_array[k]); 5955 5956 rsrc_blks = kzalloc(length, GFP_KERNEL); 5957 if (unlikely(!rsrc_blks)) { 5958 rc = -ENOMEM; 5959 kfree(bmask); 5960 kfree(ids); 5961 goto err_exit; 5962 } 5963 rsrc_blks->rsrc_start = rsrc_id; 5964 rsrc_blks->rsrc_size = rsrc_size; 5965 list_add_tail(&rsrc_blks->list, ext_blk_list); 5966 rsrc_start = rsrc_id; 5967 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) { 5968 phba->sli4_hba.scsi_xri_start = rsrc_start + 5969 lpfc_sli4_get_iocb_cnt(phba); 5970 phba->sli4_hba.nvme_xri_start = 5971 phba->sli4_hba.scsi_xri_start + 5972 phba->sli4_hba.scsi_xri_max; 5973 } 5974 5975 while (rsrc_id < (rsrc_start + rsrc_size)) { 5976 ids[j] = rsrc_id; 5977 rsrc_id++; 5978 j++; 5979 } 5980 /* Entire word processed. 
Get next word.*/ 5981 if ((i % 2) == 1) 5982 k++; 5983 } 5984 err_exit: 5985 lpfc_sli4_mbox_cmd_free(phba, mbox); 5986 return rc; 5987 } 5988 5989 5990 5991 /** 5992 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent. 5993 * @phba: Pointer to HBA context object. 5994 * @type: the extent's type. 5995 * 5996 * This function deallocates all extents of a particular resource type. 5997 * SLI4 does not allow for deallocating a particular extent range. It 5998 * is the caller's responsibility to release all kernel memory resources. 5999 **/ 6000 static int 6001 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) 6002 { 6003 int rc; 6004 uint32_t length, mbox_tmo = 0; 6005 LPFC_MBOXQ_t *mbox; 6006 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc; 6007 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next; 6008 6009 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6010 if (!mbox) 6011 return -ENOMEM; 6012 6013 /* 6014 * This function sends an embedded mailbox because it only sends the 6015 * the resource type. All extents of this type are released by the 6016 * port. 6017 */ 6018 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) - 6019 sizeof(struct lpfc_sli4_cfg_mhdr)); 6020 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6021 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT, 6022 length, LPFC_SLI4_MBX_EMBED); 6023 6024 /* Send an extents count of 0 - the dealloc doesn't use it. */ 6025 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 6026 LPFC_SLI4_MBX_EMBED); 6027 if (unlikely(rc)) { 6028 rc = -EIO; 6029 goto out_free_mbox; 6030 } 6031 if (!phba->sli4_hba.intr_enable) 6032 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 6033 else { 6034 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 6035 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 6036 } 6037 if (unlikely(rc)) { 6038 rc = -EIO; 6039 goto out_free_mbox; 6040 } 6041 6042 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents; 6043 if (bf_get(lpfc_mbox_hdr_status, 6044 &dealloc_rsrc->header.cfg_shdr.response)) { 6045 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 6046 "2919 Failed to release resource extents " 6047 "for type %d - Status 0x%x Add'l Status 0x%x. " 6048 "Resource memory not released.\n", 6049 type, 6050 bf_get(lpfc_mbox_hdr_status, 6051 &dealloc_rsrc->header.cfg_shdr.response), 6052 bf_get(lpfc_mbox_hdr_add_status, 6053 &dealloc_rsrc->header.cfg_shdr.response)); 6054 rc = -EIO; 6055 goto out_free_mbox; 6056 } 6057 6058 /* Release kernel memory resources for the specific type. 
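Each case also walks the matching extent block list and frees its entries.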
*/ 6059 switch (type) { 6060 case LPFC_RSC_TYPE_FCOE_VPI: 6061 kfree(phba->vpi_bmask); 6062 kfree(phba->vpi_ids); 6063 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6064 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6065 &phba->lpfc_vpi_blk_list, list) { 6066 list_del_init(&rsrc_blk->list); 6067 kfree(rsrc_blk); 6068 } 6069 phba->sli4_hba.max_cfg_param.vpi_used = 0; 6070 break; 6071 case LPFC_RSC_TYPE_FCOE_XRI: 6072 kfree(phba->sli4_hba.xri_bmask); 6073 kfree(phba->sli4_hba.xri_ids); 6074 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6075 &phba->sli4_hba.lpfc_xri_blk_list, list) { 6076 list_del_init(&rsrc_blk->list); 6077 kfree(rsrc_blk); 6078 } 6079 break; 6080 case LPFC_RSC_TYPE_FCOE_VFI: 6081 kfree(phba->sli4_hba.vfi_bmask); 6082 kfree(phba->sli4_hba.vfi_ids); 6083 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6084 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6085 &phba->sli4_hba.lpfc_vfi_blk_list, list) { 6086 list_del_init(&rsrc_blk->list); 6087 kfree(rsrc_blk); 6088 } 6089 break; 6090 case LPFC_RSC_TYPE_FCOE_RPI: 6091 /* RPI bitmask and physical id array are cleaned up earlier. */ 6092 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6093 &phba->sli4_hba.lpfc_rpi_blk_list, list) { 6094 list_del_init(&rsrc_blk->list); 6095 kfree(rsrc_blk); 6096 } 6097 break; 6098 default: 6099 break; 6100 } 6101 6102 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6103 6104 out_free_mbox: 6105 mempool_free(mbox, phba->mbox_mem_pool); 6106 return rc; 6107 } 6108 6109 static void 6110 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, 6111 uint32_t feature) 6112 { 6113 uint32_t len; 6114 6115 len = sizeof(struct lpfc_mbx_set_feature) - 6116 sizeof(struct lpfc_sli4_cfg_mhdr); 6117 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6118 LPFC_MBOX_OPCODE_SET_FEATURES, len, 6119 LPFC_SLI4_MBX_EMBED); 6120 6121 switch (feature) { 6122 case LPFC_SET_UE_RECOVERY: 6123 bf_set(lpfc_mbx_set_feature_UER, 6124 &mbox->u.mqe.un.set_feature, 1); 6125 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY; 6126 mbox->u.mqe.un.set_feature.param_len = 8; 6127 break; 6128 case LPFC_SET_MDS_DIAGS: 6129 bf_set(lpfc_mbx_set_feature_mds, 6130 &mbox->u.mqe.un.set_feature, 1); 6131 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk, 6132 &mbox->u.mqe.un.set_feature, 1); 6133 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS; 6134 mbox->u.mqe.un.set_feature.param_len = 8; 6135 break; 6136 } 6137 6138 return; 6139 } 6140 6141 /** 6142 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents. 6143 * @phba: Pointer to HBA context object. 6144 * 6145 * This function allocates all SLI4 resource identifiers. 6146 **/ 6147 int 6148 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) 6149 { 6150 int i, rc, error = 0; 6151 uint16_t count, base; 6152 unsigned long longs; 6153 6154 if (!phba->sli4_hba.rpi_hdrs_in_use) 6155 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 6156 if (phba->sli4_hba.extents_in_use) { 6157 /* 6158 * The port supports resource extents. The XRI, VPI, VFI, RPI 6159 * resource extent count must be read and allocated before 6160 * provisioning the resource id arrays. 6161 */ 6162 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 6163 LPFC_IDX_RSRC_RDY) { 6164 /* 6165 * Extent-based resources are set - the driver could 6166 * be in a port reset. Figure out if any corrective 6167 * actions need to be taken. 
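 * Any change in extent count or size for VFI, VPI, XRI or RPI forces a
 * full deallocation and reallocation of all extent types below.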
6168 */ 6169 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6170 LPFC_RSC_TYPE_FCOE_VFI); 6171 if (rc != 0) 6172 error++; 6173 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6174 LPFC_RSC_TYPE_FCOE_VPI); 6175 if (rc != 0) 6176 error++; 6177 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6178 LPFC_RSC_TYPE_FCOE_XRI); 6179 if (rc != 0) 6180 error++; 6181 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6182 LPFC_RSC_TYPE_FCOE_RPI); 6183 if (rc != 0) 6184 error++; 6185 6186 /* 6187 * It's possible that the number of resources 6188 * provided to this port instance changed between 6189 * resets. Detect this condition and reallocate 6190 * resources. Otherwise, there is no action. 6191 */ 6192 if (error) { 6193 lpfc_printf_log(phba, KERN_INFO, 6194 LOG_MBOX | LOG_INIT, 6195 "2931 Detected extent resource " 6196 "change. Reallocating all " 6197 "extents.\n"); 6198 rc = lpfc_sli4_dealloc_extent(phba, 6199 LPFC_RSC_TYPE_FCOE_VFI); 6200 rc = lpfc_sli4_dealloc_extent(phba, 6201 LPFC_RSC_TYPE_FCOE_VPI); 6202 rc = lpfc_sli4_dealloc_extent(phba, 6203 LPFC_RSC_TYPE_FCOE_XRI); 6204 rc = lpfc_sli4_dealloc_extent(phba, 6205 LPFC_RSC_TYPE_FCOE_RPI); 6206 } else 6207 return 0; 6208 } 6209 6210 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 6211 if (unlikely(rc)) 6212 goto err_exit; 6213 6214 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 6215 if (unlikely(rc)) 6216 goto err_exit; 6217 6218 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 6219 if (unlikely(rc)) 6220 goto err_exit; 6221 6222 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 6223 if (unlikely(rc)) 6224 goto err_exit; 6225 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 6226 LPFC_IDX_RSRC_RDY); 6227 return rc; 6228 } else { 6229 /* 6230 * The port does not support resource extents. The XRI, VPI, 6231 * VFI, RPI resource ids were determined from READ_CONFIG. 6232 * Just allocate the bitmasks and provision the resource id 6233 * arrays. If a port reset is active, the resources don't 6234 * need any action - just exit. 6235 */ 6236 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 6237 LPFC_IDX_RSRC_RDY) { 6238 lpfc_sli4_dealloc_resource_identifiers(phba); 6239 lpfc_sli4_remove_rpis(phba); 6240 } 6241 /* RPIs. */ 6242 count = phba->sli4_hba.max_cfg_param.max_rpi; 6243 if (count <= 0) { 6244 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6245 "3279 Invalid provisioning of " 6246 "rpi:%d\n", count); 6247 rc = -EINVAL; 6248 goto err_exit; 6249 } 6250 base = phba->sli4_hba.max_cfg_param.rpi_base; 6251 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6252 phba->sli4_hba.rpi_bmask = kcalloc(longs, 6253 sizeof(unsigned long), 6254 GFP_KERNEL); 6255 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 6256 rc = -ENOMEM; 6257 goto err_exit; 6258 } 6259 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t), 6260 GFP_KERNEL); 6261 if (unlikely(!phba->sli4_hba.rpi_ids)) { 6262 rc = -ENOMEM; 6263 goto free_rpi_bmask; 6264 } 6265 6266 for (i = 0; i < count; i++) 6267 phba->sli4_hba.rpi_ids[i] = base + i; 6268 6269 /* VPIs. 
*/ 6270 count = phba->sli4_hba.max_cfg_param.max_vpi; 6271 if (count <= 0) { 6272 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6273 "3280 Invalid provisioning of " 6274 "vpi:%d\n", count); 6275 rc = -EINVAL; 6276 goto free_rpi_ids; 6277 } 6278 base = phba->sli4_hba.max_cfg_param.vpi_base; 6279 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6280 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), 6281 GFP_KERNEL); 6282 if (unlikely(!phba->vpi_bmask)) { 6283 rc = -ENOMEM; 6284 goto free_rpi_ids; 6285 } 6286 phba->vpi_ids = kcalloc(count, sizeof(uint16_t), 6287 GFP_KERNEL); 6288 if (unlikely(!phba->vpi_ids)) { 6289 rc = -ENOMEM; 6290 goto free_vpi_bmask; 6291 } 6292 6293 for (i = 0; i < count; i++) 6294 phba->vpi_ids[i] = base + i; 6295 6296 /* XRIs. */ 6297 count = phba->sli4_hba.max_cfg_param.max_xri; 6298 if (count <= 0) { 6299 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6300 "3281 Invalid provisioning of " 6301 "xri:%d\n", count); 6302 rc = -EINVAL; 6303 goto free_vpi_ids; 6304 } 6305 base = phba->sli4_hba.max_cfg_param.xri_base; 6306 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6307 phba->sli4_hba.xri_bmask = kcalloc(longs, 6308 sizeof(unsigned long), 6309 GFP_KERNEL); 6310 if (unlikely(!phba->sli4_hba.xri_bmask)) { 6311 rc = -ENOMEM; 6312 goto free_vpi_ids; 6313 } 6314 phba->sli4_hba.max_cfg_param.xri_used = 0; 6315 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t), 6316 GFP_KERNEL); 6317 if (unlikely(!phba->sli4_hba.xri_ids)) { 6318 rc = -ENOMEM; 6319 goto free_xri_bmask; 6320 } 6321 6322 for (i = 0; i < count; i++) 6323 phba->sli4_hba.xri_ids[i] = base + i; 6324 6325 /* VFIs. */ 6326 count = phba->sli4_hba.max_cfg_param.max_vfi; 6327 if (count <= 0) { 6328 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6329 "3282 Invalid provisioning of " 6330 "vfi:%d\n", count); 6331 rc = -EINVAL; 6332 goto free_xri_ids; 6333 } 6334 base = phba->sli4_hba.max_cfg_param.vfi_base; 6335 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6336 phba->sli4_hba.vfi_bmask = kcalloc(longs, 6337 sizeof(unsigned long), 6338 GFP_KERNEL); 6339 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 6340 rc = -ENOMEM; 6341 goto free_xri_ids; 6342 } 6343 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t), 6344 GFP_KERNEL); 6345 if (unlikely(!phba->sli4_hba.vfi_ids)) { 6346 rc = -ENOMEM; 6347 goto free_vfi_bmask; 6348 } 6349 6350 for (i = 0; i < count; i++) 6351 phba->sli4_hba.vfi_ids[i] = base + i; 6352 6353 /* 6354 * Mark all resources ready. An HBA reset doesn't need 6355 * to reset the initialization. 6356 */ 6357 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 6358 LPFC_IDX_RSRC_RDY); 6359 return 0; 6360 } 6361 6362 free_vfi_bmask: 6363 kfree(phba->sli4_hba.vfi_bmask); 6364 phba->sli4_hba.vfi_bmask = NULL; 6365 free_xri_ids: 6366 kfree(phba->sli4_hba.xri_ids); 6367 phba->sli4_hba.xri_ids = NULL; 6368 free_xri_bmask: 6369 kfree(phba->sli4_hba.xri_bmask); 6370 phba->sli4_hba.xri_bmask = NULL; 6371 free_vpi_ids: 6372 kfree(phba->vpi_ids); 6373 phba->vpi_ids = NULL; 6374 free_vpi_bmask: 6375 kfree(phba->vpi_bmask); 6376 phba->vpi_bmask = NULL; 6377 free_rpi_ids: 6378 kfree(phba->sli4_hba.rpi_ids); 6379 phba->sli4_hba.rpi_ids = NULL; 6380 free_rpi_bmask: 6381 kfree(phba->sli4_hba.rpi_bmask); 6382 phba->sli4_hba.rpi_bmask = NULL; 6383 err_exit: 6384 return rc; 6385 } 6386 6387 /** 6388 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents. 6389 * @phba: Pointer to HBA context object. 
6390 * 6391 * This function deallocates all previously allocated SLI4 resource 6392 * identifiers (VPI, RPI, XRI and VFI). 6393 **/ 6394 int 6395 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba) 6396 { 6397 if (phba->sli4_hba.extents_in_use) { 6398 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 6399 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 6400 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 6401 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 6402 } else { 6403 kfree(phba->vpi_bmask); 6404 phba->sli4_hba.max_cfg_param.vpi_used = 0; 6405 kfree(phba->vpi_ids); 6406 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6407 kfree(phba->sli4_hba.xri_bmask); 6408 kfree(phba->sli4_hba.xri_ids); 6409 kfree(phba->sli4_hba.vfi_bmask); 6410 kfree(phba->sli4_hba.vfi_ids); 6411 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6412 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6413 } 6414 6415 return 0; 6416 } 6417 6418 /** 6419 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents. 6420 * @phba: Pointer to HBA context object. 6421 * @type: The resource extent type. 6422 * @extnt_cnt: buffer to hold port extent count response 6423 * @extnt_size: buffer to hold port extent size response. 6424 * 6425 * This function calls the port to read the host allocated extents 6426 * for a particular type. 6427 **/ 6428 int 6429 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type, 6430 uint16_t *extnt_cnt, uint16_t *extnt_size) 6431 { 6432 bool emb; 6433 int rc = 0; 6434 uint16_t curr_blks = 0; 6435 uint32_t req_len, emb_len; 6436 uint32_t alloc_len, mbox_tmo; 6437 struct list_head *blk_list_head; 6438 struct lpfc_rsrc_blks *rsrc_blk; 6439 LPFC_MBOXQ_t *mbox; 6440 void *virtaddr = NULL; 6441 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 6442 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 6443 union lpfc_sli4_cfg_shdr *shdr; 6444 6445 switch (type) { 6446 case LPFC_RSC_TYPE_FCOE_VPI: 6447 blk_list_head = &phba->lpfc_vpi_blk_list; 6448 break; 6449 case LPFC_RSC_TYPE_FCOE_XRI: 6450 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list; 6451 break; 6452 case LPFC_RSC_TYPE_FCOE_VFI: 6453 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list; 6454 break; 6455 case LPFC_RSC_TYPE_FCOE_RPI: 6456 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list; 6457 break; 6458 default: 6459 return -EIO; 6460 } 6461 6462 /* Count the number of extents currently allocated for this type. */ 6463 list_for_each_entry(rsrc_blk, blk_list_head, list) { 6464 if (curr_blks == 0) { 6465 /* 6466 * The GET_ALLOCATED mailbox does not return the size, 6467 * just the count. The size should be just the size 6468 * stored in the current allocated block and all sizes 6469 * for an extent type are the same so set the return 6470 * value now. 6471 */ 6472 *extnt_size = rsrc_blk->rsrc_size; 6473 } 6474 curr_blks++; 6475 } 6476 6477 /* 6478 * Calculate the size of an embedded mailbox. The uint32_t 6479 * accounts for extents-specific word. 6480 */ 6481 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 6482 sizeof(uint32_t); 6483 6484 /* 6485 * Presume the allocation and response will fit into an embedded 6486 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6487 */ 6488 emb = LPFC_SLI4_MBX_EMBED; 6489 req_len = emb_len; 6490 if (req_len > emb_len) { 6491 req_len = curr_blks * sizeof(uint16_t) + 6492 sizeof(union lpfc_sli4_cfg_shdr) + 6493 sizeof(uint32_t); 6494 emb = LPFC_SLI4_MBX_NEMBED; 6495 } 6496 6497 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6498 if (!mbox) 6499 return -ENOMEM; 6500 memset(mbox, 0, sizeof(LPFC_MBOXQ_t)); 6501 6502 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6503 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT, 6504 req_len, emb); 6505 if (alloc_len < req_len) { 6506 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6507 "2983 Allocated DMA memory size (x%x) is " 6508 "less than the requested DMA memory " 6509 "size (x%x)\n", alloc_len, req_len); 6510 rc = -ENOMEM; 6511 goto err_exit; 6512 } 6513 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb); 6514 if (unlikely(rc)) { 6515 rc = -EIO; 6516 goto err_exit; 6517 } 6518 6519 if (!phba->sli4_hba.intr_enable) 6520 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 6521 else { 6522 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 6523 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 6524 } 6525 6526 if (unlikely(rc)) { 6527 rc = -EIO; 6528 goto err_exit; 6529 } 6530 6531 /* 6532 * Figure out where the response is located. Then get local pointers 6533 * to the response data. The port does not guarantee to respond to 6534 * all extents counts request so update the local variable with the 6535 * allocated count from the port. 6536 */ 6537 if (emb == LPFC_SLI4_MBX_EMBED) { 6538 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 6539 shdr = &rsrc_ext->header.cfg_shdr; 6540 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 6541 } else { 6542 virtaddr = mbox->sge_array->addr[0]; 6543 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 6544 shdr = &n_rsrc->cfg_shdr; 6545 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 6546 } 6547 6548 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) { 6549 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 6550 "2984 Failed to read allocated resources " 6551 "for type %d - Status 0x%x Add'l Status 0x%x.\n", 6552 type, 6553 bf_get(lpfc_mbox_hdr_status, &shdr->response), 6554 bf_get(lpfc_mbox_hdr_add_status, &shdr->response)); 6555 rc = -EIO; 6556 goto err_exit; 6557 } 6558 err_exit: 6559 lpfc_sli4_mbox_cmd_free(phba, mbox); 6560 return rc; 6561 } 6562 6563 /** 6564 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block 6565 * @phba: pointer to lpfc hba data structure. 6566 * @pring: Pointer to driver SLI ring object. 6567 * @sgl_list: linked link of sgl buffers to post 6568 * @cnt: number of linked list buffers 6569 * 6570 * This routine walks the list of buffers that have been allocated and 6571 * repost them to the port by using SGL block post. This is needed after a 6572 * pci_function_reset/warm_start or start. It attempts to construct blocks 6573 * of buffer sgls which contains contiguous xris and uses the non-embedded 6574 * SGL block post mailbox commands to post them to the port. For single 6575 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post 6576 * mailbox command for posting. 6577 * 6578 * Returns: 0 = success, non-zero failure. 
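 * (A successful post actually returns the number of sgl entries (XRIs)
 * posted to the port; -EIO is returned when no sgl could be posted.)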
6579 **/ 6580 static int 6581 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba, 6582 struct list_head *sgl_list, int cnt) 6583 { 6584 struct lpfc_sglq *sglq_entry = NULL; 6585 struct lpfc_sglq *sglq_entry_next = NULL; 6586 struct lpfc_sglq *sglq_entry_first = NULL; 6587 int status, total_cnt; 6588 int post_cnt = 0, num_posted = 0, block_cnt = 0; 6589 int last_xritag = NO_XRI; 6590 LIST_HEAD(prep_sgl_list); 6591 LIST_HEAD(blck_sgl_list); 6592 LIST_HEAD(allc_sgl_list); 6593 LIST_HEAD(post_sgl_list); 6594 LIST_HEAD(free_sgl_list); 6595 6596 spin_lock_irq(&phba->hbalock); 6597 spin_lock(&phba->sli4_hba.sgl_list_lock); 6598 list_splice_init(sgl_list, &allc_sgl_list); 6599 spin_unlock(&phba->sli4_hba.sgl_list_lock); 6600 spin_unlock_irq(&phba->hbalock); 6601 6602 total_cnt = cnt; 6603 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 6604 &allc_sgl_list, list) { 6605 list_del_init(&sglq_entry->list); 6606 block_cnt++; 6607 if ((last_xritag != NO_XRI) && 6608 (sglq_entry->sli4_xritag != last_xritag + 1)) { 6609 /* a hole in xri block, form a sgl posting block */ 6610 list_splice_init(&prep_sgl_list, &blck_sgl_list); 6611 post_cnt = block_cnt - 1; 6612 /* prepare list for next posting block */ 6613 list_add_tail(&sglq_entry->list, &prep_sgl_list); 6614 block_cnt = 1; 6615 } else { 6616 /* prepare list for next posting block */ 6617 list_add_tail(&sglq_entry->list, &prep_sgl_list); 6618 /* enough sgls for non-embed sgl mbox command */ 6619 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { 6620 list_splice_init(&prep_sgl_list, 6621 &blck_sgl_list); 6622 post_cnt = block_cnt; 6623 block_cnt = 0; 6624 } 6625 } 6626 num_posted++; 6627 6628 /* keep track of last sgl's xritag */ 6629 last_xritag = sglq_entry->sli4_xritag; 6630 6631 /* end of repost sgl list condition for buffers */ 6632 if (num_posted == total_cnt) { 6633 if (post_cnt == 0) { 6634 list_splice_init(&prep_sgl_list, 6635 &blck_sgl_list); 6636 post_cnt = block_cnt; 6637 } else if (block_cnt == 1) { 6638 status = lpfc_sli4_post_sgl(phba, 6639 sglq_entry->phys, 0, 6640 sglq_entry->sli4_xritag); 6641 if (!status) { 6642 /* successful, put sgl to posted list */ 6643 list_add_tail(&sglq_entry->list, 6644 &post_sgl_list); 6645 } else { 6646 /* Failure, put sgl to free list */ 6647 lpfc_printf_log(phba, KERN_WARNING, 6648 LOG_SLI, 6649 "3159 Failed to post " 6650 "sgl, xritag:x%x\n", 6651 sglq_entry->sli4_xritag); 6652 list_add_tail(&sglq_entry->list, 6653 &free_sgl_list); 6654 total_cnt--; 6655 } 6656 } 6657 } 6658 6659 /* continue until a nembed page worth of sgls */ 6660 if (post_cnt == 0) 6661 continue; 6662 6663 /* post the buffer list sgls as a block */ 6664 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list, 6665 post_cnt); 6666 6667 if (!status) { 6668 /* success, put sgl list to posted sgl list */ 6669 list_splice_init(&blck_sgl_list, &post_sgl_list); 6670 } else { 6671 /* Failure, put sgl list to free sgl list */ 6672 sglq_entry_first = list_first_entry(&blck_sgl_list, 6673 struct lpfc_sglq, 6674 list); 6675 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6676 "3160 Failed to post sgl-list, " 6677 "xritag:x%x-x%x\n", 6678 sglq_entry_first->sli4_xritag, 6679 (sglq_entry_first->sli4_xritag + 6680 post_cnt - 1)); 6681 list_splice_init(&blck_sgl_list, &free_sgl_list); 6682 total_cnt -= post_cnt; 6683 } 6684 6685 /* don't reset xirtag due to hole in xri block */ 6686 if (block_cnt == 0) 6687 last_xritag = NO_XRI; 6688 6689 /* reset sgl post count for next round of posting */ 6690 post_cnt = 0; 6691 } 6692 6693 /* free the sgls failed to post */ 
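*/ 6694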
6694 lpfc_free_sgl_list(phba, &free_sgl_list); 6695 6696 /* push sgls posted to the available list */ 6697 if (!list_empty(&post_sgl_list)) { 6698 spin_lock_irq(&phba->hbalock); 6699 spin_lock(&phba->sli4_hba.sgl_list_lock); 6700 list_splice_init(&post_sgl_list, sgl_list); 6701 spin_unlock(&phba->sli4_hba.sgl_list_lock); 6702 spin_unlock_irq(&phba->hbalock); 6703 } else { 6704 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6705 "3161 Failure to post sgl to port.\n"); 6706 return -EIO; 6707 } 6708 6709 /* return the number of XRIs actually posted */ 6710 return total_cnt; 6711 } 6712 6713 void 6714 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 6715 { 6716 uint32_t len; 6717 6718 len = sizeof(struct lpfc_mbx_set_host_data) - 6719 sizeof(struct lpfc_sli4_cfg_mhdr); 6720 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6721 LPFC_MBOX_OPCODE_SET_HOST_DATA, len, 6722 LPFC_SLI4_MBX_EMBED); 6723 6724 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION; 6725 mbox->u.mqe.un.set_host_data.param_len = 6726 LPFC_HOST_OS_DRIVER_VERSION_SIZE; 6727 snprintf(mbox->u.mqe.un.set_host_data.data, 6728 LPFC_HOST_OS_DRIVER_VERSION_SIZE, 6729 "Linux %s v"LPFC_DRIVER_VERSION, 6730 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC"); 6731 } 6732 6733 int 6734 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, 6735 struct lpfc_queue *drq, int count, int idx) 6736 { 6737 int rc, i; 6738 struct lpfc_rqe hrqe; 6739 struct lpfc_rqe drqe; 6740 struct lpfc_rqb *rqbp; 6741 unsigned long flags; 6742 struct rqb_dmabuf *rqb_buffer; 6743 LIST_HEAD(rqb_buf_list); 6744 6745 spin_lock_irqsave(&phba->hbalock, flags); 6746 rqbp = hrq->rqbp; 6747 for (i = 0; i < count; i++) { 6748 /* IF RQ is already full, don't bother */ 6749 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) 6750 break; 6751 rqb_buffer = rqbp->rqb_alloc_buffer(phba); 6752 if (!rqb_buffer) 6753 break; 6754 rqb_buffer->hrq = hrq; 6755 rqb_buffer->drq = drq; 6756 rqb_buffer->idx = idx; 6757 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list); 6758 } 6759 while (!list_empty(&rqb_buf_list)) { 6760 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf, 6761 hbuf.list); 6762 6763 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys); 6764 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys); 6765 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys); 6766 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys); 6767 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); 6768 if (rc < 0) { 6769 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6770 "6421 Cannot post to HRQ %d: %x %x %x " 6771 "DRQ %x %x\n", 6772 hrq->queue_id, 6773 hrq->host_index, 6774 hrq->hba_index, 6775 hrq->entry_count, 6776 drq->host_index, 6777 drq->hba_index); 6778 rqbp->rqb_free_buffer(phba, rqb_buffer); 6779 } else { 6780 list_add_tail(&rqb_buffer->hbuf.list, 6781 &rqbp->rqb_buffer_list); 6782 rqbp->buffer_count++; 6783 } 6784 } 6785 spin_unlock_irqrestore(&phba->hbalock, flags); 6786 return 1; 6787 } 6788 6789 /** 6790 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function 6791 * @phba: Pointer to HBA context object. 6792 * 6793 * This function is the main SLI4 device initialization PCI function. This 6794 * function is called by the HBA initialization code, HBA reset code and 6795 * HBA error attention handler code. Caller is not required to hold any 6796 * locks. 
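 *
 * Return codes
 * 0 - successful
 * nonzero - setup failed (for example -ENODEV, -ENOMEM or -EIO)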
6797 **/ 6798 int 6799 lpfc_sli4_hba_setup(struct lpfc_hba *phba) 6800 { 6801 int rc, i, cnt; 6802 LPFC_MBOXQ_t *mboxq; 6803 struct lpfc_mqe *mqe; 6804 uint8_t *vpd; 6805 uint32_t vpd_size; 6806 uint32_t ftr_rsp = 0; 6807 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); 6808 struct lpfc_vport *vport = phba->pport; 6809 struct lpfc_dmabuf *mp; 6810 struct lpfc_rqb *rqbp; 6811 6812 /* Perform a PCI function reset to start from clean */ 6813 rc = lpfc_pci_function_reset(phba); 6814 if (unlikely(rc)) 6815 return -ENODEV; 6816 6817 /* Check the HBA Host Status Register for readyness */ 6818 rc = lpfc_sli4_post_status_check(phba); 6819 if (unlikely(rc)) 6820 return -ENODEV; 6821 else { 6822 spin_lock_irq(&phba->hbalock); 6823 phba->sli.sli_flag |= LPFC_SLI_ACTIVE; 6824 spin_unlock_irq(&phba->hbalock); 6825 } 6826 6827 /* 6828 * Allocate a single mailbox container for initializing the 6829 * port. 6830 */ 6831 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6832 if (!mboxq) 6833 return -ENOMEM; 6834 6835 /* Issue READ_REV to collect vpd and FW information. */ 6836 vpd_size = SLI4_PAGE_SIZE; 6837 vpd = kzalloc(vpd_size, GFP_KERNEL); 6838 if (!vpd) { 6839 rc = -ENOMEM; 6840 goto out_free_mbox; 6841 } 6842 6843 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); 6844 if (unlikely(rc)) { 6845 kfree(vpd); 6846 goto out_free_mbox; 6847 } 6848 6849 mqe = &mboxq->u.mqe; 6850 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 6851 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) { 6852 phba->hba_flag |= HBA_FCOE_MODE; 6853 phba->fcp_embed_io = 0; /* SLI4 FC support only */ 6854 } else { 6855 phba->hba_flag &= ~HBA_FCOE_MODE; 6856 } 6857 6858 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == 6859 LPFC_DCBX_CEE_MODE) 6860 phba->hba_flag |= HBA_FIP_SUPPORT; 6861 else 6862 phba->hba_flag &= ~HBA_FIP_SUPPORT; 6863 6864 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH; 6865 6866 if (phba->sli_rev != LPFC_SLI_REV4) { 6867 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6868 "0376 READ_REV Error. SLI Level %d " 6869 "FCoE enabled %d\n", 6870 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE); 6871 rc = -EIO; 6872 kfree(vpd); 6873 goto out_free_mbox; 6874 } 6875 6876 /* 6877 * Continue initialization with default values even if driver failed 6878 * to read FCoE param config regions, only read parameters if the 6879 * board is FCoE 6880 */ 6881 if (phba->hba_flag & HBA_FCOE_MODE && 6882 lpfc_sli4_read_fcoe_params(phba)) 6883 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT, 6884 "2570 Failed to read FCoE parameters\n"); 6885 6886 /* 6887 * Retrieve sli4 device physical port name, failure of doing it 6888 * is considered as non-fatal. 6889 */ 6890 rc = lpfc_sli4_retrieve_pport_name(phba); 6891 if (!rc) 6892 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6893 "3080 Successful retrieving SLI4 device " 6894 "physical port name: %s.\n", phba->Port); 6895 6896 /* 6897 * Evaluate the read rev and vpd data. Populate the driver 6898 * state with the results. If this routine fails, the failure 6899 * is not fatal as the driver will use generic values. 6900 */ 6901 rc = lpfc_parse_vpd(phba, vpd, vpd_size); 6902 if (unlikely(!rc)) { 6903 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6904 "0377 Error %d parsing vpd. 
" 6905 "Using defaults.\n", rc); 6906 rc = 0; 6907 } 6908 kfree(vpd); 6909 6910 /* Save information as VPD data */ 6911 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; 6912 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; 6913 6914 /* 6915 * This is because first G7 ASIC doesn't support the standard 6916 * 0x5a NVME cmd descriptor type/subtype 6917 */ 6918 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 6919 LPFC_SLI_INTF_IF_TYPE_6) && 6920 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) && 6921 (phba->vpd.rev.smRev == 0) && 6922 (phba->cfg_nvme_embed_cmd == 1)) 6923 phba->cfg_nvme_embed_cmd = 0; 6924 6925 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; 6926 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, 6927 &mqe->un.read_rev); 6928 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, 6929 &mqe->un.read_rev); 6930 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, 6931 &mqe->un.read_rev); 6932 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, 6933 &mqe->un.read_rev); 6934 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; 6935 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); 6936 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; 6937 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); 6938 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; 6939 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); 6940 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6941 "(%d):0380 READ_REV Status x%x " 6942 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", 6943 mboxq->vport ? mboxq->vport->vpi : 0, 6944 bf_get(lpfc_mqe_status, mqe), 6945 phba->vpd.rev.opFwName, 6946 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, 6947 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); 6948 6949 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ 6950 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3); 6951 if (phba->pport->cfg_lun_queue_depth > rc) { 6952 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6953 "3362 LUN queue depth changed from %d to %d\n", 6954 phba->pport->cfg_lun_queue_depth, rc); 6955 phba->pport->cfg_lun_queue_depth = rc; 6956 } 6957 6958 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 6959 LPFC_SLI_INTF_IF_TYPE_0) { 6960 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY); 6961 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6962 if (rc == MBX_SUCCESS) { 6963 phba->hba_flag |= HBA_RECOVERABLE_UE; 6964 /* Set 1Sec interval to detect UE */ 6965 phba->eratt_poll_interval = 1; 6966 phba->sli4_hba.ue_to_sr = bf_get( 6967 lpfc_mbx_set_feature_UESR, 6968 &mboxq->u.mqe.un.set_feature); 6969 phba->sli4_hba.ue_to_rp = bf_get( 6970 lpfc_mbx_set_feature_UERP, 6971 &mboxq->u.mqe.un.set_feature); 6972 } 6973 } 6974 6975 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) { 6976 /* Enable MDS Diagnostics only if the SLI Port supports it */ 6977 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS); 6978 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6979 if (rc != MBX_SUCCESS) 6980 phba->mds_diags_support = 0; 6981 } 6982 6983 /* 6984 * Discover the port's supported feature set and match it against the 6985 * hosts requests. 6986 */ 6987 lpfc_request_features(phba, mboxq); 6988 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6989 if (unlikely(rc)) { 6990 rc = -EIO; 6991 goto out_free_mbox; 6992 } 6993 6994 /* 6995 * The port must support FCP initiator mode as this is the 6996 * only mode running in the host. 
6997 */ 6998 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { 6999 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7000 "0378 No support for fcpi mode.\n"); 7001 ftr_rsp++; 7002 } 7003 7004 /* Performance Hints are ONLY for FCoE */ 7005 if (phba->hba_flag & HBA_FCOE_MODE) { 7006 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) 7007 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; 7008 else 7009 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED; 7010 } 7011 7012 /* 7013 * If the port cannot support the host's requested features 7014 * then turn off the global config parameters to disable the 7015 * feature in the driver. This is not a fatal error. 7016 */ 7017 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 7018 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) { 7019 phba->cfg_enable_bg = 0; 7020 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 7021 ftr_rsp++; 7022 } 7023 } 7024 7025 if (phba->max_vpi && phba->cfg_enable_npiv && 7026 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 7027 ftr_rsp++; 7028 7029 if (ftr_rsp) { 7030 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7031 "0379 Feature Mismatch Data: x%08x %08x " 7032 "x%x x%x x%x\n", mqe->un.req_ftrs.word2, 7033 mqe->un.req_ftrs.word3, phba->cfg_enable_bg, 7034 phba->cfg_enable_npiv, phba->max_vpi); 7035 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 7036 phba->cfg_enable_bg = 0; 7037 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 7038 phba->cfg_enable_npiv = 0; 7039 } 7040 7041 /* These SLI3 features are assumed in SLI4 */ 7042 spin_lock_irq(&phba->hbalock); 7043 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 7044 spin_unlock_irq(&phba->hbalock); 7045 7046 /* 7047 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent 7048 * calls depends on these resources to complete port setup. 7049 */ 7050 rc = lpfc_sli4_alloc_resource_identifiers(phba); 7051 if (rc) { 7052 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7053 "2920 Failed to alloc Resource IDs " 7054 "rc = x%x\n", rc); 7055 goto out_free_mbox; 7056 } 7057 7058 lpfc_set_host_data(phba, mboxq); 7059 7060 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7061 if (rc) { 7062 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7063 "2134 Failed to set host os driver version %x", 7064 rc); 7065 } 7066 7067 /* Read the port's service parameters. */ 7068 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 7069 if (rc) { 7070 phba->link_state = LPFC_HBA_ERROR; 7071 rc = -ENOMEM; 7072 goto out_free_mbox; 7073 } 7074 7075 mboxq->vport = vport; 7076 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7077 mp = (struct lpfc_dmabuf *) mboxq->context1; 7078 if (rc == MBX_SUCCESS) { 7079 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); 7080 rc = 0; 7081 } 7082 7083 /* 7084 * This memory was allocated by the lpfc_read_sparam routine. Release 7085 * it to the mbuf pool. 7086 */ 7087 lpfc_mbuf_free(phba, mp->virt, mp->phys); 7088 kfree(mp); 7089 mboxq->context1 = NULL; 7090 if (unlikely(rc)) { 7091 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7092 "0382 READ_SPARAM command failed " 7093 "status %d, mbxStatus x%x\n", 7094 rc, bf_get(lpfc_mqe_status, mqe)); 7095 phba->link_state = LPFC_HBA_ERROR; 7096 rc = -EIO; 7097 goto out_free_mbox; 7098 } 7099 7100 lpfc_update_vport_wwn(vport); 7101 7102 /* Update the fc_host data structures with new wwn. 
*/ 7103 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 7104 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 7105 7106 /* Create all the SLI4 queues */ 7107 rc = lpfc_sli4_queue_create(phba); 7108 if (rc) { 7109 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7110 "3089 Failed to allocate queues\n"); 7111 rc = -ENODEV; 7112 goto out_free_mbox; 7113 } 7114 /* Set up all the queues to the device */ 7115 rc = lpfc_sli4_queue_setup(phba); 7116 if (unlikely(rc)) { 7117 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7118 "0381 Error %d during queue setup.\n ", rc); 7119 goto out_stop_timers; 7120 } 7121 /* Initialize the driver internal SLI layer lists. */ 7122 lpfc_sli4_setup(phba); 7123 lpfc_sli4_queue_init(phba); 7124 7125 /* update host els xri-sgl sizes and mappings */ 7126 rc = lpfc_sli4_els_sgl_update(phba); 7127 if (unlikely(rc)) { 7128 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7129 "1400 Failed to update xri-sgl size and " 7130 "mapping: %d\n", rc); 7131 goto out_destroy_queue; 7132 } 7133 7134 /* register the els sgl pool to the port */ 7135 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list, 7136 phba->sli4_hba.els_xri_cnt); 7137 if (unlikely(rc < 0)) { 7138 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7139 "0582 Error %d during els sgl post " 7140 "operation\n", rc); 7141 rc = -ENODEV; 7142 goto out_destroy_queue; 7143 } 7144 phba->sli4_hba.els_xri_cnt = rc; 7145 7146 if (phba->nvmet_support) { 7147 /* update host nvmet xri-sgl sizes and mappings */ 7148 rc = lpfc_sli4_nvmet_sgl_update(phba); 7149 if (unlikely(rc)) { 7150 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7151 "6308 Failed to update nvmet-sgl size " 7152 "and mapping: %d\n", rc); 7153 goto out_destroy_queue; 7154 } 7155 7156 /* register the nvmet sgl pool to the port */ 7157 rc = lpfc_sli4_repost_sgl_list( 7158 phba, 7159 &phba->sli4_hba.lpfc_nvmet_sgl_list, 7160 phba->sli4_hba.nvmet_xri_cnt); 7161 if (unlikely(rc < 0)) { 7162 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7163 "3117 Error %d during nvmet " 7164 "sgl post\n", rc); 7165 rc = -ENODEV; 7166 goto out_destroy_queue; 7167 } 7168 phba->sli4_hba.nvmet_xri_cnt = rc; 7169 7170 cnt = phba->cfg_iocb_cnt * 1024; 7171 /* We need 1 iocbq for every SGL, for IO processing */ 7172 cnt += phba->sli4_hba.nvmet_xri_cnt; 7173 } else { 7174 /* update host scsi xri-sgl sizes and mappings */ 7175 rc = lpfc_sli4_scsi_sgl_update(phba); 7176 if (unlikely(rc)) { 7177 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7178 "6309 Failed to update scsi-sgl size " 7179 "and mapping: %d\n", rc); 7180 goto out_destroy_queue; 7181 } 7182 7183 /* update host nvme xri-sgl sizes and mappings */ 7184 rc = lpfc_sli4_nvme_sgl_update(phba); 7185 if (unlikely(rc)) { 7186 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7187 "6082 Failed to update nvme-sgl size " 7188 "and mapping: %d\n", rc); 7189 goto out_destroy_queue; 7190 } 7191 7192 cnt = phba->cfg_iocb_cnt * 1024; 7193 } 7194 7195 if (!phba->sli.iocbq_lookup) { 7196 /* Initialize and populate the iocb list per host */ 7197 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7198 "2821 initialize iocb list %d total %d\n", 7199 phba->cfg_iocb_cnt, cnt); 7200 rc = lpfc_init_iocb_list(phba, cnt); 7201 if (rc) { 7202 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7203 "1413 Failed to init iocb list.\n"); 7204 goto out_destroy_queue; 7205 } 7206 } 7207 7208 if (phba->nvmet_support) 7209 lpfc_nvmet_create_targetport(phba); 7210 7211 if (phba->nvmet_support && phba->cfg_nvmet_mrq) 
{ 7212 /* Post initial buffers to all RQs created */ 7213 for (i = 0; i < phba->cfg_nvmet_mrq; i++) { 7214 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp; 7215 INIT_LIST_HEAD(&rqbp->rqb_buffer_list); 7216 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc; 7217 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free; 7218 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT; 7219 rqbp->buffer_count = 0; 7220 7221 lpfc_post_rq_buffer( 7222 phba, phba->sli4_hba.nvmet_mrq_hdr[i], 7223 phba->sli4_hba.nvmet_mrq_data[i], 7224 phba->cfg_nvmet_mrq_post, i); 7225 } 7226 } 7227 7228 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 7229 /* register the allocated scsi sgl pool to the port */ 7230 rc = lpfc_sli4_repost_scsi_sgl_list(phba); 7231 if (unlikely(rc)) { 7232 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7233 "0383 Error %d during scsi sgl post " 7234 "operation\n", rc); 7235 /* Some Scsi buffers were moved to abort scsi list */ 7236 /* A pci function reset will repost them */ 7237 rc = -ENODEV; 7238 goto out_destroy_queue; 7239 } 7240 } 7241 7242 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 7243 (phba->nvmet_support == 0)) { 7244 7245 /* register the allocated nvme sgl pool to the port */ 7246 rc = lpfc_repost_nvme_sgl_list(phba); 7247 if (unlikely(rc)) { 7248 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7249 "6116 Error %d during nvme sgl post " 7250 "operation\n", rc); 7251 /* Some NVME buffers were moved to abort nvme list */ 7252 /* A pci function reset will repost them */ 7253 rc = -ENODEV; 7254 goto out_destroy_queue; 7255 } 7256 } 7257 7258 /* Post the rpi header region to the device. */ 7259 rc = lpfc_sli4_post_all_rpi_hdrs(phba); 7260 if (unlikely(rc)) { 7261 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7262 "0393 Error %d during rpi post operation\n", 7263 rc); 7264 rc = -ENODEV; 7265 goto out_destroy_queue; 7266 } 7267 lpfc_sli4_node_prep(phba); 7268 7269 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 7270 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) { 7271 /* 7272 * The FC Port needs to register FCFI (index 0) 7273 */ 7274 lpfc_reg_fcfi(phba, mboxq); 7275 mboxq->vport = phba->pport; 7276 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7277 if (rc != MBX_SUCCESS) 7278 goto out_unset_queue; 7279 rc = 0; 7280 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, 7281 &mboxq->u.mqe.un.reg_fcfi); 7282 } else { 7283 /* We are a NVME Target mode with MRQ > 1 */ 7284 7285 /* First register the FCFI */ 7286 lpfc_reg_fcfi_mrq(phba, mboxq, 0); 7287 mboxq->vport = phba->pport; 7288 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7289 if (rc != MBX_SUCCESS) 7290 goto out_unset_queue; 7291 rc = 0; 7292 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi, 7293 &mboxq->u.mqe.un.reg_fcfi_mrq); 7294 7295 /* Next register the MRQs */ 7296 lpfc_reg_fcfi_mrq(phba, mboxq, 1); 7297 mboxq->vport = phba->pport; 7298 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7299 if (rc != MBX_SUCCESS) 7300 goto out_unset_queue; 7301 rc = 0; 7302 } 7303 /* Check if the port is configured to be disabled */ 7304 lpfc_sli_read_link_ste(phba); 7305 } 7306 7307 /* Arm the CQs and then EQs on device */ 7308 lpfc_sli4_arm_cqeq_intr(phba); 7309 7310 /* Indicate device interrupt mode */ 7311 phba->sli4_hba.intr_enable = 1; 7312 7313 /* Allow asynchronous mailbox command to go through */ 7314 spin_lock_irq(&phba->hbalock); 7315 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 7316 spin_unlock_irq(&phba->hbalock); 7317 7318 /* Post receive buffers to the device */ 7319 lpfc_sli4_rb_setup(phba); 7320 7321 /* Reset HBA FCF 
states after HBA reset */ 7322 phba->fcf.fcf_flag = 0; 7323 phba->fcf.current_rec.flag = 0; 7324 7325 /* Start the ELS watchdog timer */ 7326 mod_timer(&vport->els_tmofunc, 7327 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2))); 7328 7329 /* Start heart beat timer */ 7330 mod_timer(&phba->hb_tmofunc, 7331 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 7332 phba->hb_outstanding = 0; 7333 phba->last_completion_time = jiffies; 7334 7335 /* Start error attention (ERATT) polling timer */ 7336 mod_timer(&phba->eratt_poll, 7337 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 7338 7339 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 7340 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 7341 rc = pci_enable_pcie_error_reporting(phba->pcidev); 7342 if (!rc) { 7343 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7344 "2829 This device supports " 7345 "Advanced Error Reporting (AER)\n"); 7346 spin_lock_irq(&phba->hbalock); 7347 phba->hba_flag |= HBA_AER_ENABLED; 7348 spin_unlock_irq(&phba->hbalock); 7349 } else { 7350 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7351 "2830 This device does not support " 7352 "Advanced Error Reporting (AER)\n"); 7353 phba->cfg_aer_support = 0; 7354 } 7355 rc = 0; 7356 } 7357 7358 /* 7359 * The port is ready, set the host's link state to LINK_DOWN 7360 * in preparation for link interrupts. 7361 */ 7362 spin_lock_irq(&phba->hbalock); 7363 phba->link_state = LPFC_LINK_DOWN; 7364 spin_unlock_irq(&phba->hbalock); 7365 if (!(phba->hba_flag & HBA_FCOE_MODE) && 7366 (phba->hba_flag & LINK_DISABLED)) { 7367 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 7368 "3103 Adapter Link is disabled.\n"); 7369 lpfc_down_link(phba, mboxq); 7370 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7371 if (rc != MBX_SUCCESS) { 7372 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 7373 "3104 Adapter failed to issue " 7374 "DOWN_LINK mbox cmd, rc:x%x\n", rc); 7375 goto out_unset_queue; 7376 } 7377 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 7378 /* don't perform init_link on SLI4 FC port loopback test */ 7379 if (!(phba->link_flag & LS_LOOPBACK_MODE)) { 7380 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 7381 if (rc) 7382 goto out_unset_queue; 7383 } 7384 } 7385 mempool_free(mboxq, phba->mbox_mem_pool); 7386 return rc; 7387 out_unset_queue: 7388 /* Unset all the queues set up in this routine when error out */ 7389 lpfc_sli4_queue_unset(phba); 7390 out_destroy_queue: 7391 lpfc_free_iocb_list(phba); 7392 lpfc_sli4_queue_destroy(phba); 7393 out_stop_timers: 7394 lpfc_stop_hba_timers(phba); 7395 out_free_mbox: 7396 mempool_free(mboxq, phba->mbox_mem_pool); 7397 return rc; 7398 } 7399 7400 /** 7401 * lpfc_mbox_timeout - Timeout call back function for mbox timer 7402 * @ptr: context object - pointer to hba structure. 7403 * 7404 * This is the callback function for mailbox timer. The mailbox 7405 * timer is armed when a new mailbox command is issued and the timer 7406 * is deleted when the mailbox complete. The function is called by 7407 * the kernel timer code when a mailbox does not complete within 7408 * expected time. This function wakes up the worker thread to 7409 * process the mailbox timeout and returns. All the processing is 7410 * done by the worker thread function lpfc_mbox_timeout_handler. 
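 *
 * The hba is recovered from the timer with from_timer() on sli.mbox_tmo.
 * If a WORKER_MBOX_TMO event is already posted on the physical port's
 * work_port_events mask, the callback returns without waking the worker
 * thread again; only the first expiry schedules the handler.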
7411 **/ 7412 void 7413 lpfc_mbox_timeout(struct timer_list *t) 7414 { 7415 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo); 7416 unsigned long iflag; 7417 uint32_t tmo_posted; 7418 7419 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 7420 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; 7421 if (!tmo_posted) 7422 phba->pport->work_port_events |= WORKER_MBOX_TMO; 7423 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 7424 7425 if (!tmo_posted) 7426 lpfc_worker_wake_up(phba); 7427 return; 7428 } 7429 7430 /** 7431 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions 7432 * are pending 7433 * @phba: Pointer to HBA context object. 7434 * 7435 * This function checks if any mailbox completions are present on the mailbox 7436 * completion queue. 7437 **/ 7438 static bool 7439 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba) 7440 { 7441 7442 uint32_t idx; 7443 struct lpfc_queue *mcq; 7444 struct lpfc_mcqe *mcqe; 7445 bool pending_completions = false; 7446 uint8_t qe_valid; 7447 7448 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 7449 return false; 7450 7451 /* Check for completions on mailbox completion queue */ 7452 7453 mcq = phba->sli4_hba.mbx_cq; 7454 idx = mcq->hba_index; 7455 qe_valid = mcq->qe_valid; 7456 while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe) == qe_valid) { 7457 mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe; 7458 if (bf_get_le32(lpfc_trailer_completed, mcqe) && 7459 (!bf_get_le32(lpfc_trailer_async, mcqe))) { 7460 pending_completions = true; 7461 break; 7462 } 7463 idx = (idx + 1) % mcq->entry_count; 7464 if (mcq->hba_index == idx) 7465 break; 7466 7467 /* if the index wrapped around, toggle the valid bit */ 7468 if (phba->sli4_hba.pc_sli4_params.cqav && !idx) 7469 qe_valid = (qe_valid) ? 0 : 1; 7470 } 7471 return pending_completions; 7472 7473 } 7474 7475 /** 7476 * lpfc_sli4_process_missed_mbox_completions - process mbox completions 7477 * that were missed. 7478 * @phba: Pointer to HBA context object. 7479 * 7480 * For sli4, it is possible to miss an interrupt. As such mbox completions 7481 * maybe missed causing erroneous mailbox timeouts to occur. This function 7482 * checks to see if mbox completions are on the mailbox completion queue 7483 * and will process all the completions associated with the eq for the 7484 * mailbox completion queue. 
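 *
 * Returns true if a pending mailbox completion was found (in which case
 * every event on the EQ associated with the mailbox CQ is processed),
 * false otherwise; the EQ is re-armed in either case. The mailbox
 * timeout handler below relies on this as its first step:
 *
 *   if (lpfc_sli4_process_missed_mbox_completions(phba))
 *           return;   the "timeout" was only a missed interrupt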
7485 **/ 7486 bool 7487 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) 7488 { 7489 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; 7490 uint32_t eqidx; 7491 struct lpfc_queue *fpeq = NULL; 7492 struct lpfc_eqe *eqe; 7493 bool mbox_pending; 7494 7495 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 7496 return false; 7497 7498 /* Find the eq associated with the mcq */ 7499 7500 if (sli4_hba->hba_eq) 7501 for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) 7502 if (sli4_hba->hba_eq[eqidx]->queue_id == 7503 sli4_hba->mbx_cq->assoc_qid) { 7504 fpeq = sli4_hba->hba_eq[eqidx]; 7505 break; 7506 } 7507 if (!fpeq) 7508 return false; 7509 7510 /* Turn off interrupts from this EQ */ 7511 7512 sli4_hba->sli4_eq_clr_intr(fpeq); 7513 7514 /* Check to see if a mbox completion is pending */ 7515 7516 mbox_pending = lpfc_sli4_mbox_completions_pending(phba); 7517 7518 /* 7519 * If a mbox completion is pending, process all the events on EQ 7520 * associated with the mbox completion queue (this could include 7521 * mailbox commands, async events, els commands, receive queue data 7522 * and fcp commands) 7523 */ 7524 7525 if (mbox_pending) 7526 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 7527 lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx); 7528 fpeq->EQ_processed++; 7529 } 7530 7531 /* Always clear and re-arm the EQ */ 7532 7533 sli4_hba->sli4_eq_release(fpeq, LPFC_QUEUE_REARM); 7534 7535 return mbox_pending; 7536 7537 } 7538 7539 /** 7540 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout 7541 * @phba: Pointer to HBA context object. 7542 * 7543 * This function is called from worker thread when a mailbox command times out. 7544 * The caller is not required to hold any locks. This function will reset the 7545 * HBA and recover all the pending commands. 7546 **/ 7547 void 7548 lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 7549 { 7550 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 7551 MAILBOX_t *mb = NULL; 7552 7553 struct lpfc_sli *psli = &phba->sli; 7554 7555 /* If the mailbox completed, process the completion and return */ 7556 if (lpfc_sli4_process_missed_mbox_completions(phba)) 7557 return; 7558 7559 if (pmbox != NULL) 7560 mb = &pmbox->u.mb; 7561 /* Check the pmbox pointer first. There is a race condition 7562 * between the mbox timeout handler getting executed in the 7563 * worklist and the mailbox actually completing. When this 7564 * race condition occurs, the mbox_active will be NULL. 7565 */ 7566 spin_lock_irq(&phba->hbalock); 7567 if (pmbox == NULL) { 7568 lpfc_printf_log(phba, KERN_WARNING, 7569 LOG_MBOX | LOG_SLI, 7570 "0353 Active Mailbox cleared - mailbox timeout " 7571 "exiting\n"); 7572 spin_unlock_irq(&phba->hbalock); 7573 return; 7574 } 7575 7576 /* Mbox cmd <mbxCommand> timeout */ 7577 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7578 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 7579 mb->mbxCommand, 7580 phba->pport->port_state, 7581 phba->sli.sli_flag, 7582 phba->sli.mbox_active); 7583 spin_unlock_irq(&phba->hbalock); 7584 7585 /* Setting state unknown so lpfc_sli_abort_iocb_ring 7586 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 7587 * it to fail all outstanding SCSI IO. 
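 * Once the FCP rings have been flushed, message 0345 is logged and
 * lpfc_reset_hba() is called to reset the board and recover the
 * mailbox interface.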
7588 */ 7589 spin_lock_irq(&phba->pport->work_port_lock); 7590 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 7591 spin_unlock_irq(&phba->pport->work_port_lock); 7592 spin_lock_irq(&phba->hbalock); 7593 phba->link_state = LPFC_LINK_UNKNOWN; 7594 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 7595 spin_unlock_irq(&phba->hbalock); 7596 7597 lpfc_sli_abort_fcp_rings(phba); 7598 7599 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7600 "0345 Resetting board due to mailbox timeout\n"); 7601 7602 /* Reset the HBA device */ 7603 lpfc_reset_hba(phba); 7604 } 7605 7606 /** 7607 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware 7608 * @phba: Pointer to HBA context object. 7609 * @pmbox: Pointer to mailbox object. 7610 * @flag: Flag indicating how the mailbox need to be processed. 7611 * 7612 * This function is called by discovery code and HBA management code 7613 * to submit a mailbox command to firmware with SLI-3 interface spec. This 7614 * function gets the hbalock to protect the data structures. 7615 * The mailbox command can be submitted in polling mode, in which case 7616 * this function will wait in a polling loop for the completion of the 7617 * mailbox. 7618 * If the mailbox is submitted in no_wait mode (not polling) the 7619 * function will submit the command and returns immediately without waiting 7620 * for the mailbox completion. The no_wait is supported only when HBA 7621 * is in SLI2/SLI3 mode - interrupts are enabled. 7622 * The SLI interface allows only one mailbox pending at a time. If the 7623 * mailbox is issued in polling mode and there is already a mailbox 7624 * pending, then the function will return an error. If the mailbox is issued 7625 * in NO_WAIT mode and there is a mailbox pending already, the function 7626 * will return MBX_BUSY after queuing the mailbox into mailbox queue. 7627 * The sli layer owns the mailbox object until the completion of mailbox 7628 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other 7629 * return codes the caller owns the mailbox command after the return of 7630 * the function. 7631 **/ 7632 static int 7633 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, 7634 uint32_t flag) 7635 { 7636 MAILBOX_t *mbx; 7637 struct lpfc_sli *psli = &phba->sli; 7638 uint32_t status, evtctr; 7639 uint32_t ha_copy, hc_copy; 7640 int i; 7641 unsigned long timeout; 7642 unsigned long drvr_flag = 0; 7643 uint32_t word0, ldata; 7644 void __iomem *to_slim; 7645 int processing_queue = 0; 7646 7647 spin_lock_irqsave(&phba->hbalock, drvr_flag); 7648 if (!pmbox) { 7649 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7650 /* processing mbox queue from intr_handler */ 7651 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 7652 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7653 return MBX_SUCCESS; 7654 } 7655 processing_queue = 1; 7656 pmbox = lpfc_mbox_get(phba); 7657 if (!pmbox) { 7658 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7659 return MBX_SUCCESS; 7660 } 7661 } 7662 7663 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && 7664 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { 7665 if(!pmbox->vport) { 7666 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7667 lpfc_printf_log(phba, KERN_ERR, 7668 LOG_MBOX | LOG_VPORT, 7669 "1806 Mbox x%x failed. No vport\n", 7670 pmbox->u.mb.mbxCommand); 7671 dump_stack(); 7672 goto out_not_finished; 7673 } 7674 } 7675 7676 /* If the PCI channel is in offline state, do not post mbox. 
*/ 7677 if (unlikely(pci_channel_offline(phba->pcidev))) { 7678 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7679 goto out_not_finished; 7680 } 7681 7682 /* If HBA has a deferred error attention, fail the iocb. */ 7683 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 7684 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7685 goto out_not_finished; 7686 } 7687 7688 psli = &phba->sli; 7689 7690 mbx = &pmbox->u.mb; 7691 status = MBX_SUCCESS; 7692 7693 if (phba->link_state == LPFC_HBA_ERROR) { 7694 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7695 7696 /* Mbox command <mbxCommand> cannot issue */ 7697 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7698 "(%d):0311 Mailbox command x%x cannot " 7699 "issue Data: x%x x%x\n", 7700 pmbox->vport ? pmbox->vport->vpi : 0, 7701 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 7702 goto out_not_finished; 7703 } 7704 7705 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { 7706 if (lpfc_readl(phba->HCregaddr, &hc_copy) || 7707 !(hc_copy & HC_MBINT_ENA)) { 7708 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7709 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7710 "(%d):2528 Mailbox command x%x cannot " 7711 "issue Data: x%x x%x\n", 7712 pmbox->vport ? pmbox->vport->vpi : 0, 7713 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 7714 goto out_not_finished; 7715 } 7716 } 7717 7718 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 7719 /* Polling for a mbox command when another one is already active 7720 * is not allowed in SLI. Also, the driver must have established 7721 * SLI2 mode to queue and process multiple mbox commands. 7722 */ 7723 7724 if (flag & MBX_POLL) { 7725 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7726 7727 /* Mbox command <mbxCommand> cannot issue */ 7728 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7729 "(%d):2529 Mailbox command x%x " 7730 "cannot issue Data: x%x x%x\n", 7731 pmbox->vport ? pmbox->vport->vpi : 0, 7732 pmbox->u.mb.mbxCommand, 7733 psli->sli_flag, flag); 7734 goto out_not_finished; 7735 } 7736 7737 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { 7738 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7739 /* Mbox command <mbxCommand> cannot issue */ 7740 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7741 "(%d):2530 Mailbox command x%x " 7742 "cannot issue Data: x%x x%x\n", 7743 pmbox->vport ? pmbox->vport->vpi : 0, 7744 pmbox->u.mb.mbxCommand, 7745 psli->sli_flag, flag); 7746 goto out_not_finished; 7747 } 7748 7749 /* Another mailbox command is still being processed, queue this 7750 * command to be processed later. 7751 */ 7752 lpfc_mbox_put(phba, pmbox); 7753 7754 /* Mbox cmd issue - BUSY */ 7755 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7756 "(%d):0308 Mbox cmd issue - BUSY Data: " 7757 "x%x x%x x%x x%x\n", 7758 pmbox->vport ? pmbox->vport->vpi : 0xffffff, 7759 mbx->mbxCommand, 7760 phba->pport ? 
phba->pport->port_state : 0xff, 7761 psli->sli_flag, flag); 7762 7763 psli->slistat.mbox_busy++; 7764 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7765 7766 if (pmbox->vport) { 7767 lpfc_debugfs_disc_trc(pmbox->vport, 7768 LPFC_DISC_TRC_MBOX_VPORT, 7769 "MBOX Bsy vport: cmd:x%x mb:x%x x%x", 7770 (uint32_t)mbx->mbxCommand, 7771 mbx->un.varWords[0], mbx->un.varWords[1]); 7772 } 7773 else { 7774 lpfc_debugfs_disc_trc(phba->pport, 7775 LPFC_DISC_TRC_MBOX, 7776 "MBOX Bsy: cmd:x%x mb:x%x x%x", 7777 (uint32_t)mbx->mbxCommand, 7778 mbx->un.varWords[0], mbx->un.varWords[1]); 7779 } 7780 7781 return MBX_BUSY; 7782 } 7783 7784 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 7785 7786 /* If we are not polling, we MUST be in SLI2 mode */ 7787 if (flag != MBX_POLL) { 7788 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && 7789 (mbx->mbxCommand != MBX_KILL_BOARD)) { 7790 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7791 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7792 /* Mbox command <mbxCommand> cannot issue */ 7793 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7794 "(%d):2531 Mailbox command x%x " 7795 "cannot issue Data: x%x x%x\n", 7796 pmbox->vport ? pmbox->vport->vpi : 0, 7797 pmbox->u.mb.mbxCommand, 7798 psli->sli_flag, flag); 7799 goto out_not_finished; 7800 } 7801 /* timeout active mbox command */ 7802 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 7803 1000); 7804 mod_timer(&psli->mbox_tmo, jiffies + timeout); 7805 } 7806 7807 /* Mailbox cmd <cmd> issue */ 7808 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7809 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " 7810 "x%x\n", 7811 pmbox->vport ? pmbox->vport->vpi : 0, 7812 mbx->mbxCommand, 7813 phba->pport ? phba->pport->port_state : 0xff, 7814 psli->sli_flag, flag); 7815 7816 if (mbx->mbxCommand != MBX_HEARTBEAT) { 7817 if (pmbox->vport) { 7818 lpfc_debugfs_disc_trc(pmbox->vport, 7819 LPFC_DISC_TRC_MBOX_VPORT, 7820 "MBOX Send vport: cmd:x%x mb:x%x x%x", 7821 (uint32_t)mbx->mbxCommand, 7822 mbx->un.varWords[0], mbx->un.varWords[1]); 7823 } 7824 else { 7825 lpfc_debugfs_disc_trc(phba->pport, 7826 LPFC_DISC_TRC_MBOX, 7827 "MBOX Send: cmd:x%x mb:x%x x%x", 7828 (uint32_t)mbx->mbxCommand, 7829 mbx->un.varWords[0], mbx->un.varWords[1]); 7830 } 7831 } 7832 7833 psli->slistat.mbox_cmd++; 7834 evtctr = psli->slistat.mbox_event; 7835 7836 /* next set own bit for the adapter and copy over command word */ 7837 mbx->mbxOwner = OWN_CHIP; 7838 7839 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 7840 /* Populate mbox extension offset word. */ 7841 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { 7842 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 7843 = (uint8_t *)phba->mbox_ext 7844 - (uint8_t *)phba->mbox; 7845 } 7846 7847 /* Copy the mailbox extension data */ 7848 if (pmbox->in_ext_byte_len && pmbox->context2) { 7849 lpfc_sli_pcimem_bcopy(pmbox->context2, 7850 (uint8_t *)phba->mbox_ext, 7851 pmbox->in_ext_byte_len); 7852 } 7853 /* Copy command data to host SLIM area */ 7854 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); 7855 } else { 7856 /* Populate mbox extension offset word. 
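 * In this path the adapter is not yet in SLI2/SLI3 (host mailbox) mode,
 * so the extension offset points at MAILBOX_HBA_EXT_OFFSET in HBA SLIM
 * and any extension data is copied there with lpfc_memcpy_to_slim()
 * rather than into the host-resident phba->mbox_ext area used above.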
*/ 7857 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) 7858 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 7859 = MAILBOX_HBA_EXT_OFFSET; 7860 7861 /* Copy the mailbox extension data */ 7862 if (pmbox->in_ext_byte_len && pmbox->context2) 7863 lpfc_memcpy_to_slim(phba->MBslimaddr + 7864 MAILBOX_HBA_EXT_OFFSET, 7865 pmbox->context2, pmbox->in_ext_byte_len); 7866 7867 if (mbx->mbxCommand == MBX_CONFIG_PORT) 7868 /* copy command data into host mbox for cmpl */ 7869 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, 7870 MAILBOX_CMD_SIZE); 7871 7872 /* First copy mbox command data to HBA SLIM, skip past first 7873 word */ 7874 to_slim = phba->MBslimaddr + sizeof (uint32_t); 7875 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0], 7876 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 7877 7878 /* Next copy over first word, with mbxOwner set */ 7879 ldata = *((uint32_t *)mbx); 7880 to_slim = phba->MBslimaddr; 7881 writel(ldata, to_slim); 7882 readl(to_slim); /* flush */ 7883 7884 if (mbx->mbxCommand == MBX_CONFIG_PORT) 7885 /* switch over to host mailbox */ 7886 psli->sli_flag |= LPFC_SLI_ACTIVE; 7887 } 7888 7889 wmb(); 7890 7891 switch (flag) { 7892 case MBX_NOWAIT: 7893 /* Set up reference to mailbox command */ 7894 psli->mbox_active = pmbox; 7895 /* Interrupt board to do it */ 7896 writel(CA_MBATT, phba->CAregaddr); 7897 readl(phba->CAregaddr); /* flush */ 7898 /* Don't wait for it to finish, just return */ 7899 break; 7900 7901 case MBX_POLL: 7902 /* Set up null reference to mailbox command */ 7903 psli->mbox_active = NULL; 7904 /* Interrupt board to do it */ 7905 writel(CA_MBATT, phba->CAregaddr); 7906 readl(phba->CAregaddr); /* flush */ 7907 7908 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 7909 /* First read mbox status word */ 7910 word0 = *((uint32_t *)phba->mbox); 7911 word0 = le32_to_cpu(word0); 7912 } else { 7913 /* First read mbox status word */ 7914 if (lpfc_readl(phba->MBslimaddr, &word0)) { 7915 spin_unlock_irqrestore(&phba->hbalock, 7916 drvr_flag); 7917 goto out_not_finished; 7918 } 7919 } 7920 7921 /* Read the HBA Host Attention Register */ 7922 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 7923 spin_unlock_irqrestore(&phba->hbalock, 7924 drvr_flag); 7925 goto out_not_finished; 7926 } 7927 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 7928 1000) + jiffies; 7929 i = 0; 7930 /* Wait for command to complete */ 7931 while (((word0 & OWN_CHIP) == OWN_CHIP) || 7932 (!(ha_copy & HA_MBATT) && 7933 (phba->link_state > LPFC_WARM_START))) { 7934 if (time_after(jiffies, timeout)) { 7935 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7936 spin_unlock_irqrestore(&phba->hbalock, 7937 drvr_flag); 7938 goto out_not_finished; 7939 } 7940 7941 /* Check if we took a mbox interrupt while we were 7942 polling */ 7943 if (((word0 & OWN_CHIP) != OWN_CHIP) 7944 && (evtctr != psli->slistat.mbox_event)) 7945 break; 7946 7947 if (i++ > 10) { 7948 spin_unlock_irqrestore(&phba->hbalock, 7949 drvr_flag); 7950 msleep(1); 7951 spin_lock_irqsave(&phba->hbalock, drvr_flag); 7952 } 7953 7954 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 7955 /* First copy command data */ 7956 word0 = *((uint32_t *)phba->mbox); 7957 word0 = le32_to_cpu(word0); 7958 if (mbx->mbxCommand == MBX_CONFIG_PORT) { 7959 MAILBOX_t *slimmb; 7960 uint32_t slimword0; 7961 /* Check real SLIM for any errors */ 7962 slimword0 = readl(phba->MBslimaddr); 7963 slimmb = (MAILBOX_t *) & slimword0; 7964 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 7965 && slimmb->mbxStatus) { 7966 psli->sli_flag &= 7967 ~LPFC_SLI_ACTIVE; 7968 word0 = slimword0; 7969 } 7970 } 7971 } else { 
7972 /* First copy command data */ 7973 word0 = readl(phba->MBslimaddr); 7974 } 7975 /* Read the HBA Host Attention Register */ 7976 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 7977 spin_unlock_irqrestore(&phba->hbalock, 7978 drvr_flag); 7979 goto out_not_finished; 7980 } 7981 } 7982 7983 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 7984 /* copy results back to user */ 7985 lpfc_sli_pcimem_bcopy(phba->mbox, mbx, 7986 MAILBOX_CMD_SIZE); 7987 /* Copy the mailbox extension data */ 7988 if (pmbox->out_ext_byte_len && pmbox->context2) { 7989 lpfc_sli_pcimem_bcopy(phba->mbox_ext, 7990 pmbox->context2, 7991 pmbox->out_ext_byte_len); 7992 } 7993 } else { 7994 /* First copy command data */ 7995 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr, 7996 MAILBOX_CMD_SIZE); 7997 /* Copy the mailbox extension data */ 7998 if (pmbox->out_ext_byte_len && pmbox->context2) { 7999 lpfc_memcpy_from_slim(pmbox->context2, 8000 phba->MBslimaddr + 8001 MAILBOX_HBA_EXT_OFFSET, 8002 pmbox->out_ext_byte_len); 8003 } 8004 } 8005 8006 writel(HA_MBATT, phba->HAregaddr); 8007 readl(phba->HAregaddr); /* flush */ 8008 8009 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8010 status = mbx->mbxStatus; 8011 } 8012 8013 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8014 return status; 8015 8016 out_not_finished: 8017 if (processing_queue) { 8018 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; 8019 lpfc_mbox_cmpl_put(phba, pmbox); 8020 } 8021 return MBX_NOT_FINISHED; 8022 } 8023 8024 /** 8025 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command 8026 * @phba: Pointer to HBA context object. 8027 * 8028 * The function blocks the posting of SLI4 asynchronous mailbox commands from 8029 * the driver internal pending mailbox queue. It will then try to wait out the 8030 * possible outstanding mailbox command before return. 8031 * 8032 * Returns: 8033 * 0 - the outstanding mailbox command completed; otherwise, the wait for 8034 * the outstanding mailbox command timed out. 8035 **/ 8036 static int 8037 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) 8038 { 8039 struct lpfc_sli *psli = &phba->sli; 8040 int rc = 0; 8041 unsigned long timeout = 0; 8042 8043 /* Mark the asynchronous mailbox command posting as blocked */ 8044 spin_lock_irq(&phba->hbalock); 8045 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 8046 /* Determine how long we might wait for the active mailbox 8047 * command to be gracefully completed by firmware. 8048 */ 8049 if (phba->sli.mbox_active) 8050 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 8051 phba->sli.mbox_active) * 8052 1000) + jiffies; 8053 spin_unlock_irq(&phba->hbalock); 8054 8055 /* Make sure the mailbox is really active */ 8056 if (timeout) 8057 lpfc_sli4_process_missed_mbox_completions(phba); 8058 8059 /* Wait for the outstnading mailbox command to complete */ 8060 while (phba->sli.mbox_active) { 8061 /* Check active mailbox complete status every 2ms */ 8062 msleep(2); 8063 if (time_after(jiffies, timeout)) { 8064 /* Timeout, marked the outstanding cmd not complete */ 8065 rc = 1; 8066 break; 8067 } 8068 } 8069 8070 /* Can not cleanly block async mailbox command, fails it */ 8071 if (rc) { 8072 spin_lock_irq(&phba->hbalock); 8073 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 8074 spin_unlock_irq(&phba->hbalock); 8075 } 8076 return rc; 8077 } 8078 8079 /** 8080 * lpfc_sli4_async_mbox_unblock - Block posting SLI4 async mailbox command 8081 * @phba: Pointer to HBA context object. 
8082 * 8083 * The function unblocks and resume posting of SLI4 asynchronous mailbox 8084 * commands from the driver internal pending mailbox queue. It makes sure 8085 * that there is no outstanding mailbox command before resuming posting 8086 * asynchronous mailbox commands. If, for any reason, there is outstanding 8087 * mailbox command, it will try to wait it out before resuming asynchronous 8088 * mailbox command posting. 8089 **/ 8090 static void 8091 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba) 8092 { 8093 struct lpfc_sli *psli = &phba->sli; 8094 8095 spin_lock_irq(&phba->hbalock); 8096 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 8097 /* Asynchronous mailbox posting is not blocked, do nothing */ 8098 spin_unlock_irq(&phba->hbalock); 8099 return; 8100 } 8101 8102 /* Outstanding synchronous mailbox command is guaranteed to be done, 8103 * successful or timeout, after timing-out the outstanding mailbox 8104 * command shall always be removed, so just unblock posting async 8105 * mailbox command and resume 8106 */ 8107 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 8108 spin_unlock_irq(&phba->hbalock); 8109 8110 /* wake up worker thread to post asynchronlous mailbox command */ 8111 lpfc_worker_wake_up(phba); 8112 } 8113 8114 /** 8115 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready 8116 * @phba: Pointer to HBA context object. 8117 * @mboxq: Pointer to mailbox object. 8118 * 8119 * The function waits for the bootstrap mailbox register ready bit from 8120 * port for twice the regular mailbox command timeout value. 8121 * 8122 * 0 - no timeout on waiting for bootstrap mailbox register ready. 8123 * MBXERR_ERROR - wait for bootstrap mailbox register timed out. 8124 **/ 8125 static int 8126 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 8127 { 8128 uint32_t db_ready; 8129 unsigned long timeout; 8130 struct lpfc_register bmbx_reg; 8131 8132 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) 8133 * 1000) + jiffies; 8134 8135 do { 8136 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 8137 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); 8138 if (!db_ready) 8139 msleep(2); 8140 8141 if (time_after(jiffies, timeout)) 8142 return MBXERR_ERROR; 8143 } while (!db_ready); 8144 8145 return 0; 8146 } 8147 8148 /** 8149 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox 8150 * @phba: Pointer to HBA context object. 8151 * @mboxq: Pointer to mailbox object. 8152 * 8153 * The function posts a mailbox to the port. The mailbox is expected 8154 * to be comletely filled in and ready for the port to operate on it. 8155 * This routine executes a synchronous completion operation on the 8156 * mailbox by polling for its completion. 8157 * 8158 * The caller must not be holding any locks when calling this routine. 8159 * 8160 * Returns: 8161 * MBX_SUCCESS - mailbox posted successfully 8162 * Any of the MBX error values. 8163 **/ 8164 static int 8165 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 8166 { 8167 int rc = MBX_SUCCESS; 8168 unsigned long iflag; 8169 uint32_t mcqe_status; 8170 uint32_t mbx_cmnd; 8171 struct lpfc_sli *psli = &phba->sli; 8172 struct lpfc_mqe *mb = &mboxq->u.mqe; 8173 struct lpfc_bmbx_create *mbox_rgn; 8174 struct dma_address *dma_address; 8175 8176 /* 8177 * Only one mailbox can be active to the bootstrap mailbox region 8178 * at a time and there is no queueing provided. 
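 * The posting sequence below is: take the LPFC_SLI_MBOX_ACTIVE token,
 * wait for the bootstrap register ready bit, copy the caller's MQE into
 * the bmbx region, write the high and then the low halves of the bmbx
 * DMA address (waiting for the ready bit after each write), and finally
 * copy the MQE and MCQE back so the caller sees the completion status.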
8179 */ 8180 spin_lock_irqsave(&phba->hbalock, iflag); 8181 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 8182 spin_unlock_irqrestore(&phba->hbalock, iflag); 8183 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8184 "(%d):2532 Mailbox command x%x (x%x/x%x) " 8185 "cannot issue Data: x%x x%x\n", 8186 mboxq->vport ? mboxq->vport->vpi : 0, 8187 mboxq->u.mb.mbxCommand, 8188 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8189 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8190 psli->sli_flag, MBX_POLL); 8191 return MBXERR_ERROR; 8192 } 8193 /* The server grabs the token and owns it until release */ 8194 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 8195 phba->sli.mbox_active = mboxq; 8196 spin_unlock_irqrestore(&phba->hbalock, iflag); 8197 8198 /* wait for bootstrap mbox register for readyness */ 8199 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 8200 if (rc) 8201 goto exit; 8202 8203 /* 8204 * Initialize the bootstrap memory region to avoid stale data areas 8205 * in the mailbox post. Then copy the caller's mailbox contents to 8206 * the bmbx mailbox region. 8207 */ 8208 mbx_cmnd = bf_get(lpfc_mqe_command, mb); 8209 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create)); 8210 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt, 8211 sizeof(struct lpfc_mqe)); 8212 8213 /* Post the high mailbox dma address to the port and wait for ready. */ 8214 dma_address = &phba->sli4_hba.bmbx.dma_address; 8215 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); 8216 8217 /* wait for bootstrap mbox register for hi-address write done */ 8218 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 8219 if (rc) 8220 goto exit; 8221 8222 /* Post the low mailbox dma address to the port. */ 8223 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); 8224 8225 /* wait for bootstrap mbox register for low address write done */ 8226 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 8227 if (rc) 8228 goto exit; 8229 8230 /* 8231 * Read the CQ to ensure the mailbox has completed. 8232 * If so, update the mailbox status so that the upper layers 8233 * can complete the request normally. 8234 */ 8235 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, 8236 sizeof(struct lpfc_mqe)); 8237 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; 8238 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, 8239 sizeof(struct lpfc_mcqe)); 8240 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); 8241 /* 8242 * When the CQE status indicates a failure and the mailbox status 8243 * indicates success then copy the CQE status into the mailbox status 8244 * (and prefix it with x4000). 8245 */ 8246 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 8247 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS) 8248 bf_set(lpfc_mqe_status, mb, 8249 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 8250 rc = MBXERR_ERROR; 8251 } else 8252 lpfc_sli4_swap_str(phba, mboxq); 8253 8254 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8255 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x " 8256 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" 8257 " x%x x%x CQ: x%x x%x x%x x%x\n", 8258 mboxq->vport ? 
mboxq->vport->vpi : 0, mbx_cmnd, 8259 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8260 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8261 bf_get(lpfc_mqe_status, mb), 8262 mb->un.mb_words[0], mb->un.mb_words[1], 8263 mb->un.mb_words[2], mb->un.mb_words[3], 8264 mb->un.mb_words[4], mb->un.mb_words[5], 8265 mb->un.mb_words[6], mb->un.mb_words[7], 8266 mb->un.mb_words[8], mb->un.mb_words[9], 8267 mb->un.mb_words[10], mb->un.mb_words[11], 8268 mb->un.mb_words[12], mboxq->mcqe.word0, 8269 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 8270 mboxq->mcqe.trailer); 8271 exit: 8272 /* We are holding the token, no needed for lock when release */ 8273 spin_lock_irqsave(&phba->hbalock, iflag); 8274 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8275 phba->sli.mbox_active = NULL; 8276 spin_unlock_irqrestore(&phba->hbalock, iflag); 8277 return rc; 8278 } 8279 8280 /** 8281 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware 8282 * @phba: Pointer to HBA context object. 8283 * @pmbox: Pointer to mailbox object. 8284 * @flag: Flag indicating how the mailbox need to be processed. 8285 * 8286 * This function is called by discovery code and HBA management code to submit 8287 * a mailbox command to firmware with SLI-4 interface spec. 8288 * 8289 * Return codes the caller owns the mailbox command after the return of the 8290 * function. 8291 **/ 8292 static int 8293 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 8294 uint32_t flag) 8295 { 8296 struct lpfc_sli *psli = &phba->sli; 8297 unsigned long iflags; 8298 int rc; 8299 8300 /* dump from issue mailbox command if setup */ 8301 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb); 8302 8303 rc = lpfc_mbox_dev_check(phba); 8304 if (unlikely(rc)) { 8305 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8306 "(%d):2544 Mailbox command x%x (x%x/x%x) " 8307 "cannot issue Data: x%x x%x\n", 8308 mboxq->vport ? mboxq->vport->vpi : 0, 8309 mboxq->u.mb.mbxCommand, 8310 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8311 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8312 psli->sli_flag, flag); 8313 goto out_not_finished; 8314 } 8315 8316 /* Detect polling mode and jump to a handler */ 8317 if (!phba->sli4_hba.intr_enable) { 8318 if (flag == MBX_POLL) 8319 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 8320 else 8321 rc = -EIO; 8322 if (rc != MBX_SUCCESS) 8323 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 8324 "(%d):2541 Mailbox command x%x " 8325 "(x%x/x%x) failure: " 8326 "mqe_sta: x%x mcqe_sta: x%x/x%x " 8327 "Data: x%x x%x\n,", 8328 mboxq->vport ? mboxq->vport->vpi : 0, 8329 mboxq->u.mb.mbxCommand, 8330 lpfc_sli_config_mbox_subsys_get(phba, 8331 mboxq), 8332 lpfc_sli_config_mbox_opcode_get(phba, 8333 mboxq), 8334 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 8335 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 8336 bf_get(lpfc_mcqe_ext_status, 8337 &mboxq->mcqe), 8338 psli->sli_flag, flag); 8339 return rc; 8340 } else if (flag == MBX_POLL) { 8341 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 8342 "(%d):2542 Try to issue mailbox command " 8343 "x%x (x%x/x%x) synchronously ahead of async " 8344 "mailbox command queue: x%x x%x\n", 8345 mboxq->vport ? 
mboxq->vport->vpi : 0, 8346 mboxq->u.mb.mbxCommand, 8347 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8348 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8349 psli->sli_flag, flag); 8350 /* Try to block the asynchronous mailbox posting */ 8351 rc = lpfc_sli4_async_mbox_block(phba); 8352 if (!rc) { 8353 /* Successfully blocked, now issue sync mbox cmd */ 8354 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 8355 if (rc != MBX_SUCCESS) 8356 lpfc_printf_log(phba, KERN_WARNING, 8357 LOG_MBOX | LOG_SLI, 8358 "(%d):2597 Sync Mailbox command " 8359 "x%x (x%x/x%x) failure: " 8360 "mqe_sta: x%x mcqe_sta: x%x/x%x " 8361 "Data: x%x x%x\n,", 8362 mboxq->vport ? mboxq->vport->vpi : 0, 8363 mboxq->u.mb.mbxCommand, 8364 lpfc_sli_config_mbox_subsys_get(phba, 8365 mboxq), 8366 lpfc_sli_config_mbox_opcode_get(phba, 8367 mboxq), 8368 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 8369 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 8370 bf_get(lpfc_mcqe_ext_status, 8371 &mboxq->mcqe), 8372 psli->sli_flag, flag); 8373 /* Unblock the async mailbox posting afterward */ 8374 lpfc_sli4_async_mbox_unblock(phba); 8375 } 8376 return rc; 8377 } 8378 8379 /* Now, interrupt mode asynchrous mailbox command */ 8380 rc = lpfc_mbox_cmd_check(phba, mboxq); 8381 if (rc) { 8382 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8383 "(%d):2543 Mailbox command x%x (x%x/x%x) " 8384 "cannot issue Data: x%x x%x\n", 8385 mboxq->vport ? mboxq->vport->vpi : 0, 8386 mboxq->u.mb.mbxCommand, 8387 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8388 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8389 psli->sli_flag, flag); 8390 goto out_not_finished; 8391 } 8392 8393 /* Put the mailbox command to the driver internal FIFO */ 8394 psli->slistat.mbox_busy++; 8395 spin_lock_irqsave(&phba->hbalock, iflags); 8396 lpfc_mbox_put(phba, mboxq); 8397 spin_unlock_irqrestore(&phba->hbalock, iflags); 8398 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8399 "(%d):0354 Mbox cmd issue - Enqueue Data: " 8400 "x%x (x%x/x%x) x%x x%x x%x\n", 8401 mboxq->vport ? mboxq->vport->vpi : 0xffffff, 8402 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 8403 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8404 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8405 phba->pport->port_state, 8406 psli->sli_flag, MBX_NOWAIT); 8407 /* Wake up worker thread to transport mailbox command from head */ 8408 lpfc_worker_wake_up(phba); 8409 8410 return MBX_BUSY; 8411 8412 out_not_finished: 8413 return MBX_NOT_FINISHED; 8414 } 8415 8416 /** 8417 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device 8418 * @phba: Pointer to HBA context object. 8419 * 8420 * This function is called by worker thread to send a mailbox command to 8421 * SLI4 HBA firmware. 
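 * It dequeues the next command from the driver's internal mailbox FIFO,
 * takes the LPFC_SLI_MBOX_ACTIVE token, starts the mailbox timeout timer
 * and posts the MQE to the mailbox work queue with lpfc_sli4_mq_put().
 *
 * Returns MBX_SUCCESS if a command was posted (or the queue was empty)
 * and MBX_NOT_FINISHED otherwise.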
8422 * 8423 **/ 8424 int 8425 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) 8426 { 8427 struct lpfc_sli *psli = &phba->sli; 8428 LPFC_MBOXQ_t *mboxq; 8429 int rc = MBX_SUCCESS; 8430 unsigned long iflags; 8431 struct lpfc_mqe *mqe; 8432 uint32_t mbx_cmnd; 8433 8434 /* Check interrupt mode before post async mailbox command */ 8435 if (unlikely(!phba->sli4_hba.intr_enable)) 8436 return MBX_NOT_FINISHED; 8437 8438 /* Check for mailbox command service token */ 8439 spin_lock_irqsave(&phba->hbalock, iflags); 8440 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 8441 spin_unlock_irqrestore(&phba->hbalock, iflags); 8442 return MBX_NOT_FINISHED; 8443 } 8444 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 8445 spin_unlock_irqrestore(&phba->hbalock, iflags); 8446 return MBX_NOT_FINISHED; 8447 } 8448 if (unlikely(phba->sli.mbox_active)) { 8449 spin_unlock_irqrestore(&phba->hbalock, iflags); 8450 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8451 "0384 There is pending active mailbox cmd\n"); 8452 return MBX_NOT_FINISHED; 8453 } 8454 /* Take the mailbox command service token */ 8455 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 8456 8457 /* Get the next mailbox command from head of queue */ 8458 mboxq = lpfc_mbox_get(phba); 8459 8460 /* If no more mailbox command waiting for post, we're done */ 8461 if (!mboxq) { 8462 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8463 spin_unlock_irqrestore(&phba->hbalock, iflags); 8464 return MBX_SUCCESS; 8465 } 8466 phba->sli.mbox_active = mboxq; 8467 spin_unlock_irqrestore(&phba->hbalock, iflags); 8468 8469 /* Check device readiness for posting mailbox command */ 8470 rc = lpfc_mbox_dev_check(phba); 8471 if (unlikely(rc)) 8472 /* Driver clean routine will clean up pending mailbox */ 8473 goto out_not_finished; 8474 8475 /* Prepare the mbox command to be posted */ 8476 mqe = &mboxq->u.mqe; 8477 mbx_cmnd = bf_get(lpfc_mqe_command, mqe); 8478 8479 /* Start timer for the mbox_tmo and log some mailbox post messages */ 8480 mod_timer(&psli->mbox_tmo, (jiffies + 8481 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq)))); 8482 8483 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8484 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " 8485 "x%x x%x\n", 8486 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 8487 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8488 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8489 phba->pport->port_state, psli->sli_flag); 8490 8491 if (mbx_cmnd != MBX_HEARTBEAT) { 8492 if (mboxq->vport) { 8493 lpfc_debugfs_disc_trc(mboxq->vport, 8494 LPFC_DISC_TRC_MBOX_VPORT, 8495 "MBOX Send vport: cmd:x%x mb:x%x x%x", 8496 mbx_cmnd, mqe->un.mb_words[0], 8497 mqe->un.mb_words[1]); 8498 } else { 8499 lpfc_debugfs_disc_trc(phba->pport, 8500 LPFC_DISC_TRC_MBOX, 8501 "MBOX Send: cmd:x%x mb:x%x x%x", 8502 mbx_cmnd, mqe->un.mb_words[0], 8503 mqe->un.mb_words[1]); 8504 } 8505 } 8506 psli->slistat.mbox_cmd++; 8507 8508 /* Post the mailbox command to the port */ 8509 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); 8510 if (rc != MBX_SUCCESS) { 8511 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8512 "(%d):2533 Mailbox command x%x (x%x/x%x) " 8513 "cannot issue Data: x%x x%x\n", 8514 mboxq->vport ? 
mboxq->vport->vpi : 0, 8515 mboxq->u.mb.mbxCommand, 8516 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8517 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8518 psli->sli_flag, MBX_NOWAIT); 8519 goto out_not_finished; 8520 } 8521 8522 return rc; 8523 8524 out_not_finished: 8525 spin_lock_irqsave(&phba->hbalock, iflags); 8526 if (phba->sli.mbox_active) { 8527 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 8528 __lpfc_mbox_cmpl_put(phba, mboxq); 8529 /* Release the token */ 8530 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8531 phba->sli.mbox_active = NULL; 8532 } 8533 spin_unlock_irqrestore(&phba->hbalock, iflags); 8534 8535 return MBX_NOT_FINISHED; 8536 } 8537 8538 /** 8539 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command 8540 * @phba: Pointer to HBA context object. 8541 * @pmbox: Pointer to mailbox object. 8542 * @flag: Flag indicating how the mailbox need to be processed. 8543 * 8544 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from 8545 * the API jump table function pointer from the lpfc_hba struct. 8546 * 8547 * Return codes the caller owns the mailbox command after the return of the 8548 * function. 8549 **/ 8550 int 8551 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 8552 { 8553 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); 8554 } 8555 8556 /** 8557 * lpfc_mbox_api_table_setup - Set up mbox api function jump table 8558 * @phba: The hba struct for which this call is being executed. 8559 * @dev_grp: The HBA PCI-Device group number. 8560 * 8561 * This routine sets up the mbox interface API function jump table in @phba 8562 * struct. 8563 * Returns: 0 - success, -ENODEV - failure. 8564 **/ 8565 int 8566 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 8567 { 8568 8569 switch (dev_grp) { 8570 case LPFC_PCI_DEV_LP: 8571 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; 8572 phba->lpfc_sli_handle_slow_ring_event = 8573 lpfc_sli_handle_slow_ring_event_s3; 8574 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; 8575 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; 8576 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; 8577 break; 8578 case LPFC_PCI_DEV_OC: 8579 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; 8580 phba->lpfc_sli_handle_slow_ring_event = 8581 lpfc_sli_handle_slow_ring_event_s4; 8582 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; 8583 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; 8584 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; 8585 break; 8586 default: 8587 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8588 "1420 Invalid HBA PCI-device group: 0x%x\n", 8589 dev_grp); 8590 return -ENODEV; 8591 break; 8592 } 8593 return 0; 8594 } 8595 8596 /** 8597 * __lpfc_sli_ringtx_put - Add an iocb to the txq 8598 * @phba: Pointer to HBA context object. 8599 * @pring: Pointer to driver SLI ring object. 8600 * @piocb: Pointer to address of newly added command iocb. 8601 * 8602 * This function is called with hbalock held to add a command 8603 * iocb to the txq when SLI layer cannot submit the command iocb 8604 * to the ring. 8605 **/ 8606 void 8607 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8608 struct lpfc_iocbq *piocb) 8609 { 8610 lockdep_assert_held(&phba->hbalock); 8611 /* Insert the caller's iocb in the txq tail for later processing. */ 8612 list_add_tail(&piocb->list, &pring->txq); 8613 } 8614 8615 /** 8616 * lpfc_sli_next_iocb - Get the next iocb in the txq 8617 * @phba: Pointer to HBA context object. 
8618 * @pring: Pointer to driver SLI ring object. 8619 * @piocb: Pointer to address of newly added command iocb. 8620 * 8621 * This function is called with hbalock held before a new 8622 * iocb is submitted to the firmware. This function checks 8623 * txq to flush the iocbs in txq to Firmware before 8624 * submitting new iocbs to the Firmware. 8625 * If there are iocbs in the txq which need to be submitted 8626 * to firmware, lpfc_sli_next_iocb returns the first element 8627 * of the txq after dequeuing it from txq. 8628 * If there is no iocb in the txq then the function will return 8629 * *piocb and *piocb is set to NULL. Caller needs to check 8630 * *piocb to find if there are more commands in the txq. 8631 **/ 8632 static struct lpfc_iocbq * 8633 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8634 struct lpfc_iocbq **piocb) 8635 { 8636 struct lpfc_iocbq * nextiocb; 8637 8638 lockdep_assert_held(&phba->hbalock); 8639 8640 nextiocb = lpfc_sli_ringtx_get(phba, pring); 8641 if (!nextiocb) { 8642 nextiocb = *piocb; 8643 *piocb = NULL; 8644 } 8645 8646 return nextiocb; 8647 } 8648 8649 /** 8650 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb 8651 * @phba: Pointer to HBA context object. 8652 * @ring_number: SLI ring number to issue iocb on. 8653 * @piocb: Pointer to command iocb. 8654 * @flag: Flag indicating if this command can be put into txq. 8655 * 8656 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue 8657 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is 8658 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT 8659 * flag is turned on, the function returns IOCB_ERROR. When the link is down, 8660 * this function allows only iocbs for posting buffers. This function finds 8661 * next available slot in the command ring and posts the command to the 8662 * available slot and writes the port attention register to request HBA start 8663 * processing new iocb. If there is no slot available in the ring and 8664 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise 8665 * the function returns IOCB_BUSY. 8666 * 8667 * This function is called with hbalock held. The function will return success 8668 * after it successfully submit the iocb to firmware or after adding to the 8669 * txq. 8670 **/ 8671 static int 8672 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, 8673 struct lpfc_iocbq *piocb, uint32_t flag) 8674 { 8675 struct lpfc_iocbq *nextiocb; 8676 IOCB_t *iocb; 8677 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number]; 8678 8679 lockdep_assert_held(&phba->hbalock); 8680 8681 if (piocb->iocb_cmpl && (!piocb->vport) && 8682 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 8683 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 8684 lpfc_printf_log(phba, KERN_ERR, 8685 LOG_SLI | LOG_VPORT, 8686 "1807 IOCB x%x failed. No vport\n", 8687 piocb->iocb.ulpCommand); 8688 dump_stack(); 8689 return IOCB_ERROR; 8690 } 8691 8692 8693 /* If the PCI channel is in offline state, do not post iocbs. */ 8694 if (unlikely(pci_channel_offline(phba->pcidev))) 8695 return IOCB_ERROR; 8696 8697 /* If HBA has a deferred error attention, fail the iocb. 
*/ 8698 if (unlikely(phba->hba_flag & DEFER_ERATT)) 8699 return IOCB_ERROR; 8700 8701 /* 8702 * We should never get an IOCB if we are in a < LINK_DOWN state 8703 */ 8704 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 8705 return IOCB_ERROR; 8706 8707 /* 8708 * Check to see if we are blocking IOCB processing because of a 8709 * outstanding event. 8710 */ 8711 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) 8712 goto iocb_busy; 8713 8714 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { 8715 /* 8716 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF 8717 * can be issued if the link is not up. 8718 */ 8719 switch (piocb->iocb.ulpCommand) { 8720 case CMD_GEN_REQUEST64_CR: 8721 case CMD_GEN_REQUEST64_CX: 8722 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || 8723 (piocb->iocb.un.genreq64.w5.hcsw.Rctl != 8724 FC_RCTL_DD_UNSOL_CMD) || 8725 (piocb->iocb.un.genreq64.w5.hcsw.Type != 8726 MENLO_TRANSPORT_TYPE)) 8727 8728 goto iocb_busy; 8729 break; 8730 case CMD_QUE_RING_BUF_CN: 8731 case CMD_QUE_RING_BUF64_CN: 8732 /* 8733 * For IOCBs, like QUE_RING_BUF, that have no rsp ring 8734 * completion, iocb_cmpl MUST be 0. 8735 */ 8736 if (piocb->iocb_cmpl) 8737 piocb->iocb_cmpl = NULL; 8738 /*FALLTHROUGH*/ 8739 case CMD_CREATE_XRI_CR: 8740 case CMD_CLOSE_XRI_CN: 8741 case CMD_CLOSE_XRI_CX: 8742 break; 8743 default: 8744 goto iocb_busy; 8745 } 8746 8747 /* 8748 * For FCP commands, we must be in a state where we can process link 8749 * attention events. 8750 */ 8751 } else if (unlikely(pring->ringno == LPFC_FCP_RING && 8752 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { 8753 goto iocb_busy; 8754 } 8755 8756 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 8757 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 8758 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 8759 8760 if (iocb) 8761 lpfc_sli_update_ring(phba, pring); 8762 else 8763 lpfc_sli_update_full_ring(phba, pring); 8764 8765 if (!piocb) 8766 return IOCB_SUCCESS; 8767 8768 goto out_busy; 8769 8770 iocb_busy: 8771 pring->stats.iocb_cmd_delay++; 8772 8773 out_busy: 8774 8775 if (!(flag & SLI_IOCB_RET_IOCB)) { 8776 __lpfc_sli_ringtx_put(phba, pring, piocb); 8777 return IOCB_SUCCESS; 8778 } 8779 8780 return IOCB_BUSY; 8781 } 8782 8783 /** 8784 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl. 8785 * @phba: Pointer to HBA context object. 8786 * @piocb: Pointer to command iocb. 8787 * @sglq: Pointer to the scatter gather queue object. 8788 * 8789 * This routine converts the bpl or bde that is in the IOCB 8790 * to a sgl list for the sli4 hardware. The physical address 8791 * of the bpl/bde is converted back to a virtual address. 8792 * If the IOCB contains a BPL then the list of BDE's is 8793 * converted to sli4_sge's. If the IOCB contains a single 8794 * BDE then it is converted to a single sli_sge. 8795 * The IOCB is still in cpu endianess so the contents of 8796 * the bpl can be used without byte swapping. 8797 * 8798 * Returns valid XRI = Success, NO_XRI = Failure. 
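 * NO_XRI is also returned when @piocb or @sglq is NULL, or when the BPL
 * virtual address cannot be recovered from the context3 dmabuf, so a
 * NO_XRI return always means no sgl was built.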
8799 **/ 8800 static uint16_t 8801 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, 8802 struct lpfc_sglq *sglq) 8803 { 8804 uint16_t xritag = NO_XRI; 8805 struct ulp_bde64 *bpl = NULL; 8806 struct ulp_bde64 bde; 8807 struct sli4_sge *sgl = NULL; 8808 struct lpfc_dmabuf *dmabuf; 8809 IOCB_t *icmd; 8810 int numBdes = 0; 8811 int i = 0; 8812 uint32_t offset = 0; /* accumulated offset in the sg request list */ 8813 int inbound = 0; /* number of sg reply entries inbound from firmware */ 8814 8815 if (!piocbq || !sglq) 8816 return xritag; 8817 8818 sgl = (struct sli4_sge *)sglq->sgl; 8819 icmd = &piocbq->iocb; 8820 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX) 8821 return sglq->sli4_xritag; 8822 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 8823 numBdes = icmd->un.genreq64.bdl.bdeSize / 8824 sizeof(struct ulp_bde64); 8825 /* The addrHigh and addrLow fields within the IOCB 8826 * have not been byteswapped yet so there is no 8827 * need to swap them back. 8828 */ 8829 if (piocbq->context3) 8830 dmabuf = (struct lpfc_dmabuf *)piocbq->context3; 8831 else 8832 return xritag; 8833 8834 bpl = (struct ulp_bde64 *)dmabuf->virt; 8835 if (!bpl) 8836 return xritag; 8837 8838 for (i = 0; i < numBdes; i++) { 8839 /* Should already be byte swapped. */ 8840 sgl->addr_hi = bpl->addrHigh; 8841 sgl->addr_lo = bpl->addrLow; 8842 8843 sgl->word2 = le32_to_cpu(sgl->word2); 8844 if ((i+1) == numBdes) 8845 bf_set(lpfc_sli4_sge_last, sgl, 1); 8846 else 8847 bf_set(lpfc_sli4_sge_last, sgl, 0); 8848 /* swap the size field back to the cpu so we 8849 * can assign it to the sgl. 8850 */ 8851 bde.tus.w = le32_to_cpu(bpl->tus.w); 8852 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 8853 /* The offsets in the sgl need to be accumulated 8854 * separately for the request and reply lists. 8855 * The request is always first, the reply follows. 8856 */ 8857 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) { 8858 /* add up the reply sg entries */ 8859 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 8860 inbound++; 8861 /* first inbound? reset the offset */ 8862 if (inbound == 1) 8863 offset = 0; 8864 bf_set(lpfc_sli4_sge_offset, sgl, offset); 8865 bf_set(lpfc_sli4_sge_type, sgl, 8866 LPFC_SGE_TYPE_DATA); 8867 offset += bde.tus.f.bdeSize; 8868 } 8869 sgl->word2 = cpu_to_le32(sgl->word2); 8870 bpl++; 8871 sgl++; 8872 } 8873 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) { 8874 /* The addrHigh and addrLow fields of the BDE have not 8875 * been byteswapped yet so they need to be swapped 8876 * before putting them in the sgl. 8877 */ 8878 sgl->addr_hi = 8879 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); 8880 sgl->addr_lo = 8881 cpu_to_le32(icmd->un.genreq64.bdl.addrLow); 8882 sgl->word2 = le32_to_cpu(sgl->word2); 8883 bf_set(lpfc_sli4_sge_last, sgl, 1); 8884 sgl->word2 = cpu_to_le32(sgl->word2); 8885 sgl->sge_len = 8886 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize); 8887 } 8888 return sglq->sli4_xritag; 8889 } 8890 8891 /** 8892 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry. 8893 * @phba: Pointer to HBA context object. 8894 * @iocbq: Pointer to command iocb. 8895 * @wqe: Pointer to the work queue entry. 8896 * 8897 * This routine converts the iocb command to its Work Queue Entry 8898 * equivalent. The wqe pointer should not have any fields set when 8899 * this routine is called because it will memcpy over them. 8900 * This routine does not set the CQ_ID or the WQEC bits in the 8901 * wqe. 8902 * 8903 * Returns: 0 = Success, IOCB_ERROR = Failure.
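 *
 * After the command-specific switch below, the routine normally finishes by
 * filling the generic fields shared across WQE types; restated here as a
 * sketch for orientation only:
 *
 *	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
 *	bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
 *	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
 *	bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
 *	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);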
8904 **/ 8905 static int 8906 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, 8907 union lpfc_wqe128 *wqe) 8908 { 8909 uint32_t xmit_len = 0, total_len = 0; 8910 uint8_t ct = 0; 8911 uint32_t fip; 8912 uint32_t abort_tag; 8913 uint8_t command_type = ELS_COMMAND_NON_FIP; 8914 uint8_t cmnd; 8915 uint16_t xritag; 8916 uint16_t abrt_iotag; 8917 struct lpfc_iocbq *abrtiocbq; 8918 struct ulp_bde64 *bpl = NULL; 8919 uint32_t els_id = LPFC_ELS_ID_DEFAULT; 8920 int numBdes, i; 8921 struct ulp_bde64 bde; 8922 struct lpfc_nodelist *ndlp; 8923 uint32_t *pcmd; 8924 uint32_t if_type; 8925 8926 fip = phba->hba_flag & HBA_FIP_SUPPORT; 8927 /* The fcp commands will set command type */ 8928 if (iocbq->iocb_flag & LPFC_IO_FCP) 8929 command_type = FCP_COMMAND; 8930 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)) 8931 command_type = ELS_COMMAND_FIP; 8932 else 8933 command_type = ELS_COMMAND_NON_FIP; 8934 8935 if (phba->fcp_embed_io) 8936 memset(wqe, 0, sizeof(union lpfc_wqe128)); 8937 /* Some of the fields are in the right position already */ 8938 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 8939 if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) { 8940 /* The ct field has moved so reset */ 8941 wqe->generic.wqe_com.word7 = 0; 8942 wqe->generic.wqe_com.word10 = 0; 8943 } 8944 8945 abort_tag = (uint32_t) iocbq->iotag; 8946 xritag = iocbq->sli4_xritag; 8947 /* words0-2 bpl convert bde */ 8948 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 8949 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 8950 sizeof(struct ulp_bde64); 8951 bpl = (struct ulp_bde64 *) 8952 ((struct lpfc_dmabuf *)iocbq->context3)->virt; 8953 if (!bpl) 8954 return IOCB_ERROR; 8955 8956 /* Should already be byte swapped. */ 8957 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh); 8958 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow); 8959 /* swap the size field back to the cpu so we 8960 * can assign it to the sgl. 
8961 */ 8962 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); 8963 xmit_len = wqe->generic.bde.tus.f.bdeSize; 8964 total_len = 0; 8965 for (i = 0; i < numBdes; i++) { 8966 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 8967 total_len += bde.tus.f.bdeSize; 8968 } 8969 } else 8970 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; 8971 8972 iocbq->iocb.ulpIoTag = iocbq->iotag; 8973 cmnd = iocbq->iocb.ulpCommand; 8974 8975 switch (iocbq->iocb.ulpCommand) { 8976 case CMD_ELS_REQUEST64_CR: 8977 if (iocbq->iocb_flag & LPFC_IO_LIBDFC) 8978 ndlp = iocbq->context_un.ndlp; 8979 else 8980 ndlp = (struct lpfc_nodelist *)iocbq->context1; 8981 if (!iocbq->iocb.ulpLe) { 8982 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8983 "2007 Only Limited Edition cmd Format" 8984 " supported 0x%x\n", 8985 iocbq->iocb.ulpCommand); 8986 return IOCB_ERROR; 8987 } 8988 8989 wqe->els_req.payload_len = xmit_len; 8990 /* Els_reguest64 has a TMO */ 8991 bf_set(wqe_tmo, &wqe->els_req.wqe_com, 8992 iocbq->iocb.ulpTimeout); 8993 /* Need a VF for word 4 set the vf bit*/ 8994 bf_set(els_req64_vf, &wqe->els_req, 0); 8995 /* And a VFID for word 12 */ 8996 bf_set(els_req64_vfid, &wqe->els_req, 0); 8997 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 8998 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 8999 iocbq->iocb.ulpContext); 9000 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 9001 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0); 9002 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 9003 if (command_type == ELS_COMMAND_FIP) 9004 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 9005 >> LPFC_FIP_ELS_ID_SHIFT); 9006 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 9007 iocbq->context2)->virt); 9008 if_type = bf_get(lpfc_sli_intf_if_type, 9009 &phba->sli4_hba.sli_intf); 9010 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 9011 if (pcmd && (*pcmd == ELS_CMD_FLOGI || 9012 *pcmd == ELS_CMD_SCR || 9013 *pcmd == ELS_CMD_FDISC || 9014 *pcmd == ELS_CMD_LOGO || 9015 *pcmd == ELS_CMD_PLOGI)) { 9016 bf_set(els_req64_sp, &wqe->els_req, 1); 9017 bf_set(els_req64_sid, &wqe->els_req, 9018 iocbq->vport->fc_myDID); 9019 if ((*pcmd == ELS_CMD_FLOGI) && 9020 !(phba->fc_topology == 9021 LPFC_TOPOLOGY_LOOP)) 9022 bf_set(els_req64_sid, &wqe->els_req, 0); 9023 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); 9024 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 9025 phba->vpi_ids[iocbq->vport->vpi]); 9026 } else if (pcmd && iocbq->context1) { 9027 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0); 9028 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 9029 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 9030 } 9031 } 9032 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, 9033 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 9034 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 9035 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 9036 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 9037 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); 9038 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); 9039 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); 9040 wqe->els_req.max_response_payload_len = total_len - xmit_len; 9041 break; 9042 case CMD_XMIT_SEQUENCE64_CX: 9043 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, 9044 iocbq->iocb.un.ulpWord[3]); 9045 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, 9046 iocbq->iocb.unsli3.rcvsli3.ox_id); 9047 /* The entire sequence is transmitted for this IOCB */ 9048 xmit_len = total_len; 9049 cmnd = CMD_XMIT_SEQUENCE64_CR; 9050 if (phba->link_flag & LS_LOOPBACK_MODE) 9051 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); 9052 case CMD_XMIT_SEQUENCE64_CR: 9053 /* word3 iocb=io_tag32 
wqe=reserved */ 9054 wqe->xmit_sequence.rsvd3 = 0; 9055 /* word4 relative_offset memcpy */ 9056 /* word5 r_ctl/df_ctl memcpy */ 9057 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); 9058 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); 9059 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, 9060 LPFC_WQE_IOD_WRITE); 9061 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, 9062 LPFC_WQE_LENLOC_WORD12); 9063 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 9064 wqe->xmit_sequence.xmit_len = xmit_len; 9065 command_type = OTHER_COMMAND; 9066 break; 9067 case CMD_XMIT_BCAST64_CN: 9068 /* word3 iocb=iotag32 wqe=seq_payload_len */ 9069 wqe->xmit_bcast64.seq_payload_len = xmit_len; 9070 /* word4 iocb=rsvd wqe=rsvd */ 9071 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ 9072 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ 9073 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com, 9074 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9075 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1); 9076 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE); 9077 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, 9078 LPFC_WQE_LENLOC_WORD3); 9079 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); 9080 break; 9081 case CMD_FCP_IWRITE64_CR: 9082 command_type = FCP_COMMAND_DATA_OUT; 9083 /* word3 iocb=iotag wqe=payload_offset_len */ 9084 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 9085 bf_set(payload_offset_len, &wqe->fcp_iwrite, 9086 xmit_len + sizeof(struct fcp_rsp)); 9087 bf_set(cmd_buff_len, &wqe->fcp_iwrite, 9088 0); 9089 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 9090 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 9091 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, 9092 iocbq->iocb.ulpFCP2Rcvy); 9093 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); 9094 /* Always open the exchange */ 9095 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); 9096 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, 9097 LPFC_WQE_LENLOC_WORD4); 9098 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); 9099 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); 9100 if (iocbq->iocb_flag & LPFC_IO_OAS) { 9101 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1); 9102 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); 9103 if (iocbq->priority) { 9104 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, 9105 (iocbq->priority << 1)); 9106 } else { 9107 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, 9108 (phba->cfg_XLanePriority << 1)); 9109 } 9110 } 9111 /* Note, word 10 is already initialized to 0 */ 9112 9113 /* Don't set PBDE for Perf hints, just fcp_embed_pbde */ 9114 if (phba->fcp_embed_pbde) 9115 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1); 9116 else 9117 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0); 9118 9119 if (phba->fcp_embed_io) { 9120 struct lpfc_scsi_buf *lpfc_cmd; 9121 struct sli4_sge *sgl; 9122 struct fcp_cmnd *fcp_cmnd; 9123 uint32_t *ptr; 9124 9125 /* 128 byte wqe support here */ 9126 9127 lpfc_cmd = iocbq->context1; 9128 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; 9129 fcp_cmnd = lpfc_cmd->fcp_cmnd; 9130 9131 /* Word 0-2 - FCP_CMND */ 9132 wqe->generic.bde.tus.f.bdeFlags = 9133 BUFF_TYPE_BDE_IMMED; 9134 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; 9135 wqe->generic.bde.addrHigh = 0; 9136 wqe->generic.bde.addrLow = 88; /* Word 22 */ 9137 9138 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1); 9139 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0); 9140 9141 /* Word 22-29 FCP CMND Payload */ 9142 ptr = &wqe->words[22]; 9143 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 9144 } 9145 
break; 9146 case CMD_FCP_IREAD64_CR: 9147 /* word3 iocb=iotag wqe=payload_offset_len */ 9148 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 9149 bf_set(payload_offset_len, &wqe->fcp_iread, 9150 xmit_len + sizeof(struct fcp_rsp)); 9151 bf_set(cmd_buff_len, &wqe->fcp_iread, 9152 0); 9153 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 9154 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 9155 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, 9156 iocbq->iocb.ulpFCP2Rcvy); 9157 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); 9158 /* Always open the exchange */ 9159 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); 9160 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, 9161 LPFC_WQE_LENLOC_WORD4); 9162 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 9163 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); 9164 if (iocbq->iocb_flag & LPFC_IO_OAS) { 9165 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1); 9166 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1); 9167 if (iocbq->priority) { 9168 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, 9169 (iocbq->priority << 1)); 9170 } else { 9171 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, 9172 (phba->cfg_XLanePriority << 1)); 9173 } 9174 } 9175 /* Note, word 10 is already initialized to 0 */ 9176 9177 /* Don't set PBDE for Perf hints, just fcp_embed_pbde */ 9178 if (phba->fcp_embed_pbde) 9179 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1); 9180 else 9181 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0); 9182 9183 if (phba->fcp_embed_io) { 9184 struct lpfc_scsi_buf *lpfc_cmd; 9185 struct sli4_sge *sgl; 9186 struct fcp_cmnd *fcp_cmnd; 9187 uint32_t *ptr; 9188 9189 /* 128 byte wqe support here */ 9190 9191 lpfc_cmd = iocbq->context1; 9192 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; 9193 fcp_cmnd = lpfc_cmd->fcp_cmnd; 9194 9195 /* Word 0-2 - FCP_CMND */ 9196 wqe->generic.bde.tus.f.bdeFlags = 9197 BUFF_TYPE_BDE_IMMED; 9198 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; 9199 wqe->generic.bde.addrHigh = 0; 9200 wqe->generic.bde.addrLow = 88; /* Word 22 */ 9201 9202 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1); 9203 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0); 9204 9205 /* Word 22-29 FCP CMND Payload */ 9206 ptr = &wqe->words[22]; 9207 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 9208 } 9209 break; 9210 case CMD_FCP_ICMND64_CR: 9211 /* word3 iocb=iotag wqe=payload_offset_len */ 9212 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 9213 bf_set(payload_offset_len, &wqe->fcp_icmd, 9214 xmit_len + sizeof(struct fcp_rsp)); 9215 bf_set(cmd_buff_len, &wqe->fcp_icmd, 9216 0); 9217 /* word3 iocb=IO_TAG wqe=reserved */ 9218 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); 9219 /* Always open the exchange */ 9220 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); 9221 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); 9222 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); 9223 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 9224 LPFC_WQE_LENLOC_NONE); 9225 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, 9226 iocbq->iocb.ulpFCP2Rcvy); 9227 if (iocbq->iocb_flag & LPFC_IO_OAS) { 9228 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1); 9229 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1); 9230 if (iocbq->priority) { 9231 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, 9232 (iocbq->priority << 1)); 9233 } else { 9234 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, 9235 (phba->cfg_XLanePriority << 1)); 9236 } 9237 } 9238 /* Note, word 10 is already initialized to 0 */ 9239 9240 if (phba->fcp_embed_io) { 9241 struct lpfc_scsi_buf *lpfc_cmd; 9242 struct sli4_sge *sgl; 9243 struct fcp_cmnd 
*fcp_cmnd; 9244 uint32_t *ptr; 9245 9246 /* 128 byte wqe support here */ 9247 9248 lpfc_cmd = iocbq->context1; 9249 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; 9250 fcp_cmnd = lpfc_cmd->fcp_cmnd; 9251 9252 /* Word 0-2 - FCP_CMND */ 9253 wqe->generic.bde.tus.f.bdeFlags = 9254 BUFF_TYPE_BDE_IMMED; 9255 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; 9256 wqe->generic.bde.addrHigh = 0; 9257 wqe->generic.bde.addrLow = 88; /* Word 22 */ 9258 9259 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1); 9260 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0); 9261 9262 /* Word 22-29 FCP CMND Payload */ 9263 ptr = &wqe->words[22]; 9264 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 9265 } 9266 break; 9267 case CMD_GEN_REQUEST64_CR: 9268 /* For this command calculate the xmit length of the 9269 * request bde. 9270 */ 9271 xmit_len = 0; 9272 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 9273 sizeof(struct ulp_bde64); 9274 for (i = 0; i < numBdes; i++) { 9275 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 9276 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 9277 break; 9278 xmit_len += bde.tus.f.bdeSize; 9279 } 9280 /* word3 iocb=IO_TAG wqe=request_payload_len */ 9281 wqe->gen_req.request_payload_len = xmit_len; 9282 /* word4 iocb=parameter wqe=relative_offset memcpy */ 9283 /* word5 [rctl, type, df_ctl, la] copied in memcpy */ 9284 /* word6 context tag copied in memcpy */ 9285 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { 9286 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 9287 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9288 "2015 Invalid CT %x command 0x%x\n", 9289 ct, iocbq->iocb.ulpCommand); 9290 return IOCB_ERROR; 9291 } 9292 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0); 9293 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout); 9294 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU); 9295 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); 9296 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); 9297 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); 9298 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 9299 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 9300 wqe->gen_req.max_response_payload_len = total_len - xmit_len; 9301 command_type = OTHER_COMMAND; 9302 break; 9303 case CMD_XMIT_ELS_RSP64_CX: 9304 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9305 /* words0-2 BDE memcpy */ 9306 /* word3 iocb=iotag32 wqe=response_payload_len */ 9307 wqe->xmit_els_rsp.response_payload_len = xmit_len; 9308 /* word4 */ 9309 wqe->xmit_els_rsp.word4 = 0; 9310 /* word5 iocb=rsvd wge=did */ 9311 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, 9312 iocbq->iocb.un.xseq64.xmit_els_remoteID); 9313 9314 if_type = bf_get(lpfc_sli_intf_if_type, 9315 &phba->sli4_hba.sli_intf); 9316 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 9317 if (iocbq->vport->fc_flag & FC_PT2PT) { 9318 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 9319 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 9320 iocbq->vport->fc_myDID); 9321 if (iocbq->vport->fc_myDID == Fabric_DID) { 9322 bf_set(wqe_els_did, 9323 &wqe->xmit_els_rsp.wqe_dest, 0); 9324 } 9325 } 9326 } 9327 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 9328 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9329 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); 9330 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 9331 iocbq->iocb.unsli3.rcvsli3.ox_id); 9332 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 9333 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 9334 phba->vpi_ids[iocbq->vport->vpi]); 9335 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1); 9336 bf_set(wqe_iod, 
&wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); 9337 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); 9338 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, 9339 LPFC_WQE_LENLOC_WORD3); 9340 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 9341 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, 9342 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 9343 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 9344 iocbq->context2)->virt); 9345 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 9346 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 9347 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 9348 iocbq->vport->fc_myDID); 9349 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1); 9350 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 9351 phba->vpi_ids[phba->pport->vpi]); 9352 } 9353 command_type = OTHER_COMMAND; 9354 break; 9355 case CMD_CLOSE_XRI_CN: 9356 case CMD_ABORT_XRI_CN: 9357 case CMD_ABORT_XRI_CX: 9358 /* words 0-2 memcpy should be 0 rserved */ 9359 /* port will send abts */ 9360 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag; 9361 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) { 9362 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag]; 9363 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK; 9364 } else 9365 fip = 0; 9366 9367 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip) 9368 /* 9369 * The link is down, or the command was ELS_FIP 9370 * so the fw does not need to send abts 9371 * on the wire. 9372 */ 9373 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); 9374 else 9375 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 9376 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 9377 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */ 9378 wqe->abort_cmd.rsrvd5 = 0; 9379 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com, 9380 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9381 abort_tag = iocbq->iocb.un.acxri.abortIoTag; 9382 /* 9383 * The abort handler will send us CMD_ABORT_XRI_CN or 9384 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX 9385 */ 9386 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 9387 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); 9388 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, 9389 LPFC_WQE_LENLOC_NONE); 9390 cmnd = CMD_ABORT_XRI_CX; 9391 command_type = OTHER_COMMAND; 9392 xritag = 0; 9393 break; 9394 case CMD_XMIT_BLS_RSP64_CX: 9395 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9396 /* As BLS ABTS RSP WQE is very different from other WQEs, 9397 * we re-construct this WQE here based on information in 9398 * iocbq from scratch. 9399 */ 9400 memset(wqe, 0, sizeof(union lpfc_wqe)); 9401 /* OX_ID is invariable to who sent ABTS to CT exchange */ 9402 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, 9403 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); 9404 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) == 9405 LPFC_ABTS_UNSOL_INT) { 9406 /* ABTS sent by initiator to CT exchange, the 9407 * RX_ID field will be filled with the newly 9408 * allocated responder XRI. 9409 */ 9410 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 9411 iocbq->sli4_xritag); 9412 } else { 9413 /* ABTS sent by responder to CT exchange, the 9414 * RX_ID field will be filled with the responder 9415 * RX_ID from ABTS. 
9416 */ 9417 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 9418 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp)); 9419 } 9420 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); 9421 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); 9422 9423 /* Use CT=VPI */ 9424 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest, 9425 ndlp->nlp_DID); 9426 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp, 9427 iocbq->iocb.ulpContext); 9428 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1); 9429 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, 9430 phba->vpi_ids[phba->pport->vpi]); 9431 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1); 9432 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com, 9433 LPFC_WQE_LENLOC_NONE); 9434 /* Overwrite the pre-set command type with OTHER_COMMAND */ 9435 command_type = OTHER_COMMAND; 9436 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) { 9437 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp, 9438 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp)); 9439 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp, 9440 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp)); 9441 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp, 9442 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); 9443 } 9444 9445 break; 9446 case CMD_SEND_FRAME: 9447 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 9448 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 9449 return 0; 9450 case CMD_XRI_ABORTED_CX: 9451 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 9452 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ 9453 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */ 9454 case CMD_FCP_TRSP64_CX: /* Target mode rcv */ 9455 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */ 9456 default: 9457 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9458 "2014 Invalid command 0x%x\n", 9459 iocbq->iocb.ulpCommand); 9460 return IOCB_ERROR; 9461 break; 9462 } 9463 9464 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS) 9465 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU); 9466 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP) 9467 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP); 9468 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT) 9469 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT); 9470 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP | 9471 LPFC_IO_DIF_INSERT); 9472 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 9473 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 9474 wqe->generic.wqe_com.abort_tag = abort_tag; 9475 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type); 9476 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd); 9477 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass); 9478 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 9479 return 0; 9480 } 9481 9482 /** 9483 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb 9484 * @phba: Pointer to HBA context object. 9485 * @ring_number: SLI ring number to issue iocb on. 9486 * @piocb: Pointer to command iocb. 9487 * @flag: Flag indicating if this command can be put into txq. 9488 * 9489 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue 9490 * an iocb command to an HBA with SLI-4 interface spec. 9491 * 9492 * This function is called with hbalock held. The function will return success 9493 * after it successfully submits the iocb to firmware or after adding it to the 9494 * txq.
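 *
 * In outline, the SLI4 submission path below is (sketch only, error paths
 * omitted):
 *
 *	sglq = __lpfc_sli_get_els_sglq(phba, piocb);	(assign an XRI if needed)
 *	lpfc_sli4_bpl2sgl(phba, piocb, sglq);		(map the BPL/BDE to an SGL)
 *	lpfc_sli4_iocb2wqe(phba, piocb, &wqe);		(build the WQE)
 *	lpfc_sli4_wq_put(wq, &wqe);			(post it and ring the doorbell)
 *	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);	(track it for completion)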
9495 **/ 9496 static int 9497 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, 9498 struct lpfc_iocbq *piocb, uint32_t flag) 9499 { 9500 struct lpfc_sglq *sglq; 9501 union lpfc_wqe128 wqe; 9502 struct lpfc_queue *wq; 9503 struct lpfc_sli_ring *pring; 9504 9505 /* Get the WQ */ 9506 if ((piocb->iocb_flag & LPFC_IO_FCP) || 9507 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 9508 if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) 9509 wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx]; 9510 else 9511 wq = phba->sli4_hba.oas_wq; 9512 } else { 9513 wq = phba->sli4_hba.els_wq; 9514 } 9515 9516 /* Get corresponding ring */ 9517 pring = wq->pring; 9518 9519 /* 9520 * The WQE can be either 64 or 128 bytes, 9521 */ 9522 9523 lockdep_assert_held(&phba->hbalock); 9524 9525 if (piocb->sli4_xritag == NO_XRI) { 9526 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 9527 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 9528 sglq = NULL; 9529 else { 9530 if (!list_empty(&pring->txq)) { 9531 if (!(flag & SLI_IOCB_RET_IOCB)) { 9532 __lpfc_sli_ringtx_put(phba, 9533 pring, piocb); 9534 return IOCB_SUCCESS; 9535 } else { 9536 return IOCB_BUSY; 9537 } 9538 } else { 9539 sglq = __lpfc_sli_get_els_sglq(phba, piocb); 9540 if (!sglq) { 9541 if (!(flag & SLI_IOCB_RET_IOCB)) { 9542 __lpfc_sli_ringtx_put(phba, 9543 pring, 9544 piocb); 9545 return IOCB_SUCCESS; 9546 } else 9547 return IOCB_BUSY; 9548 } 9549 } 9550 } 9551 } else if (piocb->iocb_flag & LPFC_IO_FCP) 9552 /* These IO's already have an XRI and a mapped sgl. */ 9553 sglq = NULL; 9554 else { 9555 /* 9556 * This is a continuation of a commandi,(CX) so this 9557 * sglq is on the active list 9558 */ 9559 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag); 9560 if (!sglq) 9561 return IOCB_ERROR; 9562 } 9563 9564 if (sglq) { 9565 piocb->sli4_lxritag = sglq->sli4_lxritag; 9566 piocb->sli4_xritag = sglq->sli4_xritag; 9567 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq)) 9568 return IOCB_ERROR; 9569 } 9570 9571 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe)) 9572 return IOCB_ERROR; 9573 9574 if (lpfc_sli4_wq_put(wq, &wqe)) 9575 return IOCB_ERROR; 9576 lpfc_sli_ringtxcmpl_put(phba, pring, piocb); 9577 9578 return 0; 9579 } 9580 9581 /** 9582 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb 9583 * 9584 * This routine wraps the actual lockless version for issusing IOCB function 9585 * pointer from the lpfc_hba struct. 9586 * 9587 * Return codes: 9588 * IOCB_ERROR - Error 9589 * IOCB_SUCCESS - Success 9590 * IOCB_BUSY - Busy 9591 **/ 9592 int 9593 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 9594 struct lpfc_iocbq *piocb, uint32_t flag) 9595 { 9596 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 9597 } 9598 9599 /** 9600 * lpfc_sli_api_table_setup - Set up sli api function jump table 9601 * @phba: The hba struct for which this call is being executed. 9602 * @dev_grp: The HBA PCI-Device group number. 9603 * 9604 * This routine sets up the SLI interface API function jump table in @phba 9605 * struct. 9606 * Returns: 0 - success, -ENODEV - failure. 
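 *
 * Once the table is set up, callers dispatch through it without caring
 * which SLI revision is active; a minimal sketch:
 *
 *	rc = phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);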
9607 **/ 9608 int 9609 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 9610 { 9611 9612 switch (dev_grp) { 9613 case LPFC_PCI_DEV_LP: 9614 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3; 9615 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3; 9616 break; 9617 case LPFC_PCI_DEV_OC: 9618 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4; 9619 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4; 9620 break; 9621 default: 9622 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9623 "1419 Invalid HBA PCI-device group: 0x%x\n", 9624 dev_grp); 9625 return -ENODEV; 9626 break; 9627 } 9628 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq; 9629 return 0; 9630 } 9631 9632 /** 9633 * lpfc_sli4_calc_ring - Calculates which ring to use 9634 * @phba: Pointer to HBA context object. 9635 * @piocb: Pointer to command iocb. 9636 * 9637 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on 9638 * hba_wqidx, thus we need to calculate the corresponding ring. 9639 * Since ABORTS must go on the same WQ as the command they are 9640 * aborting, we use the command's hba_wqidx. 9641 */ 9642 struct lpfc_sli_ring * 9643 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) 9644 { 9645 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) { 9646 if (!(phba->cfg_fof) || 9647 (!(piocb->iocb_flag & LPFC_IO_FOF))) { 9648 if (unlikely(!phba->sli4_hba.fcp_wq)) 9649 return NULL; 9650 /* 9651 * for an abort iocb, hba_wqidx should already 9652 * be set up based on which work queue we used. 9653 */ 9654 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 9655 piocb->hba_wqidx = 9656 lpfc_sli4_scmd_to_wqidx_distr(phba, 9657 piocb->context1); 9658 piocb->hba_wqidx = piocb->hba_wqidx % 9659 phba->cfg_fcp_io_channel; 9660 } 9661 return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring; 9662 } else { 9663 if (unlikely(!phba->sli4_hba.oas_wq)) 9664 return NULL; 9665 piocb->hba_wqidx = 0; 9666 return phba->sli4_hba.oas_wq->pring; 9667 } 9668 } else { 9669 if (unlikely(!phba->sli4_hba.els_wq)) 9670 return NULL; 9671 piocb->hba_wqidx = 0; 9672 return phba->sli4_hba.els_wq->pring; 9673 } 9674 } 9675 9676 /** 9677 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb 9678 * @phba: Pointer to HBA context object. 9679 * @ring_number: SLI ring number to issue iocb on. 9680 * @piocb: Pointer to command iocb. 9681 * @flag: Flag indicating if this command can be put into txq. 9682 * 9683 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb 9684 * function. This function takes the appropriate lock (the ring_lock for 9685 * SLI4, otherwise the hbalock), calls __lpfc_sli_issue_iocb, and returns 9686 * the status that __lpfc_sli_issue_iocb returns. This wrapper is used by 9687 * functions which do not already hold the hbalock.
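 *
 * A typical call from code that holds no adapter or ring lock looks like
 * this (illustrative sketch; cmdiocb stands for a previously prepared
 * command iocb):
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocb, 0);
 *	if (rc == IOCB_ERROR)
 *		lpfc_sli_release_iocbq(phba, cmdiocb);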
9688 **/ 9689 int 9690 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 9691 struct lpfc_iocbq *piocb, uint32_t flag) 9692 { 9693 struct lpfc_hba_eq_hdl *hba_eq_hdl; 9694 struct lpfc_sli_ring *pring; 9695 struct lpfc_queue *fpeq; 9696 struct lpfc_eqe *eqe; 9697 unsigned long iflags; 9698 int rc, idx; 9699 9700 if (phba->sli_rev == LPFC_SLI_REV4) { 9701 pring = lpfc_sli4_calc_ring(phba, piocb); 9702 if (unlikely(pring == NULL)) 9703 return IOCB_ERROR; 9704 9705 spin_lock_irqsave(&pring->ring_lock, iflags); 9706 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 9707 spin_unlock_irqrestore(&pring->ring_lock, iflags); 9708 9709 if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) { 9710 idx = piocb->hba_wqidx; 9711 hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx]; 9712 9713 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) { 9714 9715 /* Get associated EQ with this index */ 9716 fpeq = phba->sli4_hba.hba_eq[idx]; 9717 9718 /* Turn off interrupts from this EQ */ 9719 phba->sli4_hba.sli4_eq_clr_intr(fpeq); 9720 9721 /* 9722 * Process all the events on FCP EQ 9723 */ 9724 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 9725 lpfc_sli4_hba_handle_eqe(phba, 9726 eqe, idx); 9727 fpeq->EQ_processed++; 9728 } 9729 9730 /* Always clear and re-arm the EQ */ 9731 phba->sli4_hba.sli4_eq_release(fpeq, 9732 LPFC_QUEUE_REARM); 9733 } 9734 atomic_inc(&hba_eq_hdl->hba_eq_in_use); 9735 } 9736 } else { 9737 /* For now, SLI2/3 will still use hbalock */ 9738 spin_lock_irqsave(&phba->hbalock, iflags); 9739 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 9740 spin_unlock_irqrestore(&phba->hbalock, iflags); 9741 } 9742 return rc; 9743 } 9744 9745 /** 9746 * lpfc_extra_ring_setup - Extra ring setup function 9747 * @phba: Pointer to HBA context object. 9748 * 9749 * This function is called while driver attaches with the 9750 * HBA to setup the extra ring. The extra ring is used 9751 * only when driver needs to support target mode functionality 9752 * or IP over FC functionalities. 9753 * 9754 * This function is called with no lock held. SLI3 only. 9755 **/ 9756 static int 9757 lpfc_extra_ring_setup( struct lpfc_hba *phba) 9758 { 9759 struct lpfc_sli *psli; 9760 struct lpfc_sli_ring *pring; 9761 9762 psli = &phba->sli; 9763 9764 /* Adjust cmd/rsp ring iocb entries more evenly */ 9765 9766 /* Take some away from the FCP ring */ 9767 pring = &psli->sli3_ring[LPFC_FCP_RING]; 9768 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 9769 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 9770 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 9771 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 9772 9773 /* and give them to the extra ring */ 9774 pring = &psli->sli3_ring[LPFC_EXTRA_RING]; 9775 9776 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 9777 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 9778 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 9779 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 9780 9781 /* Setup default profile for this ring */ 9782 pring->iotag_max = 4096; 9783 pring->num_mask = 1; 9784 pring->prt[0].profile = 0; /* Mask 0 */ 9785 pring->prt[0].rctl = phba->cfg_multi_ring_rctl; 9786 pring->prt[0].type = phba->cfg_multi_ring_type; 9787 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 9788 return 0; 9789 } 9790 9791 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port. 9792 * @phba: Pointer to HBA context object. 9793 * @iocbq: Pointer to iocb object. 
9794 * 9795 * The async_event handler calls this routine when it receives 9796 * an ASYNC_STATUS_CN event from the port. The port generates 9797 * this event when an Abort Sequence request to an rport fails 9798 * twice in succession. The abort could be originated by the 9799 * driver or by the port. The ABTS could have been for an ELS 9800 * or FCP IO. The port only generates this event when an ABTS 9801 * fails to complete after one retry. 9802 */ 9803 static void 9804 lpfc_sli_abts_err_handler(struct lpfc_hba *phba, 9805 struct lpfc_iocbq *iocbq) 9806 { 9807 struct lpfc_nodelist *ndlp = NULL; 9808 uint16_t rpi = 0, vpi = 0; 9809 struct lpfc_vport *vport = NULL; 9810 9811 /* The rpi in the ulpContext is vport-sensitive. */ 9812 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag; 9813 rpi = iocbq->iocb.ulpContext; 9814 9815 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9816 "3092 Port generated ABTS async event " 9817 "on vpi %d rpi %d status 0x%x\n", 9818 vpi, rpi, iocbq->iocb.ulpStatus); 9819 9820 vport = lpfc_find_vport_by_vpid(phba, vpi); 9821 if (!vport) 9822 goto err_exit; 9823 ndlp = lpfc_findnode_rpi(vport, rpi); 9824 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 9825 goto err_exit; 9826 9827 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) 9828 lpfc_sli_abts_recover_port(vport, ndlp); 9829 return; 9830 9831 err_exit: 9832 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9833 "3095 Event Context not found, no " 9834 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n", 9835 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus, 9836 vpi, rpi); 9837 } 9838 9839 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port. 9840 * @phba: pointer to HBA context object. 9841 * @ndlp: nodelist pointer for the impacted rport. 9842 * @axri: pointer to the wcqe containing the failed exchange. 9843 * 9844 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the 9845 * port. The port generates this event when an abort exchange request to an 9846 * rport fails twice in succession with no reply. The abort could be originated 9847 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO. 9848 */ 9849 void 9850 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, 9851 struct lpfc_nodelist *ndlp, 9852 struct sli4_wcqe_xri_aborted *axri) 9853 { 9854 struct lpfc_vport *vport; 9855 uint32_t ext_status = 0; 9856 9857 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 9858 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9859 "3115 Node Context not found, driver " 9860 "ignoring abts err event\n"); 9861 return; 9862 } 9863 9864 vport = ndlp->vport; 9865 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9866 "3116 Port generated FCP XRI ABORT event on " 9867 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n", 9868 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], 9869 bf_get(lpfc_wcqe_xa_xri, axri), 9870 bf_get(lpfc_wcqe_xa_status, axri), 9871 axri->parameter); 9872 9873 /* 9874 * Catch the ABTS protocol failure case. Older OCe FW releases returned 9875 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and 9876 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT. 9877 */ 9878 ext_status = axri->parameter & IOERR_PARAM_MASK; 9879 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && 9880 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) 9881 lpfc_sli_abts_recover_port(vport, ndlp); 9882 } 9883 9884 /** 9885 * lpfc_sli_async_event_handler - ASYNC iocb handler function 9886 * @phba: Pointer to HBA context object. 
9887 * @pring: Pointer to driver SLI ring object. 9888 * @iocbq: Pointer to iocb object. 9889 * 9890 * This function is called by the slow ring event handler 9891 * function when there is an ASYNC event iocb in the ring. 9892 * This function is called with no lock held. 9893 * Currently this function handles only temperature related 9894 * ASYNC events. The function decodes the temperature sensor 9895 * event message and posts events for the management applications. 9896 **/ 9897 static void 9898 lpfc_sli_async_event_handler(struct lpfc_hba * phba, 9899 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) 9900 { 9901 IOCB_t *icmd; 9902 uint16_t evt_code; 9903 struct temp_event temp_event_data; 9904 struct Scsi_Host *shost; 9905 uint32_t *iocb_w; 9906 9907 icmd = &iocbq->iocb; 9908 evt_code = icmd->un.asyncstat.evt_code; 9909 9910 switch (evt_code) { 9911 case ASYNC_TEMP_WARN: 9912 case ASYNC_TEMP_SAFE: 9913 temp_event_data.data = (uint32_t) icmd->ulpContext; 9914 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 9915 if (evt_code == ASYNC_TEMP_WARN) { 9916 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 9917 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 9918 "0347 Adapter is very hot, please take " 9919 "corrective action. temperature : %d Celsius\n", 9920 (uint32_t) icmd->ulpContext); 9921 } else { 9922 temp_event_data.event_code = LPFC_NORMAL_TEMP; 9923 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 9924 "0340 Adapter temperature is OK now. " 9925 "temperature : %d Celsius\n", 9926 (uint32_t) icmd->ulpContext); 9927 } 9928 9929 /* Send temperature change event to applications */ 9930 shost = lpfc_shost_from_vport(phba->pport); 9931 fc_host_post_vendor_event(shost, fc_get_event_number(), 9932 sizeof(temp_event_data), (char *) &temp_event_data, 9933 LPFC_NL_VENDOR_ID); 9934 break; 9935 case ASYNC_STATUS_CN: 9936 lpfc_sli_abts_err_handler(phba, iocbq); 9937 break; 9938 default: 9939 iocb_w = (uint32_t *) icmd; 9940 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9941 "0346 Ring %d handler: unexpected ASYNC_STATUS" 9942 " evt_code 0x%x\n" 9943 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" 9944 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" 9945 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" 9946 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n", 9947 pring->ringno, icmd->un.asyncstat.evt_code, 9948 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3], 9949 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7], 9950 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11], 9951 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]); 9952 9953 break; 9954 } 9955 } 9956 9957 9958 /** 9959 * lpfc_sli4_setup - SLI ring setup function 9960 * @phba: Pointer to HBA context object. 9961 * 9962 * lpfc_sli_setup sets up rings of the SLI interface with 9963 * number of iocbs per ring and iotags. This function is 9964 * called while driver attach to the HBA and before the 9965 * interrupts are enabled. So there is no need for locking. 9966 * 9967 * This function always returns 0. 
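 *
 * Each prt[] entry below acts as an (rctl, type) match used to route
 * unsolicited frames; for example, an unsolicited frame with rctl
 * FC_RCTL_ELS_REQ and type FC_TYPE_ELS is handed to lpfc_els_unsol_event().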
9968 **/ 9969 int 9970 lpfc_sli4_setup(struct lpfc_hba *phba) 9971 { 9972 struct lpfc_sli_ring *pring; 9973 9974 pring = phba->sli4_hba.els_wq->pring; 9975 pring->num_mask = LPFC_MAX_RING_MASK; 9976 pring->prt[0].profile = 0; /* Mask 0 */ 9977 pring->prt[0].rctl = FC_RCTL_ELS_REQ; 9978 pring->prt[0].type = FC_TYPE_ELS; 9979 pring->prt[0].lpfc_sli_rcv_unsol_event = 9980 lpfc_els_unsol_event; 9981 pring->prt[1].profile = 0; /* Mask 1 */ 9982 pring->prt[1].rctl = FC_RCTL_ELS_REP; 9983 pring->prt[1].type = FC_TYPE_ELS; 9984 pring->prt[1].lpfc_sli_rcv_unsol_event = 9985 lpfc_els_unsol_event; 9986 pring->prt[2].profile = 0; /* Mask 2 */ 9987 /* NameServer Inquiry */ 9988 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; 9989 /* NameServer */ 9990 pring->prt[2].type = FC_TYPE_CT; 9991 pring->prt[2].lpfc_sli_rcv_unsol_event = 9992 lpfc_ct_unsol_event; 9993 pring->prt[3].profile = 0; /* Mask 3 */ 9994 /* NameServer response */ 9995 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; 9996 /* NameServer */ 9997 pring->prt[3].type = FC_TYPE_CT; 9998 pring->prt[3].lpfc_sli_rcv_unsol_event = 9999 lpfc_ct_unsol_event; 10000 return 0; 10001 } 10002 10003 /** 10004 * lpfc_sli_setup - SLI ring setup function 10005 * @phba: Pointer to HBA context object. 10006 * 10007 * lpfc_sli_setup sets up rings of the SLI interface with 10008 * number of iocbs per ring and iotags. This function is 10009 * called while driver attach to the HBA and before the 10010 * interrupts are enabled. So there is no need for locking. 10011 * 10012 * This function always returns 0. SLI3 only. 10013 **/ 10014 int 10015 lpfc_sli_setup(struct lpfc_hba *phba) 10016 { 10017 int i, totiocbsize = 0; 10018 struct lpfc_sli *psli = &phba->sli; 10019 struct lpfc_sli_ring *pring; 10020 10021 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS; 10022 psli->sli_flag = 0; 10023 10024 psli->iocbq_lookup = NULL; 10025 psli->iocbq_lookup_len = 0; 10026 psli->last_iotag = 0; 10027 10028 for (i = 0; i < psli->num_rings; i++) { 10029 pring = &psli->sli3_ring[i]; 10030 switch (i) { 10031 case LPFC_FCP_RING: /* ring 0 - FCP */ 10032 /* numCiocb and numRiocb are used in config_port */ 10033 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 10034 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 10035 pring->sli.sli3.numCiocb += 10036 SLI2_IOCB_CMD_R1XTRA_ENTRIES; 10037 pring->sli.sli3.numRiocb += 10038 SLI2_IOCB_RSP_R1XTRA_ENTRIES; 10039 pring->sli.sli3.numCiocb += 10040 SLI2_IOCB_CMD_R3XTRA_ENTRIES; 10041 pring->sli.sli3.numRiocb += 10042 SLI2_IOCB_RSP_R3XTRA_ENTRIES; 10043 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 10044 SLI3_IOCB_CMD_SIZE : 10045 SLI2_IOCB_CMD_SIZE; 10046 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 10047 SLI3_IOCB_RSP_SIZE : 10048 SLI2_IOCB_RSP_SIZE; 10049 pring->iotag_ctr = 0; 10050 pring->iotag_max = 10051 (phba->cfg_hba_queue_depth * 2); 10052 pring->fast_iotag = pring->iotag_max; 10053 pring->num_mask = 0; 10054 break; 10055 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ 10056 /* numCiocb and numRiocb are used in config_port */ 10057 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 10058 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 10059 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 10060 SLI3_IOCB_CMD_SIZE : 10061 SLI2_IOCB_CMD_SIZE; 10062 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 
10063 SLI3_IOCB_RSP_SIZE : 10064 SLI2_IOCB_RSP_SIZE; 10065 pring->iotag_max = phba->cfg_hba_queue_depth; 10066 pring->num_mask = 0; 10067 break; 10068 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 10069 /* numCiocb and numRiocb are used in config_port */ 10070 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 10071 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 10072 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 10073 SLI3_IOCB_CMD_SIZE : 10074 SLI2_IOCB_CMD_SIZE; 10075 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 10076 SLI3_IOCB_RSP_SIZE : 10077 SLI2_IOCB_RSP_SIZE; 10078 pring->fast_iotag = 0; 10079 pring->iotag_ctr = 0; 10080 pring->iotag_max = 4096; 10081 pring->lpfc_sli_rcv_async_status = 10082 lpfc_sli_async_event_handler; 10083 pring->num_mask = LPFC_MAX_RING_MASK; 10084 pring->prt[0].profile = 0; /* Mask 0 */ 10085 pring->prt[0].rctl = FC_RCTL_ELS_REQ; 10086 pring->prt[0].type = FC_TYPE_ELS; 10087 pring->prt[0].lpfc_sli_rcv_unsol_event = 10088 lpfc_els_unsol_event; 10089 pring->prt[1].profile = 0; /* Mask 1 */ 10090 pring->prt[1].rctl = FC_RCTL_ELS_REP; 10091 pring->prt[1].type = FC_TYPE_ELS; 10092 pring->prt[1].lpfc_sli_rcv_unsol_event = 10093 lpfc_els_unsol_event; 10094 pring->prt[2].profile = 0; /* Mask 2 */ 10095 /* NameServer Inquiry */ 10096 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; 10097 /* NameServer */ 10098 pring->prt[2].type = FC_TYPE_CT; 10099 pring->prt[2].lpfc_sli_rcv_unsol_event = 10100 lpfc_ct_unsol_event; 10101 pring->prt[3].profile = 0; /* Mask 3 */ 10102 /* NameServer response */ 10103 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; 10104 /* NameServer */ 10105 pring->prt[3].type = FC_TYPE_CT; 10106 pring->prt[3].lpfc_sli_rcv_unsol_event = 10107 lpfc_ct_unsol_event; 10108 break; 10109 } 10110 totiocbsize += (pring->sli.sli3.numCiocb * 10111 pring->sli.sli3.sizeCiocb) + 10112 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb); 10113 } 10114 if (totiocbsize > MAX_SLIM_IOCB_SIZE) { 10115 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 10116 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in " 10117 "SLI2 SLIM Data: x%x x%lx\n", 10118 phba->brd_no, totiocbsize, 10119 (unsigned long) MAX_SLIM_IOCB_SIZE); 10120 } 10121 if (phba->cfg_multi_ring_support == 2) 10122 lpfc_extra_ring_setup(phba); 10123 10124 return 0; 10125 } 10126 10127 /** 10128 * lpfc_sli4_queue_init - Queue initialization function 10129 * @phba: Pointer to HBA context object. 10130 * 10131 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each 10132 * ring. This function also initializes ring indices of each ring. 10133 * This function is called during the initialization of the SLI 10134 * interface of an HBA. 10135 * This function is called with no lock held and always returns 10136 * 1. 
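 *
 * Every SLI4 work queue owns its own pring; the per-queue initialization
 * pattern repeated below is, in essence (wq standing for any of the FCP,
 * NVME, ELS, NVME LS or OAS work queues):
 *
 *	pring = wq->pring;
 *	pring->flag = 0;
 *	INIT_LIST_HEAD(&pring->txq);
 *	INIT_LIST_HEAD(&pring->txcmplq);
 *	spin_lock_init(&pring->ring_lock);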
10137 **/ 10138 void 10139 lpfc_sli4_queue_init(struct lpfc_hba *phba) 10140 { 10141 struct lpfc_sli *psli; 10142 struct lpfc_sli_ring *pring; 10143 int i; 10144 10145 psli = &phba->sli; 10146 spin_lock_irq(&phba->hbalock); 10147 INIT_LIST_HEAD(&psli->mboxq); 10148 INIT_LIST_HEAD(&psli->mboxq_cmpl); 10149 /* Initialize list headers for txq and txcmplq as double linked lists */ 10150 for (i = 0; i < phba->cfg_fcp_io_channel; i++) { 10151 pring = phba->sli4_hba.fcp_wq[i]->pring; 10152 pring->flag = 0; 10153 pring->ringno = LPFC_FCP_RING; 10154 INIT_LIST_HEAD(&pring->txq); 10155 INIT_LIST_HEAD(&pring->txcmplq); 10156 INIT_LIST_HEAD(&pring->iocb_continueq); 10157 spin_lock_init(&pring->ring_lock); 10158 } 10159 for (i = 0; i < phba->cfg_nvme_io_channel; i++) { 10160 pring = phba->sli4_hba.nvme_wq[i]->pring; 10161 pring->flag = 0; 10162 pring->ringno = LPFC_FCP_RING; 10163 INIT_LIST_HEAD(&pring->txq); 10164 INIT_LIST_HEAD(&pring->txcmplq); 10165 INIT_LIST_HEAD(&pring->iocb_continueq); 10166 spin_lock_init(&pring->ring_lock); 10167 } 10168 pring = phba->sli4_hba.els_wq->pring; 10169 pring->flag = 0; 10170 pring->ringno = LPFC_ELS_RING; 10171 INIT_LIST_HEAD(&pring->txq); 10172 INIT_LIST_HEAD(&pring->txcmplq); 10173 INIT_LIST_HEAD(&pring->iocb_continueq); 10174 spin_lock_init(&pring->ring_lock); 10175 10176 if (phba->cfg_nvme_io_channel) { 10177 pring = phba->sli4_hba.nvmels_wq->pring; 10178 pring->flag = 0; 10179 pring->ringno = LPFC_ELS_RING; 10180 INIT_LIST_HEAD(&pring->txq); 10181 INIT_LIST_HEAD(&pring->txcmplq); 10182 INIT_LIST_HEAD(&pring->iocb_continueq); 10183 spin_lock_init(&pring->ring_lock); 10184 } 10185 10186 if (phba->cfg_fof) { 10187 pring = phba->sli4_hba.oas_wq->pring; 10188 pring->flag = 0; 10189 pring->ringno = LPFC_FCP_RING; 10190 INIT_LIST_HEAD(&pring->txq); 10191 INIT_LIST_HEAD(&pring->txcmplq); 10192 INIT_LIST_HEAD(&pring->iocb_continueq); 10193 spin_lock_init(&pring->ring_lock); 10194 } 10195 10196 spin_unlock_irq(&phba->hbalock); 10197 } 10198 10199 /** 10200 * lpfc_sli_queue_init - Queue initialization function 10201 * @phba: Pointer to HBA context object. 10202 * 10203 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each 10204 * ring. This function also initializes ring indices of each ring. 10205 * This function is called during the initialization of the SLI 10206 * interface of an HBA. 10207 * This function is called with no lock held and always returns 10208 * 1. 10209 **/ 10210 void 10211 lpfc_sli_queue_init(struct lpfc_hba *phba) 10212 { 10213 struct lpfc_sli *psli; 10214 struct lpfc_sli_ring *pring; 10215 int i; 10216 10217 psli = &phba->sli; 10218 spin_lock_irq(&phba->hbalock); 10219 INIT_LIST_HEAD(&psli->mboxq); 10220 INIT_LIST_HEAD(&psli->mboxq_cmpl); 10221 /* Initialize list headers for txq and txcmplq as double linked lists */ 10222 for (i = 0; i < psli->num_rings; i++) { 10223 pring = &psli->sli3_ring[i]; 10224 pring->ringno = i; 10225 pring->sli.sli3.next_cmdidx = 0; 10226 pring->sli.sli3.local_getidx = 0; 10227 pring->sli.sli3.cmdidx = 0; 10228 INIT_LIST_HEAD(&pring->iocb_continueq); 10229 INIT_LIST_HEAD(&pring->iocb_continue_saveq); 10230 INIT_LIST_HEAD(&pring->postbufq); 10231 pring->flag = 0; 10232 INIT_LIST_HEAD(&pring->txq); 10233 INIT_LIST_HEAD(&pring->txcmplq); 10234 spin_lock_init(&pring->ring_lock); 10235 } 10236 spin_unlock_irq(&phba->hbalock); 10237 } 10238 10239 /** 10240 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system 10241 * @phba: Pointer to HBA context object. 
10242 * 10243 * This routine flushes the mailbox command subsystem. It will unconditionally 10244 * flush all the mailbox commands in the three possible stages in the mailbox 10245 * command sub-system: pending mailbox command queue; the outstanding mailbox 10246 * command; and completed mailbox command queue. It is caller's responsibility 10247 * to make sure that the driver is in the proper state to flush the mailbox 10248 * command sub-system. Namely, the posting of mailbox commands into the 10249 * pending mailbox command queue from the various clients must be stopped; 10250 * either the HBA is in a state that it will never works on the outstanding 10251 * mailbox command (such as in EEH or ERATT conditions) or the outstanding 10252 * mailbox command has been completed. 10253 **/ 10254 static void 10255 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) 10256 { 10257 LIST_HEAD(completions); 10258 struct lpfc_sli *psli = &phba->sli; 10259 LPFC_MBOXQ_t *pmb; 10260 unsigned long iflag; 10261 10262 /* Flush all the mailbox commands in the mbox system */ 10263 spin_lock_irqsave(&phba->hbalock, iflag); 10264 /* The pending mailbox command queue */ 10265 list_splice_init(&phba->sli.mboxq, &completions); 10266 /* The outstanding active mailbox command */ 10267 if (psli->mbox_active) { 10268 list_add_tail(&psli->mbox_active->list, &completions); 10269 psli->mbox_active = NULL; 10270 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 10271 } 10272 /* The completed mailbox command queue */ 10273 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 10274 spin_unlock_irqrestore(&phba->hbalock, iflag); 10275 10276 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ 10277 while (!list_empty(&completions)) { 10278 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); 10279 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; 10280 if (pmb->mbox_cmpl) 10281 pmb->mbox_cmpl(phba, pmb); 10282 } 10283 } 10284 10285 /** 10286 * lpfc_sli_host_down - Vport cleanup function 10287 * @vport: Pointer to virtual port object. 10288 * 10289 * lpfc_sli_host_down is called to clean up the resources 10290 * associated with a vport before destroying virtual 10291 * port data structures. 10292 * This function does following operations: 10293 * - Free discovery resources associated with this virtual 10294 * port. 10295 * - Free iocbs associated with this virtual port in 10296 * the txq. 10297 * - Send abort for all iocb commands associated with this 10298 * vport in txcmplq. 10299 * 10300 * This function is called with no lock held and always returns 1. 10301 **/ 10302 int 10303 lpfc_sli_host_down(struct lpfc_vport *vport) 10304 { 10305 LIST_HEAD(completions); 10306 struct lpfc_hba *phba = vport->phba; 10307 struct lpfc_sli *psli = &phba->sli; 10308 struct lpfc_queue *qp = NULL; 10309 struct lpfc_sli_ring *pring; 10310 struct lpfc_iocbq *iocb, *next_iocb; 10311 int i; 10312 unsigned long flags = 0; 10313 uint16_t prev_pring_flag; 10314 10315 lpfc_cleanup_discovery_resources(vport); 10316 10317 spin_lock_irqsave(&phba->hbalock, flags); 10318 10319 /* 10320 * Error everything on the txq since these iocbs 10321 * have not been given to the FW yet. 
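 * (These are simply moved to a local completions list and failed further
 * down with IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN.)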
10322 * Also issue ABTS for everything on the txcmplq 10323 */ 10324 if (phba->sli_rev != LPFC_SLI_REV4) { 10325 for (i = 0; i < psli->num_rings; i++) { 10326 pring = &psli->sli3_ring[i]; 10327 prev_pring_flag = pring->flag; 10328 /* Only slow rings */ 10329 if (pring->ringno == LPFC_ELS_RING) { 10330 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10331 /* Set the lpfc data pending flag */ 10332 set_bit(LPFC_DATA_READY, &phba->data_flags); 10333 } 10334 list_for_each_entry_safe(iocb, next_iocb, 10335 &pring->txq, list) { 10336 if (iocb->vport != vport) 10337 continue; 10338 list_move_tail(&iocb->list, &completions); 10339 } 10340 list_for_each_entry_safe(iocb, next_iocb, 10341 &pring->txcmplq, list) { 10342 if (iocb->vport != vport) 10343 continue; 10344 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 10345 } 10346 pring->flag = prev_pring_flag; 10347 } 10348 } else { 10349 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 10350 pring = qp->pring; 10351 if (!pring) 10352 continue; 10353 if (pring == phba->sli4_hba.els_wq->pring) { 10354 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10355 /* Set the lpfc data pending flag */ 10356 set_bit(LPFC_DATA_READY, &phba->data_flags); 10357 } 10358 prev_pring_flag = pring->flag; 10359 spin_lock_irq(&pring->ring_lock); 10360 list_for_each_entry_safe(iocb, next_iocb, 10361 &pring->txq, list) { 10362 if (iocb->vport != vport) 10363 continue; 10364 list_move_tail(&iocb->list, &completions); 10365 } 10366 spin_unlock_irq(&pring->ring_lock); 10367 list_for_each_entry_safe(iocb, next_iocb, 10368 &pring->txcmplq, list) { 10369 if (iocb->vport != vport) 10370 continue; 10371 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 10372 } 10373 pring->flag = prev_pring_flag; 10374 } 10375 } 10376 spin_unlock_irqrestore(&phba->hbalock, flags); 10377 10378 /* Cancel all the IOCBs from the completions list */ 10379 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10380 IOERR_SLI_DOWN); 10381 return 1; 10382 } 10383 10384 /** 10385 * lpfc_sli_hba_down - Resource cleanup function for the HBA 10386 * @phba: Pointer to HBA context object. 10387 * 10388 * This function cleans up all iocb, buffers, mailbox commands 10389 * while shutting down the HBA. This function is called with no 10390 * lock held and always returns 1. 10391 * This function does the following to cleanup driver resources: 10392 * - Free discovery resources for each virtual port 10393 * - Cleanup any pending fabric iocbs 10394 * - Iterate through the iocb txq and free each entry 10395 * in the list. 10396 * - Free up any buffer posted to the HBA 10397 * - Free mailbox commands in the mailbox queue. 10398 **/ 10399 int 10400 lpfc_sli_hba_down(struct lpfc_hba *phba) 10401 { 10402 LIST_HEAD(completions); 10403 struct lpfc_sli *psli = &phba->sli; 10404 struct lpfc_queue *qp = NULL; 10405 struct lpfc_sli_ring *pring; 10406 struct lpfc_dmabuf *buf_ptr; 10407 unsigned long flags = 0; 10408 int i; 10409 10410 /* Shutdown the mailbox command sub-system */ 10411 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT); 10412 10413 lpfc_hba_down_prep(phba); 10414 10415 lpfc_fabric_abort_hba(phba); 10416 10417 spin_lock_irqsave(&phba->hbalock, flags); 10418 10419 /* 10420 * Error everything on the txq since these iocbs 10421 * have not been given to the FW yet. 
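 * They are spliced onto a local completions list and failed below with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN.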
10422 */ 10423 if (phba->sli_rev != LPFC_SLI_REV4) { 10424 for (i = 0; i < psli->num_rings; i++) { 10425 pring = &psli->sli3_ring[i]; 10426 /* Only slow rings */ 10427 if (pring->ringno == LPFC_ELS_RING) { 10428 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10429 /* Set the lpfc data pending flag */ 10430 set_bit(LPFC_DATA_READY, &phba->data_flags); 10431 } 10432 list_splice_init(&pring->txq, &completions); 10433 } 10434 } else { 10435 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 10436 pring = qp->pring; 10437 if (!pring) 10438 continue; 10439 spin_lock_irq(&pring->ring_lock); 10440 list_splice_init(&pring->txq, &completions); 10441 spin_unlock_irq(&pring->ring_lock); 10442 if (pring == phba->sli4_hba.els_wq->pring) { 10443 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10444 /* Set the lpfc data pending flag */ 10445 set_bit(LPFC_DATA_READY, &phba->data_flags); 10446 } 10447 } 10448 } 10449 spin_unlock_irqrestore(&phba->hbalock, flags); 10450 10451 /* Cancel all the IOCBs from the completions list */ 10452 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10453 IOERR_SLI_DOWN); 10454 10455 spin_lock_irqsave(&phba->hbalock, flags); 10456 list_splice_init(&phba->elsbuf, &completions); 10457 phba->elsbuf_cnt = 0; 10458 phba->elsbuf_prev_cnt = 0; 10459 spin_unlock_irqrestore(&phba->hbalock, flags); 10460 10461 while (!list_empty(&completions)) { 10462 list_remove_head(&completions, buf_ptr, 10463 struct lpfc_dmabuf, list); 10464 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 10465 kfree(buf_ptr); 10466 } 10467 10468 /* Return any active mbox cmds */ 10469 del_timer_sync(&psli->mbox_tmo); 10470 10471 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 10472 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 10473 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 10474 10475 return 1; 10476 } 10477 10478 /** 10479 * lpfc_sli_pcimem_bcopy - SLI memory copy function 10480 * @srcp: Source memory pointer. 10481 * @destp: Destination memory pointer. 10482 * @cnt: Number of words required to be copied. 10483 * 10484 * This function is used for copying data between driver memory 10485 * and the SLI memory. This function also changes the endianness 10486 * of each word if native endianness is different from SLI 10487 * endianness. This function can be called with or without 10488 * lock. 10489 **/ 10490 void 10491 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) 10492 { 10493 uint32_t *src = srcp; 10494 uint32_t *dest = destp; 10495 uint32_t ldata; 10496 int i; 10497 10498 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { 10499 ldata = *src; 10500 ldata = le32_to_cpu(ldata); 10501 *dest = ldata; 10502 src++; 10503 dest++; 10504 } 10505 } 10506 10507 10508 /** 10509 * lpfc_sli_bemem_bcopy - SLI memory copy function 10510 * @srcp: Source memory pointer. 10511 * @destp: Destination memory pointer. 10512 * @cnt: Number of words required to be copied. 10513 * 10514 * This function is used for copying data between a data structure 10515 * with big endian representation to local endianness. 10516 * This function can be called with or without lock. 
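 *
 * Note: the copy loop below advances by sizeof(uint32_t) and compares the
 * running offset against @cnt, so @cnt is consumed as a byte count.
 * Illustrative (hypothetical) use for a big-endian firmware record:
 *
 *	lpfc_sli_bemem_bcopy(&be_rec, &cpu_rec, sizeof(cpu_rec));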
10517 **/ 10518 void 10519 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt) 10520 { 10521 uint32_t *src = srcp; 10522 uint32_t *dest = destp; 10523 uint32_t ldata; 10524 int i; 10525 10526 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) { 10527 ldata = *src; 10528 ldata = be32_to_cpu(ldata); 10529 *dest = ldata; 10530 src++; 10531 dest++; 10532 } 10533 } 10534 10535 /** 10536 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq 10537 * @phba: Pointer to HBA context object. 10538 * @pring: Pointer to driver SLI ring object. 10539 * @mp: Pointer to driver buffer object. 10540 * 10541 * This function is called with no lock held. 10542 * It always returns zero after adding the buffer to the postbufq 10543 * buffer list. 10544 **/ 10545 int 10546 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10547 struct lpfc_dmabuf *mp) 10548 { 10549 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up 10550 later */ 10551 spin_lock_irq(&phba->hbalock); 10552 list_add_tail(&mp->list, &pring->postbufq); 10553 pring->postbufq_cnt++; 10554 spin_unlock_irq(&phba->hbalock); 10555 return 0; 10556 } 10557 10558 /** 10559 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer 10560 * @phba: Pointer to HBA context object. 10561 * 10562 * When HBQ is enabled, buffers are searched based on tags. This function 10563 * allocates a tag for a buffer posted using the CMD_QUE_XRI64_CX iocb. The 10564 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag 10565 * does not conflict with tags of buffers posted for unsolicited events. 10566 * The function returns the allocated tag. The function is called with 10567 * no locks held. 10568 **/ 10569 uint32_t 10570 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba) 10571 { 10572 spin_lock_irq(&phba->hbalock); 10573 phba->buffer_tag_count++; 10574 /* 10575 * Always set the QUE_BUFTAG_BIT to distinguish this tag from 10576 * a tag assigned by HBQ. 10577 */ 10578 phba->buffer_tag_count |= QUE_BUFTAG_BIT; 10579 spin_unlock_irq(&phba->hbalock); 10580 return phba->buffer_tag_count; 10581 } 10582 10583 /** 10584 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag 10585 * @phba: Pointer to HBA context object. 10586 * @pring: Pointer to driver SLI ring object. 10587 * @tag: Buffer tag. 10588 * 10589 * Buffers posted using the CMD_QUE_XRI64_CX iocb are on the pring->postbufq 10590 * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX 10591 * iocb is posted to the response ring with the tag of the buffer. 10592 * This function searches the pring->postbufq list using the tag 10593 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX 10594 * iocb. If the buffer is found then the lpfc_dmabuf object of the 10595 * buffer is returned to the caller, else NULL is returned. 10596 * This function is called with no lock held.
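 *
 * Illustrative pairing with the two routines above (variable names are
 * hypothetical):
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	... later, on a CMD_IOCB_RET_XRI64_CX completion ...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag_from_rsp_iocb);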
10597 **/ 10598 struct lpfc_dmabuf * 10599 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10600 uint32_t tag) 10601 { 10602 struct lpfc_dmabuf *mp, *next_mp; 10603 struct list_head *slp = &pring->postbufq; 10604 10605 /* Search postbufq, from the beginning, looking for a match on tag */ 10606 spin_lock_irq(&phba->hbalock); 10607 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 10608 if (mp->buffer_tag == tag) { 10609 list_del_init(&mp->list); 10610 pring->postbufq_cnt--; 10611 spin_unlock_irq(&phba->hbalock); 10612 return mp; 10613 } 10614 } 10615 10616 spin_unlock_irq(&phba->hbalock); 10617 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10618 "0402 Cannot find virtual addr for buffer tag on " 10619 "ring %d Data x%lx x%p x%p x%x\n", 10620 pring->ringno, (unsigned long) tag, 10621 slp->next, slp->prev, pring->postbufq_cnt); 10622 10623 return NULL; 10624 } 10625 10626 /** 10627 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events 10628 * @phba: Pointer to HBA context object. 10629 * @pring: Pointer to driver SLI ring object. 10630 * @phys: DMA address of the buffer. 10631 * 10632 * This function searches the buffer list using the dma_address 10633 * of unsolicited event to find the driver's lpfc_dmabuf object 10634 * corresponding to the dma_address. The function returns the 10635 * lpfc_dmabuf object if a buffer is found else it returns NULL. 10636 * This function is called by the ct and els unsolicited event 10637 * handlers to get the buffer associated with the unsolicited 10638 * event. 10639 * 10640 * This function is called with no lock held. 10641 **/ 10642 struct lpfc_dmabuf * 10643 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10644 dma_addr_t phys) 10645 { 10646 struct lpfc_dmabuf *mp, *next_mp; 10647 struct list_head *slp = &pring->postbufq; 10648 10649 /* Search postbufq, from the beginning, looking for a match on phys */ 10650 spin_lock_irq(&phba->hbalock); 10651 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 10652 if (mp->phys == phys) { 10653 list_del_init(&mp->list); 10654 pring->postbufq_cnt--; 10655 spin_unlock_irq(&phba->hbalock); 10656 return mp; 10657 } 10658 } 10659 10660 spin_unlock_irq(&phba->hbalock); 10661 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10662 "0410 Cannot find virtual addr for mapped buf on " 10663 "ring %d Data x%llx x%p x%p x%x\n", 10664 pring->ringno, (unsigned long long)phys, 10665 slp->next, slp->prev, pring->postbufq_cnt); 10666 return NULL; 10667 } 10668 10669 /** 10670 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs 10671 * @phba: Pointer to HBA context object. 10672 * @cmdiocb: Pointer to driver command iocb object. 10673 * @rspiocb: Pointer to driver response iocb object. 10674 * 10675 * This function is the completion handler for the abort iocbs for 10676 * ELS commands. This function is called from the ELS ring event 10677 * handler with no lock held. This function frees memory resources 10678 * associated with the abort iocb. 10679 **/ 10680 static void 10681 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 10682 struct lpfc_iocbq *rspiocb) 10683 { 10684 IOCB_t *irsp = &rspiocb->iocb; 10685 uint16_t abort_iotag, abort_context; 10686 struct lpfc_iocbq *abort_iocb = NULL; 10687 10688 if (irsp->ulpStatus) { 10689 10690 /* 10691 * Assume that the port already completed and returned, or 10692 * will return the iocb. Just Log the message. 
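 * A non-zero ulpStatus here means the abort request itself was not
 * accepted by the port, typically because the exchange being aborted
 * has already completed or is about to complete.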
10693 */ 10694 abort_context = cmdiocb->iocb.un.acxri.abortContextTag; 10695 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 10696 10697 spin_lock_irq(&phba->hbalock); 10698 if (phba->sli_rev < LPFC_SLI_REV4) { 10699 if (abort_iotag != 0 && 10700 abort_iotag <= phba->sli.last_iotag) 10701 abort_iocb = 10702 phba->sli.iocbq_lookup[abort_iotag]; 10703 } else 10704 /* For sli4 the abort_tag is the XRI, 10705 * so the abort routine puts the iotag of the iocb 10706 * being aborted in the context field of the abort 10707 * IOCB. 10708 */ 10709 abort_iocb = phba->sli.iocbq_lookup[abort_context]; 10710 10711 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, 10712 "0327 Cannot abort els iocb %p " 10713 "with tag %x context %x, abort status %x, " 10714 "abort code %x\n", 10715 abort_iocb, abort_iotag, abort_context, 10716 irsp->ulpStatus, irsp->un.ulpWord[4]); 10717 10718 spin_unlock_irq(&phba->hbalock); 10719 } 10720 lpfc_sli_release_iocbq(phba, cmdiocb); 10721 return; 10722 } 10723 10724 /** 10725 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command 10726 * @phba: Pointer to HBA context object. 10727 * @cmdiocb: Pointer to driver command iocb object. 10728 * @rspiocb: Pointer to driver response iocb object. 10729 * 10730 * The function is called from SLI ring event handler with no 10731 * lock held. This function is the completion handler for ELS commands 10732 * which are aborted. The function frees memory resources used for 10733 * the aborted ELS commands. 10734 **/ 10735 static void 10736 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 10737 struct lpfc_iocbq *rspiocb) 10738 { 10739 IOCB_t *irsp = &rspiocb->iocb; 10740 10741 /* ELS cmd tag <ulpIoTag> completes */ 10742 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 10743 "0139 Ignoring ELS cmd tag x%x completion Data: " 10744 "x%x x%x x%x\n", 10745 irsp->ulpIoTag, irsp->ulpStatus, 10746 irsp->un.ulpWord[4], irsp->ulpTimeout); 10747 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) 10748 lpfc_ct_free_iocb(phba, cmdiocb); 10749 else 10750 lpfc_els_free_iocb(phba, cmdiocb); 10751 return; 10752 } 10753 10754 /** 10755 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb 10756 * @phba: Pointer to HBA context object. 10757 * @pring: Pointer to driver SLI ring object. 10758 * @cmdiocb: Pointer to driver command iocb object. 10759 * 10760 * This function issues an abort iocb for the provided command iocb down to 10761 * the port. Other than the case the outstanding command iocb is an abort 10762 * request, this function issues abort out unconditionally. This function is 10763 * called with hbalock held. The function returns 0 when it fails due to 10764 * memory allocation failure or when the command iocb is an abort request. 10765 **/ 10766 static int 10767 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10768 struct lpfc_iocbq *cmdiocb) 10769 { 10770 struct lpfc_vport *vport = cmdiocb->vport; 10771 struct lpfc_iocbq *abtsiocbp; 10772 IOCB_t *icmd = NULL; 10773 IOCB_t *iabt = NULL; 10774 int retval; 10775 unsigned long iflags; 10776 10777 lockdep_assert_held(&phba->hbalock); 10778 10779 /* 10780 * There are certain command types we don't want to abort. And we 10781 * don't want to abort commands that are already in the process of 10782 * being aborted. 
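 * (The check below skips ABORT_XRI/CLOSE_XRI commands themselves and any
 * iocb already marked LPFC_DRIVER_ABORTED.)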
10783 */ 10784 icmd = &cmdiocb->iocb; 10785 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 10786 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 10787 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 10788 return 0; 10789 10790 /* issue ABTS for this IOCB based on iotag */ 10791 abtsiocbp = __lpfc_sli_get_iocbq(phba); 10792 if (abtsiocbp == NULL) 10793 return 0; 10794 10795 /* This signals the response to set the correct status 10796 * before calling the completion handler 10797 */ 10798 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 10799 10800 iabt = &abtsiocbp->iocb; 10801 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 10802 iabt->un.acxri.abortContextTag = icmd->ulpContext; 10803 if (phba->sli_rev == LPFC_SLI_REV4) { 10804 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; 10805 iabt->un.acxri.abortContextTag = cmdiocb->iotag; 10806 } 10807 else 10808 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 10809 iabt->ulpLe = 1; 10810 iabt->ulpClass = icmd->ulpClass; 10811 10812 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 10813 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx; 10814 if (cmdiocb->iocb_flag & LPFC_IO_FCP) 10815 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX; 10816 if (cmdiocb->iocb_flag & LPFC_IO_FOF) 10817 abtsiocbp->iocb_flag |= LPFC_IO_FOF; 10818 10819 if (phba->link_state >= LPFC_LINK_UP) 10820 iabt->ulpCommand = CMD_ABORT_XRI_CN; 10821 else 10822 iabt->ulpCommand = CMD_CLOSE_XRI_CN; 10823 10824 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; 10825 abtsiocbp->vport = vport; 10826 10827 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 10828 "0339 Abort xri x%x, original iotag x%x, " 10829 "abort cmd iotag x%x\n", 10830 iabt->un.acxri.abortIoTag, 10831 iabt->un.acxri.abortContextTag, 10832 abtsiocbp->iotag); 10833 10834 if (phba->sli_rev == LPFC_SLI_REV4) { 10835 pring = lpfc_sli4_calc_ring(phba, abtsiocbp); 10836 if (unlikely(pring == NULL)) 10837 return 0; 10838 /* Note: both hbalock and ring_lock need to be set here */ 10839 spin_lock_irqsave(&pring->ring_lock, iflags); 10840 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, 10841 abtsiocbp, 0); 10842 spin_unlock_irqrestore(&pring->ring_lock, iflags); 10843 } else { 10844 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, 10845 abtsiocbp, 0); 10846 } 10847 10848 if (retval) 10849 __lpfc_sli_release_iocbq(phba, abtsiocbp); 10850 10851 /* 10852 * Caller to this routine should check for IOCB_ERROR 10853 * and handle it properly. This routine no longer removes 10854 * iocb off txcmplq and call compl in case of IOCB_ERROR. 10855 */ 10856 return retval; 10857 } 10858 10859 /** 10860 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb 10861 * @phba: Pointer to HBA context object. 10862 * @pring: Pointer to driver SLI ring object. 10863 * @cmdiocb: Pointer to driver command iocb object. 10864 * 10865 * This function issues an abort iocb for the provided command iocb. In case 10866 * of unloading, the abort iocb will not be issued to commands on the ELS 10867 * ring. Instead, the callback function is changed for those commands 10868 * so that nothing happens when they finish. This function is called with 10869 * hbalock held. The function returns 0 when the command iocb is an abort 10870 * request.
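 *
 * Illustrative call pattern (the surrounding code is hypothetical):
 *
 *	spin_lock_irqsave(&phba->hbalock, flags);
 *	ret = lpfc_sli_issue_abort_iotag(phba, pring, iocb);
 *	spin_unlock_irqrestore(&phba->hbalock, flags);
 *
 * Callers must check for IOCB_ERROR themselves; on failure this routine
 * neither removes the command from the txcmplq nor calls its completion.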
10871 **/ 10872 int 10873 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10874 struct lpfc_iocbq *cmdiocb) 10875 { 10876 struct lpfc_vport *vport = cmdiocb->vport; 10877 int retval = IOCB_ERROR; 10878 IOCB_t *icmd = NULL; 10879 10880 lockdep_assert_held(&phba->hbalock); 10881 10882 /* 10883 * There are certain command types we don't want to abort. And we 10884 * don't want to abort commands that are already in the process of 10885 * being aborted. 10886 */ 10887 icmd = &cmdiocb->iocb; 10888 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 10889 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 10890 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 10891 return 0; 10892 10893 if (!pring) { 10894 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 10895 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 10896 else 10897 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 10898 goto abort_iotag_exit; 10899 } 10900 10901 /* 10902 * If we're unloading, don't abort iocb on the ELS ring, but change 10903 * the callback so that nothing happens when it finishes. 10904 */ 10905 if ((vport->load_flag & FC_UNLOADING) && 10906 (pring->ringno == LPFC_ELS_RING)) { 10907 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 10908 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 10909 else 10910 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 10911 goto abort_iotag_exit; 10912 } 10913 10914 /* Now, we try to issue the abort to the cmdiocb out */ 10915 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb); 10916 10917 abort_iotag_exit: 10918 /* 10919 * Caller to this routine should check for IOCB_ERROR 10920 * and handle it properly. This routine no longer removes 10921 * iocb off txcmplq and call compl in case of IOCB_ERROR. 10922 */ 10923 return retval; 10924 } 10925 10926 /** 10927 * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb 10928 * @phba: Pointer to HBA context object. 10929 * @pring: Pointer to driver SLI ring object. 10930 * @cmdiocb: Pointer to driver command iocb object. 10931 * 10932 * This function issues an abort iocb for the provided command iocb down to 10933 * the port. Other than the case the outstanding command iocb is an abort 10934 * request, this function issues abort out unconditionally. This function is 10935 * called with hbalock held. The function returns 0 when it fails due to 10936 * memory allocation failure or when the command iocb is an abort request. 10937 **/ 10938 static int 10939 lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10940 struct lpfc_iocbq *cmdiocb) 10941 { 10942 struct lpfc_vport *vport = cmdiocb->vport; 10943 struct lpfc_iocbq *abtsiocbp; 10944 union lpfc_wqe128 *abts_wqe; 10945 int retval; 10946 10947 /* 10948 * There are certain command types we don't want to abort. And we 10949 * don't want to abort commands that are already in the process of 10950 * being aborted. 10951 */ 10952 if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 10953 cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN || 10954 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 10955 return 0; 10956 10957 /* issue ABTS for this io based on iotag */ 10958 abtsiocbp = __lpfc_sli_get_iocbq(phba); 10959 if (abtsiocbp == NULL) 10960 return 0; 10961 10962 /* This signals the response to set the correct status 10963 * before calling the completion handler 10964 */ 10965 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 10966 10967 /* Complete prepping the abort wqe and issue to the FW. 
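 * Word 7 carries CMD_ABORT_XRI_CX, word 8 names the XRI being aborted,
 * word 9 carries this abort request's own iotag, and word 10 directs the
 * WQE to the same WQ index as the I/O being aborted.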
*/ 10968 abts_wqe = &abtsiocbp->wqe; 10969 bf_set(abort_cmd_ia, &abts_wqe->abort_cmd, 0); 10970 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); 10971 10972 /* Explicitly set reserved fields to zero.*/ 10973 abts_wqe->abort_cmd.rsrvd4 = 0; 10974 abts_wqe->abort_cmd.rsrvd5 = 0; 10975 10976 /* WQE Common - word 6. Context is XRI tag. Set 0. */ 10977 bf_set(wqe_xri_tag, &abts_wqe->abort_cmd.wqe_com, 0); 10978 bf_set(wqe_ctxt_tag, &abts_wqe->abort_cmd.wqe_com, 0); 10979 10980 /* word 7 */ 10981 bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0); 10982 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 10983 bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com, 10984 cmdiocb->iocb.ulpClass); 10985 10986 /* word 8 - tell the FW to abort the IO associated with this 10987 * outstanding exchange ID. 10988 */ 10989 abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag; 10990 10991 /* word 9 - this is the iotag for the abts_wqe completion. */ 10992 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com, 10993 abtsiocbp->iotag); 10994 10995 /* word 10 */ 10996 bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, cmdiocb->hba_wqidx); 10997 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); 10998 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); 10999 11000 /* word 11 */ 11001 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND); 11002 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1); 11003 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 11004 11005 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11006 abtsiocbp->iocb_flag |= LPFC_IO_NVME; 11007 abtsiocbp->vport = vport; 11008 abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl; 11009 retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp); 11010 if (retval) { 11011 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, 11012 "6147 Failed abts issue_wqe with status x%x " 11013 "for oxid x%x\n", 11014 retval, cmdiocb->sli4_xritag); 11015 lpfc_sli_release_iocbq(phba, abtsiocbp); 11016 return retval; 11017 } 11018 11019 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, 11020 "6148 Drv Abort NVME Request Issued for " 11021 "ox_id x%x on reqtag x%x\n", 11022 cmdiocb->sli4_xritag, 11023 abtsiocbp->iotag); 11024 11025 return retval; 11026 } 11027 11028 /** 11029 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 11030 * @phba: pointer to lpfc HBA data structure. 11031 * 11032 * This routine will abort all pending and outstanding iocbs to an HBA. 11033 **/ 11034 void 11035 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) 11036 { 11037 struct lpfc_sli *psli = &phba->sli; 11038 struct lpfc_sli_ring *pring; 11039 struct lpfc_queue *qp = NULL; 11040 int i; 11041 11042 if (phba->sli_rev != LPFC_SLI_REV4) { 11043 for (i = 0; i < psli->num_rings; i++) { 11044 pring = &psli->sli3_ring[i]; 11045 lpfc_sli_abort_iocb_ring(phba, pring); 11046 } 11047 return; 11048 } 11049 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 11050 pring = qp->pring; 11051 if (!pring) 11052 continue; 11053 lpfc_sli_abort_iocb_ring(phba, pring); 11054 } 11055 } 11056 11057 /** 11058 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN 11059 * @iocbq: Pointer to driver iocb object. 11060 * @vport: Pointer to driver virtual port object. 11061 * @tgt_id: SCSI ID of the target. 11062 * @lun_id: LUN ID of the scsi device. 
11063 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST 11064 * 11065 * This function acts as an iocb filter for functions which abort or count 11066 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return 11067 * 0 if the filtering criteria is met for the given iocb and will return 11068 * 1 if the filtering criteria is not met. 11069 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the 11070 * given iocb is for the SCSI device specified by vport, tgt_id and 11071 * lun_id parameter. 11072 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the 11073 * given iocb is for the SCSI target specified by vport and tgt_id 11074 * parameters. 11075 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the 11076 * given iocb is for the SCSI host associated with the given vport. 11077 * This function is called with no locks held. 11078 **/ 11079 static int 11080 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, 11081 uint16_t tgt_id, uint64_t lun_id, 11082 lpfc_ctx_cmd ctx_cmd) 11083 { 11084 struct lpfc_scsi_buf *lpfc_cmd; 11085 int rc = 1; 11086 11087 if (!(iocbq->iocb_flag & LPFC_IO_FCP)) 11088 return rc; 11089 11090 if (iocbq->vport != vport) 11091 return rc; 11092 11093 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 11094 11095 if (lpfc_cmd->pCmd == NULL) 11096 return rc; 11097 11098 switch (ctx_cmd) { 11099 case LPFC_CTX_LUN: 11100 if ((lpfc_cmd->rdata->pnode) && 11101 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && 11102 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) 11103 rc = 0; 11104 break; 11105 case LPFC_CTX_TGT: 11106 if ((lpfc_cmd->rdata->pnode) && 11107 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) 11108 rc = 0; 11109 break; 11110 case LPFC_CTX_HOST: 11111 rc = 0; 11112 break; 11113 default: 11114 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 11115 __func__, ctx_cmd); 11116 break; 11117 } 11118 11119 return rc; 11120 } 11121 11122 /** 11123 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending 11124 * @vport: Pointer to virtual port. 11125 * @tgt_id: SCSI ID of the target. 11126 * @lun_id: LUN ID of the scsi device. 11127 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 11128 * 11129 * This function returns number of FCP commands pending for the vport. 11130 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP 11131 * commands pending on the vport associated with SCSI device specified 11132 * by tgt_id and lun_id parameters. 11133 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP 11134 * commands pending on the vport associated with SCSI target specified 11135 * by tgt_id parameter. 11136 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP 11137 * commands pending on the vport. 11138 * This function returns the number of iocbs which satisfy the filter. 11139 * This function is called without any lock held. 
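 *
 * Illustrative (hypothetical) use, counting the FCP commands still pending
 * for a SCSI target:
 *
 *	cnt = lpfc_sli_sum_iocb(vport, tgt_id, 0, LPFC_CTX_TGT);
 *
 * Note that lun_id is only consulted for LPFC_CTX_LUN filtering.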
11140 **/ 11141 int 11142 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 11143 lpfc_ctx_cmd ctx_cmd) 11144 { 11145 struct lpfc_hba *phba = vport->phba; 11146 struct lpfc_iocbq *iocbq; 11147 int sum, i; 11148 11149 spin_lock_irq(&phba->hbalock); 11150 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 11151 iocbq = phba->sli.iocbq_lookup[i]; 11152 11153 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id, 11154 ctx_cmd) == 0) 11155 sum++; 11156 } 11157 spin_unlock_irq(&phba->hbalock); 11158 11159 return sum; 11160 } 11161 11162 /** 11163 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs 11164 * @phba: Pointer to HBA context object 11165 * @cmdiocb: Pointer to command iocb object. 11166 * @rspiocb: Pointer to response iocb object. 11167 * 11168 * This function is called when an aborted FCP iocb completes. This 11169 * function is called by the ring event handler with no lock held. 11170 * This function frees the iocb. 11171 **/ 11172 void 11173 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11174 struct lpfc_iocbq *rspiocb) 11175 { 11176 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11177 "3096 ABORT_XRI_CN completing on rpi x%x " 11178 "original iotag x%x, abort cmd iotag x%x " 11179 "status 0x%x, reason 0x%x\n", 11180 cmdiocb->iocb.un.acxri.abortContextTag, 11181 cmdiocb->iocb.un.acxri.abortIoTag, 11182 cmdiocb->iotag, rspiocb->iocb.ulpStatus, 11183 rspiocb->iocb.un.ulpWord[4]); 11184 lpfc_sli_release_iocbq(phba, cmdiocb); 11185 return; 11186 } 11187 11188 /** 11189 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN 11190 * @vport: Pointer to virtual port. 11191 * @pring: Pointer to driver SLI ring object. 11192 * @tgt_id: SCSI ID of the target. 11193 * @lun_id: LUN ID of the scsi device. 11194 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 11195 * 11196 * This function sends an abort command for every SCSI command 11197 * associated with the given virtual port pending on the ring 11198 * filtered by lpfc_sli_validate_fcp_iocb function. 11199 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the 11200 * FCP iocbs associated with lun specified by tgt_id and lun_id 11201 * parameters 11202 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the 11203 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 11204 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all 11205 * FCP iocbs associated with virtual port. 11206 * This function returns number of iocbs it failed to abort. 11207 * This function is called with no locks held. 11208 **/ 11209 int 11210 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 11211 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) 11212 { 11213 struct lpfc_hba *phba = vport->phba; 11214 struct lpfc_iocbq *iocbq; 11215 struct lpfc_iocbq *abtsiocb; 11216 struct lpfc_sli_ring *pring_s4; 11217 IOCB_t *cmd = NULL; 11218 int errcnt = 0, ret_val = 0; 11219 int i; 11220 11221 for (i = 1; i <= phba->sli.last_iotag; i++) { 11222 iocbq = phba->sli.iocbq_lookup[i]; 11223 11224 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 11225 abort_cmd) != 0) 11226 continue; 11227 11228 /* 11229 * If the iocbq is already being aborted, don't take a second 11230 * action, but do count it. 
11231 */ 11232 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) 11233 continue; 11234 11235 /* issue ABTS for this IOCB based on iotag */ 11236 abtsiocb = lpfc_sli_get_iocbq(phba); 11237 if (abtsiocb == NULL) { 11238 errcnt++; 11239 continue; 11240 } 11241 11242 /* indicate the IO is being aborted by the driver. */ 11243 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; 11244 11245 cmd = &iocbq->iocb; 11246 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 11247 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 11248 if (phba->sli_rev == LPFC_SLI_REV4) 11249 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag; 11250 else 11251 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 11252 abtsiocb->iocb.ulpLe = 1; 11253 abtsiocb->iocb.ulpClass = cmd->ulpClass; 11254 abtsiocb->vport = vport; 11255 11256 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11257 abtsiocb->hba_wqidx = iocbq->hba_wqidx; 11258 if (iocbq->iocb_flag & LPFC_IO_FCP) 11259 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; 11260 if (iocbq->iocb_flag & LPFC_IO_FOF) 11261 abtsiocb->iocb_flag |= LPFC_IO_FOF; 11262 11263 if (lpfc_is_link_up(phba)) 11264 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 11265 else 11266 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 11267 11268 /* Setup callback routine and issue the command. */ 11269 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 11270 if (phba->sli_rev == LPFC_SLI_REV4) { 11271 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq); 11272 if (!pring_s4) 11273 continue; 11274 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno, 11275 abtsiocb, 0); 11276 } else 11277 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, 11278 abtsiocb, 0); 11279 if (ret_val == IOCB_ERROR) { 11280 lpfc_sli_release_iocbq(phba, abtsiocb); 11281 errcnt++; 11282 continue; 11283 } 11284 } 11285 11286 return errcnt; 11287 } 11288 11289 /** 11290 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN 11291 * @vport: Pointer to virtual port. 11292 * @pring: Pointer to driver SLI ring object. 11293 * @tgt_id: SCSI ID of the target. 11294 * @lun_id: LUN ID of the scsi device. 11295 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 11296 * 11297 * This function sends an abort command for every SCSI command 11298 * associated with the given virtual port pending on the ring 11299 * filtered by lpfc_sli_validate_fcp_iocb function. 11300 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the 11301 * FCP iocbs associated with lun specified by tgt_id and lun_id 11302 * parameters 11303 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the 11304 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 11305 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all 11306 * FCP iocbs associated with virtual port. 11307 * This function returns number of iocbs it aborted . 11308 * This function is called with no locks held right after a taskmgmt 11309 * command is sent. 
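 *
 * Unlike lpfc_sli_abort_iocb() above, this routine takes the hbalock itself
 * and returns the number of aborts successfully issued rather than the
 * number of failures. Illustrative (hypothetical) use after a LUN reset:
 *
 *	cnt = lpfc_sli_abort_taskmgmt(vport, pring, tgt_id, lun_id,
 *				      LPFC_CTX_LUN);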
11310 **/ 11311 int 11312 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 11313 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd) 11314 { 11315 struct lpfc_hba *phba = vport->phba; 11316 struct lpfc_scsi_buf *lpfc_cmd; 11317 struct lpfc_iocbq *abtsiocbq; 11318 struct lpfc_nodelist *ndlp; 11319 struct lpfc_iocbq *iocbq; 11320 IOCB_t *icmd; 11321 int sum, i, ret_val; 11322 unsigned long iflags; 11323 struct lpfc_sli_ring *pring_s4; 11324 11325 spin_lock_irqsave(&phba->hbalock, iflags); 11326 11327 /* all I/Os are in process of being flushed */ 11328 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { 11329 spin_unlock_irqrestore(&phba->hbalock, iflags); 11330 return 0; 11331 } 11332 sum = 0; 11333 11334 for (i = 1; i <= phba->sli.last_iotag; i++) { 11335 iocbq = phba->sli.iocbq_lookup[i]; 11336 11337 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 11338 cmd) != 0) 11339 continue; 11340 11341 /* 11342 * If the iocbq is already being aborted, don't take a second 11343 * action, but do count it. 11344 */ 11345 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) 11346 continue; 11347 11348 /* issue ABTS for this IOCB based on iotag */ 11349 abtsiocbq = __lpfc_sli_get_iocbq(phba); 11350 if (abtsiocbq == NULL) 11351 continue; 11352 11353 icmd = &iocbq->iocb; 11354 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 11355 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext; 11356 if (phba->sli_rev == LPFC_SLI_REV4) 11357 abtsiocbq->iocb.un.acxri.abortIoTag = 11358 iocbq->sli4_xritag; 11359 else 11360 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag; 11361 abtsiocbq->iocb.ulpLe = 1; 11362 abtsiocbq->iocb.ulpClass = icmd->ulpClass; 11363 abtsiocbq->vport = vport; 11364 11365 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11366 abtsiocbq->hba_wqidx = iocbq->hba_wqidx; 11367 if (iocbq->iocb_flag & LPFC_IO_FCP) 11368 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX; 11369 if (iocbq->iocb_flag & LPFC_IO_FOF) 11370 abtsiocbq->iocb_flag |= LPFC_IO_FOF; 11371 11372 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 11373 ndlp = lpfc_cmd->rdata->pnode; 11374 11375 if (lpfc_is_link_up(phba) && 11376 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 11377 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN; 11378 else 11379 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 11380 11381 /* Setup callback routine and issue the command. */ 11382 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 11383 11384 /* 11385 * Indicate the IO is being aborted by the driver and set 11386 * the caller's flag into the aborted IO. 11387 */ 11388 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; 11389 11390 if (phba->sli_rev == LPFC_SLI_REV4) { 11391 pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocbq); 11392 if (!pring_s4) 11393 continue; 11394 /* Note: both hbalock and ring_lock must be set here */ 11395 spin_lock(&pring_s4->ring_lock); 11396 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, 11397 abtsiocbq, 0); 11398 spin_unlock(&pring_s4->ring_lock); 11399 } else { 11400 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno, 11401 abtsiocbq, 0); 11402 } 11403 11404 11405 if (ret_val == IOCB_ERROR) 11406 __lpfc_sli_release_iocbq(phba, abtsiocbq); 11407 else 11408 sum++; 11409 } 11410 spin_unlock_irqrestore(&phba->hbalock, iflags); 11411 return sum; 11412 } 11413 11414 /** 11415 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler 11416 * @phba: Pointer to HBA context object. 11417 * @cmdiocbq: Pointer to command iocb. 11418 * @rspiocbq: Pointer to response iocb. 
11419 * 11420 * This function is the completion handler for iocbs issued using 11421 * lpfc_sli_issue_iocb_wait function. This function is called by the 11422 * ring event handler function without any lock held. This function 11423 * can be called from both worker thread context and interrupt 11424 * context. This function also can be called from other thread which 11425 * cleans up the SLI layer objects. 11426 * This function copy the contents of the response iocb to the 11427 * response iocb memory object provided by the caller of 11428 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 11429 * sleeps for the iocb completion. 11430 **/ 11431 static void 11432 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, 11433 struct lpfc_iocbq *cmdiocbq, 11434 struct lpfc_iocbq *rspiocbq) 11435 { 11436 wait_queue_head_t *pdone_q; 11437 unsigned long iflags; 11438 struct lpfc_scsi_buf *lpfc_cmd; 11439 11440 spin_lock_irqsave(&phba->hbalock, iflags); 11441 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) { 11442 11443 /* 11444 * A time out has occurred for the iocb. If a time out 11445 * completion handler has been supplied, call it. Otherwise, 11446 * just free the iocbq. 11447 */ 11448 11449 spin_unlock_irqrestore(&phba->hbalock, iflags); 11450 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl; 11451 cmdiocbq->wait_iocb_cmpl = NULL; 11452 if (cmdiocbq->iocb_cmpl) 11453 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL); 11454 else 11455 lpfc_sli_release_iocbq(phba, cmdiocbq); 11456 return; 11457 } 11458 11459 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 11460 if (cmdiocbq->context2 && rspiocbq) 11461 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 11462 &rspiocbq->iocb, sizeof(IOCB_t)); 11463 11464 /* Set the exchange busy flag for task management commands */ 11465 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && 11466 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { 11467 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf, 11468 cur_iocbq); 11469 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; 11470 } 11471 11472 pdone_q = cmdiocbq->context_un.wait_queue; 11473 if (pdone_q) 11474 wake_up(pdone_q); 11475 spin_unlock_irqrestore(&phba->hbalock, iflags); 11476 return; 11477 } 11478 11479 /** 11480 * lpfc_chk_iocb_flg - Test IOCB flag with lock held. 11481 * @phba: Pointer to HBA context object.. 11482 * @piocbq: Pointer to command iocb. 11483 * @flag: Flag to test. 11484 * 11485 * This routine grabs the hbalock and then test the iocb_flag to 11486 * see if the passed in flag is set. 11487 * Returns: 11488 * 1 if flag is set. 11489 * 0 if flag is not set. 11490 **/ 11491 static int 11492 lpfc_chk_iocb_flg(struct lpfc_hba *phba, 11493 struct lpfc_iocbq *piocbq, uint32_t flag) 11494 { 11495 unsigned long iflags; 11496 int ret; 11497 11498 spin_lock_irqsave(&phba->hbalock, iflags); 11499 ret = piocbq->iocb_flag & flag; 11500 spin_unlock_irqrestore(&phba->hbalock, iflags); 11501 return ret; 11502 11503 } 11504 11505 /** 11506 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands 11507 * @phba: Pointer to HBA context object.. 11508 * @pring: Pointer to sli ring. 11509 * @piocb: Pointer to command iocb. 11510 * @prspiocbq: Pointer to response iocb. 11511 * @timeout: Timeout in number of seconds. 11512 * 11513 * This function issues the iocb to firmware and waits for the 11514 * iocb to complete. The iocb_cmpl field of the shall be used 11515 * to handle iocbs which time out. If the field is NULL, the 11516 * function shall free the iocbq structure. 
If more clean up is 11517 * needed, the caller is expected to provide a completion function 11518 * that will provide the needed clean up. If the iocb command is 11519 * not completed within timeout seconds, the function will either 11520 * free the iocbq structure (if iocb_cmpl == NULL) or execute the 11521 * completion function set in the iocb_cmpl field and then return 11522 * a status of IOCB_TIMEDOUT. The caller should not free the iocb 11523 * resources if this function returns IOCB_TIMEDOUT. 11524 * The function waits for the iocb completion using an 11525 * non-interruptible wait. 11526 * This function will sleep while waiting for iocb completion. 11527 * So, this function should not be called from any context which 11528 * does not allow sleeping. Due to the same reason, this function 11529 * cannot be called with interrupt disabled. 11530 * This function assumes that the iocb completions occur while 11531 * this function sleep. So, this function cannot be called from 11532 * the thread which process iocb completion for this ring. 11533 * This function clears the iocb_flag of the iocb object before 11534 * issuing the iocb and the iocb completion handler sets this 11535 * flag and wakes this thread when the iocb completes. 11536 * The contents of the response iocb will be copied to prspiocbq 11537 * by the completion handler when the command completes. 11538 * This function returns IOCB_SUCCESS when success. 11539 * This function is called with no lock held. 11540 **/ 11541 int 11542 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, 11543 uint32_t ring_number, 11544 struct lpfc_iocbq *piocb, 11545 struct lpfc_iocbq *prspiocbq, 11546 uint32_t timeout) 11547 { 11548 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 11549 long timeleft, timeout_req = 0; 11550 int retval = IOCB_SUCCESS; 11551 uint32_t creg_val; 11552 struct lpfc_iocbq *iocb; 11553 int txq_cnt = 0; 11554 int txcmplq_cnt = 0; 11555 struct lpfc_sli_ring *pring; 11556 unsigned long iflags; 11557 bool iocb_completed = true; 11558 11559 if (phba->sli_rev >= LPFC_SLI_REV4) 11560 pring = lpfc_sli4_calc_ring(phba, piocb); 11561 else 11562 pring = &phba->sli.sli3_ring[ring_number]; 11563 /* 11564 * If the caller has provided a response iocbq buffer, then context2 11565 * is NULL or its an error. 11566 */ 11567 if (prspiocbq) { 11568 if (piocb->context2) 11569 return IOCB_ERROR; 11570 piocb->context2 = prspiocbq; 11571 } 11572 11573 piocb->wait_iocb_cmpl = piocb->iocb_cmpl; 11574 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; 11575 piocb->context_un.wait_queue = &done_q; 11576 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO); 11577 11578 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 11579 if (lpfc_readl(phba->HCregaddr, &creg_val)) 11580 return IOCB_ERROR; 11581 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 11582 writel(creg_val, phba->HCregaddr); 11583 readl(phba->HCregaddr); /* flush */ 11584 } 11585 11586 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 11587 SLI_IOCB_RET_IOCB); 11588 if (retval == IOCB_SUCCESS) { 11589 timeout_req = msecs_to_jiffies(timeout * 1000); 11590 timeleft = wait_event_timeout(done_q, 11591 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), 11592 timeout_req); 11593 spin_lock_irqsave(&phba->hbalock, iflags); 11594 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) { 11595 11596 /* 11597 * IOCB timed out. 
Inform the wake iocb wait 11598 * completion function and set local status 11599 */ 11600 11601 iocb_completed = false; 11602 piocb->iocb_flag |= LPFC_IO_WAKE_TMO; 11603 } 11604 spin_unlock_irqrestore(&phba->hbalock, iflags); 11605 if (iocb_completed) { 11606 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11607 "0331 IOCB wake signaled\n"); 11608 /* Note: we are not indicating if the IOCB has a success 11609 * status or not - that's for the caller to check. 11610 * IOCB_SUCCESS means just that the command was sent and 11611 * completed. Not that it completed successfully. 11612 * */ 11613 } else if (timeleft == 0) { 11614 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11615 "0338 IOCB wait timeout error - no " 11616 "wake response Data x%x\n", timeout); 11617 retval = IOCB_TIMEDOUT; 11618 } else { 11619 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11620 "0330 IOCB wake NOT set, " 11621 "Data x%x x%lx\n", 11622 timeout, (timeleft / jiffies)); 11623 retval = IOCB_TIMEDOUT; 11624 } 11625 } else if (retval == IOCB_BUSY) { 11626 if (phba->cfg_log_verbose & LOG_SLI) { 11627 list_for_each_entry(iocb, &pring->txq, list) { 11628 txq_cnt++; 11629 } 11630 list_for_each_entry(iocb, &pring->txcmplq, list) { 11631 txcmplq_cnt++; 11632 } 11633 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11634 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", 11635 phba->iocb_cnt, txq_cnt, txcmplq_cnt); 11636 } 11637 return retval; 11638 } else { 11639 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11640 "0332 IOCB wait issue failed, Data x%x\n", 11641 retval); 11642 retval = IOCB_ERROR; 11643 } 11644 11645 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 11646 if (lpfc_readl(phba->HCregaddr, &creg_val)) 11647 return IOCB_ERROR; 11648 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 11649 writel(creg_val, phba->HCregaddr); 11650 readl(phba->HCregaddr); /* flush */ 11651 } 11652 11653 if (prspiocbq) 11654 piocb->context2 = NULL; 11655 11656 piocb->context_un.wait_queue = NULL; 11657 piocb->iocb_cmpl = NULL; 11658 return retval; 11659 } 11660 11661 /** 11662 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox 11663 * @phba: Pointer to HBA context object. 11664 * @pmboxq: Pointer to driver mailbox object. 11665 * @timeout: Timeout in number of seconds. 11666 * 11667 * This function issues the mailbox to firmware and waits for the 11668 * mailbox command to complete. If the mailbox command is not 11669 * completed within timeout seconds, it returns MBX_TIMEOUT. 11670 * The function waits for the mailbox completion using an 11671 * interruptible wait. If the thread is woken up due to a 11672 * signal, MBX_TIMEOUT error is returned to the caller. Caller 11673 * should not free the mailbox resources, if this function returns 11674 * MBX_TIMEOUT. 11675 * This function will sleep while waiting for mailbox completion. 11676 * So, this function should not be called from any context which 11677 * does not allow sleeping. Due to the same reason, this function 11678 * cannot be called with interrupt disabled. 11679 * This function assumes that the mailbox completion occurs while 11680 * this function sleep. So, this function cannot be called from 11681 * the worker thread which processes mailbox completion. 11682 * This function is called in the context of HBA management 11683 * applications. 11684 * This function returns MBX_SUCCESS when successful. 11685 * This function is called with no lock held. 
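 *
 * Illustrative synchronous usage (command setup and error handling elided;
 * this sketch is not taken from a specific caller):
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	lpfc_read_rev(phba, mboxq);
 *	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, LPFC_MBOX_TMO);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(mboxq, phba->mbox_mem_pool);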
11686 **/ 11687 int 11688 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, 11689 uint32_t timeout) 11690 { 11691 struct completion mbox_done; 11692 int retval; 11693 unsigned long flag; 11694 11695 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE; 11696 /* setup wake call as IOCB callback */ 11697 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; 11698 11699 /* setup context3 field to pass wait_queue pointer to wake function */ 11700 init_completion(&mbox_done); 11701 pmboxq->context3 = &mbox_done; 11702 /* now issue the command */ 11703 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 11704 if (retval == MBX_BUSY || retval == MBX_SUCCESS) { 11705 wait_for_completion_timeout(&mbox_done, 11706 msecs_to_jiffies(timeout * 1000)); 11707 11708 spin_lock_irqsave(&phba->hbalock, flag); 11709 pmboxq->context3 = NULL; 11710 /* 11711 * if LPFC_MBX_WAKE flag is set the mailbox is completed 11712 * else do not free the resources. 11713 */ 11714 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) { 11715 retval = MBX_SUCCESS; 11716 } else { 11717 retval = MBX_TIMEOUT; 11718 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 11719 } 11720 spin_unlock_irqrestore(&phba->hbalock, flag); 11721 } 11722 return retval; 11723 } 11724 11725 /** 11726 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system 11727 * @phba: Pointer to HBA context. 11728 * 11729 * This function is called to shutdown the driver's mailbox sub-system. 11730 * It first marks the mailbox sub-system is in a block state to prevent 11731 * the asynchronous mailbox command from issued off the pending mailbox 11732 * command queue. If the mailbox command sub-system shutdown is due to 11733 * HBA error conditions such as EEH or ERATT, this routine shall invoke 11734 * the mailbox sub-system flush routine to forcefully bring down the 11735 * mailbox sub-system. Otherwise, if it is due to normal condition (such 11736 * as with offline or HBA function reset), this routine will wait for the 11737 * outstanding mailbox command to complete before invoking the mailbox 11738 * sub-system flush routine to gracefully bring down mailbox sub-system. 11739 **/ 11740 void 11741 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action) 11742 { 11743 struct lpfc_sli *psli = &phba->sli; 11744 unsigned long timeout; 11745 11746 if (mbx_action == LPFC_MBX_NO_WAIT) { 11747 /* delay 100ms for port state */ 11748 msleep(100); 11749 lpfc_sli_mbox_sys_flush(phba); 11750 return; 11751 } 11752 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 11753 11754 spin_lock_irq(&phba->hbalock); 11755 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 11756 11757 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 11758 /* Determine how long we might wait for the active mailbox 11759 * command to be gracefully completed by firmware. 11760 */ 11761 if (phba->sli.mbox_active) 11762 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 11763 phba->sli.mbox_active) * 11764 1000) + jiffies; 11765 spin_unlock_irq(&phba->hbalock); 11766 11767 while (phba->sli.mbox_active) { 11768 /* Check active mailbox complete status every 2ms */ 11769 msleep(2); 11770 if (time_after(jiffies, timeout)) 11771 /* Timeout, let the mailbox flush routine to 11772 * forcefully release active mailbox command 11773 */ 11774 break; 11775 } 11776 } else 11777 spin_unlock_irq(&phba->hbalock); 11778 11779 lpfc_sli_mbox_sys_flush(phba); 11780 } 11781 11782 /** 11783 * lpfc_sli_eratt_read - read sli-3 error attention events 11784 * @phba: Pointer to HBA context. 
11785 * 11786 * This function is called to read the SLI3 device error attention registers 11787 * for possible error attention events. The caller must hold the hostlock 11788 * with spin_lock_irq(). 11789 * 11790 * This function returns 1 when there is Error Attention in the Host Attention 11791 * Register and returns 0 otherwise. 11792 **/ 11793 static int 11794 lpfc_sli_eratt_read(struct lpfc_hba *phba) 11795 { 11796 uint32_t ha_copy; 11797 11798 /* Read chip Host Attention (HA) register */ 11799 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 11800 goto unplug_err; 11801 11802 if (ha_copy & HA_ERATT) { 11803 /* Read host status register to retrieve error event */ 11804 if (lpfc_sli_read_hs(phba)) 11805 goto unplug_err; 11806 11807 /* Check if there is a deferred error condition is active */ 11808 if ((HS_FFER1 & phba->work_hs) && 11809 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 11810 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { 11811 phba->hba_flag |= DEFER_ERATT; 11812 /* Clear all interrupt enable conditions */ 11813 writel(0, phba->HCregaddr); 11814 readl(phba->HCregaddr); 11815 } 11816 11817 /* Set the driver HA work bitmap */ 11818 phba->work_ha |= HA_ERATT; 11819 /* Indicate polling handles this ERATT */ 11820 phba->hba_flag |= HBA_ERATT_HANDLED; 11821 return 1; 11822 } 11823 return 0; 11824 11825 unplug_err: 11826 /* Set the driver HS work bitmap */ 11827 phba->work_hs |= UNPLUG_ERR; 11828 /* Set the driver HA work bitmap */ 11829 phba->work_ha |= HA_ERATT; 11830 /* Indicate polling handles this ERATT */ 11831 phba->hba_flag |= HBA_ERATT_HANDLED; 11832 return 1; 11833 } 11834 11835 /** 11836 * lpfc_sli4_eratt_read - read sli-4 error attention events 11837 * @phba: Pointer to HBA context. 11838 * 11839 * This function is called to read the SLI4 device error attention registers 11840 * for possible error attention events. The caller must hold the hostlock 11841 * with spin_lock_irq(). 11842 * 11843 * This function returns 1 when there is Error Attention in the Host Attention 11844 * Register and returns 0 otherwise. 11845 **/ 11846 static int 11847 lpfc_sli4_eratt_read(struct lpfc_hba *phba) 11848 { 11849 uint32_t uerr_sta_hi, uerr_sta_lo; 11850 uint32_t if_type, portsmphr; 11851 struct lpfc_register portstat_reg; 11852 11853 /* 11854 * For now, use the SLI4 device internal unrecoverable error 11855 * registers for error attention. This can be changed later. 
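 * (If_type 0 ports report through the UERRLO/UERRHI registers masked by
 * ue_mask_lo/ue_mask_hi; if_type 2 and 6 ports report through the SLIPORT
 * status and semaphore registers plus ERR1/ERR2, as handled below.)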
11856 */ 11857 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 11858 switch (if_type) { 11859 case LPFC_SLI_INTF_IF_TYPE_0: 11860 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, 11861 &uerr_sta_lo) || 11862 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, 11863 &uerr_sta_hi)) { 11864 phba->work_hs |= UNPLUG_ERR; 11865 phba->work_ha |= HA_ERATT; 11866 phba->hba_flag |= HBA_ERATT_HANDLED; 11867 return 1; 11868 } 11869 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 11870 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 11871 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11872 "1423 HBA Unrecoverable error: " 11873 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 11874 "ue_mask_lo_reg=0x%x, " 11875 "ue_mask_hi_reg=0x%x\n", 11876 uerr_sta_lo, uerr_sta_hi, 11877 phba->sli4_hba.ue_mask_lo, 11878 phba->sli4_hba.ue_mask_hi); 11879 phba->work_status[0] = uerr_sta_lo; 11880 phba->work_status[1] = uerr_sta_hi; 11881 phba->work_ha |= HA_ERATT; 11882 phba->hba_flag |= HBA_ERATT_HANDLED; 11883 return 1; 11884 } 11885 break; 11886 case LPFC_SLI_INTF_IF_TYPE_2: 11887 case LPFC_SLI_INTF_IF_TYPE_6: 11888 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 11889 &portstat_reg.word0) || 11890 lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 11891 &portsmphr)){ 11892 phba->work_hs |= UNPLUG_ERR; 11893 phba->work_ha |= HA_ERATT; 11894 phba->hba_flag |= HBA_ERATT_HANDLED; 11895 return 1; 11896 } 11897 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { 11898 phba->work_status[0] = 11899 readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 11900 phba->work_status[1] = 11901 readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 11902 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11903 "2885 Port Status Event: " 11904 "port status reg 0x%x, " 11905 "port smphr reg 0x%x, " 11906 "error 1=0x%x, error 2=0x%x\n", 11907 portstat_reg.word0, 11908 portsmphr, 11909 phba->work_status[0], 11910 phba->work_status[1]); 11911 phba->work_ha |= HA_ERATT; 11912 phba->hba_flag |= HBA_ERATT_HANDLED; 11913 return 1; 11914 } 11915 break; 11916 case LPFC_SLI_INTF_IF_TYPE_1: 11917 default: 11918 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11919 "2886 HBA Error Attention on unsupported " 11920 "if type %d.", if_type); 11921 return 1; 11922 } 11923 11924 return 0; 11925 } 11926 11927 /** 11928 * lpfc_sli_check_eratt - check error attention events 11929 * @phba: Pointer to HBA context. 11930 * 11931 * This function is called from timer soft interrupt context to check HBA's 11932 * error attention register bit for error attention events. 11933 * 11934 * This function returns 1 when there is Error Attention in the Host Attention 11935 * Register and returns 0 otherwise. 11936 **/ 11937 int 11938 lpfc_sli_check_eratt(struct lpfc_hba *phba) 11939 { 11940 uint32_t ha_copy; 11941 11942 /* If somebody is waiting to handle an eratt, don't process it 11943 * here. The brdkill function will do this. 
11944 */ 11945 if (phba->link_flag & LS_IGNORE_ERATT) 11946 return 0; 11947 11948 /* Check if interrupt handler handles this ERATT */ 11949 spin_lock_irq(&phba->hbalock); 11950 if (phba->hba_flag & HBA_ERATT_HANDLED) { 11951 /* Interrupt handler has handled ERATT */ 11952 spin_unlock_irq(&phba->hbalock); 11953 return 0; 11954 } 11955 11956 /* 11957 * If there is deferred error attention, do not check for error 11958 * attention 11959 */ 11960 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 11961 spin_unlock_irq(&phba->hbalock); 11962 return 0; 11963 } 11964 11965 /* If PCI channel is offline, don't process it */ 11966 if (unlikely(pci_channel_offline(phba->pcidev))) { 11967 spin_unlock_irq(&phba->hbalock); 11968 return 0; 11969 } 11970 11971 switch (phba->sli_rev) { 11972 case LPFC_SLI_REV2: 11973 case LPFC_SLI_REV3: 11974 /* Read chip Host Attention (HA) register */ 11975 ha_copy = lpfc_sli_eratt_read(phba); 11976 break; 11977 case LPFC_SLI_REV4: 11978 /* Read device Uncoverable Error (UERR) registers */ 11979 ha_copy = lpfc_sli4_eratt_read(phba); 11980 break; 11981 default: 11982 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11983 "0299 Invalid SLI revision (%d)\n", 11984 phba->sli_rev); 11985 ha_copy = 0; 11986 break; 11987 } 11988 spin_unlock_irq(&phba->hbalock); 11989 11990 return ha_copy; 11991 } 11992 11993 /** 11994 * lpfc_intr_state_check - Check device state for interrupt handling 11995 * @phba: Pointer to HBA context. 11996 * 11997 * This inline routine checks whether a device or its PCI slot is in a state 11998 * that the interrupt should be handled. 11999 * 12000 * This function returns 0 if the device or the PCI slot is in a state that 12001 * interrupt should be handled, otherwise -EIO. 12002 */ 12003 static inline int 12004 lpfc_intr_state_check(struct lpfc_hba *phba) 12005 { 12006 /* If the pci channel is offline, ignore all the interrupts */ 12007 if (unlikely(pci_channel_offline(phba->pcidev))) 12008 return -EIO; 12009 12010 /* Update device level interrupt statistics */ 12011 phba->sli.slistat.sli_intr++; 12012 12013 /* Ignore all interrupts during initialization. */ 12014 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 12015 return -EIO; 12016 12017 return 0; 12018 } 12019 12020 /** 12021 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device 12022 * @irq: Interrupt number. 12023 * @dev_id: The device context pointer. 12024 * 12025 * This function is directly called from the PCI layer as an interrupt 12026 * service routine when device with SLI-3 interface spec is enabled with 12027 * MSI-X multi-message interrupt mode and there are slow-path events in 12028 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 12029 * interrupt mode, this function is called as part of the device-level 12030 * interrupt handler. When the PCI slot is in error recovery or the HBA 12031 * is undergoing initialization, the interrupt handler will not process 12032 * the interrupt. The link attention and ELS ring attention events are 12033 * handled by the worker thread. The interrupt handler signals the worker 12034 * thread and returns for these events. This function is called without 12035 * any lock held. It gets the hbalock to access and update SLI data 12036 * structures. 12037 * 12038 * This function returns IRQ_HANDLED when interrupt is handled else it 12039 * returns IRQ_NONE. 
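 *
 * For reference, in MSI-X mode this handler is bound to the slow-path
 * vector during interrupt setup in lpfc_init.c, along these (illustrative,
 * simplified) lines:
 *
 *	rc = request_irq(pci_irq_vector(phba->pcidev, 0),
 *			 &lpfc_sli_sp_intr_handler, 0,
 *			 LPFC_SP_DRIVER_HANDLER_NAME, phba);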
12040 **/ 12041 irqreturn_t 12042 lpfc_sli_sp_intr_handler(int irq, void *dev_id) 12043 { 12044 struct lpfc_hba *phba; 12045 uint32_t ha_copy, hc_copy; 12046 uint32_t work_ha_copy; 12047 unsigned long status; 12048 unsigned long iflag; 12049 uint32_t control; 12050 12051 MAILBOX_t *mbox, *pmbox; 12052 struct lpfc_vport *vport; 12053 struct lpfc_nodelist *ndlp; 12054 struct lpfc_dmabuf *mp; 12055 LPFC_MBOXQ_t *pmb; 12056 int rc; 12057 12058 /* 12059 * Get the driver's phba structure from the dev_id and 12060 * assume the HBA is not interrupting. 12061 */ 12062 phba = (struct lpfc_hba *)dev_id; 12063 12064 if (unlikely(!phba)) 12065 return IRQ_NONE; 12066 12067 /* 12068 * Stuff needs to be attented to when this function is invoked as an 12069 * individual interrupt handler in MSI-X multi-message interrupt mode 12070 */ 12071 if (phba->intr_type == MSIX) { 12072 /* Check device state for handling interrupt */ 12073 if (lpfc_intr_state_check(phba)) 12074 return IRQ_NONE; 12075 /* Need to read HA REG for slow-path events */ 12076 spin_lock_irqsave(&phba->hbalock, iflag); 12077 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 12078 goto unplug_error; 12079 /* If somebody is waiting to handle an eratt don't process it 12080 * here. The brdkill function will do this. 12081 */ 12082 if (phba->link_flag & LS_IGNORE_ERATT) 12083 ha_copy &= ~HA_ERATT; 12084 /* Check the need for handling ERATT in interrupt handler */ 12085 if (ha_copy & HA_ERATT) { 12086 if (phba->hba_flag & HBA_ERATT_HANDLED) 12087 /* ERATT polling has handled ERATT */ 12088 ha_copy &= ~HA_ERATT; 12089 else 12090 /* Indicate interrupt handler handles ERATT */ 12091 phba->hba_flag |= HBA_ERATT_HANDLED; 12092 } 12093 12094 /* 12095 * If there is deferred error attention, do not check for any 12096 * interrupt. 12097 */ 12098 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12099 spin_unlock_irqrestore(&phba->hbalock, iflag); 12100 return IRQ_NONE; 12101 } 12102 12103 /* Clear up only attention source related to slow-path */ 12104 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 12105 goto unplug_error; 12106 12107 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | 12108 HC_LAINT_ENA | HC_ERINT_ENA), 12109 phba->HCregaddr); 12110 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 12111 phba->HAregaddr); 12112 writel(hc_copy, phba->HCregaddr); 12113 readl(phba->HAregaddr); /* flush */ 12114 spin_unlock_irqrestore(&phba->hbalock, iflag); 12115 } else 12116 ha_copy = phba->ha_copy; 12117 12118 work_ha_copy = ha_copy & phba->work_ha_mask; 12119 12120 if (work_ha_copy) { 12121 if (work_ha_copy & HA_LATT) { 12122 if (phba->sli.sli_flag & LPFC_PROCESS_LA) { 12123 /* 12124 * Turn off Link Attention interrupts 12125 * until CLEAR_LA done 12126 */ 12127 spin_lock_irqsave(&phba->hbalock, iflag); 12128 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 12129 if (lpfc_readl(phba->HCregaddr, &control)) 12130 goto unplug_error; 12131 control &= ~HC_LAINT_ENA; 12132 writel(control, phba->HCregaddr); 12133 readl(phba->HCregaddr); /* flush */ 12134 spin_unlock_irqrestore(&phba->hbalock, iflag); 12135 } 12136 else 12137 work_ha_copy &= ~HA_LATT; 12138 } 12139 12140 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) { 12141 /* 12142 * Turn off Slow Rings interrupts, LPFC_ELS_RING is 12143 * the only slow ring. 
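			 * Each ring owns a 4-bit attention field in the HA
			 * register, so the ELS ring bits are isolated below by
			 * masking with HA_RXMASK shifted up by 4 * LPFC_ELS_RING
			 * and then shifting the result back down.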
12144 */ 12145 status = (work_ha_copy & 12146 (HA_RXMASK << (4*LPFC_ELS_RING))); 12147 status >>= (4*LPFC_ELS_RING); 12148 if (status & HA_RXMASK) { 12149 spin_lock_irqsave(&phba->hbalock, iflag); 12150 if (lpfc_readl(phba->HCregaddr, &control)) 12151 goto unplug_error; 12152 12153 lpfc_debugfs_slow_ring_trc(phba, 12154 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", 12155 control, status, 12156 (uint32_t)phba->sli.slistat.sli_intr); 12157 12158 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { 12159 lpfc_debugfs_slow_ring_trc(phba, 12160 "ISR Disable ring:" 12161 "pwork:x%x hawork:x%x wait:x%x", 12162 phba->work_ha, work_ha_copy, 12163 (uint32_t)((unsigned long) 12164 &phba->work_waitq)); 12165 12166 control &= 12167 ~(HC_R0INT_ENA << LPFC_ELS_RING); 12168 writel(control, phba->HCregaddr); 12169 readl(phba->HCregaddr); /* flush */ 12170 } 12171 else { 12172 lpfc_debugfs_slow_ring_trc(phba, 12173 "ISR slow ring: pwork:" 12174 "x%x hawork:x%x wait:x%x", 12175 phba->work_ha, work_ha_copy, 12176 (uint32_t)((unsigned long) 12177 &phba->work_waitq)); 12178 } 12179 spin_unlock_irqrestore(&phba->hbalock, iflag); 12180 } 12181 } 12182 spin_lock_irqsave(&phba->hbalock, iflag); 12183 if (work_ha_copy & HA_ERATT) { 12184 if (lpfc_sli_read_hs(phba)) 12185 goto unplug_error; 12186 /* 12187 * Check if there is a deferred error condition 12188 * is active 12189 */ 12190 if ((HS_FFER1 & phba->work_hs) && 12191 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 12192 HS_FFER6 | HS_FFER7 | HS_FFER8) & 12193 phba->work_hs)) { 12194 phba->hba_flag |= DEFER_ERATT; 12195 /* Clear all interrupt enable conditions */ 12196 writel(0, phba->HCregaddr); 12197 readl(phba->HCregaddr); 12198 } 12199 } 12200 12201 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 12202 pmb = phba->sli.mbox_active; 12203 pmbox = &pmb->u.mb; 12204 mbox = phba->mbox; 12205 vport = pmb->vport; 12206 12207 /* First check out the status word */ 12208 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); 12209 if (pmbox->mbxOwner != OWN_HOST) { 12210 spin_unlock_irqrestore(&phba->hbalock, iflag); 12211 /* 12212 * Stray Mailbox Interrupt, mbxCommand <cmd> 12213 * mbxStatus <status> 12214 */ 12215 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 12216 LOG_SLI, 12217 "(%d):0304 Stray Mailbox " 12218 "Interrupt mbxCommand x%x " 12219 "mbxStatus x%x\n", 12220 (vport ? vport->vpi : 0), 12221 pmbox->mbxCommand, 12222 pmbox->mbxStatus); 12223 /* clear mailbox attention bit */ 12224 work_ha_copy &= ~HA_MBATT; 12225 } else { 12226 phba->sli.mbox_active = NULL; 12227 spin_unlock_irqrestore(&phba->hbalock, iflag); 12228 phba->last_completion_time = jiffies; 12229 del_timer(&phba->sli.mbox_tmo); 12230 if (pmb->mbox_cmpl) { 12231 lpfc_sli_pcimem_bcopy(mbox, pmbox, 12232 MAILBOX_CMD_SIZE); 12233 if (pmb->out_ext_byte_len && 12234 pmb->context2) 12235 lpfc_sli_pcimem_bcopy( 12236 phba->mbox_ext, 12237 pmb->context2, 12238 pmb->out_ext_byte_len); 12239 } 12240 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 12241 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 12242 12243 lpfc_debugfs_disc_trc(vport, 12244 LPFC_DISC_TRC_MBOX_VPORT, 12245 "MBOX dflt rpi: : " 12246 "status:x%x rpi:x%x", 12247 (uint32_t)pmbox->mbxStatus, 12248 pmbox->un.varWords[0], 0); 12249 12250 if (!pmbox->mbxStatus) { 12251 mp = (struct lpfc_dmabuf *) 12252 (pmb->context1); 12253 ndlp = (struct lpfc_nodelist *) 12254 pmb->context2; 12255 12256 /* Reg_LOGIN of dflt RPI was 12257 * successful. new lets get 12258 * rid of the RPI using the 12259 * same mbox buffer. 
					 */
					lpfc_unreg_login(phba,
						vport->vpi,
						pmbox->un.varWords[0],
						pmb);
					pmb->mbox_cmpl =
						lpfc_mbx_cmpl_dflt_rpi;
					pmb->context1 = mp;
					pmb->context2 = ndlp;
					pmb->vport = vport;
					rc = lpfc_sli_issue_mbox(phba,
						pmb,
						MBX_NOWAIT);
					if (rc != MBX_BUSY)
						lpfc_printf_log(phba,
						KERN_ERR,
						LOG_MBOX | LOG_SLI,
						"0350 rc should have "
						"been MBX_BUSY\n");
					if (rc != MBX_NOT_FINISHED)
						goto send_current_mbox;
				}
			}
			spin_lock_irqsave(
					&phba->pport->work_port_lock,
					iflag);
			phba->pport->work_port_events &=
				~WORKER_MBOX_TMO;
			spin_unlock_irqrestore(
					&phba->pport->work_port_lock,
					iflag);
			lpfc_mbox_cmpl_put(phba, pmb);
		}
	} else
		spin_unlock_irqrestore(&phba->hbalock, iflag);

	if ((work_ha_copy & HA_MBATT) &&
	    (phba->sli.mbox_active == NULL)) {
send_current_mbox:
		/* Process next mailbox command if there is one */
		do {
			rc = lpfc_sli_issue_mbox(phba, NULL, MBX_NOWAIT);
		} while (rc == MBX_NOT_FINISHED);
		if (rc != MBX_SUCCESS)
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
					LOG_SLI, "0349 rc should be "
					"MBX_SUCCESS\n");
	}

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->work_ha |= work_ha_copy;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	lpfc_worker_wake_up(phba);
	}
	return IRQ_HANDLED;
unplug_error:
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return IRQ_HANDLED;

} /* lpfc_sli_sp_intr_handler */

/**
 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when a device with the SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 * interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when the interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_fp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba  *phba;
	uint32_t ha_copy;
	unsigned long status;
	unsigned long iflag;
	struct lpfc_sli_ring *pring;

	/* Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
12352 */ 12353 phba = (struct lpfc_hba *) dev_id; 12354 12355 if (unlikely(!phba)) 12356 return IRQ_NONE; 12357 12358 /* 12359 * Stuff needs to be attented to when this function is invoked as an 12360 * individual interrupt handler in MSI-X multi-message interrupt mode 12361 */ 12362 if (phba->intr_type == MSIX) { 12363 /* Check device state for handling interrupt */ 12364 if (lpfc_intr_state_check(phba)) 12365 return IRQ_NONE; 12366 /* Need to read HA REG for FCP ring and other ring events */ 12367 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 12368 return IRQ_HANDLED; 12369 /* Clear up only attention source related to fast-path */ 12370 spin_lock_irqsave(&phba->hbalock, iflag); 12371 /* 12372 * If there is deferred error attention, do not check for 12373 * any interrupt. 12374 */ 12375 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12376 spin_unlock_irqrestore(&phba->hbalock, iflag); 12377 return IRQ_NONE; 12378 } 12379 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 12380 phba->HAregaddr); 12381 readl(phba->HAregaddr); /* flush */ 12382 spin_unlock_irqrestore(&phba->hbalock, iflag); 12383 } else 12384 ha_copy = phba->ha_copy; 12385 12386 /* 12387 * Process all events on FCP ring. Take the optimized path for FCP IO. 12388 */ 12389 ha_copy &= ~(phba->work_ha_mask); 12390 12391 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 12392 status >>= (4*LPFC_FCP_RING); 12393 pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; 12394 if (status & HA_RXMASK) 12395 lpfc_sli_handle_fast_ring_event(phba, pring, status); 12396 12397 if (phba->cfg_multi_ring_support == 2) { 12398 /* 12399 * Process all events on extra ring. Take the optimized path 12400 * for extra ring IO. 12401 */ 12402 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 12403 status >>= (4*LPFC_EXTRA_RING); 12404 if (status & HA_RXMASK) { 12405 lpfc_sli_handle_fast_ring_event(phba, 12406 &phba->sli.sli3_ring[LPFC_EXTRA_RING], 12407 status); 12408 } 12409 } 12410 return IRQ_HANDLED; 12411 } /* lpfc_sli_fp_intr_handler */ 12412 12413 /** 12414 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device 12415 * @irq: Interrupt number. 12416 * @dev_id: The device context pointer. 12417 * 12418 * This function is the HBA device-level interrupt handler to device with 12419 * SLI-3 interface spec, called from the PCI layer when either MSI or 12420 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which 12421 * requires driver attention. This function invokes the slow-path interrupt 12422 * attention handling function and fast-path interrupt attention handling 12423 * function in turn to process the relevant HBA attention events. This 12424 * function is called without any lock held. It gets the hbalock to access 12425 * and update SLI data structures. 12426 * 12427 * This function returns IRQ_HANDLED when interrupt is handled, else it 12428 * returns IRQ_NONE. 12429 **/ 12430 irqreturn_t 12431 lpfc_sli_intr_handler(int irq, void *dev_id) 12432 { 12433 struct lpfc_hba *phba; 12434 irqreturn_t sp_irq_rc, fp_irq_rc; 12435 unsigned long status1, status2; 12436 uint32_t hc_copy; 12437 12438 /* 12439 * Get the driver's phba structure from the dev_id and 12440 * assume the HBA is not interrupting. 
12441 */ 12442 phba = (struct lpfc_hba *) dev_id; 12443 12444 if (unlikely(!phba)) 12445 return IRQ_NONE; 12446 12447 /* Check device state for handling interrupt */ 12448 if (lpfc_intr_state_check(phba)) 12449 return IRQ_NONE; 12450 12451 spin_lock(&phba->hbalock); 12452 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { 12453 spin_unlock(&phba->hbalock); 12454 return IRQ_HANDLED; 12455 } 12456 12457 if (unlikely(!phba->ha_copy)) { 12458 spin_unlock(&phba->hbalock); 12459 return IRQ_NONE; 12460 } else if (phba->ha_copy & HA_ERATT) { 12461 if (phba->hba_flag & HBA_ERATT_HANDLED) 12462 /* ERATT polling has handled ERATT */ 12463 phba->ha_copy &= ~HA_ERATT; 12464 else 12465 /* Indicate interrupt handler handles ERATT */ 12466 phba->hba_flag |= HBA_ERATT_HANDLED; 12467 } 12468 12469 /* 12470 * If there is deferred error attention, do not check for any interrupt. 12471 */ 12472 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12473 spin_unlock(&phba->hbalock); 12474 return IRQ_NONE; 12475 } 12476 12477 /* Clear attention sources except link and error attentions */ 12478 if (lpfc_readl(phba->HCregaddr, &hc_copy)) { 12479 spin_unlock(&phba->hbalock); 12480 return IRQ_HANDLED; 12481 } 12482 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA 12483 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), 12484 phba->HCregaddr); 12485 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 12486 writel(hc_copy, phba->HCregaddr); 12487 readl(phba->HAregaddr); /* flush */ 12488 spin_unlock(&phba->hbalock); 12489 12490 /* 12491 * Invokes slow-path host attention interrupt handling as appropriate. 12492 */ 12493 12494 /* status of events with mailbox and link attention */ 12495 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); 12496 12497 /* status of events with ELS ring */ 12498 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 12499 status2 >>= (4*LPFC_ELS_RING); 12500 12501 if (status1 || (status2 & HA_RXMASK)) 12502 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); 12503 else 12504 sp_irq_rc = IRQ_NONE; 12505 12506 /* 12507 * Invoke fast-path host attention interrupt handling as appropriate. 12508 */ 12509 12510 /* status of events with FCP ring */ 12511 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 12512 status1 >>= (4*LPFC_FCP_RING); 12513 12514 /* status of events with extra ring */ 12515 if (phba->cfg_multi_ring_support == 2) { 12516 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 12517 status2 >>= (4*LPFC_EXTRA_RING); 12518 } else 12519 status2 = 0; 12520 12521 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 12522 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); 12523 else 12524 fp_irq_rc = IRQ_NONE; 12525 12526 /* Return device-level interrupt handling status */ 12527 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 12528 } /* lpfc_sli_intr_handler */ 12529 12530 /** 12531 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event 12532 * @phba: pointer to lpfc hba data structure. 12533 * 12534 * This routine is invoked by the worker thread to process all the pending 12535 * SLI4 FCP abort XRI events. 
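 *
 * Dispatch sketch (illustrative only; the actual call site is the driver's
 * worker routine, which checks the flag set by the completion-queue handler):
 *
 *	if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
 *		lpfc_sli4_fcp_xri_abort_event_proc(phba);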
12536 **/ 12537 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba) 12538 { 12539 struct lpfc_cq_event *cq_event; 12540 12541 /* First, declare the fcp xri abort event has been handled */ 12542 spin_lock_irq(&phba->hbalock); 12543 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT; 12544 spin_unlock_irq(&phba->hbalock); 12545 /* Now, handle all the fcp xri abort events */ 12546 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) { 12547 /* Get the first event from the head of the event queue */ 12548 spin_lock_irq(&phba->hbalock); 12549 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 12550 cq_event, struct lpfc_cq_event, list); 12551 spin_unlock_irq(&phba->hbalock); 12552 /* Notify aborted XRI for FCP work queue */ 12553 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 12554 /* Free the event processed back to the free pool */ 12555 lpfc_sli4_cq_event_release(phba, cq_event); 12556 } 12557 } 12558 12559 /** 12560 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event 12561 * @phba: pointer to lpfc hba data structure. 12562 * 12563 * This routine is invoked by the worker thread to process all the pending 12564 * SLI4 els abort xri events. 12565 **/ 12566 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) 12567 { 12568 struct lpfc_cq_event *cq_event; 12569 12570 /* First, declare the els xri abort event has been handled */ 12571 spin_lock_irq(&phba->hbalock); 12572 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; 12573 spin_unlock_irq(&phba->hbalock); 12574 /* Now, handle all the els xri abort events */ 12575 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { 12576 /* Get the first event from the head of the event queue */ 12577 spin_lock_irq(&phba->hbalock); 12578 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 12579 cq_event, struct lpfc_cq_event, list); 12580 spin_unlock_irq(&phba->hbalock); 12581 /* Notify aborted XRI for ELS work queue */ 12582 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 12583 /* Free the event processed back to the free pool */ 12584 lpfc_sli4_cq_event_release(phba, cq_event); 12585 } 12586 } 12587 12588 /** 12589 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn 12590 * @phba: pointer to lpfc hba data structure 12591 * @pIocbIn: pointer to the rspiocbq 12592 * @pIocbOut: pointer to the cmdiocbq 12593 * @wcqe: pointer to the complete wcqe 12594 * 12595 * This routine transfers the fields of a command iocbq to a response iocbq 12596 * by copying all the IOCB fields from command iocbq and transferring the 12597 * completion status information from the complete wcqe. 
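 *
 * Note that the copy below starts at offsetof(struct lpfc_iocbq, iocb), so
 * only the embedded IOCB and the fields that follow it are taken from the
 * command iocbq; whatever precedes the IOCB in struct lpfc_iocbq (such as the
 * list linkage) is left untouched on the response iocbq.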
12598 **/ 12599 static void 12600 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, 12601 struct lpfc_iocbq *pIocbIn, 12602 struct lpfc_iocbq *pIocbOut, 12603 struct lpfc_wcqe_complete *wcqe) 12604 { 12605 int numBdes, i; 12606 unsigned long iflags; 12607 uint32_t status, max_response; 12608 struct lpfc_dmabuf *dmabuf; 12609 struct ulp_bde64 *bpl, bde; 12610 size_t offset = offsetof(struct lpfc_iocbq, iocb); 12611 12612 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, 12613 sizeof(struct lpfc_iocbq) - offset); 12614 /* Map WCQE parameters into irspiocb parameters */ 12615 status = bf_get(lpfc_wcqe_c_status, wcqe); 12616 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK); 12617 if (pIocbOut->iocb_flag & LPFC_IO_FCP) 12618 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 12619 pIocbIn->iocb.un.fcpi.fcpi_parm = 12620 pIocbOut->iocb.un.fcpi.fcpi_parm - 12621 wcqe->total_data_placed; 12622 else 12623 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 12624 else { 12625 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 12626 switch (pIocbOut->iocb.ulpCommand) { 12627 case CMD_ELS_REQUEST64_CR: 12628 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 12629 bpl = (struct ulp_bde64 *)dmabuf->virt; 12630 bde.tus.w = le32_to_cpu(bpl[1].tus.w); 12631 max_response = bde.tus.f.bdeSize; 12632 break; 12633 case CMD_GEN_REQUEST64_CR: 12634 max_response = 0; 12635 if (!pIocbOut->context3) 12636 break; 12637 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/ 12638 sizeof(struct ulp_bde64); 12639 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 12640 bpl = (struct ulp_bde64 *)dmabuf->virt; 12641 for (i = 0; i < numBdes; i++) { 12642 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 12643 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 12644 max_response += bde.tus.f.bdeSize; 12645 } 12646 break; 12647 default: 12648 max_response = wcqe->total_data_placed; 12649 break; 12650 } 12651 if (max_response < wcqe->total_data_placed) 12652 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response; 12653 else 12654 pIocbIn->iocb.un.genreq64.bdl.bdeSize = 12655 wcqe->total_data_placed; 12656 } 12657 12658 /* Convert BG errors for completion status */ 12659 if (status == CQE_STATUS_DI_ERROR) { 12660 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; 12661 12662 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe)) 12663 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED; 12664 else 12665 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED; 12666 12667 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0; 12668 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */ 12669 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 12670 BGS_GUARD_ERR_MASK; 12671 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */ 12672 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 12673 BGS_APPTAG_ERR_MASK; 12674 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */ 12675 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 12676 BGS_REFTAG_ERR_MASK; 12677 12678 /* Check to see if there was any good data before the error */ 12679 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) { 12680 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 12681 BGS_HI_WATER_MARK_PRESENT_MASK; 12682 pIocbIn->iocb.unsli3.sli3_bg.bghm = 12683 wcqe->total_data_placed; 12684 } 12685 12686 /* 12687 * Set ALL the error bits to indicate we don't know what 12688 * type of error it is. 
12689 */ 12690 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat) 12691 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 12692 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK | 12693 BGS_GUARD_ERR_MASK); 12694 } 12695 12696 /* Pick up HBA exchange busy condition */ 12697 if (bf_get(lpfc_wcqe_c_xb, wcqe)) { 12698 spin_lock_irqsave(&phba->hbalock, iflags); 12699 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY; 12700 spin_unlock_irqrestore(&phba->hbalock, iflags); 12701 } 12702 } 12703 12704 /** 12705 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe 12706 * @phba: Pointer to HBA context object. 12707 * @wcqe: Pointer to work-queue completion queue entry. 12708 * 12709 * This routine handles an ELS work-queue completion event and construct 12710 * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common 12711 * discovery engine to handle. 12712 * 12713 * Return: Pointer to the receive IOCBQ, NULL otherwise. 12714 **/ 12715 static struct lpfc_iocbq * 12716 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, 12717 struct lpfc_iocbq *irspiocbq) 12718 { 12719 struct lpfc_sli_ring *pring; 12720 struct lpfc_iocbq *cmdiocbq; 12721 struct lpfc_wcqe_complete *wcqe; 12722 unsigned long iflags; 12723 12724 pring = lpfc_phba_elsring(phba); 12725 if (unlikely(!pring)) 12726 return NULL; 12727 12728 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; 12729 spin_lock_irqsave(&pring->ring_lock, iflags); 12730 pring->stats.iocb_event++; 12731 /* Look up the ELS command IOCB and create pseudo response IOCB */ 12732 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 12733 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 12734 if (unlikely(!cmdiocbq)) { 12735 spin_unlock_irqrestore(&pring->ring_lock, iflags); 12736 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 12737 "0386 ELS complete with no corresponding " 12738 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n", 12739 wcqe->word0, wcqe->total_data_placed, 12740 wcqe->parameter, wcqe->word3); 12741 lpfc_sli_release_iocbq(phba, irspiocbq); 12742 return NULL; 12743 } 12744 12745 /* Put the iocb back on the txcmplq */ 12746 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq); 12747 spin_unlock_irqrestore(&pring->ring_lock, iflags); 12748 12749 /* Fake the irspiocbq and copy necessary response information */ 12750 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe); 12751 12752 return irspiocbq; 12753 } 12754 12755 inline struct lpfc_cq_event * 12756 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size) 12757 { 12758 struct lpfc_cq_event *cq_event; 12759 12760 /* Allocate a new internal CQ_EVENT entry */ 12761 cq_event = lpfc_sli4_cq_event_alloc(phba); 12762 if (!cq_event) { 12763 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12764 "0602 Failed to alloc CQ_EVENT entry\n"); 12765 return NULL; 12766 } 12767 12768 /* Move the CQE into the event */ 12769 memcpy(&cq_event->cqe, entry, size); 12770 return cq_event; 12771 } 12772 12773 /** 12774 * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event 12775 * @phba: Pointer to HBA context object. 12776 * @cqe: Pointer to mailbox completion queue entry. 12777 * 12778 * This routine process a mailbox completion queue entry with asynchrous 12779 * event. 12780 * 12781 * Return: true if work posted to worker thread, otherwise false. 
12782 **/ 12783 static bool 12784 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 12785 { 12786 struct lpfc_cq_event *cq_event; 12787 unsigned long iflags; 12788 12789 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 12790 "0392 Async Event: word0:x%x, word1:x%x, " 12791 "word2:x%x, word3:x%x\n", mcqe->word0, 12792 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); 12793 12794 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe)); 12795 if (!cq_event) 12796 return false; 12797 spin_lock_irqsave(&phba->hbalock, iflags); 12798 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); 12799 /* Set the async event flag */ 12800 phba->hba_flag |= ASYNC_EVENT; 12801 spin_unlock_irqrestore(&phba->hbalock, iflags); 12802 12803 return true; 12804 } 12805 12806 /** 12807 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event 12808 * @phba: Pointer to HBA context object. 12809 * @cqe: Pointer to mailbox completion queue entry. 12810 * 12811 * This routine process a mailbox completion queue entry with mailbox 12812 * completion event. 12813 * 12814 * Return: true if work posted to worker thread, otherwise false. 12815 **/ 12816 static bool 12817 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 12818 { 12819 uint32_t mcqe_status; 12820 MAILBOX_t *mbox, *pmbox; 12821 struct lpfc_mqe *mqe; 12822 struct lpfc_vport *vport; 12823 struct lpfc_nodelist *ndlp; 12824 struct lpfc_dmabuf *mp; 12825 unsigned long iflags; 12826 LPFC_MBOXQ_t *pmb; 12827 bool workposted = false; 12828 int rc; 12829 12830 /* If not a mailbox complete MCQE, out by checking mailbox consume */ 12831 if (!bf_get(lpfc_trailer_completed, mcqe)) 12832 goto out_no_mqe_complete; 12833 12834 /* Get the reference to the active mbox command */ 12835 spin_lock_irqsave(&phba->hbalock, iflags); 12836 pmb = phba->sli.mbox_active; 12837 if (unlikely(!pmb)) { 12838 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 12839 "1832 No pending MBOX command to handle\n"); 12840 spin_unlock_irqrestore(&phba->hbalock, iflags); 12841 goto out_no_mqe_complete; 12842 } 12843 spin_unlock_irqrestore(&phba->hbalock, iflags); 12844 mqe = &pmb->u.mqe; 12845 pmbox = (MAILBOX_t *)&pmb->u.mqe; 12846 mbox = phba->mbox; 12847 vport = pmb->vport; 12848 12849 /* Reset heartbeat timer */ 12850 phba->last_completion_time = jiffies; 12851 del_timer(&phba->sli.mbox_tmo); 12852 12853 /* Move mbox data to caller's mailbox region, do endian swapping */ 12854 if (pmb->mbox_cmpl && mbox) 12855 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe)); 12856 12857 /* 12858 * For mcqe errors, conditionally move a modified error code to 12859 * the mbox so that the error will not be missed. 12860 */ 12861 mcqe_status = bf_get(lpfc_mcqe_status, mcqe); 12862 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 12863 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS) 12864 bf_set(lpfc_mqe_status, mqe, 12865 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 12866 } 12867 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 12868 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 12869 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT, 12870 "MBOX dflt rpi: status:x%x rpi:x%x", 12871 mcqe_status, 12872 pmbox->un.varWords[0], 0); 12873 if (mcqe_status == MB_CQE_STATUS_SUCCESS) { 12874 mp = (struct lpfc_dmabuf *)(pmb->context1); 12875 ndlp = (struct lpfc_nodelist *)pmb->context2; 12876 /* Reg_LOGIN of dflt RPI was successful. Now lets get 12877 * RID of the PPI using the same mbox buffer. 
12878 */ 12879 lpfc_unreg_login(phba, vport->vpi, 12880 pmbox->un.varWords[0], pmb); 12881 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 12882 pmb->context1 = mp; 12883 pmb->context2 = ndlp; 12884 pmb->vport = vport; 12885 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 12886 if (rc != MBX_BUSY) 12887 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 12888 LOG_SLI, "0385 rc should " 12889 "have been MBX_BUSY\n"); 12890 if (rc != MBX_NOT_FINISHED) 12891 goto send_current_mbox; 12892 } 12893 } 12894 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 12895 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 12896 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 12897 12898 /* There is mailbox completion work to do */ 12899 spin_lock_irqsave(&phba->hbalock, iflags); 12900 __lpfc_mbox_cmpl_put(phba, pmb); 12901 phba->work_ha |= HA_MBATT; 12902 spin_unlock_irqrestore(&phba->hbalock, iflags); 12903 workposted = true; 12904 12905 send_current_mbox: 12906 spin_lock_irqsave(&phba->hbalock, iflags); 12907 /* Release the mailbox command posting token */ 12908 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 12909 /* Setting active mailbox pointer need to be in sync to flag clear */ 12910 phba->sli.mbox_active = NULL; 12911 spin_unlock_irqrestore(&phba->hbalock, iflags); 12912 /* Wake up worker thread to post the next pending mailbox command */ 12913 lpfc_worker_wake_up(phba); 12914 out_no_mqe_complete: 12915 if (bf_get(lpfc_trailer_consumed, mcqe)) 12916 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); 12917 return workposted; 12918 } 12919 12920 /** 12921 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry 12922 * @phba: Pointer to HBA context object. 12923 * @cqe: Pointer to mailbox completion queue entry. 12924 * 12925 * This routine process a mailbox completion queue entry, it invokes the 12926 * proper mailbox complete handling or asynchrous event handling routine 12927 * according to the MCQE's async bit. 12928 * 12929 * Return: true if work posted to worker thread, otherwise false. 12930 **/ 12931 static bool 12932 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) 12933 { 12934 struct lpfc_mcqe mcqe; 12935 bool workposted; 12936 12937 /* Copy the mailbox MCQE and convert endian order as needed */ 12938 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); 12939 12940 /* Invoke the proper event handling routine */ 12941 if (!bf_get(lpfc_trailer_async, &mcqe)) 12942 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe); 12943 else 12944 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe); 12945 return workposted; 12946 } 12947 12948 /** 12949 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event 12950 * @phba: Pointer to HBA context object. 12951 * @cq: Pointer to associated CQ 12952 * @wcqe: Pointer to work-queue completion queue entry. 12953 * 12954 * This routine handles an ELS work-queue completion event. 12955 * 12956 * Return: true if work posted to worker thread, otherwise false. 
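 *
 * On success the WCQE is copied into the irspiocbq's cq_event and queued on
 * sli4_hba.sp_queue_event with HBA_SP_QUEUE_EVT set, so the actual ELS
 * response handling happens later on the worker thread.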
12957 **/ 12958 static bool 12959 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 12960 struct lpfc_wcqe_complete *wcqe) 12961 { 12962 struct lpfc_iocbq *irspiocbq; 12963 unsigned long iflags; 12964 struct lpfc_sli_ring *pring = cq->pring; 12965 int txq_cnt = 0; 12966 int txcmplq_cnt = 0; 12967 int fcp_txcmplq_cnt = 0; 12968 12969 /* Check for response status */ 12970 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 12971 /* Log the error status */ 12972 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 12973 "0357 ELS CQE error: status=x%x: " 12974 "CQE: %08x %08x %08x %08x\n", 12975 bf_get(lpfc_wcqe_c_status, wcqe), 12976 wcqe->word0, wcqe->total_data_placed, 12977 wcqe->parameter, wcqe->word3); 12978 } 12979 12980 /* Get an irspiocbq for later ELS response processing use */ 12981 irspiocbq = lpfc_sli_get_iocbq(phba); 12982 if (!irspiocbq) { 12983 if (!list_empty(&pring->txq)) 12984 txq_cnt++; 12985 if (!list_empty(&pring->txcmplq)) 12986 txcmplq_cnt++; 12987 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12988 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " 12989 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n", 12990 txq_cnt, phba->iocb_cnt, 12991 fcp_txcmplq_cnt, 12992 txcmplq_cnt); 12993 return false; 12994 } 12995 12996 /* Save off the slow-path queue event for work thread to process */ 12997 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe)); 12998 spin_lock_irqsave(&phba->hbalock, iflags); 12999 list_add_tail(&irspiocbq->cq_event.list, 13000 &phba->sli4_hba.sp_queue_event); 13001 phba->hba_flag |= HBA_SP_QUEUE_EVT; 13002 spin_unlock_irqrestore(&phba->hbalock, iflags); 13003 13004 return true; 13005 } 13006 13007 /** 13008 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event 13009 * @phba: Pointer to HBA context object. 13010 * @wcqe: Pointer to work-queue completion queue entry. 13011 * 13012 * This routine handles slow-path WQ entry consumed event by invoking the 13013 * proper WQ release routine to the slow-path WQ. 13014 **/ 13015 static void 13016 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, 13017 struct lpfc_wcqe_release *wcqe) 13018 { 13019 /* sanity check on queue memory */ 13020 if (unlikely(!phba->sli4_hba.els_wq)) 13021 return; 13022 /* Check for the slow-path ELS work queue */ 13023 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) 13024 lpfc_sli4_wq_release(phba->sli4_hba.els_wq, 13025 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 13026 else 13027 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13028 "2579 Slow-path wqe consume event carries " 13029 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n", 13030 bf_get(lpfc_wcqe_r_wqe_index, wcqe), 13031 phba->sli4_hba.els_wq->queue_id); 13032 } 13033 13034 /** 13035 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event 13036 * @phba: Pointer to HBA context object. 13037 * @cq: Pointer to a WQ completion queue. 13038 * @wcqe: Pointer to work-queue completion queue entry. 13039 * 13040 * This routine handles an XRI abort event. 13041 * 13042 * Return: true if work posted to worker thread, otherwise false. 
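 *
 * FCP and ELS/NVME-LS aborts are queued to their respective slow-path abort
 * work queues for the worker thread to pick up; NVME aborts are handed
 * directly to the NVME/NVMET aborted-XRI handlers, so no work is posted for
 * them.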
13043 **/ 13044 static bool 13045 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, 13046 struct lpfc_queue *cq, 13047 struct sli4_wcqe_xri_aborted *wcqe) 13048 { 13049 bool workposted = false; 13050 struct lpfc_cq_event *cq_event; 13051 unsigned long iflags; 13052 13053 switch (cq->subtype) { 13054 case LPFC_FCP: 13055 cq_event = lpfc_cq_event_setup( 13056 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); 13057 if (!cq_event) 13058 return false; 13059 spin_lock_irqsave(&phba->hbalock, iflags); 13060 list_add_tail(&cq_event->list, 13061 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 13062 /* Set the fcp xri abort event flag */ 13063 phba->hba_flag |= FCP_XRI_ABORT_EVENT; 13064 spin_unlock_irqrestore(&phba->hbalock, iflags); 13065 workposted = true; 13066 break; 13067 case LPFC_NVME_LS: /* NVME LS uses ELS resources */ 13068 case LPFC_ELS: 13069 cq_event = lpfc_cq_event_setup( 13070 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); 13071 if (!cq_event) 13072 return false; 13073 spin_lock_irqsave(&phba->hbalock, iflags); 13074 list_add_tail(&cq_event->list, 13075 &phba->sli4_hba.sp_els_xri_aborted_work_queue); 13076 /* Set the els xri abort event flag */ 13077 phba->hba_flag |= ELS_XRI_ABORT_EVENT; 13078 spin_unlock_irqrestore(&phba->hbalock, iflags); 13079 workposted = true; 13080 break; 13081 case LPFC_NVME: 13082 /* Notify aborted XRI for NVME work queue */ 13083 if (phba->nvmet_support) 13084 lpfc_sli4_nvmet_xri_aborted(phba, wcqe); 13085 else 13086 lpfc_sli4_nvme_xri_aborted(phba, wcqe); 13087 13088 workposted = false; 13089 break; 13090 default: 13091 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13092 "0603 Invalid CQ subtype %d: " 13093 "%08x %08x %08x %08x\n", 13094 cq->subtype, wcqe->word0, wcqe->parameter, 13095 wcqe->word2, wcqe->word3); 13096 workposted = false; 13097 break; 13098 } 13099 return workposted; 13100 } 13101 13102 /** 13103 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry 13104 * @phba: Pointer to HBA context object. 13105 * @rcqe: Pointer to receive-queue completion queue entry. 13106 * 13107 * This routine process a receive-queue completion queue entry. 13108 * 13109 * Return: true if work posted to worker thread, otherwise false. 
13110 **/ 13111 static bool 13112 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) 13113 { 13114 bool workposted = false; 13115 struct fc_frame_header *fc_hdr; 13116 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 13117 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 13118 struct lpfc_nvmet_tgtport *tgtp; 13119 struct hbq_dmabuf *dma_buf; 13120 uint32_t status, rq_id; 13121 unsigned long iflags; 13122 13123 /* sanity check on queue memory */ 13124 if (unlikely(!hrq) || unlikely(!drq)) 13125 return workposted; 13126 13127 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 13128 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 13129 else 13130 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); 13131 if (rq_id != hrq->queue_id) 13132 goto out; 13133 13134 status = bf_get(lpfc_rcqe_status, rcqe); 13135 switch (status) { 13136 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 13137 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13138 "2537 Receive Frame Truncated!!\n"); 13139 case FC_STATUS_RQ_SUCCESS: 13140 spin_lock_irqsave(&phba->hbalock, iflags); 13141 lpfc_sli4_rq_release(hrq, drq); 13142 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 13143 if (!dma_buf) { 13144 hrq->RQ_no_buf_found++; 13145 spin_unlock_irqrestore(&phba->hbalock, iflags); 13146 goto out; 13147 } 13148 hrq->RQ_rcv_buf++; 13149 hrq->RQ_buf_posted--; 13150 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); 13151 13152 /* If a NVME LS event (type 0x28), treat it as Fast path */ 13153 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; 13154 13155 /* save off the frame for the word thread to process */ 13156 list_add_tail(&dma_buf->cq_event.list, 13157 &phba->sli4_hba.sp_queue_event); 13158 /* Frame received */ 13159 phba->hba_flag |= HBA_SP_QUEUE_EVT; 13160 spin_unlock_irqrestore(&phba->hbalock, iflags); 13161 workposted = true; 13162 break; 13163 case FC_STATUS_INSUFF_BUF_FRM_DISC: 13164 if (phba->nvmet_support) { 13165 tgtp = phba->targetport->private; 13166 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, 13167 "6402 RQE Error x%x, posted %d err_cnt " 13168 "%d: %x %x %x\n", 13169 status, hrq->RQ_buf_posted, 13170 hrq->RQ_no_posted_buf, 13171 atomic_read(&tgtp->rcv_fcp_cmd_in), 13172 atomic_read(&tgtp->rcv_fcp_cmd_out), 13173 atomic_read(&tgtp->xmt_fcp_release)); 13174 } 13175 /* fallthrough */ 13176 13177 case FC_STATUS_INSUFF_BUF_NEED_BUF: 13178 hrq->RQ_no_posted_buf++; 13179 /* Post more buffers if possible */ 13180 spin_lock_irqsave(&phba->hbalock, iflags); 13181 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; 13182 spin_unlock_irqrestore(&phba->hbalock, iflags); 13183 workposted = true; 13184 break; 13185 } 13186 out: 13187 return workposted; 13188 } 13189 13190 /** 13191 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry 13192 * @phba: Pointer to HBA context object. 13193 * @cq: Pointer to the completion queue. 13194 * @wcqe: Pointer to a completion queue entry. 13195 * 13196 * This routine process a slow-path work-queue or receive queue completion queue 13197 * entry. 13198 * 13199 * Return: true if work posted to worker thread, otherwise false. 
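 *
 * The entry is first copied to a local CQE (converting endianness when the
 * host and SLI byte order differ) and then dispatched on its CQE code to the
 * ELS completion, WQ release, XRI abort, or receive-queue handler.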
13200 **/ 13201 static bool 13202 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13203 struct lpfc_cqe *cqe) 13204 { 13205 struct lpfc_cqe cqevt; 13206 bool workposted = false; 13207 13208 /* Copy the work queue CQE and convert endian order if needed */ 13209 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe)); 13210 13211 /* Check and process for different type of WCQE and dispatch */ 13212 switch (bf_get(lpfc_cqe_code, &cqevt)) { 13213 case CQE_CODE_COMPL_WQE: 13214 /* Process the WQ/RQ complete event */ 13215 phba->last_completion_time = jiffies; 13216 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq, 13217 (struct lpfc_wcqe_complete *)&cqevt); 13218 break; 13219 case CQE_CODE_RELEASE_WQE: 13220 /* Process the WQ release event */ 13221 lpfc_sli4_sp_handle_rel_wcqe(phba, 13222 (struct lpfc_wcqe_release *)&cqevt); 13223 break; 13224 case CQE_CODE_XRI_ABORTED: 13225 /* Process the WQ XRI abort event */ 13226 phba->last_completion_time = jiffies; 13227 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 13228 (struct sli4_wcqe_xri_aborted *)&cqevt); 13229 break; 13230 case CQE_CODE_RECEIVE: 13231 case CQE_CODE_RECEIVE_V1: 13232 /* Process the RQ event */ 13233 phba->last_completion_time = jiffies; 13234 workposted = lpfc_sli4_sp_handle_rcqe(phba, 13235 (struct lpfc_rcqe *)&cqevt); 13236 break; 13237 default: 13238 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13239 "0388 Not a valid WCQE code: x%x\n", 13240 bf_get(lpfc_cqe_code, &cqevt)); 13241 break; 13242 } 13243 return workposted; 13244 } 13245 13246 /** 13247 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry 13248 * @phba: Pointer to HBA context object. 13249 * @eqe: Pointer to fast-path event queue entry. 13250 * 13251 * This routine process a event queue entry from the slow-path event queue. 13252 * It will check the MajorCode and MinorCode to determine this is for a 13253 * completion event on a completion queue, if not, an error shall be logged 13254 * and just return. Otherwise, it will get to the corresponding completion 13255 * queue and process all the entries on that completion queue, rearm the 13256 * completion queue, and then return. 13257 * 13258 **/ 13259 static void 13260 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 13261 struct lpfc_queue *speq) 13262 { 13263 struct lpfc_queue *cq = NULL, *childq; 13264 uint16_t cqid; 13265 13266 /* Get the reference to the corresponding CQ */ 13267 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 13268 13269 list_for_each_entry(childq, &speq->child_list, list) { 13270 if (childq->queue_id == cqid) { 13271 cq = childq; 13272 break; 13273 } 13274 } 13275 if (unlikely(!cq)) { 13276 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 13277 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13278 "0365 Slow-path CQ identifier " 13279 "(%d) does not exist\n", cqid); 13280 return; 13281 } 13282 13283 /* Save EQ associated with this CQ */ 13284 cq->assoc_qp = speq; 13285 13286 if (!queue_work(phba->wq, &cq->spwork)) 13287 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13288 "0390 Cannot schedule soft IRQ " 13289 "for CQ eqcqid=%d, cqid=%d on CPU %d\n", 13290 cqid, cq->queue_id, smp_processor_id()); 13291 } 13292 13293 /** 13294 * lpfc_sli4_sp_process_cq - Process a slow-path event queue entry 13295 * @phba: Pointer to HBA context object. 13296 * 13297 * This routine process a event queue entry from the slow-path event queue. 
13298 * It will check the MajorCode and MinorCode to determine this is for a 13299 * completion event on a completion queue, if not, an error shall be logged 13300 * and just return. Otherwise, it will get to the corresponding completion 13301 * queue and process all the entries on that completion queue, rearm the 13302 * completion queue, and then return. 13303 * 13304 **/ 13305 static void 13306 lpfc_sli4_sp_process_cq(struct work_struct *work) 13307 { 13308 struct lpfc_queue *cq = 13309 container_of(work, struct lpfc_queue, spwork); 13310 struct lpfc_hba *phba = cq->phba; 13311 struct lpfc_cqe *cqe; 13312 bool workposted = false; 13313 int ccount = 0; 13314 13315 /* Process all the entries to the CQ */ 13316 switch (cq->type) { 13317 case LPFC_MCQ: 13318 while ((cqe = lpfc_sli4_cq_get(cq))) { 13319 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); 13320 if (!(++ccount % cq->entry_repost)) 13321 break; 13322 cq->CQ_mbox++; 13323 } 13324 break; 13325 case LPFC_WCQ: 13326 while ((cqe = lpfc_sli4_cq_get(cq))) { 13327 if (cq->subtype == LPFC_FCP || 13328 cq->subtype == LPFC_NVME) { 13329 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 13330 if (phba->ktime_on) 13331 cq->isr_timestamp = ktime_get_ns(); 13332 else 13333 cq->isr_timestamp = 0; 13334 #endif 13335 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, 13336 cqe); 13337 } else { 13338 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, 13339 cqe); 13340 } 13341 if (!(++ccount % cq->entry_repost)) 13342 break; 13343 } 13344 13345 /* Track the max number of CQEs processed in 1 EQ */ 13346 if (ccount > cq->CQ_max_cqe) 13347 cq->CQ_max_cqe = ccount; 13348 break; 13349 default: 13350 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13351 "0370 Invalid completion queue type (%d)\n", 13352 cq->type); 13353 return; 13354 } 13355 13356 /* Catch the no cq entry condition, log an error */ 13357 if (unlikely(ccount == 0)) 13358 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13359 "0371 No entry from the CQ: identifier " 13360 "(x%x), type (%d)\n", cq->queue_id, cq->type); 13361 13362 /* In any case, flash and re-arm the RCQ */ 13363 phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM); 13364 13365 /* wake up worker thread if there are works to be done */ 13366 if (workposted) 13367 lpfc_worker_wake_up(phba); 13368 } 13369 13370 /** 13371 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry 13372 * @phba: Pointer to HBA context object. 13373 * @cq: Pointer to associated CQ 13374 * @wcqe: Pointer to work-queue completion queue entry. 13375 * 13376 * This routine process a fast-path work queue completion entry from fast-path 13377 * event queue for FCP command response completion. 13378 **/ 13379 static void 13380 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13381 struct lpfc_wcqe_complete *wcqe) 13382 { 13383 struct lpfc_sli_ring *pring = cq->pring; 13384 struct lpfc_iocbq *cmdiocbq; 13385 struct lpfc_iocbq irspiocbq; 13386 unsigned long iflags; 13387 13388 /* Check for response status */ 13389 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 13390 /* If resource errors reported from HBA, reduce queue 13391 * depth of the SCSI device. 
13392 */ 13393 if (((bf_get(lpfc_wcqe_c_status, wcqe) == 13394 IOSTAT_LOCAL_REJECT)) && 13395 ((wcqe->parameter & IOERR_PARAM_MASK) == 13396 IOERR_NO_RESOURCES)) 13397 phba->lpfc_rampdown_queue_depth(phba); 13398 13399 /* Log the error status */ 13400 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13401 "0373 FCP CQE error: status=x%x: " 13402 "CQE: %08x %08x %08x %08x\n", 13403 bf_get(lpfc_wcqe_c_status, wcqe), 13404 wcqe->word0, wcqe->total_data_placed, 13405 wcqe->parameter, wcqe->word3); 13406 } 13407 13408 /* Look up the FCP command IOCB and create pseudo response IOCB */ 13409 spin_lock_irqsave(&pring->ring_lock, iflags); 13410 pring->stats.iocb_event++; 13411 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 13412 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13413 spin_unlock_irqrestore(&pring->ring_lock, iflags); 13414 if (unlikely(!cmdiocbq)) { 13415 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13416 "0374 FCP complete with no corresponding " 13417 "cmdiocb: iotag (%d)\n", 13418 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13419 return; 13420 } 13421 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 13422 cmdiocbq->isr_timestamp = cq->isr_timestamp; 13423 #endif 13424 if (cmdiocbq->iocb_cmpl == NULL) { 13425 if (cmdiocbq->wqe_cmpl) { 13426 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 13427 spin_lock_irqsave(&phba->hbalock, iflags); 13428 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 13429 spin_unlock_irqrestore(&phba->hbalock, iflags); 13430 } 13431 13432 /* Pass the cmd_iocb and the wcqe to the upper layer */ 13433 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe); 13434 return; 13435 } 13436 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13437 "0375 FCP cmdiocb not callback function " 13438 "iotag: (%d)\n", 13439 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13440 return; 13441 } 13442 13443 /* Fake the irspiocb and copy necessary response information */ 13444 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); 13445 13446 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 13447 spin_lock_irqsave(&phba->hbalock, iflags); 13448 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 13449 spin_unlock_irqrestore(&phba->hbalock, iflags); 13450 } 13451 13452 /* Pass the cmd_iocb and the rsp state to the upper layer */ 13453 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); 13454 } 13455 13456 /** 13457 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event 13458 * @phba: Pointer to HBA context object. 13459 * @cq: Pointer to completion queue. 13460 * @wcqe: Pointer to work-queue completion queue entry. 13461 * 13462 * This routine handles an fast-path WQ entry consumed event by invoking the 13463 * proper WQ release routine to the slow-path WQ. 
13464 **/ 13465 static void 13466 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13467 struct lpfc_wcqe_release *wcqe) 13468 { 13469 struct lpfc_queue *childwq; 13470 bool wqid_matched = false; 13471 uint16_t hba_wqid; 13472 13473 /* Check for fast-path FCP work queue release */ 13474 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); 13475 list_for_each_entry(childwq, &cq->child_list, list) { 13476 if (childwq->queue_id == hba_wqid) { 13477 lpfc_sli4_wq_release(childwq, 13478 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 13479 if (childwq->q_flag & HBA_NVMET_WQFULL) 13480 lpfc_nvmet_wqfull_process(phba, childwq); 13481 wqid_matched = true; 13482 break; 13483 } 13484 } 13485 /* Report warning log message if no match found */ 13486 if (wqid_matched != true) 13487 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13488 "2580 Fast-path wqe consume event carries " 13489 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid); 13490 } 13491 13492 /** 13493 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry 13494 * @phba: Pointer to HBA context object. 13495 * @rcqe: Pointer to receive-queue completion queue entry. 13496 * 13497 * This routine process a receive-queue completion queue entry. 13498 * 13499 * Return: true if work posted to worker thread, otherwise false. 13500 **/ 13501 static bool 13502 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13503 struct lpfc_rcqe *rcqe) 13504 { 13505 bool workposted = false; 13506 struct lpfc_queue *hrq; 13507 struct lpfc_queue *drq; 13508 struct rqb_dmabuf *dma_buf; 13509 struct fc_frame_header *fc_hdr; 13510 struct lpfc_nvmet_tgtport *tgtp; 13511 uint32_t status, rq_id; 13512 unsigned long iflags; 13513 uint32_t fctl, idx; 13514 13515 if ((phba->nvmet_support == 0) || 13516 (phba->sli4_hba.nvmet_cqset == NULL)) 13517 return workposted; 13518 13519 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id; 13520 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx]; 13521 drq = phba->sli4_hba.nvmet_mrq_data[idx]; 13522 13523 /* sanity check on queue memory */ 13524 if (unlikely(!hrq) || unlikely(!drq)) 13525 return workposted; 13526 13527 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 13528 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 13529 else 13530 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); 13531 13532 if ((phba->nvmet_support == 0) || 13533 (rq_id != hrq->queue_id)) 13534 return workposted; 13535 13536 status = bf_get(lpfc_rcqe_status, rcqe); 13537 switch (status) { 13538 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 13539 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13540 "6126 Receive Frame Truncated!!\n"); 13541 /* Drop thru */ 13542 case FC_STATUS_RQ_SUCCESS: 13543 spin_lock_irqsave(&phba->hbalock, iflags); 13544 lpfc_sli4_rq_release(hrq, drq); 13545 dma_buf = lpfc_sli_rqbuf_get(phba, hrq); 13546 if (!dma_buf) { 13547 hrq->RQ_no_buf_found++; 13548 spin_unlock_irqrestore(&phba->hbalock, iflags); 13549 goto out; 13550 } 13551 spin_unlock_irqrestore(&phba->hbalock, iflags); 13552 hrq->RQ_rcv_buf++; 13553 hrq->RQ_buf_posted--; 13554 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; 13555 13556 /* Just some basic sanity checks on FCP Command frame */ 13557 fctl = (fc_hdr->fh_f_ctl[0] << 16 | 13558 fc_hdr->fh_f_ctl[1] << 8 | 13559 fc_hdr->fh_f_ctl[2]); 13560 if (((fctl & 13561 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) != 13562 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) || 13563 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */ 13564 goto drop; 13565 13566 if (fc_hdr->fh_type == FC_TYPE_FCP) 
{ 13567 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe); 13568 lpfc_nvmet_unsol_fcp_event( 13569 phba, idx, dma_buf, 13570 cq->isr_timestamp); 13571 return false; 13572 } 13573 drop: 13574 lpfc_in_buf_free(phba, &dma_buf->dbuf); 13575 break; 13576 case FC_STATUS_INSUFF_BUF_FRM_DISC: 13577 if (phba->nvmet_support) { 13578 tgtp = phba->targetport->private; 13579 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, 13580 "6401 RQE Error x%x, posted %d err_cnt " 13581 "%d: %x %x %x\n", 13582 status, hrq->RQ_buf_posted, 13583 hrq->RQ_no_posted_buf, 13584 atomic_read(&tgtp->rcv_fcp_cmd_in), 13585 atomic_read(&tgtp->rcv_fcp_cmd_out), 13586 atomic_read(&tgtp->xmt_fcp_release)); 13587 } 13588 /* fallthrough */ 13589 13590 case FC_STATUS_INSUFF_BUF_NEED_BUF: 13591 hrq->RQ_no_posted_buf++; 13592 /* Post more buffers if possible */ 13593 break; 13594 } 13595 out: 13596 return workposted; 13597 } 13598 13599 /** 13600 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry 13601 * @cq: Pointer to the completion queue. 13602 * @eqe: Pointer to fast-path completion queue entry. 13603 * 13604 * This routine process a fast-path work queue completion entry from fast-path 13605 * event queue for FCP command response completion. 13606 **/ 13607 static int 13608 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13609 struct lpfc_cqe *cqe) 13610 { 13611 struct lpfc_wcqe_release wcqe; 13612 bool workposted = false; 13613 13614 /* Copy the work queue CQE and convert endian order if needed */ 13615 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); 13616 13617 /* Check and process for different type of WCQE and dispatch */ 13618 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { 13619 case CQE_CODE_COMPL_WQE: 13620 case CQE_CODE_NVME_ERSP: 13621 cq->CQ_wq++; 13622 /* Process the WQ complete event */ 13623 phba->last_completion_time = jiffies; 13624 if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME)) 13625 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, 13626 (struct lpfc_wcqe_complete *)&wcqe); 13627 if (cq->subtype == LPFC_NVME_LS) 13628 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, 13629 (struct lpfc_wcqe_complete *)&wcqe); 13630 break; 13631 case CQE_CODE_RELEASE_WQE: 13632 cq->CQ_release_wqe++; 13633 /* Process the WQ release event */ 13634 lpfc_sli4_fp_handle_rel_wcqe(phba, cq, 13635 (struct lpfc_wcqe_release *)&wcqe); 13636 break; 13637 case CQE_CODE_XRI_ABORTED: 13638 cq->CQ_xri_aborted++; 13639 /* Process the WQ XRI abort event */ 13640 phba->last_completion_time = jiffies; 13641 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 13642 (struct sli4_wcqe_xri_aborted *)&wcqe); 13643 break; 13644 case CQE_CODE_RECEIVE_V1: 13645 case CQE_CODE_RECEIVE: 13646 phba->last_completion_time = jiffies; 13647 if (cq->subtype == LPFC_NVMET) { 13648 workposted = lpfc_sli4_nvmet_handle_rcqe( 13649 phba, cq, (struct lpfc_rcqe *)&wcqe); 13650 } 13651 break; 13652 default: 13653 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13654 "0144 Not a valid CQE code: x%x\n", 13655 bf_get(lpfc_wcqe_c_code, &wcqe)); 13656 break; 13657 } 13658 return workposted; 13659 } 13660 13661 /** 13662 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry 13663 * @phba: Pointer to HBA context object. 13664 * @eqe: Pointer to fast-path event queue entry. 13665 * 13666 * This routine process a event queue entry from the fast-path event queue. 
13667 * It will check the MajorCode and MinorCode to determine this is for a 13668 * completion event on a completion queue, if not, an error shall be logged 13669 * and just return. Otherwise, it will get to the corresponding completion 13670 * queue and process all the entries on the completion queue, rearm the 13671 * completion queue, and then return. 13672 **/ 13673 static void 13674 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 13675 uint32_t qidx) 13676 { 13677 struct lpfc_queue *cq = NULL; 13678 uint16_t cqid, id; 13679 13680 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 13681 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13682 "0366 Not a valid completion " 13683 "event: majorcode=x%x, minorcode=x%x\n", 13684 bf_get_le32(lpfc_eqe_major_code, eqe), 13685 bf_get_le32(lpfc_eqe_minor_code, eqe)); 13686 return; 13687 } 13688 13689 /* Get the reference to the corresponding CQ */ 13690 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 13691 13692 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) { 13693 id = phba->sli4_hba.nvmet_cqset[0]->queue_id; 13694 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) { 13695 /* Process NVMET unsol rcv */ 13696 cq = phba->sli4_hba.nvmet_cqset[cqid - id]; 13697 goto process_cq; 13698 } 13699 } 13700 13701 if (phba->sli4_hba.nvme_cq_map && 13702 (cqid == phba->sli4_hba.nvme_cq_map[qidx])) { 13703 /* Process NVME / NVMET command completion */ 13704 cq = phba->sli4_hba.nvme_cq[qidx]; 13705 goto process_cq; 13706 } 13707 13708 if (phba->sli4_hba.fcp_cq_map && 13709 (cqid == phba->sli4_hba.fcp_cq_map[qidx])) { 13710 /* Process FCP command completion */ 13711 cq = phba->sli4_hba.fcp_cq[qidx]; 13712 goto process_cq; 13713 } 13714 13715 if (phba->sli4_hba.nvmels_cq && 13716 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) { 13717 /* Process NVME unsol rcv */ 13718 cq = phba->sli4_hba.nvmels_cq; 13719 } 13720 13721 /* Otherwise this is a Slow path event */ 13722 if (cq == NULL) { 13723 lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]); 13724 return; 13725 } 13726 13727 process_cq: 13728 if (unlikely(cqid != cq->queue_id)) { 13729 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13730 "0368 Miss-matched fast-path completion " 13731 "queue identifier: eqcqid=%d, fcpcqid=%d\n", 13732 cqid, cq->queue_id); 13733 return; 13734 } 13735 13736 /* Save EQ associated with this CQ */ 13737 cq->assoc_qp = phba->sli4_hba.hba_eq[qidx]; 13738 13739 if (!queue_work(phba->wq, &cq->irqwork)) 13740 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13741 "0363 Cannot schedule soft IRQ " 13742 "for CQ eqcqid=%d, cqid=%d on CPU %d\n", 13743 cqid, cq->queue_id, smp_processor_id()); 13744 } 13745 13746 /** 13747 * lpfc_sli4_hba_process_cq - Process a fast-path event queue entry 13748 * @phba: Pointer to HBA context object. 13749 * @eqe: Pointer to fast-path event queue entry. 13750 * 13751 * This routine process a event queue entry from the fast-path event queue. 13752 * It will check the MajorCode and MinorCode to determine this is for a 13753 * completion event on a completion queue, if not, an error shall be logged 13754 * and just return. Otherwise, it will get to the corresponding completion 13755 * queue and process all the entries on the completion queue, rearm the 13756 * completion queue, and then return. 
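 *
 * This runs from the phba->wq workqueue (queued from the EQ handling code via
 * the CQ's irqwork item) rather than in hard interrupt context.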
13757 **/ 13758 static void 13759 lpfc_sli4_hba_process_cq(struct work_struct *work) 13760 { 13761 struct lpfc_queue *cq = 13762 container_of(work, struct lpfc_queue, irqwork); 13763 struct lpfc_hba *phba = cq->phba; 13764 struct lpfc_cqe *cqe; 13765 bool workposted = false; 13766 int ccount = 0; 13767 13768 /* Process all the entries to the CQ */ 13769 while ((cqe = lpfc_sli4_cq_get(cq))) { 13770 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 13771 if (phba->ktime_on) 13772 cq->isr_timestamp = ktime_get_ns(); 13773 else 13774 cq->isr_timestamp = 0; 13775 #endif 13776 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe); 13777 if (!(++ccount % cq->entry_repost)) 13778 break; 13779 } 13780 13781 /* Track the max number of CQEs processed in 1 EQ */ 13782 if (ccount > cq->CQ_max_cqe) 13783 cq->CQ_max_cqe = ccount; 13784 cq->assoc_qp->EQ_cqe_cnt += ccount; 13785 13786 /* Catch the no cq entry condition */ 13787 if (unlikely(ccount == 0)) 13788 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13789 "0369 No entry from fast-path completion " 13790 "queue fcpcqid=%d\n", cq->queue_id); 13791 13792 /* In any case, flash and re-arm the CQ */ 13793 phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM); 13794 13795 /* wake up worker thread if there are works to be done */ 13796 if (workposted) 13797 lpfc_worker_wake_up(phba); 13798 } 13799 13800 static void 13801 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) 13802 { 13803 struct lpfc_eqe *eqe; 13804 13805 /* walk all the EQ entries and drop on the floor */ 13806 while ((eqe = lpfc_sli4_eq_get(eq))) 13807 ; 13808 13809 /* Clear and re-arm the EQ */ 13810 phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM); 13811 } 13812 13813 13814 /** 13815 * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue 13816 * entry 13817 * @phba: Pointer to HBA context object. 13818 * @eqe: Pointer to fast-path event queue entry. 13819 * 13820 * This routine process a event queue entry from the Flash Optimized Fabric 13821 * event queue. It will check the MajorCode and MinorCode to determine this 13822 * is for a completion event on a completion queue, if not, an error shall be 13823 * logged and just return. Otherwise, it will get to the corresponding 13824 * completion queue and process all the entries on the completion queue, rearm 13825 * the completion queue, and then return. 
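 *
 * Note: unlike lpfc_sli4_hba_handle_eqe() there is no per-index CQ lookup
 * here; the only candidate is the OAS completion queue. In outline, and
 * matching the body of the routine below:
 *
 *	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
 *	cq = phba->sli4_hba.oas_cq;
 *	if (cq && cqid == cq->queue_id) {
 *		cq->assoc_qp = phba->sli4_hba.fof_eq;
 *		queue_work(phba->wq, &cq->irqwork);
 *	}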
13826 **/ 13827 static void 13828 lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) 13829 { 13830 struct lpfc_queue *cq; 13831 uint16_t cqid; 13832 13833 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 13834 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13835 "9147 Not a valid completion " 13836 "event: majorcode=x%x, minorcode=x%x\n", 13837 bf_get_le32(lpfc_eqe_major_code, eqe), 13838 bf_get_le32(lpfc_eqe_minor_code, eqe)); 13839 return; 13840 } 13841 13842 /* Get the reference to the corresponding CQ */ 13843 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 13844 13845 /* Next check for OAS */ 13846 cq = phba->sli4_hba.oas_cq; 13847 if (unlikely(!cq)) { 13848 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 13849 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13850 "9148 OAS completion queue " 13851 "does not exist\n"); 13852 return; 13853 } 13854 13855 if (unlikely(cqid != cq->queue_id)) { 13856 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13857 "9149 Miss-matched fast-path compl " 13858 "queue id: eqcqid=%d, fcpcqid=%d\n", 13859 cqid, cq->queue_id); 13860 return; 13861 } 13862 13863 /* Save EQ associated with this CQ */ 13864 cq->assoc_qp = phba->sli4_hba.fof_eq; 13865 13866 /* CQ work will be processed on CPU affinitized to this IRQ */ 13867 if (!queue_work(phba->wq, &cq->irqwork)) 13868 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13869 "0367 Cannot schedule soft IRQ " 13870 "for CQ eqcqid=%d, cqid=%d on CPU %d\n", 13871 cqid, cq->queue_id, smp_processor_id()); 13872 } 13873 13874 /** 13875 * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device 13876 * @irq: Interrupt number. 13877 * @dev_id: The device context pointer. 13878 * 13879 * This function is directly called from the PCI layer as an interrupt 13880 * service routine when device with SLI-4 interface spec is enabled with 13881 * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric 13882 * IOCB ring event in the HBA. However, when the device is enabled with either 13883 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 13884 * device-level interrupt handler. When the PCI slot is in error recovery 13885 * or the HBA is undergoing initialization, the interrupt handler will not 13886 * process the interrupt. The Flash Optimized Fabric ring event are handled in 13887 * the intrrupt context. This function is called without any lock held. 13888 * It gets the hbalock to access and update SLI data structures. Note that, 13889 * the EQ to CQ are one-to-one map such that the EQ index is 13890 * equal to that of CQ index. 13891 * 13892 * This function returns IRQ_HANDLED when interrupt is handled else it 13893 * returns IRQ_NONE. 
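 *
 * A registration sketch (the actual request_irq() call lives in the driver's
 * MSI-X setup path; the vector index fof_vec and the name string below are
 * illustrative only):
 *
 *	rc = request_irq(pci_irq_vector(phba->pcidev, fof_vec),
 *			 lpfc_sli4_fof_intr_handler, 0, "lpfc:fof",
 *			 &phba->sli4_hba.hba_eq_hdl[fof_vec]);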
13894 **/ 13895 irqreturn_t 13896 lpfc_sli4_fof_intr_handler(int irq, void *dev_id) 13897 { 13898 struct lpfc_hba *phba; 13899 struct lpfc_hba_eq_hdl *hba_eq_hdl; 13900 struct lpfc_queue *eq; 13901 struct lpfc_eqe *eqe; 13902 unsigned long iflag; 13903 int ecount = 0; 13904 13905 /* Get the driver's phba structure from the dev_id */ 13906 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; 13907 phba = hba_eq_hdl->phba; 13908 13909 if (unlikely(!phba)) 13910 return IRQ_NONE; 13911 13912 /* Get to the EQ struct associated with this vector */ 13913 eq = phba->sli4_hba.fof_eq; 13914 if (unlikely(!eq)) 13915 return IRQ_NONE; 13916 13917 /* Check device state for handling interrupt */ 13918 if (unlikely(lpfc_intr_state_check(phba))) { 13919 /* Check again for link_state with lock held */ 13920 spin_lock_irqsave(&phba->hbalock, iflag); 13921 if (phba->link_state < LPFC_LINK_DOWN) 13922 /* Flush, clear interrupt, and rearm the EQ */ 13923 lpfc_sli4_eq_flush(phba, eq); 13924 spin_unlock_irqrestore(&phba->hbalock, iflag); 13925 return IRQ_NONE; 13926 } 13927 13928 /* 13929 * Process all the event on FCP fast-path EQ 13930 */ 13931 while ((eqe = lpfc_sli4_eq_get(eq))) { 13932 lpfc_sli4_fof_handle_eqe(phba, eqe); 13933 if (!(++ecount % eq->entry_repost)) 13934 break; 13935 eq->EQ_processed++; 13936 } 13937 13938 /* Track the max number of EQEs processed in 1 intr */ 13939 if (ecount > eq->EQ_max_eqe) 13940 eq->EQ_max_eqe = ecount; 13941 13942 13943 if (unlikely(ecount == 0)) { 13944 eq->EQ_no_entry++; 13945 13946 if (phba->intr_type == MSIX) 13947 /* MSI-X treated interrupt served as no EQ share INT */ 13948 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13949 "9145 MSI-X interrupt with no EQE\n"); 13950 else { 13951 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13952 "9146 ISR interrupt with no EQE\n"); 13953 /* Non MSI-X treated on interrupt as EQ share INT */ 13954 return IRQ_NONE; 13955 } 13956 } 13957 /* Always clear and re-arm the fast-path EQ */ 13958 phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM); 13959 return IRQ_HANDLED; 13960 } 13961 13962 /** 13963 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device 13964 * @irq: Interrupt number. 13965 * @dev_id: The device context pointer. 13966 * 13967 * This function is directly called from the PCI layer as an interrupt 13968 * service routine when device with SLI-4 interface spec is enabled with 13969 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 13970 * ring event in the HBA. However, when the device is enabled with either 13971 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 13972 * device-level interrupt handler. When the PCI slot is in error recovery 13973 * or the HBA is undergoing initialization, the interrupt handler will not 13974 * process the interrupt. The SCSI FCP fast-path ring event are handled in 13975 * the intrrupt context. This function is called without any lock held. 13976 * It gets the hbalock to access and update SLI data structures. Note that, 13977 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is 13978 * equal to that of FCP CQ index. 13979 * 13980 * The link attention and ELS ring attention events are handled 13981 * by the worker thread. The interrupt handler signals the worker thread 13982 * and returns for these events. This function is called without any lock 13983 * held. It gets the hbalock to access and update SLI data structures. 
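 *
 * The fast-path service loop below is budgeted by the EQ's entry_repost
 * value so one vector cannot monopolize the CPU indefinitely; in outline:
 *
 *	while ((eqe = lpfc_sli4_eq_get(fpeq))) {
 *		lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
 *		if (!(++ecount % fpeq->entry_repost))
 *			break;
 *		fpeq->EQ_processed++;
 *	}
 *	phba->sli4_hba.sli4_eq_release(fpeq, LPFC_QUEUE_REARM);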
13984 * 13985 * This function returns IRQ_HANDLED when interrupt is handled else it 13986 * returns IRQ_NONE. 13987 **/ 13988 irqreturn_t 13989 lpfc_sli4_hba_intr_handler(int irq, void *dev_id) 13990 { 13991 struct lpfc_hba *phba; 13992 struct lpfc_hba_eq_hdl *hba_eq_hdl; 13993 struct lpfc_queue *fpeq; 13994 struct lpfc_eqe *eqe; 13995 unsigned long iflag; 13996 int ecount = 0; 13997 int hba_eqidx; 13998 13999 /* Get the driver's phba structure from the dev_id */ 14000 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; 14001 phba = hba_eq_hdl->phba; 14002 hba_eqidx = hba_eq_hdl->idx; 14003 14004 if (unlikely(!phba)) 14005 return IRQ_NONE; 14006 if (unlikely(!phba->sli4_hba.hba_eq)) 14007 return IRQ_NONE; 14008 14009 /* Get to the EQ struct associated with this vector */ 14010 fpeq = phba->sli4_hba.hba_eq[hba_eqidx]; 14011 if (unlikely(!fpeq)) 14012 return IRQ_NONE; 14013 14014 if (lpfc_fcp_look_ahead) { 14015 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) 14016 phba->sli4_hba.sli4_eq_clr_intr(fpeq); 14017 else { 14018 atomic_inc(&hba_eq_hdl->hba_eq_in_use); 14019 return IRQ_NONE; 14020 } 14021 } 14022 14023 /* Check device state for handling interrupt */ 14024 if (unlikely(lpfc_intr_state_check(phba))) { 14025 /* Check again for link_state with lock held */ 14026 spin_lock_irqsave(&phba->hbalock, iflag); 14027 if (phba->link_state < LPFC_LINK_DOWN) 14028 /* Flush, clear interrupt, and rearm the EQ */ 14029 lpfc_sli4_eq_flush(phba, fpeq); 14030 spin_unlock_irqrestore(&phba->hbalock, iflag); 14031 if (lpfc_fcp_look_ahead) 14032 atomic_inc(&hba_eq_hdl->hba_eq_in_use); 14033 return IRQ_NONE; 14034 } 14035 14036 /* 14037 * Process all the event on FCP fast-path EQ 14038 */ 14039 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 14040 lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx); 14041 if (!(++ecount % fpeq->entry_repost)) 14042 break; 14043 fpeq->EQ_processed++; 14044 } 14045 14046 /* Track the max number of EQEs processed in 1 intr */ 14047 if (ecount > fpeq->EQ_max_eqe) 14048 fpeq->EQ_max_eqe = ecount; 14049 14050 /* Always clear and re-arm the fast-path EQ */ 14051 phba->sli4_hba.sli4_eq_release(fpeq, LPFC_QUEUE_REARM); 14052 14053 if (unlikely(ecount == 0)) { 14054 fpeq->EQ_no_entry++; 14055 14056 if (lpfc_fcp_look_ahead) { 14057 atomic_inc(&hba_eq_hdl->hba_eq_in_use); 14058 return IRQ_NONE; 14059 } 14060 14061 if (phba->intr_type == MSIX) 14062 /* MSI-X treated interrupt served as no EQ share INT */ 14063 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 14064 "0358 MSI-X interrupt with no EQE\n"); 14065 else 14066 /* Non MSI-X treated on interrupt as EQ share INT */ 14067 return IRQ_NONE; 14068 } 14069 14070 if (lpfc_fcp_look_ahead) 14071 atomic_inc(&hba_eq_hdl->hba_eq_in_use); 14072 14073 return IRQ_HANDLED; 14074 } /* lpfc_sli4_fp_intr_handler */ 14075 14076 /** 14077 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device 14078 * @irq: Interrupt number. 14079 * @dev_id: The device context pointer. 14080 * 14081 * This function is the device-level interrupt handler to device with SLI-4 14082 * interface spec, called from the PCI layer when either MSI or Pin-IRQ 14083 * interrupt mode is enabled and there is an event in the HBA which requires 14084 * driver attention. This function invokes the slow-path interrupt attention 14085 * handling function and fast-path interrupt attention handling function in 14086 * turn to process the relevant HBA attention events. This function is called 14087 * without any lock held. 
It gets the hbalock to access and update SLI data 14088 * structures. 14089 * 14090 * This function returns IRQ_HANDLED when interrupt is handled, else it 14091 * returns IRQ_NONE. 14092 **/ 14093 irqreturn_t 14094 lpfc_sli4_intr_handler(int irq, void *dev_id) 14095 { 14096 struct lpfc_hba *phba; 14097 irqreturn_t hba_irq_rc; 14098 bool hba_handled = false; 14099 int qidx; 14100 14101 /* Get the driver's phba structure from the dev_id */ 14102 phba = (struct lpfc_hba *)dev_id; 14103 14104 if (unlikely(!phba)) 14105 return IRQ_NONE; 14106 14107 /* 14108 * Invoke fast-path host attention interrupt handling as appropriate. 14109 */ 14110 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) { 14111 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq, 14112 &phba->sli4_hba.hba_eq_hdl[qidx]); 14113 if (hba_irq_rc == IRQ_HANDLED) 14114 hba_handled |= true; 14115 } 14116 14117 if (phba->cfg_fof) { 14118 hba_irq_rc = lpfc_sli4_fof_intr_handler(irq, 14119 &phba->sli4_hba.hba_eq_hdl[qidx]); 14120 if (hba_irq_rc == IRQ_HANDLED) 14121 hba_handled |= true; 14122 } 14123 14124 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE; 14125 } /* lpfc_sli4_intr_handler */ 14126 14127 /** 14128 * lpfc_sli4_queue_free - free a queue structure and associated memory 14129 * @queue: The queue structure to free. 14130 * 14131 * This function frees a queue structure and the DMAable memory used for 14132 * the host resident queue. This function must be called after destroying the 14133 * queue on the HBA. 14134 **/ 14135 void 14136 lpfc_sli4_queue_free(struct lpfc_queue *queue) 14137 { 14138 struct lpfc_dmabuf *dmabuf; 14139 14140 if (!queue) 14141 return; 14142 14143 while (!list_empty(&queue->page_list)) { 14144 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, 14145 list); 14146 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size, 14147 dmabuf->virt, dmabuf->phys); 14148 kfree(dmabuf); 14149 } 14150 if (queue->rqbp) { 14151 lpfc_free_rq_buffer(queue->phba, queue); 14152 kfree(queue->rqbp); 14153 } 14154 14155 if (!list_empty(&queue->wq_list)) 14156 list_del(&queue->wq_list); 14157 14158 kfree(queue); 14159 return; 14160 } 14161 14162 /** 14163 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure 14164 * @phba: The HBA that this queue is being created on. 14165 * @page_size: The size of a queue page 14166 * @entry_size: The size of each queue entry for this queue. 14167 * @entry count: The number of entries that this queue will handle. 14168 * 14169 * This function allocates a queue structure and the DMAable memory used for 14170 * the host resident queue. This function must be called before creating the 14171 * queue on the HBA. 
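 *
 * The number of DMA pages is derived from the entry geometry, roughly as
 * page_count = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size,
 * and is then capped at pc_sli4_params.wqpcnt. For example, 1024 entries of
 * 64 bytes each on 4096-byte pages gives ALIGN(65536, 4096) / 4096 = 16 pages.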
14172 **/ 14173 struct lpfc_queue * 14174 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size, 14175 uint32_t entry_size, uint32_t entry_count) 14176 { 14177 struct lpfc_queue *queue; 14178 struct lpfc_dmabuf *dmabuf; 14179 int x, total_qe_count; 14180 void *dma_pointer; 14181 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14182 14183 if (!phba->sli4_hba.pc_sli4_params.supported) 14184 hw_page_size = page_size; 14185 14186 queue = kzalloc(sizeof(struct lpfc_queue) + 14187 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL); 14188 if (!queue) 14189 return NULL; 14190 queue->page_count = (ALIGN(entry_size * entry_count, 14191 hw_page_size))/hw_page_size; 14192 14193 /* If needed, Adjust page count to match the max the adapter supports */ 14194 if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt) 14195 queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt; 14196 14197 INIT_LIST_HEAD(&queue->list); 14198 INIT_LIST_HEAD(&queue->wq_list); 14199 INIT_LIST_HEAD(&queue->wqfull_list); 14200 INIT_LIST_HEAD(&queue->page_list); 14201 INIT_LIST_HEAD(&queue->child_list); 14202 14203 /* Set queue parameters now. If the system cannot provide memory 14204 * resources, the free routine needs to know what was allocated. 14205 */ 14206 queue->entry_size = entry_size; 14207 queue->entry_count = entry_count; 14208 queue->page_size = hw_page_size; 14209 queue->phba = phba; 14210 14211 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) { 14212 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 14213 if (!dmabuf) 14214 goto out_fail; 14215 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, 14216 hw_page_size, &dmabuf->phys, 14217 GFP_KERNEL); 14218 if (!dmabuf->virt) { 14219 kfree(dmabuf); 14220 goto out_fail; 14221 } 14222 dmabuf->buffer_tag = x; 14223 list_add_tail(&dmabuf->list, &queue->page_list); 14224 /* initialize queue's entry array */ 14225 dma_pointer = dmabuf->virt; 14226 for (; total_qe_count < entry_count && 14227 dma_pointer < (hw_page_size + dmabuf->virt); 14228 total_qe_count++, dma_pointer += entry_size) { 14229 queue->qe[total_qe_count].address = dma_pointer; 14230 } 14231 } 14232 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq); 14233 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq); 14234 14235 /* entry_repost will be set during q creation */ 14236 14237 return queue; 14238 out_fail: 14239 lpfc_sli4_queue_free(queue); 14240 return NULL; 14241 } 14242 14243 /** 14244 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory 14245 * @phba: HBA structure that indicates port to create a queue on. 14246 * @pci_barset: PCI BAR set flag. 14247 * 14248 * This function shall perform iomap of the specified PCI BAR address to host 14249 * memory address if not already done so and return it. The returned host 14250 * memory address can be NULL. 14251 */ 14252 static void __iomem * 14253 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) 14254 { 14255 if (!phba->pcidev) 14256 return NULL; 14257 14258 switch (pci_barset) { 14259 case WQ_PCI_BAR_0_AND_1: 14260 return phba->pci_bar0_memmap_p; 14261 case WQ_PCI_BAR_2_AND_3: 14262 return phba->pci_bar2_memmap_p; 14263 case WQ_PCI_BAR_4_AND_5: 14264 return phba->pci_bar4_memmap_p; 14265 default: 14266 break; 14267 } 14268 return NULL; 14269 } 14270 14271 /** 14272 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on FCP EQs 14273 * @phba: HBA structure that indicates port to create a queue on. 
14274 * @startq: The starting FCP EQ to modify 14275 * 14276 * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA. 14277 * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ ID's to be 14278 * updated in one mailbox command. 14279 * 14280 * The @phba struct is used to send mailbox command to HBA. The @startq 14281 * is used to get the starting FCP EQ to change. 14282 * This function is asynchronous and will wait for the mailbox 14283 * command to finish before continuing. 14284 * 14285 * On success this function will return a zero. If unable to allocate enough 14286 * memory this function will return -ENOMEM. If the queue create mailbox command 14287 * fails this function will return -ENXIO. 14288 **/ 14289 int 14290 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, 14291 uint32_t numq, uint32_t imax) 14292 { 14293 struct lpfc_mbx_modify_eq_delay *eq_delay; 14294 LPFC_MBOXQ_t *mbox; 14295 struct lpfc_queue *eq; 14296 int cnt, rc, length, status = 0; 14297 uint32_t shdr_status, shdr_add_status; 14298 uint32_t result, val; 14299 int qidx; 14300 union lpfc_sli4_cfg_shdr *shdr; 14301 uint16_t dmult; 14302 14303 if (startq >= phba->io_channel_irqs) 14304 return 0; 14305 14306 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14307 if (!mbox) 14308 return -ENOMEM; 14309 length = (sizeof(struct lpfc_mbx_modify_eq_delay) - 14310 sizeof(struct lpfc_sli4_cfg_mhdr)); 14311 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14312 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY, 14313 length, LPFC_SLI4_MBX_EMBED); 14314 eq_delay = &mbox->u.mqe.un.eq_delay; 14315 14316 /* Calculate delay multiper from maximum interrupt per second */ 14317 result = imax / phba->io_channel_irqs; 14318 if (result > LPFC_DMULT_CONST || result == 0) 14319 dmult = 0; 14320 else 14321 dmult = LPFC_DMULT_CONST/result - 1; 14322 if (dmult > LPFC_DMULT_MAX) 14323 dmult = LPFC_DMULT_MAX; 14324 14325 cnt = 0; 14326 for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) { 14327 eq = phba->sli4_hba.hba_eq[qidx]; 14328 if (!eq) 14329 continue; 14330 eq->q_mode = imax; 14331 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; 14332 eq_delay->u.request.eq[cnt].phase = 0; 14333 eq_delay->u.request.eq[cnt].delay_multi = dmult; 14334 cnt++; 14335 14336 /* q_mode is only used for auto_imax */ 14337 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) { 14338 /* Use EQ Delay Register method for q_mode */ 14339 14340 /* Convert for EQ Delay register */ 14341 val = phba->cfg_fcp_imax; 14342 if (val) { 14343 /* First, interrupts per sec per EQ */ 14344 val = phba->cfg_fcp_imax / 14345 phba->io_channel_irqs; 14346 14347 /* us delay between each interrupt */ 14348 val = LPFC_SEC_TO_USEC / val; 14349 } 14350 eq->q_mode = val; 14351 } else { 14352 eq->q_mode = imax; 14353 } 14354 14355 if (cnt >= numq) 14356 break; 14357 } 14358 eq_delay->u.request.num_eq = cnt; 14359 14360 mbox->vport = phba->pport; 14361 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 14362 mbox->context1 = NULL; 14363 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14364 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr; 14365 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14366 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14367 if (shdr_status || shdr_add_status || rc) { 14368 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14369 "2512 MODIFY_EQ_DELAY mailbox failed with " 14370 "status x%x add_status x%x, mbx status x%x\n", 14371 shdr_status, shdr_add_status, rc); 14372 status = -ENXIO; 14373 } 14374 
mempool_free(mbox, phba->mbox_mem_pool); 14375 return status; 14376 } 14377 14378 /** 14379 * lpfc_eq_create - Create an Event Queue on the HBA 14380 * @phba: HBA structure that indicates port to create a queue on. 14381 * @eq: The queue structure to use to create the event queue. 14382 * @imax: The maximum interrupt per second limit. 14383 * 14384 * This function creates an event queue, as detailed in @eq, on a port, 14385 * described by @phba by sending an EQ_CREATE mailbox command to the HBA. 14386 * 14387 * The @phba struct is used to send mailbox command to HBA. The @eq struct 14388 * is used to get the entry count and entry size that are necessary to 14389 * determine the number of pages to allocate and use for this queue. This 14390 * function will send the EQ_CREATE mailbox command to the HBA to setup the 14391 * event queue. This function is asynchronous and will wait for the mailbox 14392 * command to finish before continuing. 14393 * 14394 * On success this function will return a zero. If unable to allocate enough 14395 * memory this function will return -ENOMEM. If the queue create mailbox command 14396 * fails this function will return -ENXIO. 14397 **/ 14398 int 14399 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) 14400 { 14401 struct lpfc_mbx_eq_create *eq_create; 14402 LPFC_MBOXQ_t *mbox; 14403 int rc, length, status = 0; 14404 struct lpfc_dmabuf *dmabuf; 14405 uint32_t shdr_status, shdr_add_status; 14406 union lpfc_sli4_cfg_shdr *shdr; 14407 uint16_t dmult; 14408 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14409 14410 /* sanity check on queue memory */ 14411 if (!eq) 14412 return -ENODEV; 14413 if (!phba->sli4_hba.pc_sli4_params.supported) 14414 hw_page_size = SLI4_PAGE_SIZE; 14415 14416 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14417 if (!mbox) 14418 return -ENOMEM; 14419 length = (sizeof(struct lpfc_mbx_eq_create) - 14420 sizeof(struct lpfc_sli4_cfg_mhdr)); 14421 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14422 LPFC_MBOX_OPCODE_EQ_CREATE, 14423 length, LPFC_SLI4_MBX_EMBED); 14424 eq_create = &mbox->u.mqe.un.eq_create; 14425 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; 14426 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, 14427 eq->page_count); 14428 bf_set(lpfc_eq_context_size, &eq_create->u.request.context, 14429 LPFC_EQE_SIZE); 14430 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); 14431 14432 /* Use version 2 of CREATE_EQ if eqav is set */ 14433 if (phba->sli4_hba.pc_sli4_params.eqav) { 14434 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14435 LPFC_Q_CREATE_VERSION_2); 14436 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context, 14437 phba->sli4_hba.pc_sli4_params.eqav); 14438 } 14439 14440 /* don't setup delay multiplier using EQ_CREATE */ 14441 dmult = 0; 14442 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, 14443 dmult); 14444 switch (eq->entry_count) { 14445 default: 14446 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14447 "0360 Unsupported EQ count. 
(%d)\n", 14448 eq->entry_count); 14449 if (eq->entry_count < 256) 14450 return -EINVAL; 14451 /* otherwise default to smallest count (drop through) */ 14452 case 256: 14453 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14454 LPFC_EQ_CNT_256); 14455 break; 14456 case 512: 14457 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14458 LPFC_EQ_CNT_512); 14459 break; 14460 case 1024: 14461 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14462 LPFC_EQ_CNT_1024); 14463 break; 14464 case 2048: 14465 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14466 LPFC_EQ_CNT_2048); 14467 break; 14468 case 4096: 14469 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14470 LPFC_EQ_CNT_4096); 14471 break; 14472 } 14473 list_for_each_entry(dmabuf, &eq->page_list, list) { 14474 memset(dmabuf->virt, 0, hw_page_size); 14475 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 14476 putPaddrLow(dmabuf->phys); 14477 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 14478 putPaddrHigh(dmabuf->phys); 14479 } 14480 mbox->vport = phba->pport; 14481 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 14482 mbox->context1 = NULL; 14483 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14484 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14485 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14486 if (shdr_status || shdr_add_status || rc) { 14487 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14488 "2500 EQ_CREATE mailbox failed with " 14489 "status x%x add_status x%x, mbx status x%x\n", 14490 shdr_status, shdr_add_status, rc); 14491 status = -ENXIO; 14492 } 14493 eq->type = LPFC_EQ; 14494 eq->subtype = LPFC_NONE; 14495 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); 14496 if (eq->queue_id == 0xFFFF) 14497 status = -ENXIO; 14498 eq->host_index = 0; 14499 eq->hba_index = 0; 14500 eq->entry_repost = LPFC_EQ_REPOST; 14501 14502 mempool_free(mbox, phba->mbox_mem_pool); 14503 return status; 14504 } 14505 14506 /** 14507 * lpfc_cq_create - Create a Completion Queue on the HBA 14508 * @phba: HBA structure that indicates port to create a queue on. 14509 * @cq: The queue structure to use to create the completion queue. 14510 * @eq: The event queue to bind this completion queue to. 14511 * 14512 * This function creates a completion queue, as detailed in @wq, on a port, 14513 * described by @phba by sending a CQ_CREATE mailbox command to the HBA. 14514 * 14515 * The @phba struct is used to send mailbox command to HBA. The @cq struct 14516 * is used to get the entry count and entry size that are necessary to 14517 * determine the number of pages to allocate and use for this queue. The @eq 14518 * is used to indicate which event queue to bind this completion queue to. This 14519 * function will send the CQ_CREATE mailbox command to the HBA to setup the 14520 * completion queue. This function is asynchronous and will wait for the mailbox 14521 * command to finish before continuing. 14522 * 14523 * On success this function will return a zero. If unable to allocate enough 14524 * memory this function will return -ENOMEM. If the queue create mailbox command 14525 * fails this function will return -ENXIO. 
14526 **/ 14527 int 14528 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, 14529 struct lpfc_queue *eq, uint32_t type, uint32_t subtype) 14530 { 14531 struct lpfc_mbx_cq_create *cq_create; 14532 struct lpfc_dmabuf *dmabuf; 14533 LPFC_MBOXQ_t *mbox; 14534 int rc, length, status = 0; 14535 uint32_t shdr_status, shdr_add_status; 14536 union lpfc_sli4_cfg_shdr *shdr; 14537 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14538 14539 /* sanity check on queue memory */ 14540 if (!cq || !eq) 14541 return -ENODEV; 14542 if (!phba->sli4_hba.pc_sli4_params.supported) 14543 hw_page_size = cq->page_size; 14544 14545 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14546 if (!mbox) 14547 return -ENOMEM; 14548 length = (sizeof(struct lpfc_mbx_cq_create) - 14549 sizeof(struct lpfc_sli4_cfg_mhdr)); 14550 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14551 LPFC_MBOX_OPCODE_CQ_CREATE, 14552 length, LPFC_SLI4_MBX_EMBED); 14553 cq_create = &mbox->u.mqe.un.cq_create; 14554 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; 14555 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, 14556 cq->page_count); 14557 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); 14558 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); 14559 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14560 phba->sli4_hba.pc_sli4_params.cqv); 14561 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { 14562 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 14563 (cq->page_size / SLI4_PAGE_SIZE)); 14564 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, 14565 eq->queue_id); 14566 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context, 14567 phba->sli4_hba.pc_sli4_params.cqav); 14568 } else { 14569 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, 14570 eq->queue_id); 14571 } 14572 switch (cq->entry_count) { 14573 case 2048: 14574 case 4096: 14575 if (phba->sli4_hba.pc_sli4_params.cqv == 14576 LPFC_Q_CREATE_VERSION_2) { 14577 cq_create->u.request.context.lpfc_cq_context_count = 14578 cq->entry_count; 14579 bf_set(lpfc_cq_context_count, 14580 &cq_create->u.request.context, 14581 LPFC_CQ_CNT_WORD7); 14582 break; 14583 } 14584 /* Fall Thru */ 14585 default: 14586 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14587 "0361 Unsupported CQ count: " 14588 "entry cnt %d sz %d pg cnt %d\n", 14589 cq->entry_count, cq->entry_size, 14590 cq->page_count); 14591 if (cq->entry_count < 256) { 14592 status = -EINVAL; 14593 goto out; 14594 } 14595 /* otherwise default to smallest count (drop through) */ 14596 case 256: 14597 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14598 LPFC_CQ_CNT_256); 14599 break; 14600 case 512: 14601 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14602 LPFC_CQ_CNT_512); 14603 break; 14604 case 1024: 14605 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14606 LPFC_CQ_CNT_1024); 14607 break; 14608 } 14609 list_for_each_entry(dmabuf, &cq->page_list, list) { 14610 memset(dmabuf->virt, 0, cq->page_size); 14611 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 14612 putPaddrLow(dmabuf->phys); 14613 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 14614 putPaddrHigh(dmabuf->phys); 14615 } 14616 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14617 14618 /* The IOCTL status is embedded in the mailbox subheader. 
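 * Both shdr_status and shdr_add_status must be zero, and the mailbox call
 * itself must succeed, or the create is failed below with -ENXIO.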
*/
14619 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14620 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14621 if (shdr_status || shdr_add_status || rc) {
14622 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14623 "2501 CQ_CREATE mailbox failed with "
14624 "status x%x add_status x%x, mbx status x%x\n",
14625 shdr_status, shdr_add_status, rc);
14626 status = -ENXIO;
14627 goto out;
14628 }
14629 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14630 if (cq->queue_id == 0xFFFF) {
14631 status = -ENXIO;
14632 goto out;
14633 }
14634 /* link the cq onto the parent eq child list */
14635 list_add_tail(&cq->list, &eq->child_list);
14636 /* Set up completion queue's type and subtype */
14637 cq->type = type;
14638 cq->subtype = subtype;
14639 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14640 cq->assoc_qid = eq->queue_id;
14641 cq->host_index = 0;
14642 cq->hba_index = 0;
14643 cq->entry_repost = LPFC_CQ_REPOST;
14644
14645 out:
14646 mempool_free(mbox, phba->mbox_mem_pool);
14647 return status;
14648 }
14649
14650 /**
14651 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
14652 * @phba: HBA structure that indicates port to create a queue on.
14653 * @cqp: The queue structure array to use to create the completion queues.
14654 * @eqp: The event queue array to bind these completion queues to.
14655 *
14656 * This function creates a set of completion queues to support MRQ,
14657 * as detailed in @cqp, on a port
14658 * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA.
14659 *
14660 * The @phba struct is used to send mailbox command to HBA. The @cqp array
14661 * is used to get the entry count and entry size that are necessary to
14662 * determine the number of pages to allocate and use for these queues. The @eqp
14663 * array is used to indicate which event queue to bind each completion queue to.
14664 * This function will send the CREATE_CQ_SET mailbox command to the HBA to set up
14665 * the completion queues. This function is synchronous and will wait for the
14666 * mailbox command to finish before continuing.
14667 *
14668 * On success this function will return a zero. If unable to allocate enough
14669 * memory this function will return -ENOMEM. If the queue create mailbox command
14670 * fails this function will return -ENXIO.
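 *
 * On success the HBA returns a single base queue ID and the driver assigns
 * consecutive IDs to the set, i.e. cqp[idx]->queue_id = base_id + idx for
 * each of the phba->cfg_nvmet_mrq queues created.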
14671 **/ 14672 int 14673 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, 14674 struct lpfc_queue **eqp, uint32_t type, uint32_t subtype) 14675 { 14676 struct lpfc_queue *cq; 14677 struct lpfc_queue *eq; 14678 struct lpfc_mbx_cq_create_set *cq_set; 14679 struct lpfc_dmabuf *dmabuf; 14680 LPFC_MBOXQ_t *mbox; 14681 int rc, length, alloclen, status = 0; 14682 int cnt, idx, numcq, page_idx = 0; 14683 uint32_t shdr_status, shdr_add_status; 14684 union lpfc_sli4_cfg_shdr *shdr; 14685 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14686 14687 /* sanity check on queue memory */ 14688 numcq = phba->cfg_nvmet_mrq; 14689 if (!cqp || !eqp || !numcq) 14690 return -ENODEV; 14691 14692 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14693 if (!mbox) 14694 return -ENOMEM; 14695 14696 length = sizeof(struct lpfc_mbx_cq_create_set); 14697 length += ((numcq * cqp[0]->page_count) * 14698 sizeof(struct dma_address)); 14699 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 14700 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length, 14701 LPFC_SLI4_MBX_NEMBED); 14702 if (alloclen < length) { 14703 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14704 "3098 Allocated DMA memory size (%d) is " 14705 "less than the requested DMA memory size " 14706 "(%d)\n", alloclen, length); 14707 status = -ENOMEM; 14708 goto out; 14709 } 14710 cq_set = mbox->sge_array->addr[0]; 14711 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr; 14712 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0); 14713 14714 for (idx = 0; idx < numcq; idx++) { 14715 cq = cqp[idx]; 14716 eq = eqp[idx]; 14717 if (!cq || !eq) { 14718 status = -ENOMEM; 14719 goto out; 14720 } 14721 if (!phba->sli4_hba.pc_sli4_params.supported) 14722 hw_page_size = cq->page_size; 14723 14724 switch (idx) { 14725 case 0: 14726 bf_set(lpfc_mbx_cq_create_set_page_size, 14727 &cq_set->u.request, 14728 (hw_page_size / SLI4_PAGE_SIZE)); 14729 bf_set(lpfc_mbx_cq_create_set_num_pages, 14730 &cq_set->u.request, cq->page_count); 14731 bf_set(lpfc_mbx_cq_create_set_evt, 14732 &cq_set->u.request, 1); 14733 bf_set(lpfc_mbx_cq_create_set_valid, 14734 &cq_set->u.request, 1); 14735 bf_set(lpfc_mbx_cq_create_set_cqe_size, 14736 &cq_set->u.request, 0); 14737 bf_set(lpfc_mbx_cq_create_set_num_cq, 14738 &cq_set->u.request, numcq); 14739 bf_set(lpfc_mbx_cq_create_set_autovalid, 14740 &cq_set->u.request, 14741 phba->sli4_hba.pc_sli4_params.cqav); 14742 switch (cq->entry_count) { 14743 case 2048: 14744 case 4096: 14745 if (phba->sli4_hba.pc_sli4_params.cqv == 14746 LPFC_Q_CREATE_VERSION_2) { 14747 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14748 &cq_set->u.request, 14749 cq->entry_count); 14750 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14751 &cq_set->u.request, 14752 LPFC_CQ_CNT_WORD7); 14753 break; 14754 } 14755 /* Fall Thru */ 14756 default: 14757 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14758 "3118 Bad CQ count. 
(%d)\n", 14759 cq->entry_count); 14760 if (cq->entry_count < 256) { 14761 status = -EINVAL; 14762 goto out; 14763 } 14764 /* otherwise default to smallest (drop thru) */ 14765 case 256: 14766 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14767 &cq_set->u.request, LPFC_CQ_CNT_256); 14768 break; 14769 case 512: 14770 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14771 &cq_set->u.request, LPFC_CQ_CNT_512); 14772 break; 14773 case 1024: 14774 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14775 &cq_set->u.request, LPFC_CQ_CNT_1024); 14776 break; 14777 } 14778 bf_set(lpfc_mbx_cq_create_set_eq_id0, 14779 &cq_set->u.request, eq->queue_id); 14780 break; 14781 case 1: 14782 bf_set(lpfc_mbx_cq_create_set_eq_id1, 14783 &cq_set->u.request, eq->queue_id); 14784 break; 14785 case 2: 14786 bf_set(lpfc_mbx_cq_create_set_eq_id2, 14787 &cq_set->u.request, eq->queue_id); 14788 break; 14789 case 3: 14790 bf_set(lpfc_mbx_cq_create_set_eq_id3, 14791 &cq_set->u.request, eq->queue_id); 14792 break; 14793 case 4: 14794 bf_set(lpfc_mbx_cq_create_set_eq_id4, 14795 &cq_set->u.request, eq->queue_id); 14796 break; 14797 case 5: 14798 bf_set(lpfc_mbx_cq_create_set_eq_id5, 14799 &cq_set->u.request, eq->queue_id); 14800 break; 14801 case 6: 14802 bf_set(lpfc_mbx_cq_create_set_eq_id6, 14803 &cq_set->u.request, eq->queue_id); 14804 break; 14805 case 7: 14806 bf_set(lpfc_mbx_cq_create_set_eq_id7, 14807 &cq_set->u.request, eq->queue_id); 14808 break; 14809 case 8: 14810 bf_set(lpfc_mbx_cq_create_set_eq_id8, 14811 &cq_set->u.request, eq->queue_id); 14812 break; 14813 case 9: 14814 bf_set(lpfc_mbx_cq_create_set_eq_id9, 14815 &cq_set->u.request, eq->queue_id); 14816 break; 14817 case 10: 14818 bf_set(lpfc_mbx_cq_create_set_eq_id10, 14819 &cq_set->u.request, eq->queue_id); 14820 break; 14821 case 11: 14822 bf_set(lpfc_mbx_cq_create_set_eq_id11, 14823 &cq_set->u.request, eq->queue_id); 14824 break; 14825 case 12: 14826 bf_set(lpfc_mbx_cq_create_set_eq_id12, 14827 &cq_set->u.request, eq->queue_id); 14828 break; 14829 case 13: 14830 bf_set(lpfc_mbx_cq_create_set_eq_id13, 14831 &cq_set->u.request, eq->queue_id); 14832 break; 14833 case 14: 14834 bf_set(lpfc_mbx_cq_create_set_eq_id14, 14835 &cq_set->u.request, eq->queue_id); 14836 break; 14837 case 15: 14838 bf_set(lpfc_mbx_cq_create_set_eq_id15, 14839 &cq_set->u.request, eq->queue_id); 14840 break; 14841 } 14842 14843 /* link the cq onto the parent eq child list */ 14844 list_add_tail(&cq->list, &eq->child_list); 14845 /* Set up completion queue's type and subtype */ 14846 cq->type = type; 14847 cq->subtype = subtype; 14848 cq->assoc_qid = eq->queue_id; 14849 cq->host_index = 0; 14850 cq->hba_index = 0; 14851 cq->entry_repost = LPFC_CQ_REPOST; 14852 cq->chann = idx; 14853 14854 rc = 0; 14855 list_for_each_entry(dmabuf, &cq->page_list, list) { 14856 memset(dmabuf->virt, 0, hw_page_size); 14857 cnt = page_idx + dmabuf->buffer_tag; 14858 cq_set->u.request.page[cnt].addr_lo = 14859 putPaddrLow(dmabuf->phys); 14860 cq_set->u.request.page[cnt].addr_hi = 14861 putPaddrHigh(dmabuf->phys); 14862 rc++; 14863 } 14864 page_idx += rc; 14865 } 14866 14867 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14868 14869 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 14870 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14871 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14872 if (shdr_status || shdr_add_status || rc) { 14873 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14874 "3119 CQ_CREATE_SET mailbox failed with " 14875 "status x%x add_status x%x, mbx status x%x\n", 14876 shdr_status, shdr_add_status, rc); 14877 status = -ENXIO; 14878 goto out; 14879 } 14880 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response); 14881 if (rc == 0xFFFF) { 14882 status = -ENXIO; 14883 goto out; 14884 } 14885 14886 for (idx = 0; idx < numcq; idx++) { 14887 cq = cqp[idx]; 14888 cq->queue_id = rc + idx; 14889 } 14890 14891 out: 14892 lpfc_sli4_mbox_cmd_free(phba, mbox); 14893 return status; 14894 } 14895 14896 /** 14897 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration 14898 * @phba: HBA structure that indicates port to create a queue on. 14899 * @mq: The queue structure to use to create the mailbox queue. 14900 * @mbox: An allocated pointer to type LPFC_MBOXQ_t 14901 * @cq: The completion queue to associate with this cq. 14902 * 14903 * This function provides failback (fb) functionality when the 14904 * mq_create_ext fails on older FW generations. It's purpose is identical 14905 * to mq_create_ext otherwise. 14906 * 14907 * This routine cannot fail as all attributes were previously accessed and 14908 * initialized in mq_create_ext. 14909 **/ 14910 static void 14911 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, 14912 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq) 14913 { 14914 struct lpfc_mbx_mq_create *mq_create; 14915 struct lpfc_dmabuf *dmabuf; 14916 int length; 14917 14918 length = (sizeof(struct lpfc_mbx_mq_create) - 14919 sizeof(struct lpfc_sli4_cfg_mhdr)); 14920 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14921 LPFC_MBOX_OPCODE_MQ_CREATE, 14922 length, LPFC_SLI4_MBX_EMBED); 14923 mq_create = &mbox->u.mqe.un.mq_create; 14924 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, 14925 mq->page_count); 14926 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, 14927 cq->queue_id); 14928 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); 14929 switch (mq->entry_count) { 14930 case 16: 14931 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 14932 LPFC_MQ_RING_SIZE_16); 14933 break; 14934 case 32: 14935 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 14936 LPFC_MQ_RING_SIZE_32); 14937 break; 14938 case 64: 14939 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 14940 LPFC_MQ_RING_SIZE_64); 14941 break; 14942 case 128: 14943 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 14944 LPFC_MQ_RING_SIZE_128); 14945 break; 14946 } 14947 list_for_each_entry(dmabuf, &mq->page_list, list) { 14948 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 14949 putPaddrLow(dmabuf->phys); 14950 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 14951 putPaddrHigh(dmabuf->phys); 14952 } 14953 } 14954 14955 /** 14956 * lpfc_mq_create - Create a mailbox Queue on the HBA 14957 * @phba: HBA structure that indicates port to create a queue on. 14958 * @mq: The queue structure to use to create the mailbox queue. 14959 * @cq: The completion queue to associate with this cq. 14960 * @subtype: The queue's subtype. 14961 * 14962 * This function creates a mailbox queue, as detailed in @mq, on a port, 14963 * described by @phba by sending a MQ_CREATE mailbox command to the HBA. 
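 *
 * The routine first tries MQ_CREATE_EXT so that the asynchronous event
 * groups (link, FIP, group5, FC and SLI) are registered in the same command;
 * if the firmware rejects it, the request is rebuilt by
 * lpfc_mq_create_fb_init() and reissued as a plain MQ_CREATE. In outline:
 *
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 *	if (rc != MBX_SUCCESS) {
 *		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
 *		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 *	}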
14964 * 14965 * The @phba struct is used to send mailbox command to HBA. The @cq struct 14966 * is used to get the entry count and entry size that are necessary to 14967 * determine the number of pages to allocate and use for this queue. This 14968 * function will send the MQ_CREATE mailbox command to the HBA to setup the 14969 * mailbox queue. This function is asynchronous and will wait for the mailbox 14970 * command to finish before continuing. 14971 * 14972 * On success this function will return a zero. If unable to allocate enough 14973 * memory this function will return -ENOMEM. If the queue create mailbox command 14974 * fails this function will return -ENXIO. 14975 **/ 14976 int32_t 14977 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, 14978 struct lpfc_queue *cq, uint32_t subtype) 14979 { 14980 struct lpfc_mbx_mq_create *mq_create; 14981 struct lpfc_mbx_mq_create_ext *mq_create_ext; 14982 struct lpfc_dmabuf *dmabuf; 14983 LPFC_MBOXQ_t *mbox; 14984 int rc, length, status = 0; 14985 uint32_t shdr_status, shdr_add_status; 14986 union lpfc_sli4_cfg_shdr *shdr; 14987 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14988 14989 /* sanity check on queue memory */ 14990 if (!mq || !cq) 14991 return -ENODEV; 14992 if (!phba->sli4_hba.pc_sli4_params.supported) 14993 hw_page_size = SLI4_PAGE_SIZE; 14994 14995 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14996 if (!mbox) 14997 return -ENOMEM; 14998 length = (sizeof(struct lpfc_mbx_mq_create_ext) - 14999 sizeof(struct lpfc_sli4_cfg_mhdr)); 15000 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 15001 LPFC_MBOX_OPCODE_MQ_CREATE_EXT, 15002 length, LPFC_SLI4_MBX_EMBED); 15003 15004 mq_create_ext = &mbox->u.mqe.un.mq_create_ext; 15005 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; 15006 bf_set(lpfc_mbx_mq_create_ext_num_pages, 15007 &mq_create_ext->u.request, mq->page_count); 15008 bf_set(lpfc_mbx_mq_create_ext_async_evt_link, 15009 &mq_create_ext->u.request, 1); 15010 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip, 15011 &mq_create_ext->u.request, 1); 15012 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, 15013 &mq_create_ext->u.request, 1); 15014 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc, 15015 &mq_create_ext->u.request, 1); 15016 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, 15017 &mq_create_ext->u.request, 1); 15018 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); 15019 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15020 phba->sli4_hba.pc_sli4_params.mqv); 15021 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) 15022 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, 15023 cq->queue_id); 15024 else 15025 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, 15026 cq->queue_id); 15027 switch (mq->entry_count) { 15028 default: 15029 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15030 "0362 Unsupported MQ count. 
(%d)\n", 15031 mq->entry_count); 15032 if (mq->entry_count < 16) { 15033 status = -EINVAL; 15034 goto out; 15035 } 15036 /* otherwise default to smallest count (drop through) */ 15037 case 16: 15038 bf_set(lpfc_mq_context_ring_size, 15039 &mq_create_ext->u.request.context, 15040 LPFC_MQ_RING_SIZE_16); 15041 break; 15042 case 32: 15043 bf_set(lpfc_mq_context_ring_size, 15044 &mq_create_ext->u.request.context, 15045 LPFC_MQ_RING_SIZE_32); 15046 break; 15047 case 64: 15048 bf_set(lpfc_mq_context_ring_size, 15049 &mq_create_ext->u.request.context, 15050 LPFC_MQ_RING_SIZE_64); 15051 break; 15052 case 128: 15053 bf_set(lpfc_mq_context_ring_size, 15054 &mq_create_ext->u.request.context, 15055 LPFC_MQ_RING_SIZE_128); 15056 break; 15057 } 15058 list_for_each_entry(dmabuf, &mq->page_list, list) { 15059 memset(dmabuf->virt, 0, hw_page_size); 15060 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = 15061 putPaddrLow(dmabuf->phys); 15062 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = 15063 putPaddrHigh(dmabuf->phys); 15064 } 15065 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15066 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 15067 &mq_create_ext->u.response); 15068 if (rc != MBX_SUCCESS) { 15069 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15070 "2795 MQ_CREATE_EXT failed with " 15071 "status x%x. Failback to MQ_CREATE.\n", 15072 rc); 15073 lpfc_mq_create_fb_init(phba, mq, mbox, cq); 15074 mq_create = &mbox->u.mqe.un.mq_create; 15075 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15076 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; 15077 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 15078 &mq_create->u.response); 15079 } 15080 15081 /* The IOCTL status is embedded in the mailbox subheader. */ 15082 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15083 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15084 if (shdr_status || shdr_add_status || rc) { 15085 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15086 "2502 MQ_CREATE mailbox failed with " 15087 "status x%x add_status x%x, mbx status x%x\n", 15088 shdr_status, shdr_add_status, rc); 15089 status = -ENXIO; 15090 goto out; 15091 } 15092 if (mq->queue_id == 0xFFFF) { 15093 status = -ENXIO; 15094 goto out; 15095 } 15096 mq->type = LPFC_MQ; 15097 mq->assoc_qid = cq->queue_id; 15098 mq->subtype = subtype; 15099 mq->host_index = 0; 15100 mq->hba_index = 0; 15101 mq->entry_repost = LPFC_MQ_REPOST; 15102 15103 /* link the mq onto the parent cq child list */ 15104 list_add_tail(&mq->list, &cq->child_list); 15105 out: 15106 mempool_free(mbox, phba->mbox_mem_pool); 15107 return status; 15108 } 15109 15110 /** 15111 * lpfc_wq_create - Create a Work Queue on the HBA 15112 * @phba: HBA structure that indicates port to create a queue on. 15113 * @wq: The queue structure to use to create the work queue. 15114 * @cq: The completion queue to bind this work queue to. 15115 * @subtype: The subtype of the work queue indicating its functionality. 15116 * 15117 * This function creates a work queue, as detailed in @wq, on a port, described 15118 * by @phba by sending a WQ_CREATE mailbox command to the HBA. 15119 * 15120 * The @phba struct is used to send mailbox command to HBA. The @wq struct 15121 * is used to get the entry count and entry size that are necessary to 15122 * determine the number of pages to allocate and use for this queue. The @cq 15123 * is used to indicate which completion queue to bind this work queue to. 
This 15124 * function will send the WQ_CREATE mailbox command to the HBA to setup the 15125 * work queue. This function is asynchronous and will wait for the mailbox 15126 * command to finish before continuing. 15127 * 15128 * On success this function will return a zero. If unable to allocate enough 15129 * memory this function will return -ENOMEM. If the queue create mailbox command 15130 * fails this function will return -ENXIO. 15131 **/ 15132 int 15133 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, 15134 struct lpfc_queue *cq, uint32_t subtype) 15135 { 15136 struct lpfc_mbx_wq_create *wq_create; 15137 struct lpfc_dmabuf *dmabuf; 15138 LPFC_MBOXQ_t *mbox; 15139 int rc, length, status = 0; 15140 uint32_t shdr_status, shdr_add_status; 15141 union lpfc_sli4_cfg_shdr *shdr; 15142 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15143 struct dma_address *page; 15144 void __iomem *bar_memmap_p; 15145 uint32_t db_offset; 15146 uint16_t pci_barset; 15147 uint8_t dpp_barset; 15148 uint32_t dpp_offset; 15149 unsigned long pg_addr; 15150 uint8_t wq_create_version; 15151 15152 /* sanity check on queue memory */ 15153 if (!wq || !cq) 15154 return -ENODEV; 15155 if (!phba->sli4_hba.pc_sli4_params.supported) 15156 hw_page_size = wq->page_size; 15157 15158 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15159 if (!mbox) 15160 return -ENOMEM; 15161 length = (sizeof(struct lpfc_mbx_wq_create) - 15162 sizeof(struct lpfc_sli4_cfg_mhdr)); 15163 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15164 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, 15165 length, LPFC_SLI4_MBX_EMBED); 15166 wq_create = &mbox->u.mqe.un.wq_create; 15167 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; 15168 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, 15169 wq->page_count); 15170 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, 15171 cq->queue_id); 15172 15173 /* wqv is the earliest version supported, NOT the latest */ 15174 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15175 phba->sli4_hba.pc_sli4_params.wqv); 15176 15177 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) || 15178 (wq->page_size > SLI4_PAGE_SIZE)) 15179 wq_create_version = LPFC_Q_CREATE_VERSION_1; 15180 else 15181 wq_create_version = LPFC_Q_CREATE_VERSION_0; 15182 15183 15184 if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) 15185 wq_create_version = LPFC_Q_CREATE_VERSION_1; 15186 else 15187 wq_create_version = LPFC_Q_CREATE_VERSION_0; 15188 15189 switch (wq_create_version) { 15190 case LPFC_Q_CREATE_VERSION_1: 15191 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, 15192 wq->entry_count); 15193 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15194 LPFC_Q_CREATE_VERSION_1); 15195 15196 switch (wq->entry_size) { 15197 default: 15198 case 64: 15199 bf_set(lpfc_mbx_wq_create_wqe_size, 15200 &wq_create->u.request_1, 15201 LPFC_WQ_WQE_SIZE_64); 15202 break; 15203 case 128: 15204 bf_set(lpfc_mbx_wq_create_wqe_size, 15205 &wq_create->u.request_1, 15206 LPFC_WQ_WQE_SIZE_128); 15207 break; 15208 } 15209 /* Request DPP by default */ 15210 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1); 15211 bf_set(lpfc_mbx_wq_create_page_size, 15212 &wq_create->u.request_1, 15213 (wq->page_size / SLI4_PAGE_SIZE)); 15214 page = wq_create->u.request_1.page; 15215 break; 15216 default: 15217 page = wq_create->u.request.page; 15218 break; 15219 } 15220 15221 list_for_each_entry(dmabuf, &wq->page_list, list) { 15222 memset(dmabuf->virt, 0, hw_page_size); 15223 
page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); 15224 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); 15225 } 15226 15227 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15228 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1); 15229 15230 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15231 /* The IOCTL status is embedded in the mailbox subheader. */ 15232 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15233 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15234 if (shdr_status || shdr_add_status || rc) { 15235 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15236 "2503 WQ_CREATE mailbox failed with " 15237 "status x%x add_status x%x, mbx status x%x\n", 15238 shdr_status, shdr_add_status, rc); 15239 status = -ENXIO; 15240 goto out; 15241 } 15242 15243 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) 15244 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, 15245 &wq_create->u.response); 15246 else 15247 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id, 15248 &wq_create->u.response_1); 15249 15250 if (wq->queue_id == 0xFFFF) { 15251 status = -ENXIO; 15252 goto out; 15253 } 15254 15255 wq->db_format = LPFC_DB_LIST_FORMAT; 15256 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) { 15257 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 15258 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format, 15259 &wq_create->u.response); 15260 if ((wq->db_format != LPFC_DB_LIST_FORMAT) && 15261 (wq->db_format != LPFC_DB_RING_FORMAT)) { 15262 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15263 "3265 WQ[%d] doorbell format " 15264 "not supported: x%x\n", 15265 wq->queue_id, wq->db_format); 15266 status = -EINVAL; 15267 goto out; 15268 } 15269 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set, 15270 &wq_create->u.response); 15271 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15272 pci_barset); 15273 if (!bar_memmap_p) { 15274 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15275 "3263 WQ[%d] failed to memmap " 15276 "pci barset:x%x\n", 15277 wq->queue_id, pci_barset); 15278 status = -ENOMEM; 15279 goto out; 15280 } 15281 db_offset = wq_create->u.response.doorbell_offset; 15282 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) && 15283 (db_offset != LPFC_ULP1_WQ_DOORBELL)) { 15284 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15285 "3252 WQ[%d] doorbell offset " 15286 "not supported: x%x\n", 15287 wq->queue_id, db_offset); 15288 status = -EINVAL; 15289 goto out; 15290 } 15291 wq->db_regaddr = bar_memmap_p + db_offset; 15292 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15293 "3264 WQ[%d]: barset:x%x, offset:x%x, " 15294 "format:x%x\n", wq->queue_id, 15295 pci_barset, db_offset, wq->db_format); 15296 } else 15297 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 15298 } else { 15299 /* Check if DPP was honored by the firmware */ 15300 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp, 15301 &wq_create->u.response_1); 15302 if (wq->dpp_enable) { 15303 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set, 15304 &wq_create->u.response_1); 15305 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15306 pci_barset); 15307 if (!bar_memmap_p) { 15308 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15309 "3267 WQ[%d] failed to memmap " 15310 "pci barset:x%x\n", 15311 wq->queue_id, pci_barset); 15312 status = -ENOMEM; 15313 goto out; 15314 } 15315 db_offset = wq_create->u.response_1.doorbell_offset; 15316 wq->db_regaddr = bar_memmap_p + db_offset; 15317 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id, 15318 &wq_create->u.response_1); 15319 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar, 15320 
&wq_create->u.response_1); 15321 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15322 dpp_barset); 15323 if (!bar_memmap_p) { 15324 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15325 "3268 WQ[%d] failed to memmap " 15326 "pci barset:x%x\n", 15327 wq->queue_id, dpp_barset); 15328 status = -ENOMEM; 15329 goto out; 15330 } 15331 dpp_offset = wq_create->u.response_1.dpp_offset; 15332 wq->dpp_regaddr = bar_memmap_p + dpp_offset; 15333 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15334 "3271 WQ[%d]: barset:x%x, offset:x%x, " 15335 "dpp_id:x%x dpp_barset:x%x " 15336 "dpp_offset:x%x\n", 15337 wq->queue_id, pci_barset, db_offset, 15338 wq->dpp_id, dpp_barset, dpp_offset); 15339 15340 /* Enable combined writes for DPP aperture */ 15341 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK; 15342 #ifdef CONFIG_X86 15343 rc = set_memory_wc(pg_addr, 1); 15344 if (rc) { 15345 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15346 "3272 Cannot setup Combined " 15347 "Write on WQ[%d] - disable DPP\n", 15348 wq->queue_id); 15349 phba->cfg_enable_dpp = 0; 15350 } 15351 #else 15352 phba->cfg_enable_dpp = 0; 15353 #endif 15354 } else 15355 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 15356 } 15357 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL); 15358 if (wq->pring == NULL) { 15359 status = -ENOMEM; 15360 goto out; 15361 } 15362 wq->type = LPFC_WQ; 15363 wq->assoc_qid = cq->queue_id; 15364 wq->subtype = subtype; 15365 wq->host_index = 0; 15366 wq->hba_index = 0; 15367 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL; 15368 15369 /* link the wq onto the parent cq child list */ 15370 list_add_tail(&wq->list, &cq->child_list); 15371 out: 15372 mempool_free(mbox, phba->mbox_mem_pool); 15373 return status; 15374 } 15375 15376 /** 15377 * lpfc_rq_create - Create a Receive Queue on the HBA 15378 * @phba: HBA structure that indicates port to create a queue on. 15379 * @hrq: The queue structure to use to create the header receive queue. 15380 * @drq: The queue structure to use to create the data receive queue. 15381 * @cq: The completion queue to bind this work queue to. 15382 * 15383 * This function creates a receive buffer queue pair , as detailed in @hrq and 15384 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 15385 * to the HBA. 15386 * 15387 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 15388 * struct is used to get the entry count that is necessary to determine the 15389 * number of pages to use for this queue. The @cq is used to indicate which 15390 * completion queue to bind received buffers that are posted to these queues to. 15391 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 15392 * receive queue pair. This function is asynchronous and will wait for the 15393 * mailbox command to finish before continuing. 15394 * 15395 * On success this function will return a zero. If unable to allocate enough 15396 * memory this function will return -ENOMEM. If the queue create mailbox command 15397 * fails this function will return -ENXIO. 
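 *
 * Illustrative call (a sketch only; the queue variables are hypothetical and
 * are assumed to have been allocated with lpfc_sli4_queue_alloc() with
 * matching entry counts):
 *
 *	rc = lpfc_rq_create(phba, hdr_rq, dat_rq, els_cq, LPFC_USOL);
 *	if (rc)
 *		goto out_free_queues;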
15398 **/ 15399 int 15400 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, 15401 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) 15402 { 15403 struct lpfc_mbx_rq_create *rq_create; 15404 struct lpfc_dmabuf *dmabuf; 15405 LPFC_MBOXQ_t *mbox; 15406 int rc, length, status = 0; 15407 uint32_t shdr_status, shdr_add_status; 15408 union lpfc_sli4_cfg_shdr *shdr; 15409 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15410 void __iomem *bar_memmap_p; 15411 uint32_t db_offset; 15412 uint16_t pci_barset; 15413 15414 /* sanity check on queue memory */ 15415 if (!hrq || !drq || !cq) 15416 return -ENODEV; 15417 if (!phba->sli4_hba.pc_sli4_params.supported) 15418 hw_page_size = SLI4_PAGE_SIZE; 15419 15420 if (hrq->entry_count != drq->entry_count) 15421 return -EINVAL; 15422 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15423 if (!mbox) 15424 return -ENOMEM; 15425 length = (sizeof(struct lpfc_mbx_rq_create) - 15426 sizeof(struct lpfc_sli4_cfg_mhdr)); 15427 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15428 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 15429 length, LPFC_SLI4_MBX_EMBED); 15430 rq_create = &mbox->u.mqe.un.rq_create; 15431 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 15432 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15433 phba->sli4_hba.pc_sli4_params.rqv); 15434 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 15435 bf_set(lpfc_rq_context_rqe_count_1, 15436 &rq_create->u.request.context, 15437 hrq->entry_count); 15438 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; 15439 bf_set(lpfc_rq_context_rqe_size, 15440 &rq_create->u.request.context, 15441 LPFC_RQE_SIZE_8); 15442 bf_set(lpfc_rq_context_page_size, 15443 &rq_create->u.request.context, 15444 LPFC_RQ_PAGE_SIZE_4096); 15445 } else { 15446 switch (hrq->entry_count) { 15447 default: 15448 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15449 "2535 Unsupported RQ count. (%d)\n", 15450 hrq->entry_count); 15451 if (hrq->entry_count < 512) { 15452 status = -EINVAL; 15453 goto out; 15454 } 15455 /* otherwise default to smallest count (drop through) */ 15456 case 512: 15457 bf_set(lpfc_rq_context_rqe_count, 15458 &rq_create->u.request.context, 15459 LPFC_RQ_RING_SIZE_512); 15460 break; 15461 case 1024: 15462 bf_set(lpfc_rq_context_rqe_count, 15463 &rq_create->u.request.context, 15464 LPFC_RQ_RING_SIZE_1024); 15465 break; 15466 case 2048: 15467 bf_set(lpfc_rq_context_rqe_count, 15468 &rq_create->u.request.context, 15469 LPFC_RQ_RING_SIZE_2048); 15470 break; 15471 case 4096: 15472 bf_set(lpfc_rq_context_rqe_count, 15473 &rq_create->u.request.context, 15474 LPFC_RQ_RING_SIZE_4096); 15475 break; 15476 } 15477 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 15478 LPFC_HDR_BUF_SIZE); 15479 } 15480 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 15481 cq->queue_id); 15482 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 15483 hrq->page_count); 15484 list_for_each_entry(dmabuf, &hrq->page_list, list) { 15485 memset(dmabuf->virt, 0, hw_page_size); 15486 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 15487 putPaddrLow(dmabuf->phys); 15488 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 15489 putPaddrHigh(dmabuf->phys); 15490 } 15491 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15492 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 15493 15494 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15495 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 15496 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15497 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15498 if (shdr_status || shdr_add_status || rc) { 15499 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15500 "2504 RQ_CREATE mailbox failed with " 15501 "status x%x add_status x%x, mbx status x%x\n", 15502 shdr_status, shdr_add_status, rc); 15503 status = -ENXIO; 15504 goto out; 15505 } 15506 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 15507 if (hrq->queue_id == 0xFFFF) { 15508 status = -ENXIO; 15509 goto out; 15510 } 15511 15512 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 15513 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format, 15514 &rq_create->u.response); 15515 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) && 15516 (hrq->db_format != LPFC_DB_RING_FORMAT)) { 15517 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15518 "3262 RQ [%d] doorbell format not " 15519 "supported: x%x\n", hrq->queue_id, 15520 hrq->db_format); 15521 status = -EINVAL; 15522 goto out; 15523 } 15524 15525 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set, 15526 &rq_create->u.response); 15527 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); 15528 if (!bar_memmap_p) { 15529 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15530 "3269 RQ[%d] failed to memmap pci " 15531 "barset:x%x\n", hrq->queue_id, 15532 pci_barset); 15533 status = -ENOMEM; 15534 goto out; 15535 } 15536 15537 db_offset = rq_create->u.response.doorbell_offset; 15538 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) && 15539 (db_offset != LPFC_ULP1_RQ_DOORBELL)) { 15540 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15541 "3270 RQ[%d] doorbell offset not " 15542 "supported: x%x\n", hrq->queue_id, 15543 db_offset); 15544 status = -EINVAL; 15545 goto out; 15546 } 15547 hrq->db_regaddr = bar_memmap_p + db_offset; 15548 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15549 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, " 15550 "format:x%x\n", hrq->queue_id, pci_barset, 15551 db_offset, hrq->db_format); 15552 } else { 15553 hrq->db_format = LPFC_DB_RING_FORMAT; 15554 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 15555 } 15556 hrq->type = LPFC_HRQ; 15557 hrq->assoc_qid = cq->queue_id; 15558 hrq->subtype = subtype; 15559 hrq->host_index = 0; 15560 hrq->hba_index = 0; 15561 hrq->entry_repost = LPFC_RQ_REPOST; 15562 15563 /* now create the data queue */ 15564 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15565 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 15566 length, LPFC_SLI4_MBX_EMBED); 15567 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15568 phba->sli4_hba.pc_sli4_params.rqv); 15569 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 15570 bf_set(lpfc_rq_context_rqe_count_1, 15571 &rq_create->u.request.context, hrq->entry_count); 15572 if (subtype == LPFC_NVMET) 15573 rq_create->u.request.context.buffer_size = 15574 LPFC_NVMET_DATA_BUF_SIZE; 15575 else 15576 rq_create->u.request.context.buffer_size = 15577 LPFC_DATA_BUF_SIZE; 15578 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, 15579 LPFC_RQE_SIZE_8); 15580 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, 15581 (PAGE_SIZE/SLI4_PAGE_SIZE)); 15582 } else { 15583 switch (drq->entry_count) { 15584 default: 15585 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15586 "2536 Unsupported RQ count. 
(%d)\n", 15587 drq->entry_count); 15588 if (drq->entry_count < 512) { 15589 status = -EINVAL; 15590 goto out; 15591 } 15592 /* otherwise default to smallest count (drop through) */ 15593 case 512: 15594 bf_set(lpfc_rq_context_rqe_count, 15595 &rq_create->u.request.context, 15596 LPFC_RQ_RING_SIZE_512); 15597 break; 15598 case 1024: 15599 bf_set(lpfc_rq_context_rqe_count, 15600 &rq_create->u.request.context, 15601 LPFC_RQ_RING_SIZE_1024); 15602 break; 15603 case 2048: 15604 bf_set(lpfc_rq_context_rqe_count, 15605 &rq_create->u.request.context, 15606 LPFC_RQ_RING_SIZE_2048); 15607 break; 15608 case 4096: 15609 bf_set(lpfc_rq_context_rqe_count, 15610 &rq_create->u.request.context, 15611 LPFC_RQ_RING_SIZE_4096); 15612 break; 15613 } 15614 if (subtype == LPFC_NVMET) 15615 bf_set(lpfc_rq_context_buf_size, 15616 &rq_create->u.request.context, 15617 LPFC_NVMET_DATA_BUF_SIZE); 15618 else 15619 bf_set(lpfc_rq_context_buf_size, 15620 &rq_create->u.request.context, 15621 LPFC_DATA_BUF_SIZE); 15622 } 15623 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 15624 cq->queue_id); 15625 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 15626 drq->page_count); 15627 list_for_each_entry(dmabuf, &drq->page_list, list) { 15628 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 15629 putPaddrLow(dmabuf->phys); 15630 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 15631 putPaddrHigh(dmabuf->phys); 15632 } 15633 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15634 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 15635 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15636 /* The IOCTL status is embedded in the mailbox subheader. */ 15637 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 15638 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15639 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15640 if (shdr_status || shdr_add_status || rc) { 15641 status = -ENXIO; 15642 goto out; 15643 } 15644 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 15645 if (drq->queue_id == 0xFFFF) { 15646 status = -ENXIO; 15647 goto out; 15648 } 15649 drq->type = LPFC_DRQ; 15650 drq->assoc_qid = cq->queue_id; 15651 drq->subtype = subtype; 15652 drq->host_index = 0; 15653 drq->hba_index = 0; 15654 drq->entry_repost = LPFC_RQ_REPOST; 15655 15656 /* link the header and data RQs onto the parent cq child list */ 15657 list_add_tail(&hrq->list, &cq->child_list); 15658 list_add_tail(&drq->list, &cq->child_list); 15659 15660 out: 15661 mempool_free(mbox, phba->mbox_mem_pool); 15662 return status; 15663 } 15664 15665 /** 15666 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA 15667 * @phba: HBA structure that indicates port to create a queue on. 15668 * @hrqp: The queue structure array to use to create the header receive queues. 15669 * @drqp: The queue structure array to use to create the data receive queues. 15670 * @cqp: The completion queue array to bind these receive queues to. 15671 * 15672 * This function creates a receive buffer queue pair , as detailed in @hrq and 15673 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 15674 * to the HBA. 15675 * 15676 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 15677 * struct is used to get the entry count that is necessary to determine the 15678 * number of pages to use for this queue. The @cq is used to indicate which 15679 * completion queue to bind received buffers that are posted to these queues to. 
15680 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 15681 * receive queue pair. This function is asynchronous and will wait for the 15682 * mailbox command to finish before continuing. 15683 * 15684 * On success this function will return a zero. If unable to allocate enough 15685 * memory this function will return -ENOMEM. If the queue create mailbox command 15686 * fails this function will return -ENXIO. 15687 **/ 15688 int 15689 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, 15690 struct lpfc_queue **drqp, struct lpfc_queue **cqp, 15691 uint32_t subtype) 15692 { 15693 struct lpfc_queue *hrq, *drq, *cq; 15694 struct lpfc_mbx_rq_create_v2 *rq_create; 15695 struct lpfc_dmabuf *dmabuf; 15696 LPFC_MBOXQ_t *mbox; 15697 int rc, length, alloclen, status = 0; 15698 int cnt, idx, numrq, page_idx = 0; 15699 uint32_t shdr_status, shdr_add_status; 15700 union lpfc_sli4_cfg_shdr *shdr; 15701 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15702 15703 numrq = phba->cfg_nvmet_mrq; 15704 /* sanity check on array memory */ 15705 if (!hrqp || !drqp || !cqp || !numrq) 15706 return -ENODEV; 15707 if (!phba->sli4_hba.pc_sli4_params.supported) 15708 hw_page_size = SLI4_PAGE_SIZE; 15709 15710 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15711 if (!mbox) 15712 return -ENOMEM; 15713 15714 length = sizeof(struct lpfc_mbx_rq_create_v2); 15715 length += ((2 * numrq * hrqp[0]->page_count) * 15716 sizeof(struct dma_address)); 15717 15718 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15719 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length, 15720 LPFC_SLI4_MBX_NEMBED); 15721 if (alloclen < length) { 15722 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15723 "3099 Allocated DMA memory size (%d) is " 15724 "less than the requested DMA memory size " 15725 "(%d)\n", alloclen, length); 15726 status = -ENOMEM; 15727 goto out; 15728 } 15729 15730 15731 15732 rq_create = mbox->sge_array->addr[0]; 15733 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr; 15734 15735 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2); 15736 cnt = 0; 15737 15738 for (idx = 0; idx < numrq; idx++) { 15739 hrq = hrqp[idx]; 15740 drq = drqp[idx]; 15741 cq = cqp[idx]; 15742 15743 /* sanity check on queue memory */ 15744 if (!hrq || !drq || !cq) { 15745 status = -ENODEV; 15746 goto out; 15747 } 15748 15749 if (hrq->entry_count != drq->entry_count) { 15750 status = -EINVAL; 15751 goto out; 15752 } 15753 15754 if (idx == 0) { 15755 bf_set(lpfc_mbx_rq_create_num_pages, 15756 &rq_create->u.request, 15757 hrq->page_count); 15758 bf_set(lpfc_mbx_rq_create_rq_cnt, 15759 &rq_create->u.request, (numrq * 2)); 15760 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request, 15761 1); 15762 bf_set(lpfc_rq_context_base_cq, 15763 &rq_create->u.request.context, 15764 cq->queue_id); 15765 bf_set(lpfc_rq_context_data_size, 15766 &rq_create->u.request.context, 15767 LPFC_NVMET_DATA_BUF_SIZE); 15768 bf_set(lpfc_rq_context_hdr_size, 15769 &rq_create->u.request.context, 15770 LPFC_HDR_BUF_SIZE); 15771 bf_set(lpfc_rq_context_rqe_count_1, 15772 &rq_create->u.request.context, 15773 hrq->entry_count); 15774 bf_set(lpfc_rq_context_rqe_size, 15775 &rq_create->u.request.context, 15776 LPFC_RQE_SIZE_8); 15777 bf_set(lpfc_rq_context_page_size, 15778 &rq_create->u.request.context, 15779 (PAGE_SIZE/SLI4_PAGE_SIZE)); 15780 } 15781 rc = 0; 15782 list_for_each_entry(dmabuf, &hrq->page_list, list) { 15783 memset(dmabuf->virt, 0, hw_page_size); 15784 cnt = page_idx + dmabuf->buffer_tag; 
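		/*
		 * All header and data RQs created by this one command share a
		 * single page array; page_idx carries the running offset as
		 * each queue's pages are appended below.
		 */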
15785 rq_create->u.request.page[cnt].addr_lo = 15786 putPaddrLow(dmabuf->phys); 15787 rq_create->u.request.page[cnt].addr_hi = 15788 putPaddrHigh(dmabuf->phys); 15789 rc++; 15790 } 15791 page_idx += rc; 15792 15793 rc = 0; 15794 list_for_each_entry(dmabuf, &drq->page_list, list) { 15795 memset(dmabuf->virt, 0, hw_page_size); 15796 cnt = page_idx + dmabuf->buffer_tag; 15797 rq_create->u.request.page[cnt].addr_lo = 15798 putPaddrLow(dmabuf->phys); 15799 rq_create->u.request.page[cnt].addr_hi = 15800 putPaddrHigh(dmabuf->phys); 15801 rc++; 15802 } 15803 page_idx += rc; 15804 15805 hrq->db_format = LPFC_DB_RING_FORMAT; 15806 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 15807 hrq->type = LPFC_HRQ; 15808 hrq->assoc_qid = cq->queue_id; 15809 hrq->subtype = subtype; 15810 hrq->host_index = 0; 15811 hrq->hba_index = 0; 15812 hrq->entry_repost = LPFC_RQ_REPOST; 15813 15814 drq->db_format = LPFC_DB_RING_FORMAT; 15815 drq->db_regaddr = phba->sli4_hba.RQDBregaddr; 15816 drq->type = LPFC_DRQ; 15817 drq->assoc_qid = cq->queue_id; 15818 drq->subtype = subtype; 15819 drq->host_index = 0; 15820 drq->hba_index = 0; 15821 drq->entry_repost = LPFC_RQ_REPOST; 15822 15823 list_add_tail(&hrq->list, &cq->child_list); 15824 list_add_tail(&drq->list, &cq->child_list); 15825 } 15826 15827 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15828 /* The IOCTL status is embedded in the mailbox subheader. */ 15829 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15830 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15831 if (shdr_status || shdr_add_status || rc) { 15832 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15833 "3120 RQ_CREATE mailbox failed with " 15834 "status x%x add_status x%x, mbx status x%x\n", 15835 shdr_status, shdr_add_status, rc); 15836 status = -ENXIO; 15837 goto out; 15838 } 15839 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 15840 if (rc == 0xFFFF) { 15841 status = -ENXIO; 15842 goto out; 15843 } 15844 15845 /* Initialize all RQs with associated queue id */ 15846 for (idx = 0; idx < numrq; idx++) { 15847 hrq = hrqp[idx]; 15848 hrq->queue_id = rc + (2 * idx); 15849 drq = drqp[idx]; 15850 drq->queue_id = rc + (2 * idx) + 1; 15851 } 15852 15853 out: 15854 lpfc_sli4_mbox_cmd_free(phba, mbox); 15855 return status; 15856 } 15857 15858 /** 15859 * lpfc_eq_destroy - Destroy an event Queue on the HBA 15860 * @eq: The queue structure associated with the queue to destroy. 15861 * 15862 * This function destroys a queue, as detailed in @eq by sending an mailbox 15863 * command, specific to the type of queue, to the HBA. 15864 * 15865 * The @eq struct is used to get the queue ID of the queue to destroy. 15866 * 15867 * On success this function will return a zero. If the queue destroy mailbox 15868 * command fails this function will return -ENXIO. 
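 *
 * Queues are normally torn down child-first, so a typical unset path (a
 * sketch with illustrative variable names) looks like:
 *
 *	lpfc_wq_destroy(phba, wq);
 *	lpfc_cq_destroy(phba, cq);
 *	lpfc_eq_destroy(phba, eq);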
15869 **/ 15870 int 15871 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) 15872 { 15873 LPFC_MBOXQ_t *mbox; 15874 int rc, length, status = 0; 15875 uint32_t shdr_status, shdr_add_status; 15876 union lpfc_sli4_cfg_shdr *shdr; 15877 15878 /* sanity check on queue memory */ 15879 if (!eq) 15880 return -ENODEV; 15881 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); 15882 if (!mbox) 15883 return -ENOMEM; 15884 length = (sizeof(struct lpfc_mbx_eq_destroy) - 15885 sizeof(struct lpfc_sli4_cfg_mhdr)); 15886 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 15887 LPFC_MBOX_OPCODE_EQ_DESTROY, 15888 length, LPFC_SLI4_MBX_EMBED); 15889 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, 15890 eq->queue_id); 15891 mbox->vport = eq->phba->pport; 15892 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 15893 15894 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); 15895 /* The IOCTL status is embedded in the mailbox subheader. */ 15896 shdr = (union lpfc_sli4_cfg_shdr *) 15897 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; 15898 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15899 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15900 if (shdr_status || shdr_add_status || rc) { 15901 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15902 "2505 EQ_DESTROY mailbox failed with " 15903 "status x%x add_status x%x, mbx status x%x\n", 15904 shdr_status, shdr_add_status, rc); 15905 status = -ENXIO; 15906 } 15907 15908 /* Remove eq from any list */ 15909 list_del_init(&eq->list); 15910 mempool_free(mbox, eq->phba->mbox_mem_pool); 15911 return status; 15912 } 15913 15914 /** 15915 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA 15916 * @cq: The queue structure associated with the queue to destroy. 15917 * 15918 * This function destroys a queue, as detailed in @cq by sending an mailbox 15919 * command, specific to the type of queue, to the HBA. 15920 * 15921 * The @cq struct is used to get the queue ID of the queue to destroy. 15922 * 15923 * On success this function will return a zero. If the queue destroy mailbox 15924 * command fails this function will return -ENXIO. 15925 **/ 15926 int 15927 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) 15928 { 15929 LPFC_MBOXQ_t *mbox; 15930 int rc, length, status = 0; 15931 uint32_t shdr_status, shdr_add_status; 15932 union lpfc_sli4_cfg_shdr *shdr; 15933 15934 /* sanity check on queue memory */ 15935 if (!cq) 15936 return -ENODEV; 15937 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); 15938 if (!mbox) 15939 return -ENOMEM; 15940 length = (sizeof(struct lpfc_mbx_cq_destroy) - 15941 sizeof(struct lpfc_sli4_cfg_mhdr)); 15942 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 15943 LPFC_MBOX_OPCODE_CQ_DESTROY, 15944 length, LPFC_SLI4_MBX_EMBED); 15945 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, 15946 cq->queue_id); 15947 mbox->vport = cq->phba->pport; 15948 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 15949 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); 15950 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 15951 shdr = (union lpfc_sli4_cfg_shdr *) 15952 &mbox->u.mqe.un.wq_create.header.cfg_shdr; 15953 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15954 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15955 if (shdr_status || shdr_add_status || rc) { 15956 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15957 "2506 CQ_DESTROY mailbox failed with " 15958 "status x%x add_status x%x, mbx status x%x\n", 15959 shdr_status, shdr_add_status, rc); 15960 status = -ENXIO; 15961 } 15962 /* Remove cq from any list */ 15963 list_del_init(&cq->list); 15964 mempool_free(mbox, cq->phba->mbox_mem_pool); 15965 return status; 15966 } 15967 15968 /** 15969 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA 15970 * @qm: The queue structure associated with the queue to destroy. 15971 * 15972 * This function destroys a queue, as detailed in @mq by sending an mailbox 15973 * command, specific to the type of queue, to the HBA. 15974 * 15975 * The @mq struct is used to get the queue ID of the queue to destroy. 15976 * 15977 * On success this function will return a zero. If the queue destroy mailbox 15978 * command fails this function will return -ENXIO. 15979 **/ 15980 int 15981 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) 15982 { 15983 LPFC_MBOXQ_t *mbox; 15984 int rc, length, status = 0; 15985 uint32_t shdr_status, shdr_add_status; 15986 union lpfc_sli4_cfg_shdr *shdr; 15987 15988 /* sanity check on queue memory */ 15989 if (!mq) 15990 return -ENODEV; 15991 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); 15992 if (!mbox) 15993 return -ENOMEM; 15994 length = (sizeof(struct lpfc_mbx_mq_destroy) - 15995 sizeof(struct lpfc_sli4_cfg_mhdr)); 15996 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 15997 LPFC_MBOX_OPCODE_MQ_DESTROY, 15998 length, LPFC_SLI4_MBX_EMBED); 15999 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, 16000 mq->queue_id); 16001 mbox->vport = mq->phba->pport; 16002 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16003 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL); 16004 /* The IOCTL status is embedded in the mailbox subheader. */ 16005 shdr = (union lpfc_sli4_cfg_shdr *) 16006 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; 16007 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16008 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16009 if (shdr_status || shdr_add_status || rc) { 16010 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16011 "2507 MQ_DESTROY mailbox failed with " 16012 "status x%x add_status x%x, mbx status x%x\n", 16013 shdr_status, shdr_add_status, rc); 16014 status = -ENXIO; 16015 } 16016 /* Remove mq from any list */ 16017 list_del_init(&mq->list); 16018 mempool_free(mbox, mq->phba->mbox_mem_pool); 16019 return status; 16020 } 16021 16022 /** 16023 * lpfc_wq_destroy - Destroy a Work Queue on the HBA 16024 * @wq: The queue structure associated with the queue to destroy. 16025 * 16026 * This function destroys a queue, as detailed in @wq by sending an mailbox 16027 * command, specific to the type of queue, to the HBA. 16028 * 16029 * The @wq struct is used to get the queue ID of the queue to destroy. 16030 * 16031 * On success this function will return a zero. If the queue destroy mailbox 16032 * command fails this function will return -ENXIO. 
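 *
 * Note: in addition to issuing WQ_DESTROY, this routine frees the SLI ring
 * structure (wq->pring) that lpfc_wq_create() allocated for the queue.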
16033 **/ 16034 int 16035 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) 16036 { 16037 LPFC_MBOXQ_t *mbox; 16038 int rc, length, status = 0; 16039 uint32_t shdr_status, shdr_add_status; 16040 union lpfc_sli4_cfg_shdr *shdr; 16041 16042 /* sanity check on queue memory */ 16043 if (!wq) 16044 return -ENODEV; 16045 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); 16046 if (!mbox) 16047 return -ENOMEM; 16048 length = (sizeof(struct lpfc_mbx_wq_destroy) - 16049 sizeof(struct lpfc_sli4_cfg_mhdr)); 16050 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16051 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, 16052 length, LPFC_SLI4_MBX_EMBED); 16053 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, 16054 wq->queue_id); 16055 mbox->vport = wq->phba->pport; 16056 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16057 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); 16058 shdr = (union lpfc_sli4_cfg_shdr *) 16059 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; 16060 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16061 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16062 if (shdr_status || shdr_add_status || rc) { 16063 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16064 "2508 WQ_DESTROY mailbox failed with " 16065 "status x%x add_status x%x, mbx status x%x\n", 16066 shdr_status, shdr_add_status, rc); 16067 status = -ENXIO; 16068 } 16069 /* Remove wq from any list */ 16070 list_del_init(&wq->list); 16071 kfree(wq->pring); 16072 wq->pring = NULL; 16073 mempool_free(mbox, wq->phba->mbox_mem_pool); 16074 return status; 16075 } 16076 16077 /** 16078 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA 16079 * @rq: The queue structure associated with the queue to destroy. 16080 * 16081 * This function destroys a queue, as detailed in @rq by sending an mailbox 16082 * command, specific to the type of queue, to the HBA. 16083 * 16084 * The @rq struct is used to get the queue ID of the queue to destroy. 16085 * 16086 * On success this function will return a zero. If the queue destroy mailbox 16087 * command fails this function will return -ENXIO. 16088 **/ 16089 int 16090 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, 16091 struct lpfc_queue *drq) 16092 { 16093 LPFC_MBOXQ_t *mbox; 16094 int rc, length, status = 0; 16095 uint32_t shdr_status, shdr_add_status; 16096 union lpfc_sli4_cfg_shdr *shdr; 16097 16098 /* sanity check on queue memory */ 16099 if (!hrq || !drq) 16100 return -ENODEV; 16101 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); 16102 if (!mbox) 16103 return -ENOMEM; 16104 length = (sizeof(struct lpfc_mbx_rq_destroy) - 16105 sizeof(struct lpfc_sli4_cfg_mhdr)); 16106 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16107 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, 16108 length, LPFC_SLI4_MBX_EMBED); 16109 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 16110 hrq->queue_id); 16111 mbox->vport = hrq->phba->pport; 16112 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16113 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); 16114 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 16115 shdr = (union lpfc_sli4_cfg_shdr *) 16116 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 16117 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16118 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16119 if (shdr_status || shdr_add_status || rc) { 16120 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16121 "2509 RQ_DESTROY mailbox failed with " 16122 "status x%x add_status x%x, mbx status x%x\n", 16123 shdr_status, shdr_add_status, rc); 16124 if (rc != MBX_TIMEOUT) 16125 mempool_free(mbox, hrq->phba->mbox_mem_pool); 16126 return -ENXIO; 16127 } 16128 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 16129 drq->queue_id); 16130 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL); 16131 shdr = (union lpfc_sli4_cfg_shdr *) 16132 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 16133 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16134 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16135 if (shdr_status || shdr_add_status || rc) { 16136 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16137 "2510 RQ_DESTROY mailbox failed with " 16138 "status x%x add_status x%x, mbx status x%x\n", 16139 shdr_status, shdr_add_status, rc); 16140 status = -ENXIO; 16141 } 16142 list_del_init(&hrq->list); 16143 list_del_init(&drq->list); 16144 mempool_free(mbox, hrq->phba->mbox_mem_pool); 16145 return status; 16146 } 16147 16148 /** 16149 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA 16150 * @phba: The virtual port for which this call being executed. 16151 * @pdma_phys_addr0: Physical address of the 1st SGL page. 16152 * @pdma_phys_addr1: Physical address of the 2nd SGL page. 16153 * @xritag: the xritag that ties this io to the SGL pages. 16154 * 16155 * This routine will post the sgl pages for the IO that has the xritag 16156 * that is in the iocbq structure. The xritag is assigned during iocbq 16157 * creation and persists for as long as the driver is loaded. 16158 * if the caller has fewer than 256 scatter gather segments to map then 16159 * pdma_phys_addr1 should be 0. 16160 * If the caller needs to map more than 256 scatter gather segment then 16161 * pdma_phys_addr1 should be a valid physical address. 16162 * physical address for SGLs must be 64 byte aligned. 16163 * If you are going to map 2 SGL's then the first one must have 256 entries 16164 * the second sgl can have between 1 and 256 entries. 
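 *
 * Example (a sketch only; sglq is a hypothetical struct lpfc_sglq pointer and
 * sgl_phys0 its DMA-mapped page address):
 *
 *	rc = lpfc_sli4_post_sgl(phba, sgl_phys0, 0, sglq->sli4_xritag);
 *
 * posts a single-page SGL; a non-zero second address is needed only when more
 * than 256 scatter gather entries must be mapped.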
16165 * 16166 * Return codes: 16167 * 0 - Success 16168 * -ENXIO, -ENOMEM - Failure 16169 **/ 16170 int 16171 lpfc_sli4_post_sgl(struct lpfc_hba *phba, 16172 dma_addr_t pdma_phys_addr0, 16173 dma_addr_t pdma_phys_addr1, 16174 uint16_t xritag) 16175 { 16176 struct lpfc_mbx_post_sgl_pages *post_sgl_pages; 16177 LPFC_MBOXQ_t *mbox; 16178 int rc; 16179 uint32_t shdr_status, shdr_add_status; 16180 uint32_t mbox_tmo; 16181 union lpfc_sli4_cfg_shdr *shdr; 16182 16183 if (xritag == NO_XRI) { 16184 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16185 "0364 Invalid param:\n"); 16186 return -EINVAL; 16187 } 16188 16189 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16190 if (!mbox) 16191 return -ENOMEM; 16192 16193 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16194 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 16195 sizeof(struct lpfc_mbx_post_sgl_pages) - 16196 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); 16197 16198 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *) 16199 &mbox->u.mqe.un.post_sgl_pages; 16200 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag); 16201 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1); 16202 16203 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo = 16204 cpu_to_le32(putPaddrLow(pdma_phys_addr0)); 16205 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi = 16206 cpu_to_le32(putPaddrHigh(pdma_phys_addr0)); 16207 16208 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo = 16209 cpu_to_le32(putPaddrLow(pdma_phys_addr1)); 16210 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi = 16211 cpu_to_le32(putPaddrHigh(pdma_phys_addr1)); 16212 if (!phba->sli4_hba.intr_enable) 16213 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16214 else { 16215 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16216 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16217 } 16218 /* The IOCTL status is embedded in the mailbox subheader. */ 16219 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr; 16220 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16221 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16222 if (rc != MBX_TIMEOUT) 16223 mempool_free(mbox, phba->mbox_mem_pool); 16224 if (shdr_status || shdr_add_status || rc) { 16225 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16226 "2511 POST_SGL mailbox failed with " 16227 "status x%x add_status x%x, mbx status x%x\n", 16228 shdr_status, shdr_add_status, rc); 16229 } 16230 return 0; 16231 } 16232 16233 /** 16234 * lpfc_sli4_alloc_xri - Get an available rpi in the device's range 16235 * @phba: pointer to lpfc hba data structure. 16236 * 16237 * This routine is invoked to post rpi header templates to the 16238 * HBA consistent with the SLI-4 interface spec. This routine 16239 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 16240 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 16241 * 16242 * Returns 16243 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 16244 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 16245 **/ 16246 static uint16_t 16247 lpfc_sli4_alloc_xri(struct lpfc_hba *phba) 16248 { 16249 unsigned long xri; 16250 16251 /* 16252 * Fetch the next logical xri. Because this index is logical, 16253 * the driver starts at 0 each time. 
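	 * The bit found here is only the driver's logical slot; callers that
	 * need the physical XRI programmed into the port look it up through
	 * phba->sli4_hba.xri_ids[].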
16254 */ 16255 spin_lock_irq(&phba->hbalock); 16256 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask, 16257 phba->sli4_hba.max_cfg_param.max_xri, 0); 16258 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) { 16259 spin_unlock_irq(&phba->hbalock); 16260 return NO_XRI; 16261 } else { 16262 set_bit(xri, phba->sli4_hba.xri_bmask); 16263 phba->sli4_hba.max_cfg_param.xri_used++; 16264 } 16265 spin_unlock_irq(&phba->hbalock); 16266 return xri; 16267 } 16268 16269 /** 16270 * lpfc_sli4_free_xri - Release an xri for reuse. 16271 * @phba: pointer to lpfc hba data structure. 16272 * 16273 * This routine is invoked to release an xri to the pool of 16274 * available rpis maintained by the driver. 16275 **/ 16276 static void 16277 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) 16278 { 16279 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) { 16280 phba->sli4_hba.max_cfg_param.xri_used--; 16281 } 16282 } 16283 16284 /** 16285 * lpfc_sli4_free_xri - Release an xri for reuse. 16286 * @phba: pointer to lpfc hba data structure. 16287 * 16288 * This routine is invoked to release an xri to the pool of 16289 * available rpis maintained by the driver. 16290 **/ 16291 void 16292 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) 16293 { 16294 spin_lock_irq(&phba->hbalock); 16295 __lpfc_sli4_free_xri(phba, xri); 16296 spin_unlock_irq(&phba->hbalock); 16297 } 16298 16299 /** 16300 * lpfc_sli4_next_xritag - Get an xritag for the io 16301 * @phba: Pointer to HBA context object. 16302 * 16303 * This function gets an xritag for the iocb. If there is no unused xritag 16304 * it will return 0xffff. 16305 * The function returns the allocated xritag if successful, else returns zero. 16306 * Zero is not a valid xritag. 16307 * The caller is not required to hold any lock. 16308 **/ 16309 uint16_t 16310 lpfc_sli4_next_xritag(struct lpfc_hba *phba) 16311 { 16312 uint16_t xri_index; 16313 16314 xri_index = lpfc_sli4_alloc_xri(phba); 16315 if (xri_index == NO_XRI) 16316 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 16317 "2004 Failed to allocate XRI.last XRITAG is %d" 16318 " Max XRI is %d, Used XRI is %d\n", 16319 xri_index, 16320 phba->sli4_hba.max_cfg_param.max_xri, 16321 phba->sli4_hba.max_cfg_param.xri_used); 16322 return xri_index; 16323 } 16324 16325 /** 16326 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port. 16327 * @phba: pointer to lpfc hba data structure. 16328 * @post_sgl_list: pointer to els sgl entry list. 16329 * @count: number of els sgl entries on the list. 16330 * 16331 * This routine is invoked to post a block of driver's sgl pages to the 16332 * HBA using non-embedded mailbox command. No Lock is held. This routine 16333 * is only called when the driver is loading and after all IO has been 16334 * stopped. 
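 *
 * Because the non-embedded payload must fit within a single SLI4 page, the
 * reqlen check below caps how many sgl entries one invocation can post;
 * callers with longer lists are expected to post them in batches.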
16335 **/ 16336 static int 16337 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba, 16338 struct list_head *post_sgl_list, 16339 int post_cnt) 16340 { 16341 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 16342 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 16343 struct sgl_page_pairs *sgl_pg_pairs; 16344 void *viraddr; 16345 LPFC_MBOXQ_t *mbox; 16346 uint32_t reqlen, alloclen, pg_pairs; 16347 uint32_t mbox_tmo; 16348 uint16_t xritag_start = 0; 16349 int rc = 0; 16350 uint32_t shdr_status, shdr_add_status; 16351 union lpfc_sli4_cfg_shdr *shdr; 16352 16353 reqlen = post_cnt * sizeof(struct sgl_page_pairs) + 16354 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 16355 if (reqlen > SLI4_PAGE_SIZE) { 16356 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16357 "2559 Block sgl registration required DMA " 16358 "size (%d) great than a page\n", reqlen); 16359 return -ENOMEM; 16360 } 16361 16362 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16363 if (!mbox) 16364 return -ENOMEM; 16365 16366 /* Allocate DMA memory and set up the non-embedded mailbox command */ 16367 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16368 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 16369 LPFC_SLI4_MBX_NEMBED); 16370 16371 if (alloclen < reqlen) { 16372 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16373 "0285 Allocated DMA memory size (%d) is " 16374 "less than the requested DMA memory " 16375 "size (%d)\n", alloclen, reqlen); 16376 lpfc_sli4_mbox_cmd_free(phba, mbox); 16377 return -ENOMEM; 16378 } 16379 /* Set up the SGL pages in the non-embedded DMA pages */ 16380 viraddr = mbox->sge_array->addr[0]; 16381 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 16382 sgl_pg_pairs = &sgl->sgl_pg_pairs; 16383 16384 pg_pairs = 0; 16385 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) { 16386 /* Set up the sge entry */ 16387 sgl_pg_pairs->sgl_pg0_addr_lo = 16388 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 16389 sgl_pg_pairs->sgl_pg0_addr_hi = 16390 cpu_to_le32(putPaddrHigh(sglq_entry->phys)); 16391 sgl_pg_pairs->sgl_pg1_addr_lo = 16392 cpu_to_le32(putPaddrLow(0)); 16393 sgl_pg_pairs->sgl_pg1_addr_hi = 16394 cpu_to_le32(putPaddrHigh(0)); 16395 16396 /* Keep the first xritag on the list */ 16397 if (pg_pairs == 0) 16398 xritag_start = sglq_entry->sli4_xritag; 16399 sgl_pg_pairs++; 16400 pg_pairs++; 16401 } 16402 16403 /* Complete initialization and perform endian conversion. */ 16404 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 16405 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt); 16406 sgl->word0 = cpu_to_le32(sgl->word0); 16407 16408 if (!phba->sli4_hba.intr_enable) 16409 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16410 else { 16411 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16412 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16413 } 16414 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 16415 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16416 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16417 if (rc != MBX_TIMEOUT) 16418 lpfc_sli4_mbox_cmd_free(phba, mbox); 16419 if (shdr_status || shdr_add_status || rc) { 16420 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16421 "2513 POST_SGL_BLOCK mailbox command failed " 16422 "status x%x add_status x%x mbx status x%x\n", 16423 shdr_status, shdr_add_status, rc); 16424 rc = -ENXIO; 16425 } 16426 return rc; 16427 } 16428 16429 /** 16430 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware 16431 * @phba: pointer to lpfc hba data structure. 
16432 * @sblist: pointer to scsi buffer list. 16433 * @count: number of scsi buffers on the list. 16434 * 16435 * This routine is invoked to post a block of @count scsi sgl pages from a 16436 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command. 16437 * No Lock is held. 16438 * 16439 **/ 16440 int 16441 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, 16442 struct list_head *sblist, 16443 int count) 16444 { 16445 struct lpfc_scsi_buf *psb; 16446 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 16447 struct sgl_page_pairs *sgl_pg_pairs; 16448 void *viraddr; 16449 LPFC_MBOXQ_t *mbox; 16450 uint32_t reqlen, alloclen, pg_pairs; 16451 uint32_t mbox_tmo; 16452 uint16_t xritag_start = 0; 16453 int rc = 0; 16454 uint32_t shdr_status, shdr_add_status; 16455 dma_addr_t pdma_phys_bpl1; 16456 union lpfc_sli4_cfg_shdr *shdr; 16457 16458 /* Calculate the requested length of the dma memory */ 16459 reqlen = count * sizeof(struct sgl_page_pairs) + 16460 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 16461 if (reqlen > SLI4_PAGE_SIZE) { 16462 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 16463 "0217 Block sgl registration required DMA " 16464 "size (%d) great than a page\n", reqlen); 16465 return -ENOMEM; 16466 } 16467 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16468 if (!mbox) { 16469 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16470 "0283 Failed to allocate mbox cmd memory\n"); 16471 return -ENOMEM; 16472 } 16473 16474 /* Allocate DMA memory and set up the non-embedded mailbox command */ 16475 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16476 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 16477 LPFC_SLI4_MBX_NEMBED); 16478 16479 if (alloclen < reqlen) { 16480 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16481 "2561 Allocated DMA memory size (%d) is " 16482 "less than the requested DMA memory " 16483 "size (%d)\n", alloclen, reqlen); 16484 lpfc_sli4_mbox_cmd_free(phba, mbox); 16485 return -ENOMEM; 16486 } 16487 16488 /* Get the first SGE entry from the non-embedded DMA memory */ 16489 viraddr = mbox->sge_array->addr[0]; 16490 16491 /* Set up the SGL pages in the non-embedded DMA pages */ 16492 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 16493 sgl_pg_pairs = &sgl->sgl_pg_pairs; 16494 16495 pg_pairs = 0; 16496 list_for_each_entry(psb, sblist, list) { 16497 /* Set up the sge entry */ 16498 sgl_pg_pairs->sgl_pg0_addr_lo = 16499 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl)); 16500 sgl_pg_pairs->sgl_pg0_addr_hi = 16501 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl)); 16502 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 16503 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE; 16504 else 16505 pdma_phys_bpl1 = 0; 16506 sgl_pg_pairs->sgl_pg1_addr_lo = 16507 cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 16508 sgl_pg_pairs->sgl_pg1_addr_hi = 16509 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 16510 /* Keep the first xritag on the list */ 16511 if (pg_pairs == 0) 16512 xritag_start = psb->cur_iocbq.sli4_xritag; 16513 sgl_pg_pairs++; 16514 pg_pairs++; 16515 } 16516 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 16517 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 16518 /* Perform endian conversion if necessary */ 16519 sgl->word0 = cpu_to_le32(sgl->word0); 16520 16521 if (!phba->sli4_hba.intr_enable) 16522 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16523 else { 16524 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16525 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16526 } 16527 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 16528 shdr_status = 
bf_get(lpfc_mbox_hdr_status, &shdr->response); 16529 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16530 if (rc != MBX_TIMEOUT) 16531 lpfc_sli4_mbox_cmd_free(phba, mbox); 16532 if (shdr_status || shdr_add_status || rc) { 16533 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16534 "2564 POST_SGL_BLOCK mailbox command failed " 16535 "status x%x add_status x%x mbx status x%x\n", 16536 shdr_status, shdr_add_status, rc); 16537 rc = -ENXIO; 16538 } 16539 return rc; 16540 } 16541 16542 /** 16543 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 16544 * @phba: pointer to lpfc_hba struct that the frame was received on 16545 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 16546 * 16547 * This function checks the fields in the @fc_hdr to see if the FC frame is a 16548 * valid type of frame that the LPFC driver will handle. This function will 16549 * return a zero if the frame is a valid frame or a non zero value when the 16550 * frame does not pass the check. 16551 **/ 16552 static int 16553 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) 16554 { 16555 /* make rctl_names static to save stack space */ 16556 struct fc_vft_header *fc_vft_hdr; 16557 uint32_t *header = (uint32_t *) fc_hdr; 16558 16559 #define FC_RCTL_MDS_DIAGS 0xF4 16560 16561 switch (fc_hdr->fh_r_ctl) { 16562 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 16563 case FC_RCTL_DD_SOL_DATA: /* solicited data */ 16564 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ 16565 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ 16566 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ 16567 case FC_RCTL_DD_DATA_DESC: /* data descriptor */ 16568 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ 16569 case FC_RCTL_DD_CMD_STATUS: /* command status */ 16570 case FC_RCTL_ELS_REQ: /* extended link services request */ 16571 case FC_RCTL_ELS_REP: /* extended link services reply */ 16572 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ 16573 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ 16574 case FC_RCTL_BA_NOP: /* basic link service NOP */ 16575 case FC_RCTL_BA_ABTS: /* basic link service abort */ 16576 case FC_RCTL_BA_RMC: /* remove connection */ 16577 case FC_RCTL_BA_ACC: /* basic accept */ 16578 case FC_RCTL_BA_RJT: /* basic reject */ 16579 case FC_RCTL_BA_PRMT: 16580 case FC_RCTL_ACK_1: /* acknowledge_1 */ 16581 case FC_RCTL_ACK_0: /* acknowledge_0 */ 16582 case FC_RCTL_P_RJT: /* port reject */ 16583 case FC_RCTL_F_RJT: /* fabric reject */ 16584 case FC_RCTL_P_BSY: /* port busy */ 16585 case FC_RCTL_F_BSY: /* fabric busy to data frame */ 16586 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ 16587 case FC_RCTL_LCR: /* link credit reset */ 16588 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */ 16589 case FC_RCTL_END: /* end */ 16590 break; 16591 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ 16592 fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 16593 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; 16594 return lpfc_fc_frame_check(phba, fc_hdr); 16595 default: 16596 goto drop; 16597 } 16598 16599 #define FC_TYPE_VENDOR_UNIQUE 0xFF 16600 16601 switch (fc_hdr->fh_type) { 16602 case FC_TYPE_BLS: 16603 case FC_TYPE_ELS: 16604 case FC_TYPE_FCP: 16605 case FC_TYPE_CT: 16606 case FC_TYPE_NVME: 16607 case FC_TYPE_VENDOR_UNIQUE: 16608 break; 16609 case FC_TYPE_IP: 16610 case FC_TYPE_ILS: 16611 default: 16612 goto drop; 16613 } 16614 16615 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 16616 "2538 Received frame rctl:x%x, type:x%x, " 16617 "frame 
Data:%08x %08x %08x %08x %08x %08x %08x\n", 16618 fc_hdr->fh_r_ctl, fc_hdr->fh_type, 16619 be32_to_cpu(header[0]), be32_to_cpu(header[1]), 16620 be32_to_cpu(header[2]), be32_to_cpu(header[3]), 16621 be32_to_cpu(header[4]), be32_to_cpu(header[5]), 16622 be32_to_cpu(header[6])); 16623 return 0; 16624 drop: 16625 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 16626 "2539 Dropped frame rctl:x%x type:x%x\n", 16627 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 16628 return 1; 16629 } 16630 16631 /** 16632 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame 16633 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 16634 * 16635 * This function processes the FC header to retrieve the VFI from the VF 16636 * header, if one exists. This function will return the VFI if one exists 16637 * or 0 if no VSAN Header exists. 16638 **/ 16639 static uint32_t 16640 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) 16641 { 16642 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 16643 16644 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) 16645 return 0; 16646 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); 16647 } 16648 16649 /** 16650 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to 16651 * @phba: Pointer to the HBA structure to search for the vport on 16652 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 16653 * @fcfi: The FC Fabric ID that the frame came from 16654 * 16655 * This function searches the @phba for a vport that matches the content of the 16656 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the 16657 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function 16658 * returns the matching vport pointer or NULL if unable to match frame to a 16659 * vport. 16660 **/ 16661 static struct lpfc_vport * 16662 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, 16663 uint16_t fcfi, uint32_t did) 16664 { 16665 struct lpfc_vport **vports; 16666 struct lpfc_vport *vport = NULL; 16667 int i; 16668 16669 if (did == Fabric_DID) 16670 return phba->pport; 16671 if ((phba->pport->fc_flag & FC_PT2PT) && 16672 !(phba->link_state == LPFC_HBA_READY)) 16673 return phba->pport; 16674 16675 vports = lpfc_create_vport_work_array(phba); 16676 if (vports != NULL) { 16677 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 16678 if (phba->fcf.fcfi == fcfi && 16679 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && 16680 vports[i]->fc_myDID == did) { 16681 vport = vports[i]; 16682 break; 16683 } 16684 } 16685 } 16686 lpfc_destroy_vport_work_array(phba, vports); 16687 return vport; 16688 } 16689 16690 /** 16691 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp 16692 * @vport: The vport to work on. 16693 * 16694 * This function updates the receive sequence time stamp for this vport. The 16695 * receive sequence time stamp indicates the time that the last frame of the 16696 * the sequence that has been idle for the longest amount of time was received. 16697 * the driver uses this time stamp to indicate if any received sequences have 16698 * timed out. 
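 *
 * The head of vport->rcv_buffer_list is always the least recently touched
 * sequence, because lpfc_fc_frame_add() moves active sequences to the tail,
 * so only the first entry needs to be examined here.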
16699 **/ 16700 static void 16701 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) 16702 { 16703 struct lpfc_dmabuf *h_buf; 16704 struct hbq_dmabuf *dmabuf = NULL; 16705 16706 /* get the oldest sequence on the rcv list */ 16707 h_buf = list_get_first(&vport->rcv_buffer_list, 16708 struct lpfc_dmabuf, list); 16709 if (!h_buf) 16710 return; 16711 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 16712 vport->rcv_buffer_time_stamp = dmabuf->time_stamp; 16713 } 16714 16715 /** 16716 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. 16717 * @vport: The vport that the received sequences were sent to. 16718 * 16719 * This function cleans up all outstanding received sequences. This is called 16720 * by the driver when a link event or user action invalidates all the received 16721 * sequences. 16722 **/ 16723 void 16724 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) 16725 { 16726 struct lpfc_dmabuf *h_buf, *hnext; 16727 struct lpfc_dmabuf *d_buf, *dnext; 16728 struct hbq_dmabuf *dmabuf = NULL; 16729 16730 /* start with the oldest sequence on the rcv list */ 16731 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 16732 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 16733 list_del_init(&dmabuf->hbuf.list); 16734 list_for_each_entry_safe(d_buf, dnext, 16735 &dmabuf->dbuf.list, list) { 16736 list_del_init(&d_buf->list); 16737 lpfc_in_buf_free(vport->phba, d_buf); 16738 } 16739 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 16740 } 16741 } 16742 16743 /** 16744 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. 16745 * @vport: The vport that the received sequences were sent to. 16746 * 16747 * This function determines whether any received sequences have timed out by 16748 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp 16749 * indicates that there is at least one timed out sequence this routine will 16750 * go through the received sequences one at a time from most inactive to most 16751 * active to determine which ones need to be cleaned up. Once it has determined 16752 * that a sequence needs to be cleaned up it will simply free up the resources 16753 * without sending an abort. 
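 *
 * The idle limit is the port's E_D_TOV (vport->phba->fc_edtov, in
 * milliseconds) added to each sequence's time_stamp; with the usual E_D_TOV
 * of roughly two seconds, a sequence untouched for longer than that is freed.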
16754 **/ 16755 void 16756 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) 16757 { 16758 struct lpfc_dmabuf *h_buf, *hnext; 16759 struct lpfc_dmabuf *d_buf, *dnext; 16760 struct hbq_dmabuf *dmabuf = NULL; 16761 unsigned long timeout; 16762 int abort_count = 0; 16763 16764 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 16765 vport->rcv_buffer_time_stamp); 16766 if (list_empty(&vport->rcv_buffer_list) || 16767 time_before(jiffies, timeout)) 16768 return; 16769 /* start with the oldest sequence on the rcv list */ 16770 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 16771 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 16772 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 16773 dmabuf->time_stamp); 16774 if (time_before(jiffies, timeout)) 16775 break; 16776 abort_count++; 16777 list_del_init(&dmabuf->hbuf.list); 16778 list_for_each_entry_safe(d_buf, dnext, 16779 &dmabuf->dbuf.list, list) { 16780 list_del_init(&d_buf->list); 16781 lpfc_in_buf_free(vport->phba, d_buf); 16782 } 16783 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 16784 } 16785 if (abort_count) 16786 lpfc_update_rcv_time_stamp(vport); 16787 } 16788 16789 /** 16790 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences 16791 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame 16792 * 16793 * This function searches through the existing incomplete sequences that have 16794 * been sent to this @vport. If the frame matches one of the incomplete 16795 * sequences then the dbuf in the @dmabuf is added to the list of frames that 16796 * make up that sequence. If no sequence is found that matches this frame then 16797 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list 16798 * This function returns a pointer to the first dmabuf in the sequence list that 16799 * the frame was linked to. 16800 **/ 16801 static struct hbq_dmabuf * 16802 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 16803 { 16804 struct fc_frame_header *new_hdr; 16805 struct fc_frame_header *temp_hdr; 16806 struct lpfc_dmabuf *d_buf; 16807 struct lpfc_dmabuf *h_buf; 16808 struct hbq_dmabuf *seq_dmabuf = NULL; 16809 struct hbq_dmabuf *temp_dmabuf = NULL; 16810 uint8_t found = 0; 16811 16812 INIT_LIST_HEAD(&dmabuf->dbuf.list); 16813 dmabuf->time_stamp = jiffies; 16814 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 16815 16816 /* Use the hdr_buf to find the sequence that this frame belongs to */ 16817 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 16818 temp_hdr = (struct fc_frame_header *)h_buf->virt; 16819 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 16820 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 16821 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 16822 continue; 16823 /* found a pending sequence that matches this frame */ 16824 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 16825 break; 16826 } 16827 if (!seq_dmabuf) { 16828 /* 16829 * This indicates first frame received for this sequence. 16830 * Queue the buffer on the vport's rcv_buffer_list. 
16831 */ 16832 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 16833 lpfc_update_rcv_time_stamp(vport); 16834 return dmabuf; 16835 } 16836 temp_hdr = seq_dmabuf->hbuf.virt; 16837 if (be16_to_cpu(new_hdr->fh_seq_cnt) < 16838 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 16839 list_del_init(&seq_dmabuf->hbuf.list); 16840 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 16841 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 16842 lpfc_update_rcv_time_stamp(vport); 16843 return dmabuf; 16844 } 16845 /* move this sequence to the tail to indicate a young sequence */ 16846 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list); 16847 seq_dmabuf->time_stamp = jiffies; 16848 lpfc_update_rcv_time_stamp(vport); 16849 if (list_empty(&seq_dmabuf->dbuf.list)) { 16850 temp_hdr = dmabuf->hbuf.virt; 16851 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 16852 return seq_dmabuf; 16853 } 16854 /* find the correct place in the sequence to insert this frame */ 16855 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list); 16856 while (!found) { 16857 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 16858 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt; 16859 /* 16860 * If the frame's sequence count is greater than the frame on 16861 * the list then insert the frame right after this frame 16862 */ 16863 if (be16_to_cpu(new_hdr->fh_seq_cnt) > 16864 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 16865 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); 16866 found = 1; 16867 break; 16868 } 16869 16870 if (&d_buf->list == &seq_dmabuf->dbuf.list) 16871 break; 16872 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list); 16873 } 16874 16875 if (found) 16876 return seq_dmabuf; 16877 return NULL; 16878 } 16879 16880 /** 16881 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence 16882 * @vport: pointer to a vitural port 16883 * @dmabuf: pointer to a dmabuf that describes the FC sequence 16884 * 16885 * This function tries to abort from the partially assembed sequence, described 16886 * by the information from basic abbort @dmabuf. It checks to see whether such 16887 * partially assembled sequence held by the driver. If so, it shall free up all 16888 * the frames from the partially assembled sequence. 
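 *
 * The lookup key is the same one lpfc_fc_frame_add() uses to assemble a
 * sequence: the frame's SEQ_ID, OX_ID and S_ID must all match a header buffer
 * already queued on the vport's rcv_buffer_list.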
16889 * 16890 * Return 16891 * true -- if there is matching partially assembled sequence present and all 16892 * the frames freed with the sequence; 16893 * false -- if there is no matching partially assembled sequence present so 16894 * nothing got aborted in the lower layer driver 16895 **/ 16896 static bool 16897 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, 16898 struct hbq_dmabuf *dmabuf) 16899 { 16900 struct fc_frame_header *new_hdr; 16901 struct fc_frame_header *temp_hdr; 16902 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf; 16903 struct hbq_dmabuf *seq_dmabuf = NULL; 16904 16905 /* Use the hdr_buf to find the sequence that matches this frame */ 16906 INIT_LIST_HEAD(&dmabuf->dbuf.list); 16907 INIT_LIST_HEAD(&dmabuf->hbuf.list); 16908 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 16909 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 16910 temp_hdr = (struct fc_frame_header *)h_buf->virt; 16911 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 16912 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 16913 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 16914 continue; 16915 /* found a pending sequence that matches this frame */ 16916 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 16917 break; 16918 } 16919 16920 /* Free up all the frames from the partially assembled sequence */ 16921 if (seq_dmabuf) { 16922 list_for_each_entry_safe(d_buf, n_buf, 16923 &seq_dmabuf->dbuf.list, list) { 16924 list_del_init(&d_buf->list); 16925 lpfc_in_buf_free(vport->phba, d_buf); 16926 } 16927 return true; 16928 } 16929 return false; 16930 } 16931 16932 /** 16933 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp 16934 * @vport: pointer to a vitural port 16935 * @dmabuf: pointer to a dmabuf that describes the FC sequence 16936 * 16937 * This function tries to abort from the assembed sequence from upper level 16938 * protocol, described by the information from basic abbort @dmabuf. It 16939 * checks to see whether such pending context exists at upper level protocol. 16940 * If so, it shall clean up the pending context. 16941 * 16942 * Return 16943 * true -- if there is matching pending context of the sequence cleaned 16944 * at ulp; 16945 * false -- if there is no matching pending context of the sequence present 16946 * at ulp. 16947 **/ 16948 static bool 16949 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 16950 { 16951 struct lpfc_hba *phba = vport->phba; 16952 int handled; 16953 16954 /* Accepting abort at ulp with SLI4 only */ 16955 if (phba->sli_rev < LPFC_SLI_REV4) 16956 return false; 16957 16958 /* Register all caring upper level protocols to attend abort */ 16959 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf); 16960 if (handled) 16961 return true; 16962 16963 return false; 16964 } 16965 16966 /** 16967 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler 16968 * @phba: Pointer to HBA context object. 16969 * @cmd_iocbq: pointer to the command iocbq structure. 16970 * @rsp_iocbq: pointer to the response iocbq structure. 16971 * 16972 * This function handles the sequence abort response iocb command complete 16973 * event. It properly releases the memory allocated to the sequence abort 16974 * accept iocb. 
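 *
 * This completion is not called directly; it is installed on the BLS
 * response iocb before that iocb is issued, roughly as done later in
 * lpfc_sli4_seq_abort_rsp():
 *
 *	ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
 *	lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);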
16975 **/ 16976 static void 16977 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, 16978 struct lpfc_iocbq *cmd_iocbq, 16979 struct lpfc_iocbq *rsp_iocbq) 16980 { 16981 struct lpfc_nodelist *ndlp; 16982 16983 if (cmd_iocbq) { 16984 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1; 16985 lpfc_nlp_put(ndlp); 16986 lpfc_nlp_not_used(ndlp); 16987 lpfc_sli_release_iocbq(phba, cmd_iocbq); 16988 } 16989 16990 /* Failure means BLS ABORT RSP did not get delivered to remote node*/ 16991 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus) 16992 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16993 "3154 BLS ABORT RSP failed, data: x%x/x%x\n", 16994 rsp_iocbq->iocb.ulpStatus, 16995 rsp_iocbq->iocb.un.ulpWord[4]); 16996 } 16997 16998 /** 16999 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver. 17000 * @phba: Pointer to HBA context object. 17001 * @xri: xri id in transaction. 17002 * 17003 * This function validates the xri maps to the known range of XRIs allocated an 17004 * used by the driver. 17005 **/ 17006 uint16_t 17007 lpfc_sli4_xri_inrange(struct lpfc_hba *phba, 17008 uint16_t xri) 17009 { 17010 uint16_t i; 17011 17012 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) { 17013 if (xri == phba->sli4_hba.xri_ids[i]) 17014 return i; 17015 } 17016 return NO_XRI; 17017 } 17018 17019 /** 17020 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort 17021 * @phba: Pointer to HBA context object. 17022 * @fc_hdr: pointer to a FC frame header. 17023 * 17024 * This function sends a basic response to a previous unsol sequence abort 17025 * event after aborting the sequence handling. 17026 **/ 17027 void 17028 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, 17029 struct fc_frame_header *fc_hdr, bool aborted) 17030 { 17031 struct lpfc_hba *phba = vport->phba; 17032 struct lpfc_iocbq *ctiocb = NULL; 17033 struct lpfc_nodelist *ndlp; 17034 uint16_t oxid, rxid, xri, lxri; 17035 uint32_t sid, fctl; 17036 IOCB_t *icmd; 17037 int rc; 17038 17039 if (!lpfc_is_link_up(phba)) 17040 return; 17041 17042 sid = sli4_sid_from_fc_hdr(fc_hdr); 17043 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 17044 rxid = be16_to_cpu(fc_hdr->fh_rx_id); 17045 17046 ndlp = lpfc_findnode_did(vport, sid); 17047 if (!ndlp) { 17048 ndlp = lpfc_nlp_init(vport, sid); 17049 if (!ndlp) { 17050 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 17051 "1268 Failed to allocate ndlp for " 17052 "oxid:x%x SID:x%x\n", oxid, sid); 17053 return; 17054 } 17055 /* Put ndlp onto pport node list */ 17056 lpfc_enqueue_node(vport, ndlp); 17057 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 17058 /* re-setup ndlp without removing from node list */ 17059 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 17060 if (!ndlp) { 17061 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 17062 "3275 Failed to active ndlp found " 17063 "for oxid:x%x SID:x%x\n", oxid, sid); 17064 return; 17065 } 17066 } 17067 17068 /* Allocate buffer for rsp iocb */ 17069 ctiocb = lpfc_sli_get_iocbq(phba); 17070 if (!ctiocb) 17071 return; 17072 17073 /* Extract the F_CTL field from FC_HDR */ 17074 fctl = sli4_fctl_from_fc_hdr(fc_hdr); 17075 17076 icmd = &ctiocb->iocb; 17077 icmd->un.xseq64.bdl.bdeSize = 0; 17078 icmd->un.xseq64.bdl.ulpIoTag32 = 0; 17079 icmd->un.xseq64.w5.hcsw.Dfctl = 0; 17080 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC; 17081 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS; 17082 17083 /* Fill in the rest of iocb fields */ 17084 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX; 17085 icmd->ulpBdeCount = 0; 17086 icmd->ulpLe = 1; 17087 icmd->ulpClass = CLASS3; 17088 icmd->ulpContext = 
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 17089 ctiocb->context1 = lpfc_nlp_get(ndlp); 17090 17091 ctiocb->iocb_cmpl = NULL; 17092 ctiocb->vport = phba->pport; 17093 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; 17094 ctiocb->sli4_lxritag = NO_XRI; 17095 ctiocb->sli4_xritag = NO_XRI; 17096 17097 if (fctl & FC_FC_EX_CTX) 17098 /* Exchange responder sent the abort so we 17099 * own the oxid. 17100 */ 17101 xri = oxid; 17102 else 17103 xri = rxid; 17104 lxri = lpfc_sli4_xri_inrange(phba, xri); 17105 if (lxri != NO_XRI) 17106 lpfc_set_rrq_active(phba, ndlp, lxri, 17107 (xri == oxid) ? rxid : oxid, 0); 17108 /* For BA_ABTS from exchange responder, if the logical xri with 17109 * the oxid maps to the FCP XRI range, the port no longer has 17110 * that exchange context, send a BLS_RJT. Override the IOCB for 17111 * a BA_RJT. 17112 */ 17113 if ((fctl & FC_FC_EX_CTX) && 17114 (lxri > lpfc_sli4_get_iocb_cnt(phba))) { 17115 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 17116 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 17117 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 17118 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 17119 } 17120 17121 /* If BA_ABTS failed to abort a partially assembled receive sequence, 17122 * the driver no longer has that exchange, send a BLS_RJT. Override 17123 * the IOCB for a BA_RJT. 17124 */ 17125 if (aborted == false) { 17126 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 17127 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 17128 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 17129 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 17130 } 17131 17132 if (fctl & FC_FC_EX_CTX) { 17133 /* ABTS sent by responder to CT exchange, construction 17134 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG 17135 * field and RX_ID from ABTS for RX_ID field. 17136 */ 17137 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); 17138 } else { 17139 /* ABTS sent by initiator to CT exchange, construction 17140 * of BA_ACC will need to allocate a new XRI as for the 17141 * XRI_TAG field. 17142 */ 17143 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); 17144 } 17145 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); 17146 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); 17147 17148 /* Xmit CT abts response on exchange <xid> */ 17149 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 17150 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", 17151 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); 17152 17153 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 17154 if (rc == IOCB_ERROR) { 17155 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 17156 "2925 Failed to issue CT ABTS RSP x%x on " 17157 "xri x%x, Data x%x\n", 17158 icmd->un.xseq64.w5.hcsw.Rctl, oxid, 17159 phba->link_state); 17160 lpfc_nlp_put(ndlp); 17161 ctiocb->context1 = NULL; 17162 lpfc_sli_release_iocbq(phba, ctiocb); 17163 } 17164 } 17165 17166 /** 17167 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event 17168 * @vport: Pointer to the vport on which this sequence was received 17169 * @dmabuf: pointer to a dmabuf that describes the FC sequence 17170 * 17171 * This function handles an SLI-4 unsolicited abort event. If the unsolicited 17172 * receive sequence is only partially assembed by the driver, it shall abort 17173 * the partially assembled frames for the sequence. 
Otherwise, if the
17174 * unsolicited receive sequence has been completely assembled and passed to
17175 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to
17176 * indicate that the unsolicited sequence has been aborted. After that, it
17177 * will respond to the abort with a basic accept or reject as appropriate.
17178 **/
17179 static void
17180 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
17181 struct hbq_dmabuf *dmabuf)
17182 {
17183 struct lpfc_hba *phba = vport->phba;
17184 struct fc_frame_header fc_hdr;
17185 uint32_t fctl;
17186 bool aborted;
17187
17188 /* Make a copy of fc_hdr before the dmabuf being released */
17189 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
17190 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
17191
17192 if (fctl & FC_FC_EX_CTX) {
17193 /* ABTS by responder to exchange, no cleanup needed */
17194 aborted = true;
17195 } else {
17196 /* ABTS by initiator to exchange, need to do cleanup */
17197 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
17198 if (aborted == false)
17199 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
17200 }
17201 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17202
17203 if (phba->nvmet_support) {
17204 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
17205 return;
17206 }
17207
17208 /* Respond with BA_ACC or BA_RJT accordingly */
17209 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
17210 }
17211
17212 /**
17213 * lpfc_seq_complete - Indicates if a sequence is complete
17214 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17215 *
17216 * This function checks the sequence, starting with the frame described by
17217 * @dmabuf, to see if all the frames associated with this sequence are present.
17218 * The frames associated with this sequence are linked to the @dmabuf using the
17219 * dbuf list. This function looks for three things: 1) That the first frame
17220 * has a sequence count of zero. 2) That there is a frame with the last
17221 * frame of sequence bit set. 3) That there are no holes in the sequence count. The function will
17222 * return 1 when the sequence is complete, otherwise it will return 0.
17223 **/
17224 static int
17225 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
17226 {
17227 struct fc_frame_header *hdr;
17228 struct lpfc_dmabuf *d_buf;
17229 struct hbq_dmabuf *seq_dmabuf;
17230 uint32_t fctl;
17231 int seq_count = 0;
17232
17233 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17234 /* make sure first frame of sequence has a sequence count of zero */
17235 if (hdr->fh_seq_cnt != seq_count)
17236 return 0;
17237 fctl = (hdr->fh_f_ctl[0] << 16 |
17238 hdr->fh_f_ctl[1] << 8 |
17239 hdr->fh_f_ctl[2]);
17240 /* If last frame of sequence we can return success. */
17241 if (fctl & FC_FC_END_SEQ)
17242 return 1;
17243 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
17244 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17245 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17246 /* If there is a hole in the sequence count then fail. */
17247 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
17248 return 0;
17249 fctl = (hdr->fh_f_ctl[0] << 16 |
17250 hdr->fh_f_ctl[1] << 8 |
17251 hdr->fh_f_ctl[2]);
17252 /* If last frame of sequence we can return success.
*/ 17253 if (fctl & FC_FC_END_SEQ) 17254 return 1; 17255 } 17256 return 0; 17257 } 17258 17259 /** 17260 * lpfc_prep_seq - Prep sequence for ULP processing 17261 * @vport: Pointer to the vport on which this sequence was received 17262 * @dmabuf: pointer to a dmabuf that describes the FC sequence 17263 * 17264 * This function takes a sequence, described by a list of frames, and creates 17265 * a list of iocbq structures to describe the sequence. This iocbq list will be 17266 * used to issue to the generic unsolicited sequence handler. This routine 17267 * returns a pointer to the first iocbq in the list. If the function is unable 17268 * to allocate an iocbq then it throw out the received frames that were not 17269 * able to be described and return a pointer to the first iocbq. If unable to 17270 * allocate any iocbqs (including the first) this function will return NULL. 17271 **/ 17272 static struct lpfc_iocbq * 17273 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) 17274 { 17275 struct hbq_dmabuf *hbq_buf; 17276 struct lpfc_dmabuf *d_buf, *n_buf; 17277 struct lpfc_iocbq *first_iocbq, *iocbq; 17278 struct fc_frame_header *fc_hdr; 17279 uint32_t sid; 17280 uint32_t len, tot_len; 17281 struct ulp_bde64 *pbde; 17282 17283 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 17284 /* remove from receive buffer list */ 17285 list_del_init(&seq_dmabuf->hbuf.list); 17286 lpfc_update_rcv_time_stamp(vport); 17287 /* get the Remote Port's SID */ 17288 sid = sli4_sid_from_fc_hdr(fc_hdr); 17289 tot_len = 0; 17290 /* Get an iocbq struct to fill in. */ 17291 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 17292 if (first_iocbq) { 17293 /* Initialize the first IOCB. */ 17294 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; 17295 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 17296 first_iocbq->vport = vport; 17297 17298 /* Check FC Header to see what TYPE of frame we are rcv'ing */ 17299 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) { 17300 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX; 17301 first_iocbq->iocb.un.rcvels.parmRo = 17302 sli4_did_from_fc_hdr(fc_hdr); 17303 first_iocbq->iocb.ulpPU = PARM_NPIV_DID; 17304 } else 17305 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 17306 first_iocbq->iocb.ulpContext = NO_XRI; 17307 first_iocbq->iocb.unsli3.rcvsli3.ox_id = 17308 be16_to_cpu(fc_hdr->fh_ox_id); 17309 /* iocbq is prepped for internal consumption. Physical vpi. 
*/ 17310 first_iocbq->iocb.unsli3.rcvsli3.vpi = 17311 vport->phba->vpi_ids[vport->vpi]; 17312 /* put the first buffer into the first IOCBq */ 17313 tot_len = bf_get(lpfc_rcqe_length, 17314 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 17315 17316 first_iocbq->context2 = &seq_dmabuf->dbuf; 17317 first_iocbq->context3 = NULL; 17318 first_iocbq->iocb.ulpBdeCount = 1; 17319 if (tot_len > LPFC_DATA_BUF_SIZE) 17320 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 17321 LPFC_DATA_BUF_SIZE; 17322 else 17323 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len; 17324 17325 first_iocbq->iocb.un.rcvels.remoteID = sid; 17326 17327 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 17328 } 17329 iocbq = first_iocbq; 17330 /* 17331 * Each IOCBq can have two Buffers assigned, so go through the list 17332 * of buffers for this sequence and save two buffers in each IOCBq 17333 */ 17334 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { 17335 if (!iocbq) { 17336 lpfc_in_buf_free(vport->phba, d_buf); 17337 continue; 17338 } 17339 if (!iocbq->context3) { 17340 iocbq->context3 = d_buf; 17341 iocbq->iocb.ulpBdeCount++; 17342 /* We need to get the size out of the right CQE */ 17343 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 17344 len = bf_get(lpfc_rcqe_length, 17345 &hbq_buf->cq_event.cqe.rcqe_cmpl); 17346 pbde = (struct ulp_bde64 *) 17347 &iocbq->iocb.unsli3.sli3Words[4]; 17348 if (len > LPFC_DATA_BUF_SIZE) 17349 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; 17350 else 17351 pbde->tus.f.bdeSize = len; 17352 17353 iocbq->iocb.unsli3.rcvsli3.acc_len += len; 17354 tot_len += len; 17355 } else { 17356 iocbq = lpfc_sli_get_iocbq(vport->phba); 17357 if (!iocbq) { 17358 if (first_iocbq) { 17359 first_iocbq->iocb.ulpStatus = 17360 IOSTAT_FCP_RSP_ERROR; 17361 first_iocbq->iocb.un.ulpWord[4] = 17362 IOERR_NO_RESOURCES; 17363 } 17364 lpfc_in_buf_free(vport->phba, d_buf); 17365 continue; 17366 } 17367 /* We need to get the size out of the right CQE */ 17368 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 17369 len = bf_get(lpfc_rcqe_length, 17370 &hbq_buf->cq_event.cqe.rcqe_cmpl); 17371 iocbq->context2 = d_buf; 17372 iocbq->context3 = NULL; 17373 iocbq->iocb.ulpBdeCount = 1; 17374 if (len > LPFC_DATA_BUF_SIZE) 17375 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 17376 LPFC_DATA_BUF_SIZE; 17377 else 17378 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len; 17379 17380 tot_len += len; 17381 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 17382 17383 iocbq->iocb.un.rcvels.remoteID = sid; 17384 list_add_tail(&iocbq->list, &first_iocbq->list); 17385 } 17386 } 17387 return first_iocbq; 17388 } 17389 17390 static void 17391 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, 17392 struct hbq_dmabuf *seq_dmabuf) 17393 { 17394 struct fc_frame_header *fc_hdr; 17395 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb; 17396 struct lpfc_hba *phba = vport->phba; 17397 17398 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 17399 iocbq = lpfc_prep_seq(vport, seq_dmabuf); 17400 if (!iocbq) { 17401 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17402 "2707 Ring %d handler: Failed to allocate " 17403 "iocb Rctl x%x Type x%x received\n", 17404 LPFC_ELS_RING, 17405 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 17406 return; 17407 } 17408 if (!lpfc_complete_unsol_iocb(phba, 17409 phba->sli4_hba.els_wq->pring, 17410 iocbq, fc_hdr->fh_r_ctl, 17411 fc_hdr->fh_type)) 17412 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17413 "2540 Ring %d handler: unexpected Rctl " 17414 "x%x Type x%x received\n", 17415 LPFC_ELS_RING, 17416 fc_hdr->fh_r_ctl, 
fc_hdr->fh_type); 17417 17418 /* Free iocb created in lpfc_prep_seq */ 17419 list_for_each_entry_safe(curr_iocb, next_iocb, 17420 &iocbq->list, list) { 17421 list_del_init(&curr_iocb->list); 17422 lpfc_sli_release_iocbq(phba, curr_iocb); 17423 } 17424 lpfc_sli_release_iocbq(phba, iocbq); 17425 } 17426 17427 static void 17428 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 17429 struct lpfc_iocbq *rspiocb) 17430 { 17431 struct lpfc_dmabuf *pcmd = cmdiocb->context2; 17432 17433 if (pcmd && pcmd->virt) 17434 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); 17435 kfree(pcmd); 17436 lpfc_sli_release_iocbq(phba, cmdiocb); 17437 } 17438 17439 static void 17440 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, 17441 struct hbq_dmabuf *dmabuf) 17442 { 17443 struct fc_frame_header *fc_hdr; 17444 struct lpfc_hba *phba = vport->phba; 17445 struct lpfc_iocbq *iocbq = NULL; 17446 union lpfc_wqe *wqe; 17447 struct lpfc_dmabuf *pcmd = NULL; 17448 uint32_t frame_len; 17449 int rc; 17450 17451 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17452 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl); 17453 17454 /* Send the received frame back */ 17455 iocbq = lpfc_sli_get_iocbq(phba); 17456 if (!iocbq) 17457 goto exit; 17458 17459 /* Allocate buffer for command payload */ 17460 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 17461 if (pcmd) 17462 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, 17463 &pcmd->phys); 17464 if (!pcmd || !pcmd->virt) 17465 goto exit; 17466 17467 INIT_LIST_HEAD(&pcmd->list); 17468 17469 /* copyin the payload */ 17470 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len); 17471 17472 /* fill in BDE's for command */ 17473 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys); 17474 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys); 17475 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64; 17476 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len; 17477 17478 iocbq->context2 = pcmd; 17479 iocbq->vport = vport; 17480 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK; 17481 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX; 17482 17483 /* 17484 * Setup rest of the iocb as though it were a WQE 17485 * Build the SEND_FRAME WQE 17486 */ 17487 wqe = (union lpfc_wqe *)&iocbq->iocb; 17488 17489 wqe->send_frame.frame_len = frame_len; 17490 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr)); 17491 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1)); 17492 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2)); 17493 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3)); 17494 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4)); 17495 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5)); 17496 17497 iocbq->iocb.ulpCommand = CMD_SEND_FRAME; 17498 iocbq->iocb.ulpLe = 1; 17499 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl; 17500 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0); 17501 if (rc == IOCB_ERROR) 17502 goto exit; 17503 17504 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17505 return; 17506 17507 exit: 17508 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 17509 "2023 Unable to process MDS loopback frame\n"); 17510 if (pcmd && pcmd->virt) 17511 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); 17512 kfree(pcmd); 17513 if (iocbq) 17514 lpfc_sli_release_iocbq(phba, iocbq); 17515 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17516 } 17517 17518 /** 17519 * lpfc_sli4_handle_received_buffer - Handle received buffers from 
firmware 17520 * @phba: Pointer to HBA context object. 17521 * 17522 * This function is called with no lock held. This function processes all 17523 * the received buffers and gives it to upper layers when a received buffer 17524 * indicates that it is the final frame in the sequence. The interrupt 17525 * service routine processes received buffers at interrupt contexts. 17526 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the 17527 * appropriate receive function when the final frame in a sequence is received. 17528 **/ 17529 void 17530 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, 17531 struct hbq_dmabuf *dmabuf) 17532 { 17533 struct hbq_dmabuf *seq_dmabuf; 17534 struct fc_frame_header *fc_hdr; 17535 struct lpfc_vport *vport; 17536 uint32_t fcfi; 17537 uint32_t did; 17538 17539 /* Process each received buffer */ 17540 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17541 17542 /* check to see if this a valid type of frame */ 17543 if (lpfc_fc_frame_check(phba, fc_hdr)) { 17544 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17545 return; 17546 } 17547 17548 if ((bf_get(lpfc_cqe_code, 17549 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)) 17550 fcfi = bf_get(lpfc_rcqe_fcf_id_v1, 17551 &dmabuf->cq_event.cqe.rcqe_cmpl); 17552 else 17553 fcfi = bf_get(lpfc_rcqe_fcf_id, 17554 &dmabuf->cq_event.cqe.rcqe_cmpl); 17555 17556 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) { 17557 vport = phba->pport; 17558 /* Handle MDS Loopback frames */ 17559 lpfc_sli4_handle_mds_loopback(vport, dmabuf); 17560 return; 17561 } 17562 17563 /* d_id this frame is directed to */ 17564 did = sli4_did_from_fc_hdr(fc_hdr); 17565 17566 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did); 17567 if (!vport) { 17568 /* throw out the frame */ 17569 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17570 return; 17571 } 17572 17573 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */ 17574 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) && 17575 (did != Fabric_DID)) { 17576 /* 17577 * Throw out the frame if we are not pt2pt. 17578 * The pt2pt protocol allows for discovery frames 17579 * to be received without a registered VPI. 17580 */ 17581 if (!(vport->fc_flag & FC_PT2PT) || 17582 (phba->link_state == LPFC_HBA_READY)) { 17583 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17584 return; 17585 } 17586 } 17587 17588 /* Handle the basic abort sequence (BA_ABTS) event */ 17589 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { 17590 lpfc_sli4_handle_unsol_abort(vport, dmabuf); 17591 return; 17592 } 17593 17594 /* Link this frame */ 17595 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); 17596 if (!seq_dmabuf) { 17597 /* unable to add frame to vport - throw it out */ 17598 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17599 return; 17600 } 17601 /* If not last frame in sequence continue processing frames. */ 17602 if (!lpfc_seq_complete(seq_dmabuf)) 17603 return; 17604 17605 /* Send the complete sequence to the upper layer protocol */ 17606 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); 17607 } 17608 17609 /** 17610 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port 17611 * @phba: pointer to lpfc hba data structure. 17612 * 17613 * This routine is invoked to post rpi header templates to the 17614 * HBA consistent with the SLI-4 interface spec. This routine 17615 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 17616 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 17617 * 17618 * This routine does not require any locks. 
It's usage is expected 17619 * to be driver load or reset recovery when the driver is 17620 * sequential. 17621 * 17622 * Return codes 17623 * 0 - successful 17624 * -EIO - The mailbox failed to complete successfully. 17625 * When this error occurs, the driver is not guaranteed 17626 * to have any rpi regions posted to the device and 17627 * must either attempt to repost the regions or take a 17628 * fatal error. 17629 **/ 17630 int 17631 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) 17632 { 17633 struct lpfc_rpi_hdr *rpi_page; 17634 uint32_t rc = 0; 17635 uint16_t lrpi = 0; 17636 17637 /* SLI4 ports that support extents do not require RPI headers. */ 17638 if (!phba->sli4_hba.rpi_hdrs_in_use) 17639 goto exit; 17640 if (phba->sli4_hba.extents_in_use) 17641 return -EIO; 17642 17643 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 17644 /* 17645 * Assign the rpi headers a physical rpi only if the driver 17646 * has not initialized those resources. A port reset only 17647 * needs the headers posted. 17648 */ 17649 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) != 17650 LPFC_RPI_RSRC_RDY) 17651 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 17652 17653 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); 17654 if (rc != MBX_SUCCESS) { 17655 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17656 "2008 Error %d posting all rpi " 17657 "headers\n", rc); 17658 rc = -EIO; 17659 break; 17660 } 17661 } 17662 17663 exit: 17664 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 17665 LPFC_RPI_RSRC_RDY); 17666 return rc; 17667 } 17668 17669 /** 17670 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port 17671 * @phba: pointer to lpfc hba data structure. 17672 * @rpi_page: pointer to the rpi memory region. 17673 * 17674 * This routine is invoked to post a single rpi header to the 17675 * HBA consistent with the SLI-4 interface spec. This memory region 17676 * maps up to 64 rpi context regions. 17677 * 17678 * Return codes 17679 * 0 - successful 17680 * -ENOMEM - No available memory 17681 * -EIO - The mailbox failed to complete successfully. 17682 **/ 17683 int 17684 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) 17685 { 17686 LPFC_MBOXQ_t *mboxq; 17687 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; 17688 uint32_t rc = 0; 17689 uint32_t shdr_status, shdr_add_status; 17690 union lpfc_sli4_cfg_shdr *shdr; 17691 17692 /* SLI4 ports that support extents do not require RPI headers. */ 17693 if (!phba->sli4_hba.rpi_hdrs_in_use) 17694 return rc; 17695 if (phba->sli4_hba.extents_in_use) 17696 return -EIO; 17697 17698 /* The port is notified of the header region via a mailbox command. */ 17699 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 17700 if (!mboxq) { 17701 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17702 "2001 Unable to allocate memory for issuing " 17703 "SLI_CONFIG_SPECIAL mailbox command\n"); 17704 return -ENOMEM; 17705 } 17706 17707 /* Post all rpi memory regions to the port. */ 17708 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl; 17709 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 17710 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, 17711 sizeof(struct lpfc_mbx_post_hdr_tmpl) - 17712 sizeof(struct lpfc_sli4_cfg_mhdr), 17713 LPFC_SLI4_MBX_EMBED); 17714 17715 17716 /* Post the physical rpi to the port for this rpi header. 
*/ 17717 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, 17718 rpi_page->start_rpi); 17719 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, 17720 hdr_tmpl, rpi_page->page_count); 17721 17722 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); 17723 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); 17724 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 17725 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; 17726 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 17727 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 17728 if (rc != MBX_TIMEOUT) 17729 mempool_free(mboxq, phba->mbox_mem_pool); 17730 if (shdr_status || shdr_add_status || rc) { 17731 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 17732 "2514 POST_RPI_HDR mailbox failed with " 17733 "status x%x add_status x%x, mbx status x%x\n", 17734 shdr_status, shdr_add_status, rc); 17735 rc = -ENXIO; 17736 } else { 17737 /* 17738 * The next_rpi stores the next logical module-64 rpi value used 17739 * to post physical rpis in subsequent rpi postings. 17740 */ 17741 spin_lock_irq(&phba->hbalock); 17742 phba->sli4_hba.next_rpi = rpi_page->next_rpi; 17743 spin_unlock_irq(&phba->hbalock); 17744 } 17745 return rc; 17746 } 17747 17748 /** 17749 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range 17750 * @phba: pointer to lpfc hba data structure. 17751 * 17752 * This routine is invoked to post rpi header templates to the 17753 * HBA consistent with the SLI-4 interface spec. This routine 17754 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 17755 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 17756 * 17757 * Returns 17758 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 17759 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 17760 **/ 17761 int 17762 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) 17763 { 17764 unsigned long rpi; 17765 uint16_t max_rpi, rpi_limit; 17766 uint16_t rpi_remaining, lrpi = 0; 17767 struct lpfc_rpi_hdr *rpi_hdr; 17768 unsigned long iflag; 17769 17770 /* 17771 * Fetch the next logical rpi. Because this index is logical, 17772 * the driver starts at 0 each time. 17773 */ 17774 spin_lock_irqsave(&phba->hbalock, iflag); 17775 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 17776 rpi_limit = phba->sli4_hba.next_rpi; 17777 17778 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0); 17779 if (rpi >= rpi_limit) 17780 rpi = LPFC_RPI_ALLOC_ERROR; 17781 else { 17782 set_bit(rpi, phba->sli4_hba.rpi_bmask); 17783 phba->sli4_hba.max_cfg_param.rpi_used++; 17784 phba->sli4_hba.rpi_count++; 17785 } 17786 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 17787 "0001 rpi:%x max:%x lim:%x\n", 17788 (int) rpi, max_rpi, rpi_limit); 17789 17790 /* 17791 * Don't try to allocate more rpi header regions if the device limit 17792 * has been exhausted. 17793 */ 17794 if ((rpi == LPFC_RPI_ALLOC_ERROR) && 17795 (phba->sli4_hba.rpi_count >= max_rpi)) { 17796 spin_unlock_irqrestore(&phba->hbalock, iflag); 17797 return rpi; 17798 } 17799 17800 /* 17801 * RPI header postings are not required for SLI4 ports capable of 17802 * extents. 17803 */ 17804 if (!phba->sli4_hba.rpi_hdrs_in_use) { 17805 spin_unlock_irqrestore(&phba->hbalock, iflag); 17806 return rpi; 17807 } 17808 17809 /* 17810 * If the driver is running low on rpi resources, allocate another 17811 * page now. Note that the next_rpi value is used because 17812 * it represents how many are actually in use whereas max_rpi notes 17813 * how many are supported max by the device. 
17814 */
17815 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
17816 spin_unlock_irqrestore(&phba->hbalock, iflag);
17817 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
17818 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
17819 if (!rpi_hdr) {
17820 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17821 "2002 Error Could not grow rpi "
17822 "count\n");
17823 } else {
17824 lrpi = rpi_hdr->start_rpi;
17825 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
17826 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
17827 }
17828 }
17829
17830 return rpi;
17831 }
17832
17833 /**
17834 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
17835 * @phba: pointer to lpfc hba data structure.
17836 *
17837 * This routine is invoked to release an rpi to the pool of
17838 * available rpis maintained by the driver.
17839 **/
17840 static void
17841 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
17842 {
17843 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
17844 phba->sli4_hba.rpi_count--;
17845 phba->sli4_hba.max_cfg_param.rpi_used--;
17846 }
17847 }
17848
17849 /**
17850 * lpfc_sli4_free_rpi - Release an rpi for reuse.
17851 * @phba: pointer to lpfc hba data structure.
17852 *
17853 * This routine is invoked to release an rpi to the pool of
17854 * available rpis maintained by the driver.
17855 **/
17856 void
17857 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
17858 {
17859 spin_lock_irq(&phba->hbalock);
17860 __lpfc_sli4_free_rpi(phba, rpi);
17861 spin_unlock_irq(&phba->hbalock);
17862 }
17863
17864 /**
17865 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
17866 * @phba: pointer to lpfc hba data structure.
17867 *
17868 * This routine is invoked to free the rpi bitmask and rpi id arrays
17869 * maintained by the driver.
17870 **/
17871 void
17872 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
17873 {
17874 kfree(phba->sli4_hba.rpi_bmask);
17875 kfree(phba->sli4_hba.rpi_ids);
17876 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
17877 }
17878
17879 /**
17880 * lpfc_sli4_resume_rpi - Resume the rpi associated with a remote node
17881 * @ndlp: pointer to the node whose rpi is to be resumed.
17882 *
17883 * This routine is invoked to issue a resume rpi mailbox command for the
17884 * rpi of @ndlp; if @cmpl is provided it is used as the mailbox completion.
17885 **/
17886 int
17887 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
17888 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
17889 {
17890 LPFC_MBOXQ_t *mboxq;
17891 struct lpfc_hba *phba = ndlp->phba;
17892 int rc;
17893
17894 /* Allocate a mailbox to carry the resume rpi command. */
17895 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17896 if (!mboxq)
17897 return -ENOMEM;
17898
17899 /* Construct the resume rpi mailbox command for this node. */
17900 lpfc_resume_rpi(mboxq, ndlp);
17901 if (cmpl) {
17902 mboxq->mbox_cmpl = cmpl;
17903 mboxq->context1 = arg;
17904 mboxq->context2 = ndlp;
17905 } else
17906 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17907 mboxq->vport = ndlp->vport;
17908 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
17909 if (rc == MBX_NOT_FINISHED) {
17910 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17911 "2010 Resume RPI Mailbox failed "
17912 "status %d, mbxStatus x%x\n", rc,
17913 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
17914 mempool_free(mboxq, phba->mbox_mem_pool);
17915 return -EIO;
17916 }
17917 return 0;
17918 }
17919
17920 /**
17921 * lpfc_sli4_init_vpi - Initialize a vpi with the port
17922 * @vport: Pointer to the vport for which the vpi is being initialized
17923 *
17924 * This routine is invoked to activate a vpi with the port.
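 *
 * Minimal caller sketch (illustrative only; assumes the vport has already
 * been created and assigned a vpi):
 *
 *	rc = lpfc_sli4_init_vpi(vport);
 *	if (rc)
 *		return rc;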
17925 * 17926 * Returns: 17927 * 0 success 17928 * -Evalue otherwise 17929 **/ 17930 int 17931 lpfc_sli4_init_vpi(struct lpfc_vport *vport) 17932 { 17933 LPFC_MBOXQ_t *mboxq; 17934 int rc = 0; 17935 int retval = MBX_SUCCESS; 17936 uint32_t mbox_tmo; 17937 struct lpfc_hba *phba = vport->phba; 17938 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 17939 if (!mboxq) 17940 return -ENOMEM; 17941 lpfc_init_vpi(phba, mboxq, vport->vpi); 17942 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 17943 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 17944 if (rc != MBX_SUCCESS) { 17945 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 17946 "2022 INIT VPI Mailbox failed " 17947 "status %d, mbxStatus x%x\n", rc, 17948 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 17949 retval = -EIO; 17950 } 17951 if (rc != MBX_TIMEOUT) 17952 mempool_free(mboxq, vport->phba->mbox_mem_pool); 17953 17954 return retval; 17955 } 17956 17957 /** 17958 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. 17959 * @phba: pointer to lpfc hba data structure. 17960 * @mboxq: Pointer to mailbox object. 17961 * 17962 * This routine is invoked to manually add a single FCF record. The caller 17963 * must pass a completely initialized FCF_Record. This routine takes 17964 * care of the nonembedded mailbox operations. 17965 **/ 17966 static void 17967 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 17968 { 17969 void *virt_addr; 17970 union lpfc_sli4_cfg_shdr *shdr; 17971 uint32_t shdr_status, shdr_add_status; 17972 17973 virt_addr = mboxq->sge_array->addr[0]; 17974 /* The IOCTL status is embedded in the mailbox subheader. */ 17975 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; 17976 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 17977 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 17978 17979 if ((shdr_status || shdr_add_status) && 17980 (shdr_status != STATUS_FCF_IN_USE)) 17981 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 17982 "2558 ADD_FCF_RECORD mailbox failed with " 17983 "status x%x add_status x%x\n", 17984 shdr_status, shdr_add_status); 17985 17986 lpfc_sli4_mbox_cmd_free(phba, mboxq); 17987 } 17988 17989 /** 17990 * lpfc_sli4_add_fcf_record - Manually add an FCF Record. 17991 * @phba: pointer to lpfc hba data structure. 17992 * @fcf_record: pointer to the initialized fcf record to add. 17993 * 17994 * This routine is invoked to manually add a single FCF record. The caller 17995 * must pass a completely initialized FCF_Record. This routine takes 17996 * care of the nonembedded mailbox operations. 
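 *
 * Typical pairing with lpfc_sli4_build_dflt_fcf_record() (sketch only; the
 * on-stack record and FCF index 0 are illustrative assumptions):
 *
 *	struct fcf_record fcf_record;
 *
 *	lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, 0);
 *	rc = lpfc_sli4_add_fcf_record(phba, &fcf_record);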
17997 **/ 17998 int 17999 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) 18000 { 18001 int rc = 0; 18002 LPFC_MBOXQ_t *mboxq; 18003 uint8_t *bytep; 18004 void *virt_addr; 18005 struct lpfc_mbx_sge sge; 18006 uint32_t alloc_len, req_len; 18007 uint32_t fcfindex; 18008 18009 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18010 if (!mboxq) { 18011 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18012 "2009 Failed to allocate mbox for ADD_FCF cmd\n"); 18013 return -ENOMEM; 18014 } 18015 18016 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + 18017 sizeof(uint32_t); 18018 18019 /* Allocate DMA memory and set up the non-embedded mailbox command */ 18020 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 18021 LPFC_MBOX_OPCODE_FCOE_ADD_FCF, 18022 req_len, LPFC_SLI4_MBX_NEMBED); 18023 if (alloc_len < req_len) { 18024 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18025 "2523 Allocated DMA memory size (x%x) is " 18026 "less than the requested DMA memory " 18027 "size (x%x)\n", alloc_len, req_len); 18028 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18029 return -ENOMEM; 18030 } 18031 18032 /* 18033 * Get the first SGE entry from the non-embedded DMA memory. This 18034 * routine only uses a single SGE. 18035 */ 18036 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 18037 virt_addr = mboxq->sge_array->addr[0]; 18038 /* 18039 * Configure the FCF record for FCFI 0. This is the driver's 18040 * hardcoded default and gets used in nonFIP mode. 18041 */ 18042 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); 18043 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 18044 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); 18045 18046 /* 18047 * Copy the fcf_index and the FCF Record Data. The data starts after 18048 * the FCoE header plus word10. The data copy needs to be endian 18049 * correct. 18050 */ 18051 bytep += sizeof(uint32_t); 18052 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); 18053 mboxq->vport = phba->pport; 18054 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; 18055 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18056 if (rc == MBX_NOT_FINISHED) { 18057 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18058 "2515 ADD_FCF_RECORD mailbox failed with " 18059 "status 0x%x\n", rc); 18060 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18061 rc = -EIO; 18062 } else 18063 rc = 0; 18064 18065 return rc; 18066 } 18067 18068 /** 18069 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. 18070 * @phba: pointer to lpfc hba data structure. 18071 * @fcf_record: pointer to the fcf record to write the default data. 18072 * @fcf_index: FCF table entry index. 18073 * 18074 * This routine is invoked to build the driver's default FCF record. The 18075 * values used are hardcoded. This routine handles memory initialization. 
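 *
 * Worked example for the VLAN bitmap assignment at the end of this routine:
 * with phba->valid_vlan set and phba->vlan_id == 100, the code stores
 * 1 << (100 % 8) == 0x10 into vlan_bitmap[100 / 8], i.e. vlan_bitmap[12].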
18076 *
18077 **/
18078 void
18079 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
18080 struct fcf_record *fcf_record,
18081 uint16_t fcf_index)
18082 {
18083 memset(fcf_record, 0, sizeof(struct fcf_record));
18084 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
18085 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
18086 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
18087 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
18088 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
18089 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
18090 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
18091 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
18092 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
18093 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
18094 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
18095 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
18096 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
18097 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
18098 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
18099 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
18100 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
18101 /* Set the VLAN bit map */
18102 if (phba->valid_vlan) {
18103 fcf_record->vlan_bitmap[phba->vlan_id / 8]
18104 = 1 << (phba->vlan_id % 8);
18105 }
18106 }
18107
18108 /**
18109 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
18110 * @phba: pointer to lpfc hba data structure.
18111 * @fcf_index: FCF table entry offset.
18112 *
18113 * This routine is invoked to scan the entire FCF table by reading FCF
18114 * records and processing them one at a time starting from the @fcf_index
18115 * for initial FCF discovery or fast FCF failover rediscovery.
18116 *
18117 * Return 0 if the mailbox command is submitted successfully, non-zero
18118 * otherwise.
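 *
 * A full-table scan is normally started from the first entry (sketch, using
 * the LPFC_FCOE_FCF_GET_FIRST index also referenced in the body below):
 *
 *	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
 *	if (rc)
 *		return rc;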
18119 **/ 18120 int 18121 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 18122 { 18123 int rc = 0, error; 18124 LPFC_MBOXQ_t *mboxq; 18125 18126 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; 18127 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; 18128 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18129 if (!mboxq) { 18130 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18131 "2000 Failed to allocate mbox for " 18132 "READ_FCF cmd\n"); 18133 error = -ENOMEM; 18134 goto fail_fcf_scan; 18135 } 18136 /* Construct the read FCF record mailbox command */ 18137 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 18138 if (rc) { 18139 error = -EINVAL; 18140 goto fail_fcf_scan; 18141 } 18142 /* Issue the mailbox command asynchronously */ 18143 mboxq->vport = phba->pport; 18144 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 18145 18146 spin_lock_irq(&phba->hbalock); 18147 phba->hba_flag |= FCF_TS_INPROG; 18148 spin_unlock_irq(&phba->hbalock); 18149 18150 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18151 if (rc == MBX_NOT_FINISHED) 18152 error = -EIO; 18153 else { 18154 /* Reset eligible FCF count for new scan */ 18155 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 18156 phba->fcf.eligible_fcf_cnt = 0; 18157 error = 0; 18158 } 18159 fail_fcf_scan: 18160 if (error) { 18161 if (mboxq) 18162 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18163 /* FCF scan failed, clear FCF_TS_INPROG flag */ 18164 spin_lock_irq(&phba->hbalock); 18165 phba->hba_flag &= ~FCF_TS_INPROG; 18166 spin_unlock_irq(&phba->hbalock); 18167 } 18168 return error; 18169 } 18170 18171 /** 18172 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. 18173 * @phba: pointer to lpfc hba data structure. 18174 * @fcf_index: FCF table entry offset. 18175 * 18176 * This routine is invoked to read an FCF record indicated by @fcf_index 18177 * and to use it for FLOGI roundrobin FCF failover. 18178 * 18179 * Return 0 if the mailbox command is submitted successfully, none 0 18180 * otherwise. 18181 **/ 18182 int 18183 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 18184 { 18185 int rc = 0, error; 18186 LPFC_MBOXQ_t *mboxq; 18187 18188 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18189 if (!mboxq) { 18190 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 18191 "2763 Failed to allocate mbox for " 18192 "READ_FCF cmd\n"); 18193 error = -ENOMEM; 18194 goto fail_fcf_read; 18195 } 18196 /* Construct the read FCF record mailbox command */ 18197 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 18198 if (rc) { 18199 error = -EINVAL; 18200 goto fail_fcf_read; 18201 } 18202 /* Issue the mailbox command asynchronously */ 18203 mboxq->vport = phba->pport; 18204 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; 18205 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18206 if (rc == MBX_NOT_FINISHED) 18207 error = -EIO; 18208 else 18209 error = 0; 18210 18211 fail_fcf_read: 18212 if (error && mboxq) 18213 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18214 return error; 18215 } 18216 18217 /** 18218 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. 18219 * @phba: pointer to lpfc hba data structure. 18220 * @fcf_index: FCF table entry offset. 18221 * 18222 * This routine is invoked to read an FCF record indicated by @fcf_index to 18223 * determine whether it's eligible for FLOGI roundrobin failover list. 18224 * 18225 * Return 0 if the mailbox command is submitted successfully, none 0 18226 * otherwise. 
18227 **/ 18228 int 18229 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 18230 { 18231 int rc = 0, error; 18232 LPFC_MBOXQ_t *mboxq; 18233 18234 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18235 if (!mboxq) { 18236 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 18237 "2758 Failed to allocate mbox for " 18238 "READ_FCF cmd\n"); 18239 error = -ENOMEM; 18240 goto fail_fcf_read; 18241 } 18242 /* Construct the read FCF record mailbox command */ 18243 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 18244 if (rc) { 18245 error = -EINVAL; 18246 goto fail_fcf_read; 18247 } 18248 /* Issue the mailbox command asynchronously */ 18249 mboxq->vport = phba->pport; 18250 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec; 18251 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18252 if (rc == MBX_NOT_FINISHED) 18253 error = -EIO; 18254 else 18255 error = 0; 18256 18257 fail_fcf_read: 18258 if (error && mboxq) 18259 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18260 return error; 18261 } 18262 18263 /** 18264 * lpfc_check_next_fcf_pri_level 18265 * phba pointer to the lpfc_hba struct for this port. 18266 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get 18267 * routine when the rr_bmask is empty. The FCF indecies are put into the 18268 * rr_bmask based on their priority level. Starting from the highest priority 18269 * to the lowest. The most likely FCF candidate will be in the highest 18270 * priority group. When this routine is called it searches the fcf_pri list for 18271 * next lowest priority group and repopulates the rr_bmask with only those 18272 * fcf_indexes. 18273 * returns: 18274 * 1=success 0=failure 18275 **/ 18276 static int 18277 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba) 18278 { 18279 uint16_t next_fcf_pri; 18280 uint16_t last_index; 18281 struct lpfc_fcf_pri *fcf_pri; 18282 int rc; 18283 int ret = 0; 18284 18285 last_index = find_first_bit(phba->fcf.fcf_rr_bmask, 18286 LPFC_SLI4_FCF_TBL_INDX_MAX); 18287 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18288 "3060 Last IDX %d\n", last_index); 18289 18290 /* Verify the priority list has 2 or more entries */ 18291 spin_lock_irq(&phba->hbalock); 18292 if (list_empty(&phba->fcf.fcf_pri_list) || 18293 list_is_singular(&phba->fcf.fcf_pri_list)) { 18294 spin_unlock_irq(&phba->hbalock); 18295 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18296 "3061 Last IDX %d\n", last_index); 18297 return 0; /* Empty rr list */ 18298 } 18299 spin_unlock_irq(&phba->hbalock); 18300 18301 next_fcf_pri = 0; 18302 /* 18303 * Clear the rr_bmask and set all of the bits that are at this 18304 * priority. 18305 */ 18306 memset(phba->fcf.fcf_rr_bmask, 0, 18307 sizeof(*phba->fcf.fcf_rr_bmask)); 18308 spin_lock_irq(&phba->hbalock); 18309 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 18310 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED) 18311 continue; 18312 /* 18313 * the 1st priority that has not FLOGI failed 18314 * will be the highest. 18315 */ 18316 if (!next_fcf_pri) 18317 next_fcf_pri = fcf_pri->fcf_rec.priority; 18318 spin_unlock_irq(&phba->hbalock); 18319 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 18320 rc = lpfc_sli4_fcf_rr_index_set(phba, 18321 fcf_pri->fcf_rec.fcf_index); 18322 if (rc) 18323 return 0; 18324 } 18325 spin_lock_irq(&phba->hbalock); 18326 } 18327 /* 18328 * if next_fcf_pri was not set above and the list is not empty then 18329 * we have failed flogis on all of them. So reset flogi failed 18330 * and start at the beginning. 
18331 */ 18332 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) { 18333 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 18334 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED; 18335 /* 18336 * the 1st priority that has not FLOGI failed 18337 * will be the highest. 18338 */ 18339 if (!next_fcf_pri) 18340 next_fcf_pri = fcf_pri->fcf_rec.priority; 18341 spin_unlock_irq(&phba->hbalock); 18342 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 18343 rc = lpfc_sli4_fcf_rr_index_set(phba, 18344 fcf_pri->fcf_rec.fcf_index); 18345 if (rc) 18346 return 0; 18347 } 18348 spin_lock_irq(&phba->hbalock); 18349 } 18350 } else 18351 ret = 1; 18352 spin_unlock_irq(&phba->hbalock); 18353 18354 return ret; 18355 } 18356 /** 18357 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index 18358 * @phba: pointer to lpfc hba data structure. 18359 * 18360 * This routine is to get the next eligible FCF record index in a round 18361 * robin fashion. If the next eligible FCF record index equals to the 18362 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 18363 * shall be returned, otherwise, the next eligible FCF record's index 18364 * shall be returned. 18365 **/ 18366 uint16_t 18367 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) 18368 { 18369 uint16_t next_fcf_index; 18370 18371 initial_priority: 18372 /* Search start from next bit of currently registered FCF index */ 18373 next_fcf_index = phba->fcf.current_rec.fcf_indx; 18374 18375 next_priority: 18376 /* Determine the next fcf index to check */ 18377 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX; 18378 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 18379 LPFC_SLI4_FCF_TBL_INDX_MAX, 18380 next_fcf_index); 18381 18382 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 18383 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 18384 /* 18385 * If we have wrapped then we need to clear the bits that 18386 * have been tested so that we can detect when we should 18387 * change the priority level. 18388 */ 18389 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 18390 LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 18391 } 18392 18393 18394 /* Check roundrobin failover list empty condition */ 18395 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX || 18396 next_fcf_index == phba->fcf.current_rec.fcf_indx) { 18397 /* 18398 * If next fcf index is not found check if there are lower 18399 * Priority level fcf's in the fcf_priority list. 18400 * Set up the rr_bmask with all of the avaiable fcf bits 18401 * at that level and continue the selection process. 
18402 */ 18403 if (lpfc_check_next_fcf_pri_level(phba)) 18404 goto initial_priority; 18405 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 18406 "2844 No roundrobin failover FCF available\n"); 18407 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) 18408 return LPFC_FCOE_FCF_NEXT_NONE; 18409 else { 18410 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 18411 "3063 Only FCF available idx %d, flag %x\n", 18412 next_fcf_index, 18413 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag); 18414 return next_fcf_index; 18415 } 18416 } 18417 18418 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX && 18419 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag & 18420 LPFC_FCF_FLOGI_FAILED) { 18421 if (list_is_singular(&phba->fcf.fcf_pri_list)) 18422 return LPFC_FCOE_FCF_NEXT_NONE; 18423 18424 goto next_priority; 18425 } 18426 18427 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18428 "2845 Get next roundrobin failover FCF (x%x)\n", 18429 next_fcf_index); 18430 18431 return next_fcf_index; 18432 } 18433 18434 /** 18435 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index 18436 * @phba: pointer to lpfc hba data structure. 18437 * 18438 * This routine sets the FCF record index in to the eligible bmask for 18439 * roundrobin failover search. It checks to make sure that the index 18440 * does not go beyond the range of the driver allocated bmask dimension 18441 * before setting the bit. 18442 * 18443 * Returns 0 if the index bit successfully set, otherwise, it returns 18444 * -EINVAL. 18445 **/ 18446 int 18447 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) 18448 { 18449 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 18450 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18451 "2610 FCF (x%x) reached driver's book " 18452 "keeping dimension:x%x\n", 18453 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 18454 return -EINVAL; 18455 } 18456 /* Set the eligible FCF record index bmask */ 18457 set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 18458 18459 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18460 "2790 Set FCF (x%x) to roundrobin FCF failover " 18461 "bmask\n", fcf_index); 18462 18463 return 0; 18464 } 18465 18466 /** 18467 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index 18468 * @phba: pointer to lpfc hba data structure. 18469 * 18470 * This routine clears the FCF record index from the eligible bmask for 18471 * roundrobin failover search. It checks to make sure that the index 18472 * does not go beyond the range of the driver allocated bmask dimension 18473 * before clearing the bit. 
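 *
 * Illustrative sketch of the set/clear pair for one FCF index (not a quote
 * of any specific call site; error handling elided):
 *
 *	rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
 *	...
 *	lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);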
18474 **/ 18475 void 18476 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 18477 { 18478 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next; 18479 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 18480 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18481 "2762 FCF (x%x) reached driver's book " 18482 "keeping dimension:x%x\n", 18483 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 18484 return; 18485 } 18486 /* Clear the eligible FCF record index bmask */ 18487 spin_lock_irq(&phba->hbalock); 18488 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list, 18489 list) { 18490 if (fcf_pri->fcf_rec.fcf_index == fcf_index) { 18491 list_del_init(&fcf_pri->list); 18492 break; 18493 } 18494 } 18495 spin_unlock_irq(&phba->hbalock); 18496 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 18497 18498 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18499 "2791 Clear FCF (x%x) from roundrobin failover " 18500 "bmask\n", fcf_index); 18501 } 18502 18503 /** 18504 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table 18505 * @phba: pointer to lpfc hba data structure. 18506 * 18507 * This routine is the completion routine for the rediscover FCF table mailbox 18508 * command. If the mailbox command returned failure, it will try to stop the 18509 * FCF rediscover wait timer. 18510 **/ 18511 static void 18512 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 18513 { 18514 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 18515 uint32_t shdr_status, shdr_add_status; 18516 18517 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 18518 18519 shdr_status = bf_get(lpfc_mbox_hdr_status, 18520 &redisc_fcf->header.cfg_shdr.response); 18521 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 18522 &redisc_fcf->header.cfg_shdr.response); 18523 if (shdr_status || shdr_add_status) { 18524 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18525 "2746 Requesting for FCF rediscovery failed " 18526 "status x%x add_status x%x\n", 18527 shdr_status, shdr_add_status); 18528 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { 18529 spin_lock_irq(&phba->hbalock); 18530 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 18531 spin_unlock_irq(&phba->hbalock); 18532 /* 18533 * CVL event triggered FCF rediscover request failed, 18534 * last resort to re-try current registered FCF entry. 18535 */ 18536 lpfc_retry_pport_discovery(phba); 18537 } else { 18538 spin_lock_irq(&phba->hbalock); 18539 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 18540 spin_unlock_irq(&phba->hbalock); 18541 /* 18542 * DEAD FCF event triggered FCF rediscover request 18543 * failed, last resort to fail over as a link down 18544 * to FCF registration. 18545 */ 18546 lpfc_sli4_fcf_dead_failthrough(phba); 18547 } 18548 } else { 18549 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18550 "2775 Start FCF rediscover quiescent timer\n"); 18551 /* 18552 * Start FCF rediscovery wait timer for pending FCF 18553 * before rescan FCF record table. 18554 */ 18555 lpfc_fcf_redisc_wait_start_timer(phba); 18556 } 18557 18558 mempool_free(mbox, phba->mbox_mem_pool); 18559 } 18560 18561 /** 18562 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port. 18563 * @phba: pointer to lpfc hba data structure. 18564 * 18565 * This routine is invoked to request for rediscovery of the entire FCF table 18566 * by the port. 
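 *
 * Caller sketch (illustrative; the routine returns 0 on success, -ENOMEM if
 * no mailbox memory is available, or -EIO if the mailbox cannot be issued):
 *
 *	rc = lpfc_sli4_redisc_fcf_table(phba);
 *	if (rc)
 *		return rc;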
18567 **/ 18568 int 18569 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) 18570 { 18571 LPFC_MBOXQ_t *mbox; 18572 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 18573 int rc, length; 18574 18575 /* Cancel retry delay timers to all vports before FCF rediscover */ 18576 lpfc_cancel_all_vport_retry_delay_timer(phba); 18577 18578 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18579 if (!mbox) { 18580 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18581 "2745 Failed to allocate mbox for " 18582 "requesting FCF rediscover.\n"); 18583 return -ENOMEM; 18584 } 18585 18586 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) - 18587 sizeof(struct lpfc_sli4_cfg_mhdr)); 18588 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 18589 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF, 18590 length, LPFC_SLI4_MBX_EMBED); 18591 18592 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 18593 /* Set count to 0 for invalidating the entire FCF database */ 18594 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0); 18595 18596 /* Issue the mailbox command asynchronously */ 18597 mbox->vport = phba->pport; 18598 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table; 18599 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 18600 18601 if (rc == MBX_NOT_FINISHED) { 18602 mempool_free(mbox, phba->mbox_mem_pool); 18603 return -EIO; 18604 } 18605 return 0; 18606 } 18607 18608 /** 18609 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event 18610 * @phba: pointer to lpfc hba data structure. 18611 * 18612 * This function is the failover routine as a last resort to the FCF DEAD 18613 * event when driver failed to perform fast FCF failover. 18614 **/ 18615 void 18616 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) 18617 { 18618 uint32_t link_state; 18619 18620 /* 18621 * Last resort as FCF DEAD event failover will treat this as 18622 * a link down, but save the link state because we don't want 18623 * it to be changed to Link Down unless it is already down. 18624 */ 18625 link_state = phba->link_state; 18626 lpfc_linkdown(phba); 18627 phba->link_state = link_state; 18628 18629 /* Unregister FCF if no devices connected to it */ 18630 lpfc_unregister_unused_fcf(phba); 18631 } 18632 18633 /** 18634 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data. 18635 * @phba: pointer to lpfc hba data structure. 18636 * @rgn23_data: pointer to configure region 23 data. 18637 * 18638 * This function gets SLI3 port configure region 23 data through memory dump 18639 * mailbox command. When it successfully retrieves data, the size of the data 18640 * will be returned, otherwise, 0 will be returned. 
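 * The region is read with polled DUMP memory (region 23) mailbox commands
 * until the dump returns no more data or DMP_RGN23_SIZE has been reached.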
 **/
static uint32_t
lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	uint32_t offset = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2600 failed to allocate mailbox memory\n");
		return 0;
	}
	mb = &pmb->u.mb;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2601 failed to read config "
					"region 23, rc 0x%x Status 0x%x\n",
					rc, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/*
		 * dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;

		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      rgn23_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);

	mempool_free(pmb, phba->mbox_mem_pool);
	return offset;
}

/**
 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI4 port configure region 23 data through a memory dump
 * mailbox command. When it successfully retrieves data, the size of the data
 * will be returned, otherwise, 0 will be returned.
 **/
static uint32_t
lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_dmabuf *mp = NULL;
	struct lpfc_mqe *mqe;
	uint32_t data_length = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3105 failed to allocate mailbox memory\n");
		return 0;
	}

	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
		goto out;
	mqe = &mboxq->u.mqe;
	mp = (struct lpfc_dmabuf *) mboxq->context1;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc)
		goto out;
	data_length = mqe->un.mb_words[5];
	if (data_length == 0)
		goto out;
	if (data_length > DMP_RGN23_SIZE) {
		data_length = 0;
		goto out;
	}
	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	return data_length;
}

/**
 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads region 23 and parses the TLVs for port status to
 * decide if the user disabled the port. If the TLV indicates the
 * port is disabled, the hba_flag is set accordingly.
18750 **/ 18751 void 18752 lpfc_sli_read_link_ste(struct lpfc_hba *phba) 18753 { 18754 uint8_t *rgn23_data = NULL; 18755 uint32_t if_type, data_size, sub_tlv_len, tlv_offset; 18756 uint32_t offset = 0; 18757 18758 /* Get adapter Region 23 data */ 18759 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL); 18760 if (!rgn23_data) 18761 goto out; 18762 18763 if (phba->sli_rev < LPFC_SLI_REV4) 18764 data_size = lpfc_sli_get_config_region23(phba, rgn23_data); 18765 else { 18766 if_type = bf_get(lpfc_sli_intf_if_type, 18767 &phba->sli4_hba.sli_intf); 18768 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) 18769 goto out; 18770 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data); 18771 } 18772 18773 if (!data_size) 18774 goto out; 18775 18776 /* Check the region signature first */ 18777 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) { 18778 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18779 "2619 Config region 23 has bad signature\n"); 18780 goto out; 18781 } 18782 offset += 4; 18783 18784 /* Check the data structure version */ 18785 if (rgn23_data[offset] != LPFC_REGION23_VERSION) { 18786 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18787 "2620 Config region 23 has bad version\n"); 18788 goto out; 18789 } 18790 offset += 4; 18791 18792 /* Parse TLV entries in the region */ 18793 while (offset < data_size) { 18794 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) 18795 break; 18796 /* 18797 * If the TLV is not driver specific TLV or driver id is 18798 * not linux driver id, skip the record. 18799 */ 18800 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) || 18801 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) || 18802 (rgn23_data[offset + 3] != 0)) { 18803 offset += rgn23_data[offset + 1] * 4 + 4; 18804 continue; 18805 } 18806 18807 /* Driver found a driver specific TLV in the config region */ 18808 sub_tlv_len = rgn23_data[offset + 1] * 4; 18809 offset += 4; 18810 tlv_offset = 0; 18811 18812 /* 18813 * Search for configured port state sub-TLV. 18814 */ 18815 while ((offset < data_size) && 18816 (tlv_offset < sub_tlv_len)) { 18817 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) { 18818 offset += 4; 18819 tlv_offset += 4; 18820 break; 18821 } 18822 if (rgn23_data[offset] != PORT_STE_TYPE) { 18823 offset += rgn23_data[offset + 1] * 4 + 4; 18824 tlv_offset += rgn23_data[offset + 1] * 4 + 4; 18825 continue; 18826 } 18827 18828 /* This HBA contains PORT_STE configured */ 18829 if (!rgn23_data[offset + 2]) 18830 phba->hba_flag |= LINK_DISABLED; 18831 18832 goto out; 18833 } 18834 } 18835 18836 out: 18837 kfree(rgn23_data); 18838 return; 18839 } 18840 18841 /** 18842 * lpfc_wr_object - write an object to the firmware 18843 * @phba: HBA structure that indicates port to create a queue on. 18844 * @dmabuf_list: list of dmabufs to write to the port. 18845 * @size: the total byte value of the objects to write to the port. 18846 * @offset: the current offset to be used to start the transfer. 18847 * 18848 * This routine will create a wr_object mailbox command to send to the port. 18849 * the mailbox command will be constructed using the dma buffers described in 18850 * @dmabuf_list to create a list of BDEs. This routine will fill in as many 18851 * BDEs that the imbedded mailbox can support. The @offset variable will be 18852 * used to indicate the starting offset of the transfer and will also return 18853 * the offset after the write object mailbox has completed. @size is used to 18854 * determine the end of the object and whether the eof bit should be set. 
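 * The mailbox is issued by polling when interrupts are not yet enabled on
 * the port; otherwise the routine waits for the mailbox completion.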
 *
 * Return 0 if successful and offset will contain the new offset to use
 * for the next write.
 * Return a negative value for error cases.
 **/
int
lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
	       uint32_t size, uint32_t *offset)
{
	struct lpfc_mbx_wr_object *wr_object;
	LPFC_MBOXQ_t *mbox;
	int rc = 0, i = 0;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_dmabuf *dmabuf;
	uint32_t written = 0;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_WRITE_OBJECT,
			sizeof(struct lpfc_mbx_wr_object) -
			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
	wr_object->u.request.write_offset = *offset;
	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
	wr_object->u.request.object_name[0] =
		cpu_to_le32(wr_object->u.request.object_name[0]);
	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
	list_for_each_entry(dmabuf, dmabuf_list, list) {
		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
			break;
		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
		wr_object->u.request.bde[i].addrHigh =
			putPaddrHigh(dmabuf->phys);
		if (written + SLI4_PAGE_SIZE >= size) {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				(size - written);
			written += (size - written);
			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
		} else {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				SLI4_PAGE_SIZE;
			written += SLI4_PAGE_SIZE;
		}
		i++;
	}
	wr_object->u.request.bde_count = i;
	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3025 Write Object mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
		*offset = shdr_add_status;
	} else
		*offset += wr_object->u.response.actual_write_length;
	return rc;
}

/**
 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
 * @vport: pointer to vport data structure.
 *
 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 * and REG_VPI mailbox commands associated with the vport. This function
 * is called when the driver wants to restart discovery of the vport due to
 * a Clear Virtual Link event.
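 *
 * Queued REG_LOGIN and REG_VPI commands for the vport are moved off the
 * mailbox queue and released. If such a command is currently active, its
 * completion handler is redirected to lpfc_sli_def_mbox_cmpl so that the
 * associated resources are reclaimed when it completes.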
 **/
void
lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *act_mbx_ndlp = NULL;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LIST_HEAD(mbox_cmd_list);
	uint8_t restart_loop;

	/* Clean up internally queued mailbox commands with the vport */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if (mb->vport != vport)
			continue;

		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
			(mb->u.mb.mbxCommand != MBX_REG_VPI))
			continue;

		list_del(&mb->list);
		list_add_tail(&mb->list, &mbox_cmd_list);
	}
	/* Clean up active mailbox command with the vport */
	mb = phba->sli.mbox_active;
	if (mb && (mb->vport == vport)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
			(mb->u.mb.mbxCommand == MBX_REG_VPI))
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
			/* Put reference count for delayed processing */
			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
			/* Unregister the RPI when mailbox complete */
			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
		}
	}
	/* Cleanup any mailbox completions which are not yet processed */
	do {
		restart_loop = 0;
		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
			/*
			 * If this mailbox has already been processed or it is
			 * for another vport, ignore it.
			 */
			if ((mb->vport != vport) ||
				(mb->mbox_flag & LPFC_MBX_IMED_UNREG))
				continue;

			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
				(mb->u.mb.mbxCommand != MBX_REG_VPI))
				continue;

			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
				ndlp = (struct lpfc_nodelist *)mb->context2;
				/* Unregister the RPI when mailbox complete */
				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
				restart_loop = 1;
				spin_unlock_irq(&phba->hbalock);
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				spin_lock_irq(&phba->hbalock);
				break;
			}
		}
	} while (restart_loop);

	spin_unlock_irq(&phba->hbalock);

	/* Release the cleaned-up mailbox commands */
	while (!list_empty(&mbox_cmd_list)) {
		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			ndlp = (struct lpfc_nodelist *) mb->context2;
			mb->context2 = NULL;
			if (ndlp) {
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				lpfc_nlp_put(ndlp);
			}
		}
		mempool_free(mb, phba->mbox_mem_pool);
	}

	/* Release the ndlp with the cleaned-up active mailbox command */
	if (act_mbx_ndlp) {
		spin_lock(shost->host_lock);
		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock(shost->host_lock);
		lpfc_nlp_put(act_mbx_ndlp);
	}
}

/**
 * lpfc_drain_txq - Drain the txq
 * @phba: Pointer to HBA context object.
 *
 * This function attempts to submit IOCBs on the txq
 * to the adapter. For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no SGLs. This congestion can occur with large
 * vport counts during node discovery.
 **/

uint32_t
lpfc_drain_txq(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *piocbq = NULL;
	unsigned long iflags = 0;
	char *fail_msg = NULL;
	struct lpfc_sglq *sglq;
	union lpfc_wqe128 wqe;
	uint32_t txq_cnt = 0;
	struct lpfc_queue *wq;

	if (phba->link_flag & LS_MDS_LOOPBACK) {
		/* MDS WQEs are posted only to the first WQ */
		wq = phba->sli4_hba.fcp_wq[0];
		if (unlikely(!wq))
			return 0;
		pring = wq->pring;
	} else {
		wq = phba->sli4_hba.els_wq;
		if (unlikely(!wq))
			return 0;
		pring = lpfc_phba_elsring(phba);
	}

	if (unlikely(!pring) || list_empty(&pring->txq))
		return 0;

	spin_lock_irqsave(&pring->ring_lock, iflags);
	list_for_each_entry(piocbq, &pring->txq, list) {
		txq_cnt++;
	}

	if (txq_cnt > pring->txq_max)
		pring->txq_max = txq_cnt;

	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	while (!list_empty(&pring->txq)) {
		spin_lock_irqsave(&pring->ring_lock, iflags);

		piocbq = lpfc_sli_ringtx_get(phba, pring);
		if (!piocbq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2823 txq empty and txq_cnt is %d\n ",
				txq_cnt);
			break;
		}
		sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
		if (!sglq) {
			__lpfc_sli_ringtx_put(phba, pring, piocbq);
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			break;
		}
		txq_cnt--;

		/* The xri and iocb resources secured,
		 * attempt to issue request
		 */
		piocbq->sli4_lxritag = sglq->sli4_lxritag;
		piocbq->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
			fail_msg = "to convert bpl to sgl";
		else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
			fail_msg = "to convert iocb to wqe";
		else if (lpfc_sli4_wq_put(wq, &wqe))
			fail_msg = " - Wq is full";
		else
			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);

		if (fail_msg) {
			/* Failed means we can't issue and need to cancel */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2822 IOCB failed %s iotag 0x%x "
					"xri 0x%x\n",
					fail_msg,
					piocbq->iotag, piocbq->sli4_xritag);
			list_add_tail(&piocbq->list, &completions);
		}
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
	}

	/* Cancel all the IOCBs that cannot be issued */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return txq_cnt;
}

/**
 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @pwqeq: Pointer to command WQE.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * This routine converts the bpl or bde that is in the WQE
 * to a sgl list for the sli4 hardware. The physical address
 * of the bpl/bde is converted back to a virtual address.
 * If the WQE contains a BPL then the list of BDE's is
 * converted to sli4_sge's.
If the WQE contains a single 19157 * BDE then it is converted to a single sli_sge. 19158 * The WQE is still in cpu endianness so the contents of 19159 * the bpl can be used without byte swapping. 19160 * 19161 * Returns valid XRI = Success, NO_XRI = Failure. 19162 */ 19163 static uint16_t 19164 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq, 19165 struct lpfc_sglq *sglq) 19166 { 19167 uint16_t xritag = NO_XRI; 19168 struct ulp_bde64 *bpl = NULL; 19169 struct ulp_bde64 bde; 19170 struct sli4_sge *sgl = NULL; 19171 struct lpfc_dmabuf *dmabuf; 19172 union lpfc_wqe128 *wqe; 19173 int numBdes = 0; 19174 int i = 0; 19175 uint32_t offset = 0; /* accumulated offset in the sg request list */ 19176 int inbound = 0; /* number of sg reply entries inbound from firmware */ 19177 uint32_t cmd; 19178 19179 if (!pwqeq || !sglq) 19180 return xritag; 19181 19182 sgl = (struct sli4_sge *)sglq->sgl; 19183 wqe = &pwqeq->wqe; 19184 pwqeq->iocb.ulpIoTag = pwqeq->iotag; 19185 19186 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com); 19187 if (cmd == CMD_XMIT_BLS_RSP64_WQE) 19188 return sglq->sli4_xritag; 19189 numBdes = pwqeq->rsvd2; 19190 if (numBdes) { 19191 /* The addrHigh and addrLow fields within the WQE 19192 * have not been byteswapped yet so there is no 19193 * need to swap them back. 19194 */ 19195 if (pwqeq->context3) 19196 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3; 19197 else 19198 return xritag; 19199 19200 bpl = (struct ulp_bde64 *)dmabuf->virt; 19201 if (!bpl) 19202 return xritag; 19203 19204 for (i = 0; i < numBdes; i++) { 19205 /* Should already be byte swapped. */ 19206 sgl->addr_hi = bpl->addrHigh; 19207 sgl->addr_lo = bpl->addrLow; 19208 19209 sgl->word2 = le32_to_cpu(sgl->word2); 19210 if ((i+1) == numBdes) 19211 bf_set(lpfc_sli4_sge_last, sgl, 1); 19212 else 19213 bf_set(lpfc_sli4_sge_last, sgl, 0); 19214 /* swap the size field back to the cpu so we 19215 * can assign it to the sgl. 19216 */ 19217 bde.tus.w = le32_to_cpu(bpl->tus.w); 19218 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 19219 /* The offsets in the sgl need to be accumulated 19220 * separately for the request and reply lists. 19221 * The request is always first, the reply follows. 19222 */ 19223 switch (cmd) { 19224 case CMD_GEN_REQUEST64_WQE: 19225 /* add up the reply sg entries */ 19226 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 19227 inbound++; 19228 /* first inbound? reset the offset */ 19229 if (inbound == 1) 19230 offset = 0; 19231 bf_set(lpfc_sli4_sge_offset, sgl, offset); 19232 bf_set(lpfc_sli4_sge_type, sgl, 19233 LPFC_SGE_TYPE_DATA); 19234 offset += bde.tus.f.bdeSize; 19235 break; 19236 case CMD_FCP_TRSP64_WQE: 19237 bf_set(lpfc_sli4_sge_offset, sgl, 0); 19238 bf_set(lpfc_sli4_sge_type, sgl, 19239 LPFC_SGE_TYPE_DATA); 19240 break; 19241 case CMD_FCP_TSEND64_WQE: 19242 case CMD_FCP_TRECEIVE64_WQE: 19243 bf_set(lpfc_sli4_sge_type, sgl, 19244 bpl->tus.f.bdeFlags); 19245 if (i < 3) 19246 offset = 0; 19247 else 19248 offset += bde.tus.f.bdeSize; 19249 bf_set(lpfc_sli4_sge_offset, sgl, offset); 19250 break; 19251 } 19252 sgl->word2 = cpu_to_le32(sgl->word2); 19253 bpl++; 19254 sgl++; 19255 } 19256 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) { 19257 /* The addrHigh and addrLow fields of the BDE have not 19258 * been byteswapped yet so they need to be swapped 19259 * before putting them in the sgl. 
19260 */ 19261 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh); 19262 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow); 19263 sgl->word2 = le32_to_cpu(sgl->word2); 19264 bf_set(lpfc_sli4_sge_last, sgl, 1); 19265 sgl->word2 = cpu_to_le32(sgl->word2); 19266 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize); 19267 } 19268 return sglq->sli4_xritag; 19269 } 19270 19271 /** 19272 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE) 19273 * @phba: Pointer to HBA context object. 19274 * @ring_number: Base sli ring number 19275 * @pwqe: Pointer to command WQE. 19276 **/ 19277 int 19278 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number, 19279 struct lpfc_iocbq *pwqe) 19280 { 19281 union lpfc_wqe128 *wqe = &pwqe->wqe; 19282 struct lpfc_nvmet_rcv_ctx *ctxp; 19283 struct lpfc_queue *wq; 19284 struct lpfc_sglq *sglq; 19285 struct lpfc_sli_ring *pring; 19286 unsigned long iflags; 19287 uint32_t ret = 0; 19288 19289 /* NVME_LS and NVME_LS ABTS requests. */ 19290 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) { 19291 pring = phba->sli4_hba.nvmels_wq->pring; 19292 spin_lock_irqsave(&pring->ring_lock, iflags); 19293 sglq = __lpfc_sli_get_els_sglq(phba, pwqe); 19294 if (!sglq) { 19295 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19296 return WQE_BUSY; 19297 } 19298 pwqe->sli4_lxritag = sglq->sli4_lxritag; 19299 pwqe->sli4_xritag = sglq->sli4_xritag; 19300 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) { 19301 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19302 return WQE_ERROR; 19303 } 19304 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, 19305 pwqe->sli4_xritag); 19306 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe); 19307 if (ret) { 19308 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19309 return ret; 19310 } 19311 19312 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 19313 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19314 return 0; 19315 } 19316 19317 /* NVME_FCREQ and NVME_ABTS requests */ 19318 if (pwqe->iocb_flag & LPFC_IO_NVME) { 19319 /* Get the IO distribution (hba_wqidx) for WQ assignment. */ 19320 pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring; 19321 19322 spin_lock_irqsave(&pring->ring_lock, iflags); 19323 wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]; 19324 bf_set(wqe_cqid, &wqe->generic.wqe_com, 19325 phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id); 19326 ret = lpfc_sli4_wq_put(wq, wqe); 19327 if (ret) { 19328 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19329 return ret; 19330 } 19331 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 19332 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19333 return 0; 19334 } 19335 19336 /* NVMET requests */ 19337 if (pwqe->iocb_flag & LPFC_IO_NVMET) { 19338 /* Get the IO distribution (hba_wqidx) for WQ assignment. 
*/ 19339 pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring; 19340 19341 spin_lock_irqsave(&pring->ring_lock, iflags); 19342 ctxp = pwqe->context2; 19343 sglq = ctxp->ctxbuf->sglq; 19344 if (pwqe->sli4_xritag == NO_XRI) { 19345 pwqe->sli4_lxritag = sglq->sli4_lxritag; 19346 pwqe->sli4_xritag = sglq->sli4_xritag; 19347 } 19348 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, 19349 pwqe->sli4_xritag); 19350 wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]; 19351 bf_set(wqe_cqid, &wqe->generic.wqe_com, 19352 phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id); 19353 ret = lpfc_sli4_wq_put(wq, wqe); 19354 if (ret) { 19355 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19356 return ret; 19357 } 19358 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 19359 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19360 return 0; 19361 } 19362 return WQE_ERROR; 19363 } 19364
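/*
 * Illustrative sketch only, not part of the driver: how a WQE-based
 * submission path is expected to use lpfc_sli4_issue_wqe(). The names
 * "ring_number", "idx" and "rc" are placeholders for this example; the
 * iocb_flag value, hba_wqidx field and WQE_BUSY return are taken from the
 * routine above.
 *
 *	pwqe->iocb_flag |= LPFC_IO_NVME;
 *	pwqe->hba_wqidx = idx;
 *	rc = lpfc_sli4_issue_wqe(phba, ring_number, pwqe);
 *	if (rc == WQE_BUSY)
 *		(no SGL/WQE resources were available; the caller may retry)
 *	else if (rc)
 *		(any other nonzero return is treated as a submission failure)
 */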