/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq,
				    struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_queue *eq,
				     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring,
				   struct lpfc_iocbq *cmdiocb);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of words required to be copied.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will
 * return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = q->qe[q->host_index].wqe;

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->notify_interval))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of Combined Writes */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			       q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			       q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}

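/*
 * Illustrative usage sketch for lpfc_sli4_wq_put(), not taken from the
 * original source (variable names here are assumptions): the caller posts
 * a WQE while holding the hbalock and treats -EBUSY as "work queue full,
 * retry later".
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = lpfc_sli4_wq_put(wq, &wqe);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 */
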
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = q->qe[q->host_index].mqe;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return
 * NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = q->qe[q->host_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
inline void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
inline void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			struct lpfc_eqe *eqe)
{
	if (!phba->sli4_hba.pc_sli4_params.eqav)
		bf_set_le32(lpfc_eqe_valid, eqe, 0);

	eq->host_index = ((eq->host_index + 1) % eq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
		eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}

static void
lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe;
	uint32_t count = 0;

	/* walk all the EQ entries and drop on the floor */
	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		count++;
		eqe = lpfc_sli4_eq_get(eq);
	}

	/* Clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM);
}

static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe;
	int count = 0, consumed = 0;

	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
		__lpfc_sli4_consume_eqe(phba, eq, eqe);

		consumed++;
		if (!(++count % eq->max_proc_limit))
			break;

		if (!(count % eq->notify_interval)) {
			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
		}

		eqe = lpfc_sli4_eq_get(eq);
	}
	eq->EQ_processed += count;

	/* Track the max number of EQEs processed in 1 intr */
	if (count > eq->EQ_max_eqe)
		eq->EQ_max_eqe = count;

	eq->queue_claimed = 0;

rearm_and_exit:
	/* Always clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);

	return count;
}

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return
 * NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	cqe = q->qe[q->host_index].cqe;

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return cqe;
}

static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	if (!phba->sli4_hba.pc_sli4_params.cqav)
		bf_set_le32(lpfc_cqe_valid, cqe, 0);

	cq->host_index = ((cq->host_index + 1) % cq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
		cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}

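/*
 * Note on the consume helpers above: on controllers without the EQ/CQ
 * auto-valid feature (eqav/cqav clear) the driver clears the valid bit of
 * each entry as it is consumed; with auto-valid set, entries are left
 * untouched and the expected valid phase (qe_valid) is toggled each time
 * the host index wraps, so stale entries from the previous pass are never
 * mistaken for new work.
 */
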
/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Receive Queue Entry to put on the header Receive queue.
 * @drqe: The Receive Queue Entry to put on the data Receive queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the
 * Receive Queue Doorbell to signal the HBA to start processing the Receive
 * Queue Entries. This function returns the index that the rqe was copied to
 * if successful. If no entries are available then this function will return
 * -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = hq->qe[hq_put_index].rqe;
	temp_drqe = dq->qe[dq_put_index].rqe;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->notify_interval)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold the hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold the hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq)
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}

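/*
 * RRQ bookkeeping summary (descriptive note): lpfc_set_rrq_active() below
 * marks an xri busy in the node's active_rrqs_xri_bitmap and queues an
 * lpfc_node_rrq on phba->active_rrq_list with a stop time based on the
 * fabric R_A_TOV; the rrq timer worker (lpfc_handle_rrq_active() above)
 * later either sends an RRQ ELS or simply clears the bit, which releases
 * the xri for reuse.
 */
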
/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_nodelist structure.
 *
 * If ndlp is NULL remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)

{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Targets nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	lockdep_assert_held(&phba->hbalock);
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns  0 if the rrq was activated for this xri
 *        < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}

/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the ring lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful and returns a pointer to the
 * newly allocated sglq object, else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;

	lockdep_assert_held(&phba->hbalock);

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
			     ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful and returns a pointer to the
 * newly allocated sglq object, else it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

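/*
 * Illustrative allocation/release pairing, not taken from the original
 * source (the error-path shape below is an assumption): callers that
 * cannot issue the command hand the iocbq back with
 * lpfc_sli_release_iocbq(), defined further below.
 *
 *	struct lpfc_iocbq *iocbq = lpfc_sli_get_iocbq(phba);
 *
 *	if (!iocbq)
 *		return -ENOMEM;
 *	(build the command and set iocbq->iocb_cmpl, then issue it;
 *	 on failure: lpfc_sli_release_iocbq(phba, iocbq);)
 */
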
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	lockdep_assert_held(&phba->hbalock);

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);


	if (sglq) {
		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		pring = phba->sli4_hba.els_wq->pring;
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);

			/* Check if TXQ queue needs to be serviced */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}


/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}

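/*
 * Descriptive note on lpfc_sli_cancel_iocbs(): iocbs without a completion
 * handler are simply returned to the pool, while those with iocb_cmpl set
 * are completed in place with the caller-supplied ulpStatus/ulpWord4, so
 * upper layers see the cancellation the same way they would see a normal
 * failed completion.
 */
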
/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}

/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	lockdep_assert_held(&phba->hbalock);

	BUG_ON(!piocb);

	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
	pring->txcmplq_cnt++;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}

/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}

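/*
 * Queue relationship (descriptive note): pring->txq holds iocbs that are
 * waiting for a free slot in the ring, while pring->txcmplq holds iocbs
 * that have already been posted to the firmware and are awaiting their
 * completions; lpfc_sli_resume_iocb() below drains the txq when slots
 * become available.
 */
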
/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * the iocb slot returned by the function is not guaranteed to be available.
 * The function returns a pointer to the next available iocb slot if there
 * is an available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		     pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}

1688 **/ 1689 uint16_t 1690 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1691 { 1692 struct lpfc_iocbq **new_arr; 1693 struct lpfc_iocbq **old_arr; 1694 size_t new_len; 1695 struct lpfc_sli *psli = &phba->sli; 1696 uint16_t iotag; 1697 1698 spin_lock_irq(&phba->hbalock); 1699 iotag = psli->last_iotag; 1700 if(++iotag < psli->iocbq_lookup_len) { 1701 psli->last_iotag = iotag; 1702 psli->iocbq_lookup[iotag] = iocbq; 1703 spin_unlock_irq(&phba->hbalock); 1704 iocbq->iotag = iotag; 1705 return iotag; 1706 } else if (psli->iocbq_lookup_len < (0xffff 1707 - LPFC_IOCBQ_LOOKUP_INCREMENT)) { 1708 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; 1709 spin_unlock_irq(&phba->hbalock); 1710 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *), 1711 GFP_KERNEL); 1712 if (new_arr) { 1713 spin_lock_irq(&phba->hbalock); 1714 old_arr = psli->iocbq_lookup; 1715 if (new_len <= psli->iocbq_lookup_len) { 1716 /* highly unprobable case */ 1717 kfree(new_arr); 1718 iotag = psli->last_iotag; 1719 if(++iotag < psli->iocbq_lookup_len) { 1720 psli->last_iotag = iotag; 1721 psli->iocbq_lookup[iotag] = iocbq; 1722 spin_unlock_irq(&phba->hbalock); 1723 iocbq->iotag = iotag; 1724 return iotag; 1725 } 1726 spin_unlock_irq(&phba->hbalock); 1727 return 0; 1728 } 1729 if (psli->iocbq_lookup) 1730 memcpy(new_arr, old_arr, 1731 ((psli->last_iotag + 1) * 1732 sizeof (struct lpfc_iocbq *))); 1733 psli->iocbq_lookup = new_arr; 1734 psli->iocbq_lookup_len = new_len; 1735 psli->last_iotag = iotag; 1736 psli->iocbq_lookup[iotag] = iocbq; 1737 spin_unlock_irq(&phba->hbalock); 1738 iocbq->iotag = iotag; 1739 kfree(old_arr); 1740 return iotag; 1741 } 1742 } else 1743 spin_unlock_irq(&phba->hbalock); 1744 1745 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1746 "0318 Failed to allocate IOTAG.last IOTAG is %d\n", 1747 psli->last_iotag); 1748 1749 return 0; 1750 } 1751 1752 /** 1753 * lpfc_sli_submit_iocb - Submit an iocb to the firmware 1754 * @phba: Pointer to HBA context object. 1755 * @pring: Pointer to driver SLI ring object. 1756 * @iocb: Pointer to iocb slot in the ring. 1757 * @nextiocb: Pointer to driver iocb object which need to be 1758 * posted to firmware. 1759 * 1760 * This function is called with hbalock held to post a new iocb to 1761 * the firmware. This function copies the new iocb to ring iocb slot and 1762 * updates the ring pointers. It adds the new iocb to txcmplq if there is 1763 * a completion call back for this iocb else the function will free the 1764 * iocb object. 1765 **/ 1766 static void 1767 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1768 IOCB_t *iocb, struct lpfc_iocbq *nextiocb) 1769 { 1770 lockdep_assert_held(&phba->hbalock); 1771 /* 1772 * Set up an iotag 1773 */ 1774 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0; 1775 1776 1777 if (pring->ringno == LPFC_ELS_RING) { 1778 lpfc_debugfs_slow_ring_trc(phba, 1779 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x", 1780 *(((uint32_t *) &nextiocb->iocb) + 4), 1781 *(((uint32_t *) &nextiocb->iocb) + 6), 1782 *(((uint32_t *) &nextiocb->iocb) + 7)); 1783 } 1784 1785 /* 1786 * Issue iocb command to adapter 1787 */ 1788 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size); 1789 wmb(); 1790 pring->stats.iocb_cmd++; 1791 1792 /* 1793 * If there is no completion routine to call, we can release the 1794 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF, 1795 * that have no rsp ring completion, iocb_cmpl MUST be NULL. 
1796 */ 1797 if (nextiocb->iocb_cmpl) 1798 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 1799 else 1800 __lpfc_sli_release_iocbq(phba, nextiocb); 1801 1802 /* 1803 * Let the HBA know what IOCB slot will be the next one the 1804 * driver will put a command into. 1805 */ 1806 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx; 1807 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); 1808 } 1809 1810 /** 1811 * lpfc_sli_update_full_ring - Update the chip attention register 1812 * @phba: Pointer to HBA context object. 1813 * @pring: Pointer to driver SLI ring object. 1814 * 1815 * The caller is not required to hold any lock for calling this function. 1816 * This function updates the chip attention bits for the ring to inform firmware 1817 * that there are pending work to be done for this ring and requests an 1818 * interrupt when there is space available in the ring. This function is 1819 * called when the driver is unable to post more iocbs to the ring due 1820 * to unavailability of space in the ring. 1821 **/ 1822 static void 1823 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1824 { 1825 int ringno = pring->ringno; 1826 1827 pring->flag |= LPFC_CALL_RING_AVAILABLE; 1828 1829 wmb(); 1830 1831 /* 1832 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register. 1833 * The HBA will tell us when an IOCB entry is available. 1834 */ 1835 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr); 1836 readl(phba->CAregaddr); /* flush */ 1837 1838 pring->stats.iocb_cmd_full++; 1839 } 1840 1841 /** 1842 * lpfc_sli_update_ring - Update chip attention register 1843 * @phba: Pointer to HBA context object. 1844 * @pring: Pointer to driver SLI ring object. 1845 * 1846 * This function updates the chip attention register bit for the 1847 * given ring to inform HBA that there is more work to be done 1848 * in this ring. The caller is not required to hold any lock. 1849 **/ 1850 static void 1851 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1852 { 1853 int ringno = pring->ringno; 1854 1855 /* 1856 * Tell the HBA that there is work to do in this ring. 1857 */ 1858 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) { 1859 wmb(); 1860 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); 1861 readl(phba->CAregaddr); /* flush */ 1862 } 1863 } 1864 1865 /** 1866 * lpfc_sli_resume_iocb - Process iocbs in the txq 1867 * @phba: Pointer to HBA context object. 1868 * @pring: Pointer to driver SLI ring object. 1869 * 1870 * This function is called with hbalock held to post pending iocbs 1871 * in the txq to the firmware. This function is called when driver 1872 * detects space available in the ring. 1873 **/ 1874 static void 1875 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1876 { 1877 IOCB_t *iocb; 1878 struct lpfc_iocbq *nextiocb; 1879 1880 lockdep_assert_held(&phba->hbalock); 1881 1882 /* 1883 * Check to see if: 1884 * (a) there is anything on the txq to send 1885 * (b) link is up 1886 * (c) link attention events can be processed (fcp ring only) 1887 * (d) IOCB processing is not blocked by the outstanding mbox command. 
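 * For the FCP ring this reduces to the check coded just below, shown here
 * only as an illustration:
 *
 *	lpfc_is_link_up(phba) && !list_empty(&pring->txq) &&
 *	(phba->sli.sli_flag & LPFC_PROCESS_LA)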
1888 */ 1889 1890 if (lpfc_is_link_up(phba) && 1891 (!list_empty(&pring->txq)) && 1892 (pring->ringno != LPFC_FCP_RING || 1893 phba->sli.sli_flag & LPFC_PROCESS_LA)) { 1894 1895 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 1896 (nextiocb = lpfc_sli_ringtx_get(phba, pring))) 1897 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 1898 1899 if (iocb) 1900 lpfc_sli_update_ring(phba, pring); 1901 else 1902 lpfc_sli_update_full_ring(phba, pring); 1903 } 1904 1905 return; 1906 } 1907 1908 /** 1909 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ 1910 * @phba: Pointer to HBA context object. 1911 * @hbqno: HBQ number. 1912 * 1913 * This function is called with hbalock held to get the next 1914 * available slot for the given HBQ. If there is free slot 1915 * available for the HBQ it will return pointer to the next available 1916 * HBQ entry else it will return NULL. 1917 **/ 1918 static struct lpfc_hbq_entry * 1919 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) 1920 { 1921 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1922 1923 lockdep_assert_held(&phba->hbalock); 1924 1925 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && 1926 ++hbqp->next_hbqPutIdx >= hbqp->entry_count) 1927 hbqp->next_hbqPutIdx = 0; 1928 1929 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { 1930 uint32_t raw_index = phba->hbq_get[hbqno]; 1931 uint32_t getidx = le32_to_cpu(raw_index); 1932 1933 hbqp->local_hbqGetIdx = getidx; 1934 1935 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { 1936 lpfc_printf_log(phba, KERN_ERR, 1937 LOG_SLI | LOG_VPORT, 1938 "1802 HBQ %d: local_hbqGetIdx " 1939 "%u is > than hbqp->entry_count %u\n", 1940 hbqno, hbqp->local_hbqGetIdx, 1941 hbqp->entry_count); 1942 1943 phba->link_state = LPFC_HBA_ERROR; 1944 return NULL; 1945 } 1946 1947 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) 1948 return NULL; 1949 } 1950 1951 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt + 1952 hbqp->hbqPutIdx; 1953 } 1954 1955 /** 1956 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers 1957 * @phba: Pointer to HBA context object. 1958 * 1959 * This function is called with no lock held to free all the 1960 * hbq buffers while uninitializing the SLI interface. It also 1961 * frees the HBQ buffers returned by the firmware but not yet 1962 * processed by the upper layers. 1963 **/ 1964 void 1965 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) 1966 { 1967 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 1968 struct hbq_dmabuf *hbq_buf; 1969 unsigned long flags; 1970 int i, hbq_count; 1971 1972 hbq_count = lpfc_sli_hbq_count(); 1973 /* Return all memory used by all HBQs */ 1974 spin_lock_irqsave(&phba->hbalock, flags); 1975 for (i = 0; i < hbq_count; ++i) { 1976 list_for_each_entry_safe(dmabuf, next_dmabuf, 1977 &phba->hbqs[i].hbq_buffer_list, list) { 1978 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1979 list_del(&hbq_buf->dbuf.list); 1980 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); 1981 } 1982 phba->hbqs[i].buffer_count = 0; 1983 } 1984 1985 /* Mark the HBQs not in use */ 1986 phba->hbq_in_use = 0; 1987 spin_unlock_irqrestore(&phba->hbalock, flags); 1988 } 1989 1990 /** 1991 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware 1992 * @phba: Pointer to HBA context object. 1993 * @hbqno: HBQ number. 1994 * @hbq_buf: Pointer to HBQ buffer. 1995 * 1996 * This function is called with the hbalock held to post a 1997 * hbq buffer to the firmware. If the function finds an empty 1998 * slot in the HBQ, it will post the buffer. 
The function will return 1999 * pointer to the hbq entry if it successfully post the buffer 2000 * else it will return NULL. 2001 **/ 2002 static int 2003 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 2004 struct hbq_dmabuf *hbq_buf) 2005 { 2006 lockdep_assert_held(&phba->hbalock); 2007 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf); 2008 } 2009 2010 /** 2011 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware 2012 * @phba: Pointer to HBA context object. 2013 * @hbqno: HBQ number. 2014 * @hbq_buf: Pointer to HBQ buffer. 2015 * 2016 * This function is called with the hbalock held to post a hbq buffer to the 2017 * firmware. If the function finds an empty slot in the HBQ, it will post the 2018 * buffer and place it on the hbq_buffer_list. The function will return zero if 2019 * it successfully post the buffer else it will return an error. 2020 **/ 2021 static int 2022 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, 2023 struct hbq_dmabuf *hbq_buf) 2024 { 2025 struct lpfc_hbq_entry *hbqe; 2026 dma_addr_t physaddr = hbq_buf->dbuf.phys; 2027 2028 lockdep_assert_held(&phba->hbalock); 2029 /* Get next HBQ entry slot to use */ 2030 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); 2031 if (hbqe) { 2032 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 2033 2034 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 2035 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr)); 2036 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size; 2037 hbqe->bde.tus.f.bdeFlags = 0; 2038 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w); 2039 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag); 2040 /* Sync SLIM */ 2041 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx; 2042 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno); 2043 /* flush */ 2044 readl(phba->hbq_put + hbqno); 2045 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); 2046 return 0; 2047 } else 2048 return -ENOMEM; 2049 } 2050 2051 /** 2052 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware 2053 * @phba: Pointer to HBA context object. 2054 * @hbqno: HBQ number. 2055 * @hbq_buf: Pointer to HBQ buffer. 2056 * 2057 * This function is called with the hbalock held to post an RQE to the SLI4 2058 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to 2059 * the hbq_buffer_list and return zero, otherwise it will return an error. 2060 **/ 2061 static int 2062 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, 2063 struct hbq_dmabuf *hbq_buf) 2064 { 2065 int rc; 2066 struct lpfc_rqe hrqe; 2067 struct lpfc_rqe drqe; 2068 struct lpfc_queue *hrq; 2069 struct lpfc_queue *drq; 2070 2071 if (hbqno != LPFC_ELS_HBQ) 2072 return 1; 2073 hrq = phba->sli4_hba.hdr_rq; 2074 drq = phba->sli4_hba.dat_rq; 2075 2076 lockdep_assert_held(&phba->hbalock); 2077 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); 2078 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); 2079 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); 2080 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys); 2081 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); 2082 if (rc < 0) 2083 return rc; 2084 hbq_buf->tag = (rc | (hbqno << 16)); 2085 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list); 2086 return 0; 2087 } 2088 2089 /* HBQ for ELS and CT traffic. 
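 * The counts below feed the posting helpers later in this file; for
 * example, lpfc_sli_hbqbuf_add_hbqs() effectively performs (illustrative):
 *
 *	lpfc_sli_hbqbuf_fill_hbqs(phba, qno, lpfc_hbq_defs[qno]->add_count);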
*/ 2090 static struct lpfc_hbq_init lpfc_els_hbq = { 2091 .rn = 1, 2092 .entry_count = 256, 2093 .mask_count = 0, 2094 .profile = 0, 2095 .ring_mask = (1 << LPFC_ELS_RING), 2096 .buffer_count = 0, 2097 .init_count = 40, 2098 .add_count = 40, 2099 }; 2100 2101 /* Array of HBQs */ 2102 struct lpfc_hbq_init *lpfc_hbq_defs[] = { 2103 &lpfc_els_hbq, 2104 }; 2105 2106 /** 2107 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ 2108 * @phba: Pointer to HBA context object. 2109 * @hbqno: HBQ number. 2110 * @count: Number of HBQ buffers to be posted. 2111 * 2112 * This function is called with no lock held to post more hbq buffers to the 2113 * given HBQ. The function returns the number of HBQ buffers successfully 2114 * posted. 2115 **/ 2116 static int 2117 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) 2118 { 2119 uint32_t i, posted = 0; 2120 unsigned long flags; 2121 struct hbq_dmabuf *hbq_buffer; 2122 LIST_HEAD(hbq_buf_list); 2123 if (!phba->hbqs[hbqno].hbq_alloc_buffer) 2124 return 0; 2125 2126 if ((phba->hbqs[hbqno].buffer_count + count) > 2127 lpfc_hbq_defs[hbqno]->entry_count) 2128 count = lpfc_hbq_defs[hbqno]->entry_count - 2129 phba->hbqs[hbqno].buffer_count; 2130 if (!count) 2131 return 0; 2132 /* Allocate HBQ entries */ 2133 for (i = 0; i < count; i++) { 2134 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 2135 if (!hbq_buffer) 2136 break; 2137 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list); 2138 } 2139 /* Check whether HBQ is still in use */ 2140 spin_lock_irqsave(&phba->hbalock, flags); 2141 if (!phba->hbq_in_use) 2142 goto err; 2143 while (!list_empty(&hbq_buf_list)) { 2144 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 2145 dbuf.list); 2146 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 2147 (hbqno << 16)); 2148 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 2149 phba->hbqs[hbqno].buffer_count++; 2150 posted++; 2151 } else 2152 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2153 } 2154 spin_unlock_irqrestore(&phba->hbalock, flags); 2155 return posted; 2156 err: 2157 spin_unlock_irqrestore(&phba->hbalock, flags); 2158 while (!list_empty(&hbq_buf_list)) { 2159 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 2160 dbuf.list); 2161 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2162 } 2163 return 0; 2164 } 2165 2166 /** 2167 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware 2168 * @phba: Pointer to HBA context object. 2169 * @qno: HBQ number. 2170 * 2171 * This function posts more buffers to the HBQ. This function 2172 * is called with no lock held. The function returns the number of HBQ entries 2173 * successfully allocated. 2174 **/ 2175 int 2176 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 2177 { 2178 if (phba->sli_rev == LPFC_SLI_REV4) 2179 return 0; 2180 else 2181 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 2182 lpfc_hbq_defs[qno]->add_count); 2183 } 2184 2185 /** 2186 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ 2187 * @phba: Pointer to HBA context object. 2188 * @qno: HBQ queue number. 2189 * 2190 * This function is called from SLI initialization code path with 2191 * no lock held to post initial HBQ buffers to firmware. The 2192 * function returns the number of HBQ entries successfully allocated. 
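 * Example call during setup (illustrative; LPFC_ELS_HBQ is the only HBQ
 * described by lpfc_hbq_defs[] above):
 *
 *	if (!lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ))
 *		no receive buffers could be posted for this HBQ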
2193 **/ 2194 static int 2195 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) 2196 { 2197 if (phba->sli_rev == LPFC_SLI_REV4) 2198 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 2199 lpfc_hbq_defs[qno]->entry_count); 2200 else 2201 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 2202 lpfc_hbq_defs[qno]->init_count); 2203 } 2204 2205 /** 2206 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list 2207 * @phba: Pointer to HBA context object. 2208 * @hbqno: HBQ number. 2209 * 2210 * This function removes the first hbq buffer on an hbq list and returns a 2211 * pointer to that buffer. If it finds no buffers on the list it returns NULL. 2212 **/ 2213 static struct hbq_dmabuf * 2214 lpfc_sli_hbqbuf_get(struct list_head *rb_list) 2215 { 2216 struct lpfc_dmabuf *d_buf; 2217 2218 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list); 2219 if (!d_buf) 2220 return NULL; 2221 return container_of(d_buf, struct hbq_dmabuf, dbuf); 2222 } 2223 2224 /** 2225 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list 2226 * @phba: Pointer to HBA context object. 2227 * @hbqno: HBQ number. 2228 * 2229 * This function removes the first RQ buffer on an RQ buffer list and returns a 2230 * pointer to that buffer. If it finds no buffers on the list it returns NULL. 2231 **/ 2232 static struct rqb_dmabuf * 2233 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq) 2234 { 2235 struct lpfc_dmabuf *h_buf; 2236 struct lpfc_rqb *rqbp; 2237 2238 rqbp = hrq->rqbp; 2239 list_remove_head(&rqbp->rqb_buffer_list, h_buf, 2240 struct lpfc_dmabuf, list); 2241 if (!h_buf) 2242 return NULL; 2243 rqbp->buffer_count--; 2244 return container_of(h_buf, struct rqb_dmabuf, hbuf); 2245 } 2246 2247 /** 2248 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag 2249 * @phba: Pointer to HBA context object. 2250 * @tag: Tag of the hbq buffer. 2251 * 2252 * This function searches for the hbq buffer associated with the given tag in 2253 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer 2254 * otherwise it returns NULL. 2255 **/ 2256 static struct hbq_dmabuf * 2257 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) 2258 { 2259 struct lpfc_dmabuf *d_buf; 2260 struct hbq_dmabuf *hbq_buf; 2261 uint32_t hbqno; 2262 2263 hbqno = tag >> 16; 2264 if (hbqno >= LPFC_MAX_HBQS) 2265 return NULL; 2266 2267 spin_lock_irq(&phba->hbalock); 2268 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { 2269 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 2270 if (hbq_buf->tag == tag) { 2271 spin_unlock_irq(&phba->hbalock); 2272 return hbq_buf; 2273 } 2274 } 2275 spin_unlock_irq(&phba->hbalock); 2276 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 2277 "1803 Bad hbq tag. Data: x%x x%x\n", 2278 tag, phba->hbqs[tag >> 16].buffer_count); 2279 return NULL; 2280 } 2281 2282 /** 2283 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware 2284 * @phba: Pointer to HBA context object. 2285 * @hbq_buffer: Pointer to HBQ buffer. 2286 * 2287 * This function is called with hbalock. This function gives back 2288 * the hbq buffer to firmware. If the HBQ does not have space to 2289 * post the buffer, it will free the buffer. 
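 * The HBQ number is recovered from the buffer tag, which is laid out as
 * (hbqno << 16) | buffer_index when the buffer is posted, e.g.
 * (illustrative):
 *
 *	hbqno = hbq_buffer->tag >> 16;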
2290 **/ 2291 void 2292 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) 2293 { 2294 uint32_t hbqno; 2295 2296 if (hbq_buffer) { 2297 hbqno = hbq_buffer->tag >> 16; 2298 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) 2299 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2300 } 2301 } 2302 2303 /** 2304 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox 2305 * @mbxCommand: mailbox command code. 2306 * 2307 * This function is called by the mailbox event handler function to verify 2308 * that the completed mailbox command is a legitimate mailbox command. If the 2309 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN 2310 * and the mailbox event handler will take the HBA offline. 2311 **/ 2312 static int 2313 lpfc_sli_chk_mbx_command(uint8_t mbxCommand) 2314 { 2315 uint8_t ret; 2316 2317 switch (mbxCommand) { 2318 case MBX_LOAD_SM: 2319 case MBX_READ_NV: 2320 case MBX_WRITE_NV: 2321 case MBX_WRITE_VPARMS: 2322 case MBX_RUN_BIU_DIAG: 2323 case MBX_INIT_LINK: 2324 case MBX_DOWN_LINK: 2325 case MBX_CONFIG_LINK: 2326 case MBX_CONFIG_RING: 2327 case MBX_RESET_RING: 2328 case MBX_READ_CONFIG: 2329 case MBX_READ_RCONFIG: 2330 case MBX_READ_SPARM: 2331 case MBX_READ_STATUS: 2332 case MBX_READ_RPI: 2333 case MBX_READ_XRI: 2334 case MBX_READ_REV: 2335 case MBX_READ_LNK_STAT: 2336 case MBX_REG_LOGIN: 2337 case MBX_UNREG_LOGIN: 2338 case MBX_CLEAR_LA: 2339 case MBX_DUMP_MEMORY: 2340 case MBX_DUMP_CONTEXT: 2341 case MBX_RUN_DIAGS: 2342 case MBX_RESTART: 2343 case MBX_UPDATE_CFG: 2344 case MBX_DOWN_LOAD: 2345 case MBX_DEL_LD_ENTRY: 2346 case MBX_RUN_PROGRAM: 2347 case MBX_SET_MASK: 2348 case MBX_SET_VARIABLE: 2349 case MBX_UNREG_D_ID: 2350 case MBX_KILL_BOARD: 2351 case MBX_CONFIG_FARP: 2352 case MBX_BEACON: 2353 case MBX_LOAD_AREA: 2354 case MBX_RUN_BIU_DIAG64: 2355 case MBX_CONFIG_PORT: 2356 case MBX_READ_SPARM64: 2357 case MBX_READ_RPI64: 2358 case MBX_REG_LOGIN64: 2359 case MBX_READ_TOPOLOGY: 2360 case MBX_WRITE_WWN: 2361 case MBX_SET_DEBUG: 2362 case MBX_LOAD_EXP_ROM: 2363 case MBX_ASYNCEVT_ENABLE: 2364 case MBX_REG_VPI: 2365 case MBX_UNREG_VPI: 2366 case MBX_HEARTBEAT: 2367 case MBX_PORT_CAPABILITIES: 2368 case MBX_PORT_IOV_CONTROL: 2369 case MBX_SLI4_CONFIG: 2370 case MBX_SLI4_REQ_FTRS: 2371 case MBX_REG_FCFI: 2372 case MBX_UNREG_FCFI: 2373 case MBX_REG_VFI: 2374 case MBX_UNREG_VFI: 2375 case MBX_INIT_VPI: 2376 case MBX_INIT_VFI: 2377 case MBX_RESUME_RPI: 2378 case MBX_READ_EVENT_LOG_STATUS: 2379 case MBX_READ_EVENT_LOG: 2380 case MBX_SECURITY_MGMT: 2381 case MBX_AUTH_PORT: 2382 case MBX_ACCESS_VDATA: 2383 ret = mbxCommand; 2384 break; 2385 default: 2386 ret = MBX_SHUTDOWN; 2387 break; 2388 } 2389 return ret; 2390 } 2391 2392 /** 2393 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler 2394 * @phba: Pointer to HBA context object. 2395 * @pmboxq: Pointer to mailbox command. 2396 * 2397 * This is completion handler function for mailbox commands issued from 2398 * lpfc_sli_issue_mbox_wait function. This function is called by the 2399 * mailbox event handler function with no lock held. This function 2400 * will wake up thread waiting on the wait queue pointed by context1 2401 * of the mailbox. 2402 **/ 2403 void 2404 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 2405 { 2406 unsigned long drvr_flag; 2407 struct completion *pmbox_done; 2408 2409 /* 2410 * If pmbox_done is empty, the driver thread gave up waiting and 2411 * continued running. 
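 * The waiting side typically follows the sketch below (illustrative only;
 * timeout stands for whatever jiffies value the issuer chose):
 *
 *	DECLARE_COMPLETION_ONSTACK(mbox_done);
 *
 *	pmboxq->context3 = &mbox_done;
 *	issue the mailbox with MBX_NOWAIT, then block:
 *	wait_for_completion_timeout(&mbox_done, timeout);
 *	pmboxq->context3 = NULL;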
2412 */ 2413 pmboxq->mbox_flag |= LPFC_MBX_WAKE; 2414 spin_lock_irqsave(&phba->hbalock, drvr_flag); 2415 pmbox_done = (struct completion *)pmboxq->context3; 2416 if (pmbox_done) 2417 complete(pmbox_done); 2418 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2419 return; 2420 } 2421 2422 2423 /** 2424 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler 2425 * @phba: Pointer to HBA context object. 2426 * @pmb: Pointer to mailbox object. 2427 * 2428 * This function is the default mailbox completion handler. It 2429 * frees the memory resources associated with the completed mailbox 2430 * command. If the completed command is a REG_LOGIN mailbox command, 2431 * this function will issue a UREG_LOGIN to re-claim the RPI. 2432 **/ 2433 void 2434 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2435 { 2436 struct lpfc_vport *vport = pmb->vport; 2437 struct lpfc_dmabuf *mp; 2438 struct lpfc_nodelist *ndlp; 2439 struct Scsi_Host *shost; 2440 uint16_t rpi, vpi; 2441 int rc; 2442 2443 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); 2444 2445 if (mp) { 2446 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2447 kfree(mp); 2448 } 2449 2450 /* 2451 * If a REG_LOGIN succeeded after node is destroyed or node 2452 * is in re-discovery driver need to cleanup the RPI. 2453 */ 2454 if (!(phba->pport->load_flag & FC_UNLOADING) && 2455 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 && 2456 !pmb->u.mb.mbxStatus) { 2457 rpi = pmb->u.mb.un.varWords[0]; 2458 vpi = pmb->u.mb.un.varRegLogin.vpi; 2459 lpfc_unreg_login(phba, vpi, rpi, pmb); 2460 pmb->vport = vport; 2461 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2462 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2463 if (rc != MBX_NOT_FINISHED) 2464 return; 2465 } 2466 2467 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && 2468 !(phba->pport->load_flag & FC_UNLOADING) && 2469 !pmb->u.mb.mbxStatus) { 2470 shost = lpfc_shost_from_vport(vport); 2471 spin_lock_irq(shost->host_lock); 2472 vport->vpi_state |= LPFC_VPI_REGISTERED; 2473 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 2474 spin_unlock_irq(shost->host_lock); 2475 } 2476 2477 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 2478 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 2479 lpfc_nlp_put(ndlp); 2480 pmb->ctx_buf = NULL; 2481 pmb->ctx_ndlp = NULL; 2482 } 2483 2484 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { 2485 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 2486 2487 /* Check to see if there are any deferred events to process */ 2488 if (ndlp) { 2489 lpfc_printf_vlog( 2490 vport, 2491 KERN_INFO, LOG_MBOX | LOG_DISCOVERY, 2492 "1438 UNREG cmpl deferred mbox x%x " 2493 "on NPort x%x Data: x%x x%x %p\n", 2494 ndlp->nlp_rpi, ndlp->nlp_DID, 2495 ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp); 2496 2497 if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2498 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { 2499 ndlp->nlp_flag &= ~NLP_UNREG_INP; 2500 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; 2501 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 2502 } else { 2503 ndlp->nlp_flag &= ~NLP_UNREG_INP; 2504 } 2505 } 2506 pmb->ctx_ndlp = NULL; 2507 } 2508 2509 /* Check security permission status on INIT_LINK mailbox command */ 2510 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) && 2511 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) 2512 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2513 "2860 SLI authentication is required " 2514 "for INIT_LINK but has not done yet\n"); 2515 2516 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) 2517 lpfc_sli4_mbox_cmd_free(phba, pmb); 2518 else 2519 mempool_free(pmb, 
phba->mbox_mem_pool); 2520 } 2521 /** 2522 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler 2523 * @phba: Pointer to HBA context object. 2524 * @pmb: Pointer to mailbox object. 2525 * 2526 * This function is the unreg rpi mailbox completion handler. It 2527 * frees the memory resources associated with the completed mailbox 2528 * command. An additional refrenece is put on the ndlp to prevent 2529 * lpfc_nlp_release from freeing the rpi bit in the bitmask before 2530 * the unreg mailbox command completes, this routine puts the 2531 * reference back. 2532 * 2533 **/ 2534 void 2535 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2536 { 2537 struct lpfc_vport *vport = pmb->vport; 2538 struct lpfc_nodelist *ndlp; 2539 2540 ndlp = pmb->ctx_ndlp; 2541 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { 2542 if (phba->sli_rev == LPFC_SLI_REV4 && 2543 (bf_get(lpfc_sli_intf_if_type, 2544 &phba->sli4_hba.sli_intf) >= 2545 LPFC_SLI_INTF_IF_TYPE_2)) { 2546 if (ndlp) { 2547 lpfc_printf_vlog( 2548 vport, KERN_INFO, LOG_MBOX | LOG_SLI, 2549 "0010 UNREG_LOGIN vpi:%x " 2550 "rpi:%x DID:%x defer x%x flg x%x " 2551 "map:%x %p\n", 2552 vport->vpi, ndlp->nlp_rpi, 2553 ndlp->nlp_DID, ndlp->nlp_defer_did, 2554 ndlp->nlp_flag, 2555 ndlp->nlp_usg_map, ndlp); 2556 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 2557 lpfc_nlp_put(ndlp); 2558 2559 /* Check to see if there are any deferred 2560 * events to process 2561 */ 2562 if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2563 (ndlp->nlp_defer_did != 2564 NLP_EVT_NOTHING_PENDING)) { 2565 lpfc_printf_vlog( 2566 vport, KERN_INFO, LOG_DISCOVERY, 2567 "4111 UNREG cmpl deferred " 2568 "clr x%x on " 2569 "NPort x%x Data: x%x %p\n", 2570 ndlp->nlp_rpi, ndlp->nlp_DID, 2571 ndlp->nlp_defer_did, ndlp); 2572 ndlp->nlp_flag &= ~NLP_UNREG_INP; 2573 ndlp->nlp_defer_did = 2574 NLP_EVT_NOTHING_PENDING; 2575 lpfc_issue_els_plogi( 2576 vport, ndlp->nlp_DID, 0); 2577 } else { 2578 ndlp->nlp_flag &= ~NLP_UNREG_INP; 2579 } 2580 } 2581 } 2582 } 2583 2584 mempool_free(pmb, phba->mbox_mem_pool); 2585 } 2586 2587 /** 2588 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware 2589 * @phba: Pointer to HBA context object. 2590 * 2591 * This function is called with no lock held. This function processes all 2592 * the completed mailbox commands and gives it to upper layers. The interrupt 2593 * service routine processes mailbox completion interrupt and adds completed 2594 * mailbox commands to the mboxq_cmpl queue and signals the worker thread. 2595 * Worker thread call lpfc_sli_handle_mb_event, which will return the 2596 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This 2597 * function returns the mailbox commands to the upper layer by calling the 2598 * completion handler function of each mailbox. 
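 * A completion handler hung off pmb->mbox_cmpl uses the same signature as
 * lpfc_sli_def_mbox_cmpl above, e.g. (hypothetical handler name):
 *
 *	void my_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb);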
2599 **/ 2600 int 2601 lpfc_sli_handle_mb_event(struct lpfc_hba *phba) 2602 { 2603 MAILBOX_t *pmbox; 2604 LPFC_MBOXQ_t *pmb; 2605 int rc; 2606 LIST_HEAD(cmplq); 2607 2608 phba->sli.slistat.mbox_event++; 2609 2610 /* Get all completed mailboxe buffers into the cmplq */ 2611 spin_lock_irq(&phba->hbalock); 2612 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq); 2613 spin_unlock_irq(&phba->hbalock); 2614 2615 /* Get a Mailbox buffer to setup mailbox commands for callback */ 2616 do { 2617 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list); 2618 if (pmb == NULL) 2619 break; 2620 2621 pmbox = &pmb->u.mb; 2622 2623 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 2624 if (pmb->vport) { 2625 lpfc_debugfs_disc_trc(pmb->vport, 2626 LPFC_DISC_TRC_MBOX_VPORT, 2627 "MBOX cmpl vport: cmd:x%x mb:x%x x%x", 2628 (uint32_t)pmbox->mbxCommand, 2629 pmbox->un.varWords[0], 2630 pmbox->un.varWords[1]); 2631 } 2632 else { 2633 lpfc_debugfs_disc_trc(phba->pport, 2634 LPFC_DISC_TRC_MBOX, 2635 "MBOX cmpl: cmd:x%x mb:x%x x%x", 2636 (uint32_t)pmbox->mbxCommand, 2637 pmbox->un.varWords[0], 2638 pmbox->un.varWords[1]); 2639 } 2640 } 2641 2642 /* 2643 * It is a fatal error if unknown mbox command completion. 2644 */ 2645 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == 2646 MBX_SHUTDOWN) { 2647 /* Unknown mailbox command compl */ 2648 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2649 "(%d):0323 Unknown Mailbox command " 2650 "x%x (x%x/x%x) Cmpl\n", 2651 pmb->vport ? pmb->vport->vpi : 0, 2652 pmbox->mbxCommand, 2653 lpfc_sli_config_mbox_subsys_get(phba, 2654 pmb), 2655 lpfc_sli_config_mbox_opcode_get(phba, 2656 pmb)); 2657 phba->link_state = LPFC_HBA_ERROR; 2658 phba->work_hs = HS_FFER3; 2659 lpfc_handle_eratt(phba); 2660 continue; 2661 } 2662 2663 if (pmbox->mbxStatus) { 2664 phba->sli.slistat.mbox_stat_err++; 2665 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 2666 /* Mbox cmd cmpl error - RETRYing */ 2667 lpfc_printf_log(phba, KERN_INFO, 2668 LOG_MBOX | LOG_SLI, 2669 "(%d):0305 Mbox cmd cmpl " 2670 "error - RETRYing Data: x%x " 2671 "(x%x/x%x) x%x x%x x%x\n", 2672 pmb->vport ? pmb->vport->vpi : 0, 2673 pmbox->mbxCommand, 2674 lpfc_sli_config_mbox_subsys_get(phba, 2675 pmb), 2676 lpfc_sli_config_mbox_opcode_get(phba, 2677 pmb), 2678 pmbox->mbxStatus, 2679 pmbox->un.varWords[0], 2680 pmb->vport->port_state); 2681 pmbox->mbxStatus = 0; 2682 pmbox->mbxOwner = OWN_HOST; 2683 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2684 if (rc != MBX_NOT_FINISHED) 2685 continue; 2686 } 2687 } 2688 2689 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 2690 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2691 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " 2692 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 2693 "x%x x%x x%x\n", 2694 pmb->vport ? pmb->vport->vpi : 0, 2695 pmbox->mbxCommand, 2696 lpfc_sli_config_mbox_subsys_get(phba, pmb), 2697 lpfc_sli_config_mbox_opcode_get(phba, pmb), 2698 pmb->mbox_cmpl, 2699 *((uint32_t *) pmbox), 2700 pmbox->un.varWords[0], 2701 pmbox->un.varWords[1], 2702 pmbox->un.varWords[2], 2703 pmbox->un.varWords[3], 2704 pmbox->un.varWords[4], 2705 pmbox->un.varWords[5], 2706 pmbox->un.varWords[6], 2707 pmbox->un.varWords[7], 2708 pmbox->un.varWords[8], 2709 pmbox->un.varWords[9], 2710 pmbox->un.varWords[10]); 2711 2712 if (pmb->mbox_cmpl) 2713 pmb->mbox_cmpl(phba,pmb); 2714 } while (1); 2715 return 0; 2716 } 2717 2718 /** 2719 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag 2720 * @phba: Pointer to HBA context object. 2721 * @pring: Pointer to driver SLI ring object. 2722 * @tag: buffer tag. 
2723 * 2724 * This function is called with no lock held. When QUE_BUFTAG_BIT bit 2725 * is set in the tag the buffer is posted for a particular exchange, 2726 * the function will return the buffer without replacing the buffer. 2727 * If the buffer is for unsolicited ELS or CT traffic, this function 2728 * returns the buffer and also posts another buffer to the firmware. 2729 **/ 2730 static struct lpfc_dmabuf * 2731 lpfc_sli_get_buff(struct lpfc_hba *phba, 2732 struct lpfc_sli_ring *pring, 2733 uint32_t tag) 2734 { 2735 struct hbq_dmabuf *hbq_entry; 2736 2737 if (tag & QUE_BUFTAG_BIT) 2738 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); 2739 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); 2740 if (!hbq_entry) 2741 return NULL; 2742 return &hbq_entry->dbuf; 2743 } 2744 2745 /** 2746 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence 2747 * @phba: Pointer to HBA context object. 2748 * @pring: Pointer to driver SLI ring object. 2749 * @saveq: Pointer to the iocbq struct representing the sequence starting frame. 2750 * @fch_r_ctl: the r_ctl for the first frame of the sequence. 2751 * @fch_type: the type for the first frame of the sequence. 2752 * 2753 * This function is called with no lock held. This function uses the r_ctl and 2754 * type of the received sequence to find the correct callback function to call 2755 * to process the sequence. 2756 **/ 2757 static int 2758 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2759 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, 2760 uint32_t fch_type) 2761 { 2762 int i; 2763 2764 switch (fch_type) { 2765 case FC_TYPE_NVME: 2766 lpfc_nvmet_unsol_ls_event(phba, pring, saveq); 2767 return 1; 2768 default: 2769 break; 2770 } 2771 2772 /* unSolicited Responses */ 2773 if (pring->prt[0].profile) { 2774 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 2775 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, 2776 saveq); 2777 return 1; 2778 } 2779 /* We must search, based on rctl / type 2780 for the right routine */ 2781 for (i = 0; i < pring->num_mask; i++) { 2782 if ((pring->prt[i].rctl == fch_r_ctl) && 2783 (pring->prt[i].type == fch_type)) { 2784 if (pring->prt[i].lpfc_sli_rcv_unsol_event) 2785 (pring->prt[i].lpfc_sli_rcv_unsol_event) 2786 (phba, pring, saveq); 2787 return 1; 2788 } 2789 } 2790 return 0; 2791 } 2792 2793 /** 2794 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 2795 * @phba: Pointer to HBA context object. 2796 * @pring: Pointer to driver SLI ring object. 2797 * @saveq: Pointer to the unsolicited iocb. 2798 * 2799 * This function is called with no lock held by the ring event handler 2800 * when there is an unsolicited iocb posted to the response ring by the 2801 * firmware. This function gets the buffer associated with the iocbs 2802 * and calls the event handler for the ring. This function handles both 2803 * qring buffers and hbq buffers. 2804 * When the function returns 1 the caller can free the iocb object otherwise 2805 * upper layer functions will free the iocb objects. 
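 * Callers therefore typically act on the return value as follows
 * (illustrative):
 *
 *	if (lpfc_sli_process_unsol_iocb(phba, pring, saveq))
 *		saveq may be released by the caller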
2806 **/ 2807 static int 2808 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2809 struct lpfc_iocbq *saveq) 2810 { 2811 IOCB_t * irsp; 2812 WORD5 * w5p; 2813 uint32_t Rctl, Type; 2814 struct lpfc_iocbq *iocbq; 2815 struct lpfc_dmabuf *dmzbuf; 2816 2817 irsp = &(saveq->iocb); 2818 2819 if (irsp->ulpCommand == CMD_ASYNC_STATUS) { 2820 if (pring->lpfc_sli_rcv_async_status) 2821 pring->lpfc_sli_rcv_async_status(phba, pring, saveq); 2822 else 2823 lpfc_printf_log(phba, 2824 KERN_WARNING, 2825 LOG_SLI, 2826 "0316 Ring %d handler: unexpected " 2827 "ASYNC_STATUS iocb received evt_code " 2828 "0x%x\n", 2829 pring->ringno, 2830 irsp->un.asyncstat.evt_code); 2831 return 1; 2832 } 2833 2834 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && 2835 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { 2836 if (irsp->ulpBdeCount > 0) { 2837 dmzbuf = lpfc_sli_get_buff(phba, pring, 2838 irsp->un.ulpWord[3]); 2839 lpfc_in_buf_free(phba, dmzbuf); 2840 } 2841 2842 if (irsp->ulpBdeCount > 1) { 2843 dmzbuf = lpfc_sli_get_buff(phba, pring, 2844 irsp->unsli3.sli3Words[3]); 2845 lpfc_in_buf_free(phba, dmzbuf); 2846 } 2847 2848 if (irsp->ulpBdeCount > 2) { 2849 dmzbuf = lpfc_sli_get_buff(phba, pring, 2850 irsp->unsli3.sli3Words[7]); 2851 lpfc_in_buf_free(phba, dmzbuf); 2852 } 2853 2854 return 1; 2855 } 2856 2857 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2858 if (irsp->ulpBdeCount != 0) { 2859 saveq->context2 = lpfc_sli_get_buff(phba, pring, 2860 irsp->un.ulpWord[3]); 2861 if (!saveq->context2) 2862 lpfc_printf_log(phba, 2863 KERN_ERR, 2864 LOG_SLI, 2865 "0341 Ring %d Cannot find buffer for " 2866 "an unsolicited iocb. tag 0x%x\n", 2867 pring->ringno, 2868 irsp->un.ulpWord[3]); 2869 } 2870 if (irsp->ulpBdeCount == 2) { 2871 saveq->context3 = lpfc_sli_get_buff(phba, pring, 2872 irsp->unsli3.sli3Words[7]); 2873 if (!saveq->context3) 2874 lpfc_printf_log(phba, 2875 KERN_ERR, 2876 LOG_SLI, 2877 "0342 Ring %d Cannot find buffer for an" 2878 " unsolicited iocb. tag 0x%x\n", 2879 pring->ringno, 2880 irsp->unsli3.sli3Words[7]); 2881 } 2882 list_for_each_entry(iocbq, &saveq->list, list) { 2883 irsp = &(iocbq->iocb); 2884 if (irsp->ulpBdeCount != 0) { 2885 iocbq->context2 = lpfc_sli_get_buff(phba, pring, 2886 irsp->un.ulpWord[3]); 2887 if (!iocbq->context2) 2888 lpfc_printf_log(phba, 2889 KERN_ERR, 2890 LOG_SLI, 2891 "0343 Ring %d Cannot find " 2892 "buffer for an unsolicited iocb" 2893 ". tag 0x%x\n", pring->ringno, 2894 irsp->un.ulpWord[3]); 2895 } 2896 if (irsp->ulpBdeCount == 2) { 2897 iocbq->context3 = lpfc_sli_get_buff(phba, pring, 2898 irsp->unsli3.sli3Words[7]); 2899 if (!iocbq->context3) 2900 lpfc_printf_log(phba, 2901 KERN_ERR, 2902 LOG_SLI, 2903 "0344 Ring %d Cannot find " 2904 "buffer for an unsolicited " 2905 "iocb. 
tag 0x%x\n", 2906 pring->ringno, 2907 irsp->unsli3.sli3Words[7]); 2908 } 2909 } 2910 } 2911 if (irsp->ulpBdeCount != 0 && 2912 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || 2913 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { 2914 int found = 0; 2915 2916 /* search continue save q for same XRI */ 2917 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { 2918 if (iocbq->iocb.unsli3.rcvsli3.ox_id == 2919 saveq->iocb.unsli3.rcvsli3.ox_id) { 2920 list_add_tail(&saveq->list, &iocbq->list); 2921 found = 1; 2922 break; 2923 } 2924 } 2925 if (!found) 2926 list_add_tail(&saveq->clist, 2927 &pring->iocb_continue_saveq); 2928 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { 2929 list_del_init(&iocbq->clist); 2930 saveq = iocbq; 2931 irsp = &(saveq->iocb); 2932 } else 2933 return 0; 2934 } 2935 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || 2936 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || 2937 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { 2938 Rctl = FC_RCTL_ELS_REQ; 2939 Type = FC_TYPE_ELS; 2940 } else { 2941 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); 2942 Rctl = w5p->hcsw.Rctl; 2943 Type = w5p->hcsw.Type; 2944 2945 /* Firmware Workaround */ 2946 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 2947 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 2948 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 2949 Rctl = FC_RCTL_ELS_REQ; 2950 Type = FC_TYPE_ELS; 2951 w5p->hcsw.Rctl = Rctl; 2952 w5p->hcsw.Type = Type; 2953 } 2954 } 2955 2956 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) 2957 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2958 "0313 Ring %d handler: unexpected Rctl x%x " 2959 "Type x%x received\n", 2960 pring->ringno, Rctl, Type); 2961 2962 return 1; 2963 } 2964 2965 /** 2966 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb 2967 * @phba: Pointer to HBA context object. 2968 * @pring: Pointer to driver SLI ring object. 2969 * @prspiocb: Pointer to response iocb object. 2970 * 2971 * This function looks up the iocb_lookup table to get the command iocb 2972 * corresponding to the given response iocb using the iotag of the 2973 * response iocb. This function is called with the hbalock held 2974 * for sli3 devices or the ring_lock for sli4 devices. 2975 * This function returns the command iocb object if it finds the command 2976 * iocb else returns NULL. 2977 **/ 2978 static struct lpfc_iocbq * 2979 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, 2980 struct lpfc_sli_ring *pring, 2981 struct lpfc_iocbq *prspiocb) 2982 { 2983 struct lpfc_iocbq *cmd_iocb = NULL; 2984 uint16_t iotag; 2985 lockdep_assert_held(&phba->hbalock); 2986 2987 iotag = prspiocb->iocb.ulpIoTag; 2988 2989 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2990 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2991 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 2992 /* remove from txcmpl queue list */ 2993 list_del_init(&cmd_iocb->list); 2994 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 2995 pring->txcmplq_cnt--; 2996 return cmd_iocb; 2997 } 2998 } 2999 3000 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3001 "0317 iotag x%x is out of " 3002 "range: max iotag x%x wd0 x%x\n", 3003 iotag, phba->sli.last_iotag, 3004 *(((uint32_t *) &prspiocb->iocb) + 7)); 3005 return NULL; 3006 } 3007 3008 /** 3009 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag 3010 * @phba: Pointer to HBA context object. 3011 * @pring: Pointer to driver SLI ring object. 3012 * @iotag: IOCB tag. 
3013 * 3014 * This function looks up the iocb_lookup table to get the command iocb 3015 * corresponding to the given iotag. This function is called with the 3016 * hbalock held. 3017 * This function returns the command iocb object if it finds the command 3018 * iocb else returns NULL. 3019 **/ 3020 static struct lpfc_iocbq * 3021 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, 3022 struct lpfc_sli_ring *pring, uint16_t iotag) 3023 { 3024 struct lpfc_iocbq *cmd_iocb = NULL; 3025 3026 lockdep_assert_held(&phba->hbalock); 3027 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 3028 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 3029 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 3030 /* remove from txcmpl queue list */ 3031 list_del_init(&cmd_iocb->list); 3032 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 3033 pring->txcmplq_cnt--; 3034 return cmd_iocb; 3035 } 3036 } 3037 3038 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3039 "0372 iotag x%x lookup error: max iotag (x%x) " 3040 "iocb_flag x%x\n", 3041 iotag, phba->sli.last_iotag, 3042 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff); 3043 return NULL; 3044 } 3045 3046 /** 3047 * lpfc_sli_process_sol_iocb - process solicited iocb completion 3048 * @phba: Pointer to HBA context object. 3049 * @pring: Pointer to driver SLI ring object. 3050 * @saveq: Pointer to the response iocb to be processed. 3051 * 3052 * This function is called by the ring event handler for non-fcp 3053 * rings when there is a new response iocb in the response ring. 3054 * The caller is not required to hold any locks. This function 3055 * gets the command iocb associated with the response iocb and 3056 * calls the completion handler for the command iocb. If there 3057 * is no completion handler, the function will free the resources 3058 * associated with command iocb. If the response iocb is for 3059 * an already aborted command iocb, the status of the completion 3060 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. 3061 * This function always returns 1. 3062 **/ 3063 static int 3064 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3065 struct lpfc_iocbq *saveq) 3066 { 3067 struct lpfc_iocbq *cmdiocbp; 3068 int rc = 1; 3069 unsigned long iflag; 3070 3071 /* Based on the iotag field, get the cmd IOCB from the txcmplq */ 3072 if (phba->sli_rev == LPFC_SLI_REV4) 3073 spin_lock_irqsave(&pring->ring_lock, iflag); 3074 else 3075 spin_lock_irqsave(&phba->hbalock, iflag); 3076 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 3077 if (phba->sli_rev == LPFC_SLI_REV4) 3078 spin_unlock_irqrestore(&pring->ring_lock, iflag); 3079 else 3080 spin_unlock_irqrestore(&phba->hbalock, iflag); 3081 3082 if (cmdiocbp) { 3083 if (cmdiocbp->iocb_cmpl) { 3084 /* 3085 * If an ELS command failed send an event to mgmt 3086 * application. 3087 */ 3088 if (saveq->iocb.ulpStatus && 3089 (pring->ringno == LPFC_ELS_RING) && 3090 (cmdiocbp->iocb.ulpCommand == 3091 CMD_ELS_REQUEST64_CR)) 3092 lpfc_send_els_failure_event(phba, 3093 cmdiocbp, saveq); 3094 3095 /* 3096 * Post all ELS completions to the worker thread. 3097 * All other are passed to the completion callback. 
3098 */ 3099 if (pring->ringno == LPFC_ELS_RING) { 3100 if ((phba->sli_rev < LPFC_SLI_REV4) && 3101 (cmdiocbp->iocb_flag & 3102 LPFC_DRIVER_ABORTED)) { 3103 spin_lock_irqsave(&phba->hbalock, 3104 iflag); 3105 cmdiocbp->iocb_flag &= 3106 ~LPFC_DRIVER_ABORTED; 3107 spin_unlock_irqrestore(&phba->hbalock, 3108 iflag); 3109 saveq->iocb.ulpStatus = 3110 IOSTAT_LOCAL_REJECT; 3111 saveq->iocb.un.ulpWord[4] = 3112 IOERR_SLI_ABORTED; 3113 3114 /* Firmware could still be in progress 3115 * of DMAing payload, so don't free data 3116 * buffer till after a hbeat. 3117 */ 3118 spin_lock_irqsave(&phba->hbalock, 3119 iflag); 3120 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 3121 spin_unlock_irqrestore(&phba->hbalock, 3122 iflag); 3123 } 3124 if (phba->sli_rev == LPFC_SLI_REV4) { 3125 if (saveq->iocb_flag & 3126 LPFC_EXCHANGE_BUSY) { 3127 /* Set cmdiocb flag for the 3128 * exchange busy so sgl (xri) 3129 * will not be released until 3130 * the abort xri is received 3131 * from hba. 3132 */ 3133 spin_lock_irqsave( 3134 &phba->hbalock, iflag); 3135 cmdiocbp->iocb_flag |= 3136 LPFC_EXCHANGE_BUSY; 3137 spin_unlock_irqrestore( 3138 &phba->hbalock, iflag); 3139 } 3140 if (cmdiocbp->iocb_flag & 3141 LPFC_DRIVER_ABORTED) { 3142 /* 3143 * Clear LPFC_DRIVER_ABORTED 3144 * bit in case it was driver 3145 * initiated abort. 3146 */ 3147 spin_lock_irqsave( 3148 &phba->hbalock, iflag); 3149 cmdiocbp->iocb_flag &= 3150 ~LPFC_DRIVER_ABORTED; 3151 spin_unlock_irqrestore( 3152 &phba->hbalock, iflag); 3153 cmdiocbp->iocb.ulpStatus = 3154 IOSTAT_LOCAL_REJECT; 3155 cmdiocbp->iocb.un.ulpWord[4] = 3156 IOERR_ABORT_REQUESTED; 3157 /* 3158 * For SLI4, irsiocb contains 3159 * NO_XRI in sli_xritag, it 3160 * shall not affect releasing 3161 * sgl (xri) process. 3162 */ 3163 saveq->iocb.ulpStatus = 3164 IOSTAT_LOCAL_REJECT; 3165 saveq->iocb.un.ulpWord[4] = 3166 IOERR_SLI_ABORTED; 3167 spin_lock_irqsave( 3168 &phba->hbalock, iflag); 3169 saveq->iocb_flag |= 3170 LPFC_DELAY_MEM_FREE; 3171 spin_unlock_irqrestore( 3172 &phba->hbalock, iflag); 3173 } 3174 } 3175 } 3176 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 3177 } else 3178 lpfc_sli_release_iocbq(phba, cmdiocbp); 3179 } else { 3180 /* 3181 * Unknown initiating command based on the response iotag. 3182 * This could be the case on the ELS ring because of 3183 * lpfc_els_abort(). 3184 */ 3185 if (pring->ringno != LPFC_ELS_RING) { 3186 /* 3187 * Ring <ringno> handler: unexpected completion IoTag 3188 * <IoTag> 3189 */ 3190 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3191 "0322 Ring %d handler: " 3192 "unexpected completion IoTag x%x " 3193 "Data: x%x x%x x%x x%x\n", 3194 pring->ringno, 3195 saveq->iocb.ulpIoTag, 3196 saveq->iocb.ulpStatus, 3197 saveq->iocb.un.ulpWord[4], 3198 saveq->iocb.ulpCommand, 3199 saveq->iocb.ulpContext); 3200 } 3201 } 3202 3203 return rc; 3204 } 3205 3206 /** 3207 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler 3208 * @phba: Pointer to HBA context object. 3209 * @pring: Pointer to driver SLI ring object. 3210 * 3211 * This function is called from the iocb ring event handlers when 3212 * put pointer is ahead of the get pointer for a ring. This function signal 3213 * an error attention condition to the worker thread and the worker 3214 * thread will transition the HBA to offline state. 
3215 **/ 3216 static void 3217 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3218 { 3219 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 3220 /* 3221 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3222 * rsp ring <portRspMax> 3223 */ 3224 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3225 "0312 Ring %d handler: portRspPut %d " 3226 "is bigger than rsp ring %d\n", 3227 pring->ringno, le32_to_cpu(pgp->rspPutInx), 3228 pring->sli.sli3.numRiocb); 3229 3230 phba->link_state = LPFC_HBA_ERROR; 3231 3232 /* 3233 * All error attention handlers are posted to 3234 * worker thread 3235 */ 3236 phba->work_ha |= HA_ERATT; 3237 phba->work_hs = HS_FFER3; 3238 3239 lpfc_worker_wake_up(phba); 3240 3241 return; 3242 } 3243 3244 /** 3245 * lpfc_poll_eratt - Error attention polling timer timeout handler 3246 * @ptr: Pointer to address of HBA context object. 3247 * 3248 * This function is invoked by the Error Attention polling timer when the 3249 * timer times out. It will check the SLI Error Attention register for 3250 * possible attention events. If so, it will post an Error Attention event 3251 * and wake up worker thread to process it. Otherwise, it will set up the 3252 * Error Attention polling timer for the next poll. 3253 **/ 3254 void lpfc_poll_eratt(struct timer_list *t) 3255 { 3256 struct lpfc_hba *phba; 3257 uint32_t eratt = 0; 3258 uint64_t sli_intr, cnt; 3259 3260 phba = from_timer(phba, t, eratt_poll); 3261 3262 /* Here we will also keep track of interrupts per sec of the hba */ 3263 sli_intr = phba->sli.slistat.sli_intr; 3264 3265 if (phba->sli.slistat.sli_prev_intr > sli_intr) 3266 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) + 3267 sli_intr); 3268 else 3269 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr); 3270 3271 /* 64-bit integer division not supported on 32-bit x86 - use do_div */ 3272 do_div(cnt, phba->eratt_poll_interval); 3273 phba->sli.slistat.sli_ips = cnt; 3274 3275 phba->sli.slistat.sli_prev_intr = sli_intr; 3276 3277 /* Check chip HA register for error event */ 3278 eratt = lpfc_sli_check_eratt(phba); 3279 3280 if (eratt) 3281 /* Tell the worker thread there is work to do */ 3282 lpfc_worker_wake_up(phba); 3283 else 3284 /* Restart the timer for next eratt poll */ 3285 mod_timer(&phba->eratt_poll, 3286 jiffies + 3287 msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 3288 return; 3289 } 3290 3291 3292 /** 3293 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring 3294 * @phba: Pointer to HBA context object. 3295 * @pring: Pointer to driver SLI ring object. 3296 * @mask: Host attention register mask for this ring. 3297 * 3298 * This function is called from the interrupt context when there is a ring 3299 * event for the fcp ring. The caller does not hold any lock. 3300 * The function processes each response iocb in the response ring until it 3301 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with 3302 * LE bit set. The function will call the completion handler of the command iocb 3303 * if the response iocb indicates a completion for a command iocb or it is 3304 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 3305 * function if this is an unsolicited iocb. 3306 * This routine presumes LPFC_FCP_RING handling and doesn't bother 3307 * to check it explicitly. 
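 * Each response entry is first copied (and byte swapped) into a local
 * iocb before it is decoded, as done in the loop below (illustrative):
 *
 *	lpfc_sli_pcimem_bcopy(entry, &rspiocbq.iocb, phba->iocb_rsp_size);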
3308 */ 3309 int 3310 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 3311 struct lpfc_sli_ring *pring, uint32_t mask) 3312 { 3313 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 3314 IOCB_t *irsp = NULL; 3315 IOCB_t *entry = NULL; 3316 struct lpfc_iocbq *cmdiocbq = NULL; 3317 struct lpfc_iocbq rspiocbq; 3318 uint32_t status; 3319 uint32_t portRspPut, portRspMax; 3320 int rc = 1; 3321 lpfc_iocb_type type; 3322 unsigned long iflag; 3323 uint32_t rsp_cmpl = 0; 3324 3325 spin_lock_irqsave(&phba->hbalock, iflag); 3326 pring->stats.iocb_event++; 3327 3328 /* 3329 * The next available response entry should never exceed the maximum 3330 * entries. If it does, treat it as an adapter hardware error. 3331 */ 3332 portRspMax = pring->sli.sli3.numRiocb; 3333 portRspPut = le32_to_cpu(pgp->rspPutInx); 3334 if (unlikely(portRspPut >= portRspMax)) { 3335 lpfc_sli_rsp_pointers_error(phba, pring); 3336 spin_unlock_irqrestore(&phba->hbalock, iflag); 3337 return 1; 3338 } 3339 if (phba->fcp_ring_in_use) { 3340 spin_unlock_irqrestore(&phba->hbalock, iflag); 3341 return 1; 3342 } else 3343 phba->fcp_ring_in_use = 1; 3344 3345 rmb(); 3346 while (pring->sli.sli3.rspidx != portRspPut) { 3347 /* 3348 * Fetch an entry off the ring and copy it into a local data 3349 * structure. The copy involves a byte-swap since the 3350 * network byte order and pci byte orders are different. 3351 */ 3352 entry = lpfc_resp_iocb(phba, pring); 3353 phba->last_completion_time = jiffies; 3354 3355 if (++pring->sli.sli3.rspidx >= portRspMax) 3356 pring->sli.sli3.rspidx = 0; 3357 3358 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 3359 (uint32_t *) &rspiocbq.iocb, 3360 phba->iocb_rsp_size); 3361 INIT_LIST_HEAD(&(rspiocbq.list)); 3362 irsp = &rspiocbq.iocb; 3363 3364 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 3365 pring->stats.iocb_rsp++; 3366 rsp_cmpl++; 3367 3368 if (unlikely(irsp->ulpStatus)) { 3369 /* 3370 * If resource errors reported from HBA, reduce 3371 * queuedepths of the SCSI device. 3372 */ 3373 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3374 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3375 IOERR_NO_RESOURCES)) { 3376 spin_unlock_irqrestore(&phba->hbalock, iflag); 3377 phba->lpfc_rampdown_queue_depth(phba); 3378 spin_lock_irqsave(&phba->hbalock, iflag); 3379 } 3380 3381 /* Rsp ring <ringno> error: IOCB */ 3382 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3383 "0336 Rsp Ring %d error: IOCB Data: " 3384 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 3385 pring->ringno, 3386 irsp->un.ulpWord[0], 3387 irsp->un.ulpWord[1], 3388 irsp->un.ulpWord[2], 3389 irsp->un.ulpWord[3], 3390 irsp->un.ulpWord[4], 3391 irsp->un.ulpWord[5], 3392 *(uint32_t *)&irsp->un1, 3393 *((uint32_t *)&irsp->un1 + 1)); 3394 } 3395 3396 switch (type) { 3397 case LPFC_ABORT_IOCB: 3398 case LPFC_SOL_IOCB: 3399 /* 3400 * Idle exchange closed via ABTS from port. No iocb 3401 * resources need to be recovered. 3402 */ 3403 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 3404 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3405 "0333 IOCB cmd 0x%x" 3406 " processed. 
Skipping" 3407 " completion\n", 3408 irsp->ulpCommand); 3409 break; 3410 } 3411 3412 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 3413 &rspiocbq); 3414 if (unlikely(!cmdiocbq)) 3415 break; 3416 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) 3417 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 3418 if (cmdiocbq->iocb_cmpl) { 3419 spin_unlock_irqrestore(&phba->hbalock, iflag); 3420 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 3421 &rspiocbq); 3422 spin_lock_irqsave(&phba->hbalock, iflag); 3423 } 3424 break; 3425 case LPFC_UNSOL_IOCB: 3426 spin_unlock_irqrestore(&phba->hbalock, iflag); 3427 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 3428 spin_lock_irqsave(&phba->hbalock, iflag); 3429 break; 3430 default: 3431 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3432 char adaptermsg[LPFC_MAX_ADPTMSG]; 3433 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3434 memcpy(&adaptermsg[0], (uint8_t *) irsp, 3435 MAX_MSG_DATA); 3436 dev_warn(&((phba->pcidev)->dev), 3437 "lpfc%d: %s\n", 3438 phba->brd_no, adaptermsg); 3439 } else { 3440 /* Unknown IOCB command */ 3441 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3442 "0334 Unknown IOCB command " 3443 "Data: x%x, x%x x%x x%x x%x\n", 3444 type, irsp->ulpCommand, 3445 irsp->ulpStatus, 3446 irsp->ulpIoTag, 3447 irsp->ulpContext); 3448 } 3449 break; 3450 } 3451 3452 /* 3453 * The response IOCB has been processed. Update the ring 3454 * pointer in SLIM. If the port response put pointer has not 3455 * been updated, sync the pgp->rspPutInx and fetch the new port 3456 * response put pointer. 3457 */ 3458 writel(pring->sli.sli3.rspidx, 3459 &phba->host_gp[pring->ringno].rspGetInx); 3460 3461 if (pring->sli.sli3.rspidx == portRspPut) 3462 portRspPut = le32_to_cpu(pgp->rspPutInx); 3463 } 3464 3465 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) { 3466 pring->stats.iocb_rsp_full++; 3467 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3468 writel(status, phba->CAregaddr); 3469 readl(phba->CAregaddr); 3470 } 3471 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3472 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3473 pring->stats.iocb_cmd_empty++; 3474 3475 /* Force update of the local copy of cmdGetInx */ 3476 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 3477 lpfc_sli_resume_iocb(phba, pring); 3478 3479 if ((pring->lpfc_sli_cmd_available)) 3480 (pring->lpfc_sli_cmd_available) (phba, pring); 3481 3482 } 3483 3484 phba->fcp_ring_in_use = 0; 3485 spin_unlock_irqrestore(&phba->hbalock, iflag); 3486 return rc; 3487 } 3488 3489 /** 3490 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb 3491 * @phba: Pointer to HBA context object. 3492 * @pring: Pointer to driver SLI ring object. 3493 * @rspiocbp: Pointer to driver response IOCB object. 3494 * 3495 * This function is called from the worker thread when there is a slow-path 3496 * response IOCB to process. This function chains all the response iocbs until 3497 * seeing the iocb with the LE bit set. The function will call 3498 * lpfc_sli_process_sol_iocb function if the response iocb indicates a 3499 * completion of a command iocb. The function will call the 3500 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. 3501 * The function frees the resources or calls the completion handler if this 3502 * iocb is an abort completion. The function returns NULL when the response 3503 * iocb has the LE bit set and all the chained iocbs are processed, otherwise 3504 * this function shall chain the iocb on to the iocb_continueq and return the 3505 * response iocb passed in. 
3506 **/ 3507 static struct lpfc_iocbq * 3508 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3509 struct lpfc_iocbq *rspiocbp) 3510 { 3511 struct lpfc_iocbq *saveq; 3512 struct lpfc_iocbq *cmdiocbp; 3513 struct lpfc_iocbq *next_iocb; 3514 IOCB_t *irsp = NULL; 3515 uint32_t free_saveq; 3516 uint8_t iocb_cmd_type; 3517 lpfc_iocb_type type; 3518 unsigned long iflag; 3519 int rc; 3520 3521 spin_lock_irqsave(&phba->hbalock, iflag); 3522 /* First add the response iocb to the countinueq list */ 3523 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 3524 pring->iocb_continueq_cnt++; 3525 3526 /* Now, determine whether the list is completed for processing */ 3527 irsp = &rspiocbp->iocb; 3528 if (irsp->ulpLe) { 3529 /* 3530 * By default, the driver expects to free all resources 3531 * associated with this iocb completion. 3532 */ 3533 free_saveq = 1; 3534 saveq = list_get_first(&pring->iocb_continueq, 3535 struct lpfc_iocbq, list); 3536 irsp = &(saveq->iocb); 3537 list_del_init(&pring->iocb_continueq); 3538 pring->iocb_continueq_cnt = 0; 3539 3540 pring->stats.iocb_rsp++; 3541 3542 /* 3543 * If resource errors reported from HBA, reduce 3544 * queuedepths of the SCSI device. 3545 */ 3546 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3547 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3548 IOERR_NO_RESOURCES)) { 3549 spin_unlock_irqrestore(&phba->hbalock, iflag); 3550 phba->lpfc_rampdown_queue_depth(phba); 3551 spin_lock_irqsave(&phba->hbalock, iflag); 3552 } 3553 3554 if (irsp->ulpStatus) { 3555 /* Rsp ring <ringno> error: IOCB */ 3556 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3557 "0328 Rsp Ring %d error: " 3558 "IOCB Data: " 3559 "x%x x%x x%x x%x " 3560 "x%x x%x x%x x%x " 3561 "x%x x%x x%x x%x " 3562 "x%x x%x x%x x%x\n", 3563 pring->ringno, 3564 irsp->un.ulpWord[0], 3565 irsp->un.ulpWord[1], 3566 irsp->un.ulpWord[2], 3567 irsp->un.ulpWord[3], 3568 irsp->un.ulpWord[4], 3569 irsp->un.ulpWord[5], 3570 *(((uint32_t *) irsp) + 6), 3571 *(((uint32_t *) irsp) + 7), 3572 *(((uint32_t *) irsp) + 8), 3573 *(((uint32_t *) irsp) + 9), 3574 *(((uint32_t *) irsp) + 10), 3575 *(((uint32_t *) irsp) + 11), 3576 *(((uint32_t *) irsp) + 12), 3577 *(((uint32_t *) irsp) + 13), 3578 *(((uint32_t *) irsp) + 14), 3579 *(((uint32_t *) irsp) + 15)); 3580 } 3581 3582 /* 3583 * Fetch the IOCB command type and call the correct completion 3584 * routine. Solicited and Unsolicited IOCBs on the ELS ring 3585 * get freed back to the lpfc_iocb_list by the discovery 3586 * kernel thread. 
3587 */ 3588 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 3589 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 3590 switch (type) { 3591 case LPFC_SOL_IOCB: 3592 spin_unlock_irqrestore(&phba->hbalock, iflag); 3593 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); 3594 spin_lock_irqsave(&phba->hbalock, iflag); 3595 break; 3596 3597 case LPFC_UNSOL_IOCB: 3598 spin_unlock_irqrestore(&phba->hbalock, iflag); 3599 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); 3600 spin_lock_irqsave(&phba->hbalock, iflag); 3601 if (!rc) 3602 free_saveq = 0; 3603 break; 3604 3605 case LPFC_ABORT_IOCB: 3606 cmdiocbp = NULL; 3607 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) 3608 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, 3609 saveq); 3610 if (cmdiocbp) { 3611 /* Call the specified completion routine */ 3612 if (cmdiocbp->iocb_cmpl) { 3613 spin_unlock_irqrestore(&phba->hbalock, 3614 iflag); 3615 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, 3616 saveq); 3617 spin_lock_irqsave(&phba->hbalock, 3618 iflag); 3619 } else 3620 __lpfc_sli_release_iocbq(phba, 3621 cmdiocbp); 3622 } 3623 break; 3624 3625 case LPFC_UNKNOWN_IOCB: 3626 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3627 char adaptermsg[LPFC_MAX_ADPTMSG]; 3628 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3629 memcpy(&adaptermsg[0], (uint8_t *)irsp, 3630 MAX_MSG_DATA); 3631 dev_warn(&((phba->pcidev)->dev), 3632 "lpfc%d: %s\n", 3633 phba->brd_no, adaptermsg); 3634 } else { 3635 /* Unknown IOCB command */ 3636 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3637 "0335 Unknown IOCB " 3638 "command Data: x%x " 3639 "x%x x%x x%x\n", 3640 irsp->ulpCommand, 3641 irsp->ulpStatus, 3642 irsp->ulpIoTag, 3643 irsp->ulpContext); 3644 } 3645 break; 3646 } 3647 3648 if (free_saveq) { 3649 list_for_each_entry_safe(rspiocbp, next_iocb, 3650 &saveq->list, list) { 3651 list_del_init(&rspiocbp->list); 3652 __lpfc_sli_release_iocbq(phba, rspiocbp); 3653 } 3654 __lpfc_sli_release_iocbq(phba, saveq); 3655 } 3656 rspiocbp = NULL; 3657 } 3658 spin_unlock_irqrestore(&phba->hbalock, iflag); 3659 return rspiocbp; 3660 } 3661 3662 /** 3663 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs 3664 * @phba: Pointer to HBA context object. 3665 * @pring: Pointer to driver SLI ring object. 3666 * @mask: Host attention register mask for this ring. 3667 * 3668 * This routine wraps the actual slow_ring event process routine from the 3669 * API jump table function pointer from the lpfc_hba struct. 3670 **/ 3671 void 3672 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 3673 struct lpfc_sli_ring *pring, uint32_t mask) 3674 { 3675 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); 3676 } 3677 3678 /** 3679 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings 3680 * @phba: Pointer to HBA context object. 3681 * @pring: Pointer to driver SLI ring object. 3682 * @mask: Host attention register mask for this ring. 3683 * 3684 * This function is called from the worker thread when there is a ring event 3685 * for non-fcp rings. The caller does not hold any lock. The function will 3686 * remove each response iocb in the response ring and calls the handle 3687 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 
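 * Each response entry is copied into a free iocbq, the ring's rspGetInx is
 * updated in SLIM, and processing stops once rspidx catches up with the
 * port's rspPutInx.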
3688 **/ 3689 static void 3690 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, 3691 struct lpfc_sli_ring *pring, uint32_t mask) 3692 { 3693 struct lpfc_pgp *pgp; 3694 IOCB_t *entry; 3695 IOCB_t *irsp = NULL; 3696 struct lpfc_iocbq *rspiocbp = NULL; 3697 uint32_t portRspPut, portRspMax; 3698 unsigned long iflag; 3699 uint32_t status; 3700 3701 pgp = &phba->port_gp[pring->ringno]; 3702 spin_lock_irqsave(&phba->hbalock, iflag); 3703 pring->stats.iocb_event++; 3704 3705 /* 3706 * The next available response entry should never exceed the maximum 3707 * entries. If it does, treat it as an adapter hardware error. 3708 */ 3709 portRspMax = pring->sli.sli3.numRiocb; 3710 portRspPut = le32_to_cpu(pgp->rspPutInx); 3711 if (portRspPut >= portRspMax) { 3712 /* 3713 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3714 * rsp ring <portRspMax> 3715 */ 3716 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3717 "0303 Ring %d handler: portRspPut %d " 3718 "is bigger than rsp ring %d\n", 3719 pring->ringno, portRspPut, portRspMax); 3720 3721 phba->link_state = LPFC_HBA_ERROR; 3722 spin_unlock_irqrestore(&phba->hbalock, iflag); 3723 3724 phba->work_hs = HS_FFER3; 3725 lpfc_handle_eratt(phba); 3726 3727 return; 3728 } 3729 3730 rmb(); 3731 while (pring->sli.sli3.rspidx != portRspPut) { 3732 /* 3733 * Build a completion list and call the appropriate handler. 3734 * The process is to get the next available response iocb, get 3735 * a free iocb from the list, copy the response data into the 3736 * free iocb, insert to the continuation list, and update the 3737 * next response index to slim. This process makes response 3738 * iocb's in the ring available to DMA as fast as possible but 3739 * pays a penalty for a copy operation. Since the iocb is 3740 * only 32 bytes, this penalty is considered small relative to 3741 * the PCI reads for register values and a slim write. When 3742 * the ulpLe field is set, the entire Command has been 3743 * received. 3744 */ 3745 entry = lpfc_resp_iocb(phba, pring); 3746 3747 phba->last_completion_time = jiffies; 3748 rspiocbp = __lpfc_sli_get_iocbq(phba); 3749 if (rspiocbp == NULL) { 3750 printk(KERN_ERR "%s: out of buffers! Failing " 3751 "completion.\n", __func__); 3752 break; 3753 } 3754 3755 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, 3756 phba->iocb_rsp_size); 3757 irsp = &rspiocbp->iocb; 3758 3759 if (++pring->sli.sli3.rspidx >= portRspMax) 3760 pring->sli.sli3.rspidx = 0; 3761 3762 if (pring->ringno == LPFC_ELS_RING) { 3763 lpfc_debugfs_slow_ring_trc(phba, 3764 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x", 3765 *(((uint32_t *) irsp) + 4), 3766 *(((uint32_t *) irsp) + 6), 3767 *(((uint32_t *) irsp) + 7)); 3768 } 3769 3770 writel(pring->sli.sli3.rspidx, 3771 &phba->host_gp[pring->ringno].rspGetInx); 3772 3773 spin_unlock_irqrestore(&phba->hbalock, iflag); 3774 /* Handle the response IOCB */ 3775 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); 3776 spin_lock_irqsave(&phba->hbalock, iflag); 3777 3778 /* 3779 * If the port response put pointer has not been updated, sync 3780 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port 3781 * response put pointer. 
3782 */ 3783 if (pring->sli.sli3.rspidx == portRspPut) { 3784 portRspPut = le32_to_cpu(pgp->rspPutInx); 3785 } 3786 } /* while (pring->sli.sli3.rspidx != portRspPut) */ 3787 3788 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { 3789 /* At least one response entry has been freed */ 3790 pring->stats.iocb_rsp_full++; 3791 /* SET RxRE_RSP in Chip Att register */ 3792 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3793 writel(status, phba->CAregaddr); 3794 readl(phba->CAregaddr); /* flush */ 3795 } 3796 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3797 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3798 pring->stats.iocb_cmd_empty++; 3799 3800 /* Force update of the local copy of cmdGetInx */ 3801 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 3802 lpfc_sli_resume_iocb(phba, pring); 3803 3804 if ((pring->lpfc_sli_cmd_available)) 3805 (pring->lpfc_sli_cmd_available) (phba, pring); 3806 3807 } 3808 3809 spin_unlock_irqrestore(&phba->hbalock, iflag); 3810 return; 3811 } 3812 3813 /** 3814 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events 3815 * @phba: Pointer to HBA context object. 3816 * @pring: Pointer to driver SLI ring object. 3817 * @mask: Host attention register mask for this ring. 3818 * 3819 * This function is called from the worker thread when there is a pending 3820 * ELS response iocb on the driver internal slow-path response iocb worker 3821 * queue. The caller does not hold any lock. The function will remove each 3822 * response iocb from the response worker queue and calls the handle 3823 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 3824 **/ 3825 static void 3826 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, 3827 struct lpfc_sli_ring *pring, uint32_t mask) 3828 { 3829 struct lpfc_iocbq *irspiocbq; 3830 struct hbq_dmabuf *dmabuf; 3831 struct lpfc_cq_event *cq_event; 3832 unsigned long iflag; 3833 int count = 0; 3834 3835 spin_lock_irqsave(&phba->hbalock, iflag); 3836 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 3837 spin_unlock_irqrestore(&phba->hbalock, iflag); 3838 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 3839 /* Get the response iocb from the head of work queue */ 3840 spin_lock_irqsave(&phba->hbalock, iflag); 3841 list_remove_head(&phba->sli4_hba.sp_queue_event, 3842 cq_event, struct lpfc_cq_event, list); 3843 spin_unlock_irqrestore(&phba->hbalock, iflag); 3844 3845 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 3846 case CQE_CODE_COMPL_WQE: 3847 irspiocbq = container_of(cq_event, struct lpfc_iocbq, 3848 cq_event); 3849 /* Translate ELS WCQE to response IOCBQ */ 3850 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, 3851 irspiocbq); 3852 if (irspiocbq) 3853 lpfc_sli_sp_handle_rspiocb(phba, pring, 3854 irspiocbq); 3855 count++; 3856 break; 3857 case CQE_CODE_RECEIVE: 3858 case CQE_CODE_RECEIVE_V1: 3859 dmabuf = container_of(cq_event, struct hbq_dmabuf, 3860 cq_event); 3861 lpfc_sli4_handle_received_buffer(phba, dmabuf); 3862 count++; 3863 break; 3864 default: 3865 break; 3866 } 3867 3868 /* Limit the number of events to 64 to avoid soft lockups */ 3869 if (count == 64) 3870 break; 3871 } 3872 } 3873 3874 /** 3875 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring 3876 * @phba: Pointer to HBA context object. 3877 * @pring: Pointer to driver SLI ring object. 3878 * 3879 * This function aborts all iocbs in the given ring and frees all the iocb 3880 * objects in txq. This function issues an abort iocb for all the iocb commands 3881 * in txcmplq. 
The iocbs in the txcmplq are not guaranteed to complete before
3882 * the return of this function. The caller is not required to hold any locks.
3883 **/
3884 void
3885 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3886 {
3887 LIST_HEAD(completions);
3888 struct lpfc_iocbq *iocb, *next_iocb;
3889
3890 if (pring->ringno == LPFC_ELS_RING) {
3891 lpfc_fabric_abort_hba(phba);
3892 }
3893
3894 /* Error everything on txq and txcmplq
3895 * First do the txq.
3896 */
3897 if (phba->sli_rev >= LPFC_SLI_REV4) {
3898 spin_lock_irq(&pring->ring_lock);
3899 list_splice_init(&pring->txq, &completions);
3900 pring->txq_cnt = 0;
3901 spin_unlock_irq(&pring->ring_lock);
3902
3903 spin_lock_irq(&phba->hbalock);
3904 /* Next issue ABTS for everything on the txcmplq */
3905 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3906 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3907 spin_unlock_irq(&phba->hbalock);
3908 } else {
3909 spin_lock_irq(&phba->hbalock);
3910 list_splice_init(&pring->txq, &completions);
3911 pring->txq_cnt = 0;
3912
3913 /* Next issue ABTS for everything on the txcmplq */
3914 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3915 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3916 spin_unlock_irq(&phba->hbalock);
3917 }
3918
3919 /* Cancel all the IOCBs from the completions list */
3920 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3921 IOERR_SLI_ABORTED);
3922 }
3923
3924 /**
3925 * lpfc_sli_abort_wqe_ring - Abort all wqes in the ring
3926 * @phba: Pointer to HBA context object.
3927 * @pring: Pointer to driver SLI ring object.
3928 *
3929 * This function issues an abort wqe for every outstanding command on the
3930 * given ring's txcmplq. The aborted commands are not guaranteed to complete
3931 * before the return of this function. The caller is not required to hold
3932 * any locks.
3933 **/
3934 void
3935 lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3936 {
3937 LIST_HEAD(completions);
3938 struct lpfc_iocbq *iocb, *next_iocb;
3939
3940 if (pring->ringno == LPFC_ELS_RING)
3941 lpfc_fabric_abort_hba(phba);
3942
3943 spin_lock_irq(&phba->hbalock);
3944 /* Next issue ABTS for everything on the txcmplq */
3945 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3946 lpfc_sli4_abort_nvme_io(phba, pring, iocb);
3947 spin_unlock_irq(&phba->hbalock);
3948 }
3949
3950
3951 /**
3952 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3953 * @phba: Pointer to HBA context object.
3955 *
3956 * This function aborts all iocbs in FCP rings and frees all the iocb
3957 * objects in txq. This function issues an abort iocb for all the iocb commands
3958 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3959 * the return of this function. The caller is not required to hold any locks.
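 * On SLI4 adapters each FCP hardware queue's work queue ring is aborted in
 * turn; on SLI3 adapters the single FCP ring is used.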
3960 **/
3961 void
3962 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3963 {
3964 struct lpfc_sli *psli = &phba->sli;
3965 struct lpfc_sli_ring *pring;
3966 uint32_t i;
3967
3968 /* Look on all the FCP Rings for the iotag */
3969 if (phba->sli_rev >= LPFC_SLI_REV4) {
3970 for (i = 0; i < phba->cfg_hdw_queue; i++) {
3971 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
3972 lpfc_sli_abort_iocb_ring(phba, pring);
3973 }
3974 } else {
3975 pring = &psli->sli3_ring[LPFC_FCP_RING];
3976 lpfc_sli_abort_iocb_ring(phba, pring);
3977 }
3978 }
3979
3980 /**
3981 * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings
3982 * @phba: Pointer to HBA context object.
3983 *
3984 * This function aborts all wqes in NVME rings. This function issues an
3985 * abort wqe for all the outstanding IO commands in txcmplq. The iocbs in
3986 * the txcmplq are not guaranteed to complete before the return of this
3987 * function. The caller is not required to hold any locks.
3988 **/
3989 void
3990 lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
3991 {
3992 struct lpfc_sli_ring *pring;
3993 uint32_t i;
3994
3995 if ((phba->sli_rev < LPFC_SLI_REV4) ||
3996 !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
3997 return;
3998
3999 /* Abort all IO on each NVME ring. */
4000 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4001 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
4002 lpfc_sli_abort_wqe_ring(phba, pring);
4003 }
4004 }
4005
4006
4007 /**
4008 * lpfc_sli_flush_fcp_rings - flush all iocbs in the FCP rings
4009 * @phba: Pointer to HBA context object.
4010 *
4011 * This function flushes all iocbs in the FCP rings and frees all the iocb
4012 * objects in txq and txcmplq. This function will not issue abort iocbs
4013 * for the iocb commands in txcmplq; they will just be returned with
4014 * IOERR_SLI_DOWN. This function is invoked via EEH when the device's PCI
4015 * slot has been permanently disabled.
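 * The routine first sets HBA_FCP_IOQ_FLUSH under the hbalock so that other
 * paths can see that an I/O queue flush is in progress.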
4016 **/ 4017 void 4018 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) 4019 { 4020 LIST_HEAD(txq); 4021 LIST_HEAD(txcmplq); 4022 struct lpfc_sli *psli = &phba->sli; 4023 struct lpfc_sli_ring *pring; 4024 uint32_t i; 4025 struct lpfc_iocbq *piocb, *next_iocb; 4026 4027 spin_lock_irq(&phba->hbalock); 4028 /* Indicate the I/O queues are flushed */ 4029 phba->hba_flag |= HBA_FCP_IOQ_FLUSH; 4030 spin_unlock_irq(&phba->hbalock); 4031 4032 /* Look on all the FCP Rings for the iotag */ 4033 if (phba->sli_rev >= LPFC_SLI_REV4) { 4034 for (i = 0; i < phba->cfg_hdw_queue; i++) { 4035 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring; 4036 4037 spin_lock_irq(&pring->ring_lock); 4038 /* Retrieve everything on txq */ 4039 list_splice_init(&pring->txq, &txq); 4040 list_for_each_entry_safe(piocb, next_iocb, 4041 &pring->txcmplq, list) 4042 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 4043 /* Retrieve everything on the txcmplq */ 4044 list_splice_init(&pring->txcmplq, &txcmplq); 4045 pring->txq_cnt = 0; 4046 pring->txcmplq_cnt = 0; 4047 spin_unlock_irq(&pring->ring_lock); 4048 4049 /* Flush the txq */ 4050 lpfc_sli_cancel_iocbs(phba, &txq, 4051 IOSTAT_LOCAL_REJECT, 4052 IOERR_SLI_DOWN); 4053 /* Flush the txcmpq */ 4054 lpfc_sli_cancel_iocbs(phba, &txcmplq, 4055 IOSTAT_LOCAL_REJECT, 4056 IOERR_SLI_DOWN); 4057 } 4058 } else { 4059 pring = &psli->sli3_ring[LPFC_FCP_RING]; 4060 4061 spin_lock_irq(&phba->hbalock); 4062 /* Retrieve everything on txq */ 4063 list_splice_init(&pring->txq, &txq); 4064 list_for_each_entry_safe(piocb, next_iocb, 4065 &pring->txcmplq, list) 4066 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 4067 /* Retrieve everything on the txcmplq */ 4068 list_splice_init(&pring->txcmplq, &txcmplq); 4069 pring->txq_cnt = 0; 4070 pring->txcmplq_cnt = 0; 4071 spin_unlock_irq(&phba->hbalock); 4072 4073 /* Flush the txq */ 4074 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, 4075 IOERR_SLI_DOWN); 4076 /* Flush the txcmpq */ 4077 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, 4078 IOERR_SLI_DOWN); 4079 } 4080 } 4081 4082 /** 4083 * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings 4084 * @phba: Pointer to HBA context object. 4085 * 4086 * This function flushes all wqes in the nvme rings and frees all resources 4087 * in the txcmplq. This function does not issue abort wqes for the IO 4088 * commands in txcmplq, they will just be returned with 4089 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI 4090 * slot has been permanently disabled. 4091 **/ 4092 void 4093 lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba) 4094 { 4095 LIST_HEAD(txcmplq); 4096 struct lpfc_sli_ring *pring; 4097 uint32_t i; 4098 struct lpfc_iocbq *piocb, *next_iocb; 4099 4100 if ((phba->sli_rev < LPFC_SLI_REV4) || 4101 !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) 4102 return; 4103 4104 /* Hint to other driver operations that a flush is in progress. */ 4105 spin_lock_irq(&phba->hbalock); 4106 phba->hba_flag |= HBA_NVME_IOQ_FLUSH; 4107 spin_unlock_irq(&phba->hbalock); 4108 4109 /* Cycle through all NVME rings and complete each IO with 4110 * a local driver reason code. This is a flush so no 4111 * abort exchange to FW. 
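 * Each entry removed from the txcmplq is completed with
 * IOSTAT_LOCAL_REJECT / IOERR_SLI_DOWN via lpfc_sli_cancel_iocbs().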
4112 */
4113 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4114 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
4115
4116 spin_lock_irq(&pring->ring_lock);
4117 list_for_each_entry_safe(piocb, next_iocb,
4118 &pring->txcmplq, list)
4119 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4120 /* Retrieve everything on the txcmplq */
4121 list_splice_init(&pring->txcmplq, &txcmplq);
4122 pring->txcmplq_cnt = 0;
4123 spin_unlock_irq(&pring->ring_lock);
4124
4125 /* Flush the txcmplq */
4126 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4127 IOSTAT_LOCAL_REJECT,
4128 IOERR_SLI_DOWN);
4129 }
4130 }
4131
4132 /**
4133 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4134 * @phba: Pointer to HBA context object.
4135 * @mask: Bit mask to be checked.
4136 *
4137 * This function reads the host status register and compares
4138 * with the provided bit mask to check if HBA completed
4139 * the restart. This function will wait in a loop for the
4140 * HBA to complete restart. If the HBA does not restart within
4141 * 15 iterations, the function will reset the HBA again. The
4142 * function returns 1 when the HBA fails to restart, otherwise it
4143 * returns zero.
4144 **/
4145 static int
4146 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4147 {
4148 uint32_t status;
4149 int i = 0;
4150 int retval = 0;
4151
4152 /* Read the HBA Host Status Register */
4153 if (lpfc_readl(phba->HSregaddr, &status))
4154 return 1;
4155
4156 /*
4157 * Check the status register every 10ms for 5 retries, then every
4158 * 500ms for 5, then every 2.5 sec for 5, then reset the board and
4159 * check every 2.5 sec for 5 more.
4160 * Break out of the loop if errors occurred during init.
4161 */
4162 while (((status & mask) != mask) &&
4163 !(status & HS_FFERM) &&
4164 i++ < 20) {
4165
4166 if (i <= 5)
4167 msleep(10);
4168 else if (i <= 10)
4169 msleep(500);
4170 else
4171 msleep(2500);
4172
4173 if (i == 15) {
4174 /* Do post */
4175 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4176 lpfc_sli_brdrestart(phba);
4177 }
4178 /* Read the HBA Host Status Register */
4179 if (lpfc_readl(phba->HSregaddr, &status)) {
4180 retval = 1;
4181 break;
4182 }
4183 }
4184
4185 /* Check to see if any errors occurred during init */
4186 if ((status & HS_FFERM) || (i >= 20)) {
4187 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4188 "2751 Adapter failed to restart, "
4189 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4190 status,
4191 readl(phba->MBslimaddr + 0xa8),
4192 readl(phba->MBslimaddr + 0xac));
4193 phba->link_state = LPFC_HBA_ERROR;
4194 retval = 1;
4195 }
4196
4197 return retval;
4198 }
4199
4200 /**
4201 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4202 * @phba: Pointer to HBA context object.
4203 * @mask: Bit mask to be checked.
4204 *
4205 * This function checks the port status to determine whether the HBA is
4206 * ready. If the HBA is not ready, the function will reset the HBA PCI
4207 * function and check the status again. The function returns 1 when the
4208 * HBA fails to become ready, otherwise it returns
4209 * zero.
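 * Readiness is determined with lpfc_sli4_post_status_check(); a failing
 * check triggers a single lpfc_sli_brdrestart() before the final check.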
4210 **/ 4211 static int 4212 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask) 4213 { 4214 uint32_t status; 4215 int retval = 0; 4216 4217 /* Read the HBA Host Status Register */ 4218 status = lpfc_sli4_post_status_check(phba); 4219 4220 if (status) { 4221 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4222 lpfc_sli_brdrestart(phba); 4223 status = lpfc_sli4_post_status_check(phba); 4224 } 4225 4226 /* Check to see if any errors occurred during init */ 4227 if (status) { 4228 phba->link_state = LPFC_HBA_ERROR; 4229 retval = 1; 4230 } else 4231 phba->sli4_hba.intr_enable = 0; 4232 4233 return retval; 4234 } 4235 4236 /** 4237 * lpfc_sli_brdready - Wrapper func for checking the hba readyness 4238 * @phba: Pointer to HBA context object. 4239 * @mask: Bit mask to be checked. 4240 * 4241 * This routine wraps the actual SLI3 or SLI4 hba readyness check routine 4242 * from the API jump table function pointer from the lpfc_hba struct. 4243 **/ 4244 int 4245 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) 4246 { 4247 return phba->lpfc_sli_brdready(phba, mask); 4248 } 4249 4250 #define BARRIER_TEST_PATTERN (0xdeadbeef) 4251 4252 /** 4253 * lpfc_reset_barrier - Make HBA ready for HBA reset 4254 * @phba: Pointer to HBA context object. 4255 * 4256 * This function is called before resetting an HBA. This function is called 4257 * with hbalock held and requests HBA to quiesce DMAs before a reset. 4258 **/ 4259 void lpfc_reset_barrier(struct lpfc_hba *phba) 4260 { 4261 uint32_t __iomem *resp_buf; 4262 uint32_t __iomem *mbox_buf; 4263 volatile uint32_t mbox; 4264 uint32_t hc_copy, ha_copy, resp_data; 4265 int i; 4266 uint8_t hdrtype; 4267 4268 lockdep_assert_held(&phba->hbalock); 4269 4270 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); 4271 if (hdrtype != 0x80 || 4272 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID && 4273 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID)) 4274 return; 4275 4276 /* 4277 * Tell the other part of the chip to suspend temporarily all 4278 * its DMA activity. 
4279 */ 4280 resp_buf = phba->MBslimaddr; 4281 4282 /* Disable the error attention */ 4283 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 4284 return; 4285 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 4286 readl(phba->HCregaddr); /* flush */ 4287 phba->link_flag |= LS_IGNORE_ERATT; 4288 4289 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4290 return; 4291 if (ha_copy & HA_ERATT) { 4292 /* Clear Chip error bit */ 4293 writel(HA_ERATT, phba->HAregaddr); 4294 phba->pport->stopped = 1; 4295 } 4296 4297 mbox = 0; 4298 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 4299 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 4300 4301 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 4302 mbox_buf = phba->MBslimaddr; 4303 writel(mbox, mbox_buf); 4304 4305 for (i = 0; i < 50; i++) { 4306 if (lpfc_readl((resp_buf + 1), &resp_data)) 4307 return; 4308 if (resp_data != ~(BARRIER_TEST_PATTERN)) 4309 mdelay(1); 4310 else 4311 break; 4312 } 4313 resp_data = 0; 4314 if (lpfc_readl((resp_buf + 1), &resp_data)) 4315 return; 4316 if (resp_data != ~(BARRIER_TEST_PATTERN)) { 4317 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || 4318 phba->pport->stopped) 4319 goto restore_hc; 4320 else 4321 goto clear_errat; 4322 } 4323 4324 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 4325 resp_data = 0; 4326 for (i = 0; i < 500; i++) { 4327 if (lpfc_readl(resp_buf, &resp_data)) 4328 return; 4329 if (resp_data != mbox) 4330 mdelay(1); 4331 else 4332 break; 4333 } 4334 4335 clear_errat: 4336 4337 while (++i < 500) { 4338 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4339 return; 4340 if (!(ha_copy & HA_ERATT)) 4341 mdelay(1); 4342 else 4343 break; 4344 } 4345 4346 if (readl(phba->HAregaddr) & HA_ERATT) { 4347 writel(HA_ERATT, phba->HAregaddr); 4348 phba->pport->stopped = 1; 4349 } 4350 4351 restore_hc: 4352 phba->link_flag &= ~LS_IGNORE_ERATT; 4353 writel(hc_copy, phba->HCregaddr); 4354 readl(phba->HCregaddr); /* flush */ 4355 } 4356 4357 /** 4358 * lpfc_sli_brdkill - Issue a kill_board mailbox command 4359 * @phba: Pointer to HBA context object. 4360 * 4361 * This function issues a kill_board mailbox command and waits for 4362 * the error attention interrupt. This function is called for stopping 4363 * the firmware processing. The caller is not required to hold any 4364 * locks. This function calls lpfc_hba_down_post function to free 4365 * any pending commands after the kill. The function will return 1 when it 4366 * fails to kill the board else will return 0. 
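 * After the KILL_BOARD mailbox command is issued, the host attention
 * register is polled for ERATT every 100ms for up to 3 seconds.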
4367 **/ 4368 int 4369 lpfc_sli_brdkill(struct lpfc_hba *phba) 4370 { 4371 struct lpfc_sli *psli; 4372 LPFC_MBOXQ_t *pmb; 4373 uint32_t status; 4374 uint32_t ha_copy; 4375 int retval; 4376 int i = 0; 4377 4378 psli = &phba->sli; 4379 4380 /* Kill HBA */ 4381 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4382 "0329 Kill HBA Data: x%x x%x\n", 4383 phba->pport->port_state, psli->sli_flag); 4384 4385 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4386 if (!pmb) 4387 return 1; 4388 4389 /* Disable the error attention */ 4390 spin_lock_irq(&phba->hbalock); 4391 if (lpfc_readl(phba->HCregaddr, &status)) { 4392 spin_unlock_irq(&phba->hbalock); 4393 mempool_free(pmb, phba->mbox_mem_pool); 4394 return 1; 4395 } 4396 status &= ~HC_ERINT_ENA; 4397 writel(status, phba->HCregaddr); 4398 readl(phba->HCregaddr); /* flush */ 4399 phba->link_flag |= LS_IGNORE_ERATT; 4400 spin_unlock_irq(&phba->hbalock); 4401 4402 lpfc_kill_board(phba, pmb); 4403 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4404 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4405 4406 if (retval != MBX_SUCCESS) { 4407 if (retval != MBX_BUSY) 4408 mempool_free(pmb, phba->mbox_mem_pool); 4409 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4410 "2752 KILL_BOARD command failed retval %d\n", 4411 retval); 4412 spin_lock_irq(&phba->hbalock); 4413 phba->link_flag &= ~LS_IGNORE_ERATT; 4414 spin_unlock_irq(&phba->hbalock); 4415 return 1; 4416 } 4417 4418 spin_lock_irq(&phba->hbalock); 4419 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 4420 spin_unlock_irq(&phba->hbalock); 4421 4422 mempool_free(pmb, phba->mbox_mem_pool); 4423 4424 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 4425 * attention every 100ms for 3 seconds. If we don't get ERATT after 4426 * 3 seconds we still set HBA_ERROR state because the status of the 4427 * board is now undefined. 4428 */ 4429 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4430 return 1; 4431 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 4432 mdelay(100); 4433 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4434 return 1; 4435 } 4436 4437 del_timer_sync(&psli->mbox_tmo); 4438 if (ha_copy & HA_ERATT) { 4439 writel(HA_ERATT, phba->HAregaddr); 4440 phba->pport->stopped = 1; 4441 } 4442 spin_lock_irq(&phba->hbalock); 4443 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4444 psli->mbox_active = NULL; 4445 phba->link_flag &= ~LS_IGNORE_ERATT; 4446 spin_unlock_irq(&phba->hbalock); 4447 4448 lpfc_hba_down_post(phba); 4449 phba->link_state = LPFC_HBA_ERROR; 4450 4451 return ha_copy & HA_ERATT ? 0 : 1; 4452 } 4453 4454 /** 4455 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA 4456 * @phba: Pointer to HBA context object. 4457 * 4458 * This function resets the HBA by writing HC_INITFF to the control 4459 * register. After the HBA resets, this function resets all the iocb ring 4460 * indices. This function disables PCI layer parity checking during 4461 * the reset. 4462 * This function returns 0 always. 4463 * The caller is not required to hold any locks. 4464 **/ 4465 int 4466 lpfc_sli_brdreset(struct lpfc_hba *phba) 4467 { 4468 struct lpfc_sli *psli; 4469 struct lpfc_sli_ring *pring; 4470 uint16_t cfg_value; 4471 int i; 4472 4473 psli = &phba->sli; 4474 4475 /* Reset HBA */ 4476 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4477 "0325 Reset HBA Data: x%x x%x\n", 4478 (phba->pport) ? 
phba->pport->port_state : 0, 4479 psli->sli_flag); 4480 4481 /* perform board reset */ 4482 phba->fc_eventTag = 0; 4483 phba->link_events = 0; 4484 if (phba->pport) { 4485 phba->pport->fc_myDID = 0; 4486 phba->pport->fc_prevDID = 0; 4487 } 4488 4489 /* Turn off parity checking and serr during the physical reset */ 4490 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 4491 pci_write_config_word(phba->pcidev, PCI_COMMAND, 4492 (cfg_value & 4493 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 4494 4495 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA); 4496 4497 /* Now toggle INITFF bit in the Host Control Register */ 4498 writel(HC_INITFF, phba->HCregaddr); 4499 mdelay(1); 4500 readl(phba->HCregaddr); /* flush */ 4501 writel(0, phba->HCregaddr); 4502 readl(phba->HCregaddr); /* flush */ 4503 4504 /* Restore PCI cmd register */ 4505 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 4506 4507 /* Initialize relevant SLI info */ 4508 for (i = 0; i < psli->num_rings; i++) { 4509 pring = &psli->sli3_ring[i]; 4510 pring->flag = 0; 4511 pring->sli.sli3.rspidx = 0; 4512 pring->sli.sli3.next_cmdidx = 0; 4513 pring->sli.sli3.local_getidx = 0; 4514 pring->sli.sli3.cmdidx = 0; 4515 pring->missbufcnt = 0; 4516 } 4517 4518 phba->link_state = LPFC_WARM_START; 4519 return 0; 4520 } 4521 4522 /** 4523 * lpfc_sli4_brdreset - Reset a sli-4 HBA 4524 * @phba: Pointer to HBA context object. 4525 * 4526 * This function resets a SLI4 HBA. This function disables PCI layer parity 4527 * checking during resets the device. The caller is not required to hold 4528 * any locks. 4529 * 4530 * This function returns 0 always. 4531 **/ 4532 int 4533 lpfc_sli4_brdreset(struct lpfc_hba *phba) 4534 { 4535 struct lpfc_sli *psli = &phba->sli; 4536 uint16_t cfg_value; 4537 int rc = 0; 4538 4539 /* Reset HBA */ 4540 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4541 "0295 Reset HBA Data: x%x x%x x%x\n", 4542 phba->pport->port_state, psli->sli_flag, 4543 phba->hba_flag); 4544 4545 /* perform board reset */ 4546 phba->fc_eventTag = 0; 4547 phba->link_events = 0; 4548 phba->pport->fc_myDID = 0; 4549 phba->pport->fc_prevDID = 0; 4550 4551 spin_lock_irq(&phba->hbalock); 4552 psli->sli_flag &= ~(LPFC_PROCESS_LA); 4553 phba->fcf.fcf_flag = 0; 4554 spin_unlock_irq(&phba->hbalock); 4555 4556 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */ 4557 if (phba->hba_flag & HBA_FW_DUMP_OP) { 4558 phba->hba_flag &= ~HBA_FW_DUMP_OP; 4559 return rc; 4560 } 4561 4562 /* Now physically reset the device */ 4563 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4564 "0389 Performing PCI function reset!\n"); 4565 4566 /* Turn off parity checking and serr during the physical reset */ 4567 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 4568 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & 4569 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 4570 4571 /* Perform FCoE PCI function reset before freeing queue memory */ 4572 rc = lpfc_pci_function_reset(phba); 4573 4574 /* Restore PCI cmd register */ 4575 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 4576 4577 return rc; 4578 } 4579 4580 /** 4581 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba 4582 * @phba: Pointer to HBA context object. 4583 * 4584 * This function is called in the SLI initialization code path to 4585 * restart the HBA. The caller is not required to hold any lock. 4586 * This function writes MBX_RESTART mailbox command to the SLIM and 4587 * resets the HBA. 
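 * A reset barrier (lpfc_reset_barrier()) is issued before the SLIM write to
 * quiesce any outstanding DMA.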
At the end of the function, it calls lpfc_hba_down_post 4588 * function to free any pending commands. The function enables 4589 * POST only during the first initialization. The function returns zero. 4590 * The function does not guarantee completion of MBX_RESTART mailbox 4591 * command before the return of this function. 4592 **/ 4593 static int 4594 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) 4595 { 4596 MAILBOX_t *mb; 4597 struct lpfc_sli *psli; 4598 volatile uint32_t word0; 4599 void __iomem *to_slim; 4600 uint32_t hba_aer_enabled; 4601 4602 spin_lock_irq(&phba->hbalock); 4603 4604 /* Take PCIe device Advanced Error Reporting (AER) state */ 4605 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 4606 4607 psli = &phba->sli; 4608 4609 /* Restart HBA */ 4610 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4611 "0337 Restart HBA Data: x%x x%x\n", 4612 (phba->pport) ? phba->pport->port_state : 0, 4613 psli->sli_flag); 4614 4615 word0 = 0; 4616 mb = (MAILBOX_t *) &word0; 4617 mb->mbxCommand = MBX_RESTART; 4618 mb->mbxHc = 1; 4619 4620 lpfc_reset_barrier(phba); 4621 4622 to_slim = phba->MBslimaddr; 4623 writel(*(uint32_t *) mb, to_slim); 4624 readl(to_slim); /* flush */ 4625 4626 /* Only skip post after fc_ffinit is completed */ 4627 if (phba->pport && phba->pport->port_state) 4628 word0 = 1; /* This is really setting up word1 */ 4629 else 4630 word0 = 0; /* This is really setting up word1 */ 4631 to_slim = phba->MBslimaddr + sizeof (uint32_t); 4632 writel(*(uint32_t *) mb, to_slim); 4633 readl(to_slim); /* flush */ 4634 4635 lpfc_sli_brdreset(phba); 4636 if (phba->pport) 4637 phba->pport->stopped = 0; 4638 phba->link_state = LPFC_INIT_START; 4639 phba->hba_flag = 0; 4640 spin_unlock_irq(&phba->hbalock); 4641 4642 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 4643 psli->stats_start = ktime_get_seconds(); 4644 4645 /* Give the INITFF and Post time to settle. */ 4646 mdelay(100); 4647 4648 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 4649 if (hba_aer_enabled) 4650 pci_disable_pcie_error_reporting(phba->pcidev); 4651 4652 lpfc_hba_down_post(phba); 4653 4654 return 0; 4655 } 4656 4657 /** 4658 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba 4659 * @phba: Pointer to HBA context object. 4660 * 4661 * This function is called in the SLI initialization code path to restart 4662 * a SLI4 HBA. The caller is not required to hold any lock. 4663 * At the end of the function, it calls lpfc_hba_down_post function to 4664 * free any pending commands. 
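 * The SLI4 queues are also destroyed with lpfc_sli4_queue_destroy() once
 * the port has been reset.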
4665 **/ 4666 static int 4667 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) 4668 { 4669 struct lpfc_sli *psli = &phba->sli; 4670 uint32_t hba_aer_enabled; 4671 int rc; 4672 4673 /* Restart HBA */ 4674 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4675 "0296 Restart HBA Data: x%x x%x\n", 4676 phba->pport->port_state, psli->sli_flag); 4677 4678 /* Take PCIe device Advanced Error Reporting (AER) state */ 4679 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 4680 4681 rc = lpfc_sli4_brdreset(phba); 4682 if (rc) 4683 return rc; 4684 4685 spin_lock_irq(&phba->hbalock); 4686 phba->pport->stopped = 0; 4687 phba->link_state = LPFC_INIT_START; 4688 phba->hba_flag = 0; 4689 spin_unlock_irq(&phba->hbalock); 4690 4691 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 4692 psli->stats_start = ktime_get_seconds(); 4693 4694 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 4695 if (hba_aer_enabled) 4696 pci_disable_pcie_error_reporting(phba->pcidev); 4697 4698 lpfc_hba_down_post(phba); 4699 lpfc_sli4_queue_destroy(phba); 4700 4701 return rc; 4702 } 4703 4704 /** 4705 * lpfc_sli_brdrestart - Wrapper func for restarting hba 4706 * @phba: Pointer to HBA context object. 4707 * 4708 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the 4709 * API jump table function pointer from the lpfc_hba struct. 4710 **/ 4711 int 4712 lpfc_sli_brdrestart(struct lpfc_hba *phba) 4713 { 4714 return phba->lpfc_sli_brdrestart(phba); 4715 } 4716 4717 /** 4718 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart 4719 * @phba: Pointer to HBA context object. 4720 * 4721 * This function is called after a HBA restart to wait for successful 4722 * restart of the HBA. Successful restart of the HBA is indicated by 4723 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15 4724 * iteration, the function will restart the HBA again. The function returns 4725 * zero if HBA successfully restarted else returns negative error code. 4726 **/ 4727 int 4728 lpfc_sli_chipset_init(struct lpfc_hba *phba) 4729 { 4730 uint32_t status, i = 0; 4731 4732 /* Read the HBA Host Status Register */ 4733 if (lpfc_readl(phba->HSregaddr, &status)) 4734 return -EIO; 4735 4736 /* Check status register to see what current state is */ 4737 i = 0; 4738 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) { 4739 4740 /* Check every 10ms for 10 retries, then every 100ms for 90 4741 * retries, then every 1 sec for 50 retires for a total of 4742 * ~60 seconds before reset the board again and check every 4743 * 1 sec for 50 retries. The up to 60 seconds before the 4744 * board ready is required by the Falcon FIPS zeroization 4745 * complete, and any reset the board in between shall cause 4746 * restart of zeroization, further delay the board ready. 
4747 */ 4748 if (i++ >= 200) { 4749 /* Adapter failed to init, timeout, status reg 4750 <status> */ 4751 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4752 "0436 Adapter failed to init, " 4753 "timeout, status reg x%x, " 4754 "FW Data: A8 x%x AC x%x\n", status, 4755 readl(phba->MBslimaddr + 0xa8), 4756 readl(phba->MBslimaddr + 0xac)); 4757 phba->link_state = LPFC_HBA_ERROR; 4758 return -ETIMEDOUT; 4759 } 4760 4761 /* Check to see if any errors occurred during init */ 4762 if (status & HS_FFERM) { 4763 /* ERROR: During chipset initialization */ 4764 /* Adapter failed to init, chipset, status reg 4765 <status> */ 4766 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4767 "0437 Adapter failed to init, " 4768 "chipset, status reg x%x, " 4769 "FW Data: A8 x%x AC x%x\n", status, 4770 readl(phba->MBslimaddr + 0xa8), 4771 readl(phba->MBslimaddr + 0xac)); 4772 phba->link_state = LPFC_HBA_ERROR; 4773 return -EIO; 4774 } 4775 4776 if (i <= 10) 4777 msleep(10); 4778 else if (i <= 100) 4779 msleep(100); 4780 else 4781 msleep(1000); 4782 4783 if (i == 150) { 4784 /* Do post */ 4785 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4786 lpfc_sli_brdrestart(phba); 4787 } 4788 /* Read the HBA Host Status Register */ 4789 if (lpfc_readl(phba->HSregaddr, &status)) 4790 return -EIO; 4791 } 4792 4793 /* Check to see if any errors occurred during init */ 4794 if (status & HS_FFERM) { 4795 /* ERROR: During chipset initialization */ 4796 /* Adapter failed to init, chipset, status reg <status> */ 4797 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4798 "0438 Adapter failed to init, chipset, " 4799 "status reg x%x, " 4800 "FW Data: A8 x%x AC x%x\n", status, 4801 readl(phba->MBslimaddr + 0xa8), 4802 readl(phba->MBslimaddr + 0xac)); 4803 phba->link_state = LPFC_HBA_ERROR; 4804 return -EIO; 4805 } 4806 4807 /* Clear all interrupt enable conditions */ 4808 writel(0, phba->HCregaddr); 4809 readl(phba->HCregaddr); /* flush */ 4810 4811 /* setup host attn register */ 4812 writel(0xffffffff, phba->HAregaddr); 4813 readl(phba->HAregaddr); /* flush */ 4814 return 0; 4815 } 4816 4817 /** 4818 * lpfc_sli_hbq_count - Get the number of HBQs to be configured 4819 * 4820 * This function calculates and returns the number of HBQs required to be 4821 * configured. 4822 **/ 4823 int 4824 lpfc_sli_hbq_count(void) 4825 { 4826 return ARRAY_SIZE(lpfc_hbq_defs); 4827 } 4828 4829 /** 4830 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries 4831 * 4832 * This function adds the number of hbq entries in every HBQ to get 4833 * the total number of hbq entries required for the HBA and returns 4834 * the total count. 4835 **/ 4836 static int 4837 lpfc_sli_hbq_entry_count(void) 4838 { 4839 int hbq_count = lpfc_sli_hbq_count(); 4840 int count = 0; 4841 int i; 4842 4843 for (i = 0; i < hbq_count; ++i) 4844 count += lpfc_hbq_defs[i]->entry_count; 4845 return count; 4846 } 4847 4848 /** 4849 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries 4850 * 4851 * This function calculates amount of memory required for all hbq entries 4852 * to be configured and returns the total memory required. 4853 **/ 4854 int 4855 lpfc_sli_hbq_size(void) 4856 { 4857 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 4858 } 4859 4860 /** 4861 * lpfc_sli_hbq_setup - configure and initialize HBQs 4862 * @phba: Pointer to HBA context object. 4863 * 4864 * This function is called during the SLI initialization to configure 4865 * all the HBQs and post buffers to the HBQ. The caller is not 4866 * required to hold any locks. 
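 * Each HBQ is configured via lpfc_config_hbq() using a polled mailbox
 * command and is then populated with buffers by lpfc_sli_hbqbuf_init_hbqs().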
This function will return zero if successful 4867 * else it will return negative error code. 4868 **/ 4869 static int 4870 lpfc_sli_hbq_setup(struct lpfc_hba *phba) 4871 { 4872 int hbq_count = lpfc_sli_hbq_count(); 4873 LPFC_MBOXQ_t *pmb; 4874 MAILBOX_t *pmbox; 4875 uint32_t hbqno; 4876 uint32_t hbq_entry_index; 4877 4878 /* Get a Mailbox buffer to setup mailbox 4879 * commands for HBA initialization 4880 */ 4881 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4882 4883 if (!pmb) 4884 return -ENOMEM; 4885 4886 pmbox = &pmb->u.mb; 4887 4888 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 4889 phba->link_state = LPFC_INIT_MBX_CMDS; 4890 phba->hbq_in_use = 1; 4891 4892 hbq_entry_index = 0; 4893 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 4894 phba->hbqs[hbqno].next_hbqPutIdx = 0; 4895 phba->hbqs[hbqno].hbqPutIdx = 0; 4896 phba->hbqs[hbqno].local_hbqGetIdx = 0; 4897 phba->hbqs[hbqno].entry_count = 4898 lpfc_hbq_defs[hbqno]->entry_count; 4899 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], 4900 hbq_entry_index, pmb); 4901 hbq_entry_index += phba->hbqs[hbqno].entry_count; 4902 4903 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 4904 /* Adapter failed to init, mbxCmd <cmd> CFG_RING, 4905 mbxStatus <status>, ring <num> */ 4906 4907 lpfc_printf_log(phba, KERN_ERR, 4908 LOG_SLI | LOG_VPORT, 4909 "1805 Adapter failed to init. " 4910 "Data: x%x x%x x%x\n", 4911 pmbox->mbxCommand, 4912 pmbox->mbxStatus, hbqno); 4913 4914 phba->link_state = LPFC_HBA_ERROR; 4915 mempool_free(pmb, phba->mbox_mem_pool); 4916 return -ENXIO; 4917 } 4918 } 4919 phba->hbq_count = hbq_count; 4920 4921 mempool_free(pmb, phba->mbox_mem_pool); 4922 4923 /* Initially populate or replenish the HBQs */ 4924 for (hbqno = 0; hbqno < hbq_count; ++hbqno) 4925 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno); 4926 return 0; 4927 } 4928 4929 /** 4930 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA 4931 * @phba: Pointer to HBA context object. 4932 * 4933 * This function is called during the SLI initialization to configure 4934 * all the HBQs and post buffers to the HBQ. The caller is not 4935 * required to hold any locks. This function will return zero if successful 4936 * else it will return negative error code. 4937 **/ 4938 static int 4939 lpfc_sli4_rb_setup(struct lpfc_hba *phba) 4940 { 4941 phba->hbq_in_use = 1; 4942 phba->hbqs[LPFC_ELS_HBQ].entry_count = 4943 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count; 4944 phba->hbq_count = 1; 4945 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ); 4946 /* Initially populate or replenish the HBQs */ 4947 return 0; 4948 } 4949 4950 /** 4951 * lpfc_sli_config_port - Issue config port mailbox command 4952 * @phba: Pointer to HBA context object. 4953 * @sli_mode: sli mode - 2/3 4954 * 4955 * This function is called by the sli initialization code path 4956 * to issue config_port mailbox command. This function restarts the 4957 * HBA firmware and issues a config_port mailbox command to configure 4958 * the SLI interface in the sli mode specified by sli_mode 4959 * variable. The caller is not required to hold any locks. 4960 * The function returns 0 if successful, else returns negative error 4961 * code. 
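 * The board restart and CONFIG_PORT sequence is attempted at most twice
 * before the function gives up and returns an error.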
4962 **/ 4963 int 4964 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) 4965 { 4966 LPFC_MBOXQ_t *pmb; 4967 uint32_t resetcount = 0, rc = 0, done = 0; 4968 4969 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4970 if (!pmb) { 4971 phba->link_state = LPFC_HBA_ERROR; 4972 return -ENOMEM; 4973 } 4974 4975 phba->sli_rev = sli_mode; 4976 while (resetcount < 2 && !done) { 4977 spin_lock_irq(&phba->hbalock); 4978 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 4979 spin_unlock_irq(&phba->hbalock); 4980 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4981 lpfc_sli_brdrestart(phba); 4982 rc = lpfc_sli_chipset_init(phba); 4983 if (rc) 4984 break; 4985 4986 spin_lock_irq(&phba->hbalock); 4987 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4988 spin_unlock_irq(&phba->hbalock); 4989 resetcount++; 4990 4991 /* Call pre CONFIG_PORT mailbox command initialization. A 4992 * value of 0 means the call was successful. Any other 4993 * nonzero value is a failure, but if ERESTART is returned, 4994 * the driver may reset the HBA and try again. 4995 */ 4996 rc = lpfc_config_port_prep(phba); 4997 if (rc == -ERESTART) { 4998 phba->link_state = LPFC_LINK_UNKNOWN; 4999 continue; 5000 } else if (rc) 5001 break; 5002 5003 phba->link_state = LPFC_INIT_MBX_CMDS; 5004 lpfc_config_port(phba, pmb); 5005 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 5006 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 5007 LPFC_SLI3_HBQ_ENABLED | 5008 LPFC_SLI3_CRP_ENABLED | 5009 LPFC_SLI3_DSS_ENABLED); 5010 if (rc != MBX_SUCCESS) { 5011 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5012 "0442 Adapter failed to init, mbxCmd x%x " 5013 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 5014 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); 5015 spin_lock_irq(&phba->hbalock); 5016 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 5017 spin_unlock_irq(&phba->hbalock); 5018 rc = -ENXIO; 5019 } else { 5020 /* Allow asynchronous mailbox command to go through */ 5021 spin_lock_irq(&phba->hbalock); 5022 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 5023 spin_unlock_irq(&phba->hbalock); 5024 done = 1; 5025 5026 if ((pmb->u.mb.un.varCfgPort.casabt == 1) && 5027 (pmb->u.mb.un.varCfgPort.gasabt == 0)) 5028 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5029 "3110 Port did not grant ASABT\n"); 5030 } 5031 } 5032 if (!done) { 5033 rc = -EINVAL; 5034 goto do_prep_failed; 5035 } 5036 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { 5037 if (!pmb->u.mb.un.varCfgPort.cMA) { 5038 rc = -ENXIO; 5039 goto do_prep_failed; 5040 } 5041 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { 5042 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 5043 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; 5044 phba->max_vports = (phba->max_vpi > phba->max_vports) ? 5045 phba->max_vpi : phba->max_vports; 5046 5047 } else 5048 phba->max_vpi = 0; 5049 phba->fips_level = 0; 5050 phba->fips_spec_rev = 0; 5051 if (pmb->u.mb.un.varCfgPort.gdss) { 5052 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; 5053 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level; 5054 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev; 5055 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5056 "2850 Security Crypto Active. 
FIPS x%d " 5057 "(Spec Rev: x%d)", 5058 phba->fips_level, phba->fips_spec_rev); 5059 } 5060 if (pmb->u.mb.un.varCfgPort.sec_err) { 5061 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5062 "2856 Config Port Security Crypto " 5063 "Error: x%x ", 5064 pmb->u.mb.un.varCfgPort.sec_err); 5065 } 5066 if (pmb->u.mb.un.varCfgPort.gerbm) 5067 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 5068 if (pmb->u.mb.un.varCfgPort.gcrp) 5069 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 5070 5071 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; 5072 phba->port_gp = phba->mbox->us.s3_pgp.port; 5073 5074 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 5075 if (pmb->u.mb.un.varCfgPort.gbg == 0) { 5076 phba->cfg_enable_bg = 0; 5077 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 5078 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5079 "0443 Adapter did not grant " 5080 "BlockGuard\n"); 5081 } 5082 } 5083 } else { 5084 phba->hbq_get = NULL; 5085 phba->port_gp = phba->mbox->us.s2.port; 5086 phba->max_vpi = 0; 5087 } 5088 do_prep_failed: 5089 mempool_free(pmb, phba->mbox_mem_pool); 5090 return rc; 5091 } 5092 5093 5094 /** 5095 * lpfc_sli_hba_setup - SLI initialization function 5096 * @phba: Pointer to HBA context object. 5097 * 5098 * This function is the main SLI initialization function. This function 5099 * is called by the HBA initialization code, HBA reset code and HBA 5100 * error attention handler code. Caller is not required to hold any 5101 * locks. This function issues config_port mailbox command to configure 5102 * the SLI, setup iocb rings and HBQ rings. In the end the function 5103 * calls the config_port_post function to issue init_link mailbox 5104 * command and to start the discovery. The function will return zero 5105 * if successful, else it will return negative error code. 5106 **/ 5107 int 5108 lpfc_sli_hba_setup(struct lpfc_hba *phba) 5109 { 5110 uint32_t rc; 5111 int mode = 3, i; 5112 int longs; 5113 5114 switch (phba->cfg_sli_mode) { 5115 case 2: 5116 if (phba->cfg_enable_npiv) { 5117 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 5118 "1824 NPIV enabled: Override sli_mode " 5119 "parameter (%d) to auto (0).\n", 5120 phba->cfg_sli_mode); 5121 break; 5122 } 5123 mode = 2; 5124 break; 5125 case 0: 5126 case 3: 5127 break; 5128 default: 5129 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 5130 "1819 Unrecognized sli_mode parameter: %d.\n", 5131 phba->cfg_sli_mode); 5132 5133 break; 5134 } 5135 phba->fcp_embed_io = 0; /* SLI4 FC support only */ 5136 5137 rc = lpfc_sli_config_port(phba, mode); 5138 5139 if (rc && phba->cfg_sli_mode == 3) 5140 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 5141 "1820 Unable to select SLI-3. 
" 5142 "Not supported by adapter.\n"); 5143 if (rc && mode != 2) 5144 rc = lpfc_sli_config_port(phba, 2); 5145 else if (rc && mode == 2) 5146 rc = lpfc_sli_config_port(phba, 3); 5147 if (rc) 5148 goto lpfc_sli_hba_setup_error; 5149 5150 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 5151 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 5152 rc = pci_enable_pcie_error_reporting(phba->pcidev); 5153 if (!rc) { 5154 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5155 "2709 This device supports " 5156 "Advanced Error Reporting (AER)\n"); 5157 spin_lock_irq(&phba->hbalock); 5158 phba->hba_flag |= HBA_AER_ENABLED; 5159 spin_unlock_irq(&phba->hbalock); 5160 } else { 5161 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5162 "2708 This device does not support " 5163 "Advanced Error Reporting (AER): %d\n", 5164 rc); 5165 phba->cfg_aer_support = 0; 5166 } 5167 } 5168 5169 if (phba->sli_rev == 3) { 5170 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 5171 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 5172 } else { 5173 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 5174 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 5175 phba->sli3_options = 0; 5176 } 5177 5178 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5179 "0444 Firmware in SLI %x mode. Max_vpi %d\n", 5180 phba->sli_rev, phba->max_vpi); 5181 rc = lpfc_sli_ring_map(phba); 5182 5183 if (rc) 5184 goto lpfc_sli_hba_setup_error; 5185 5186 /* Initialize VPIs. */ 5187 if (phba->sli_rev == LPFC_SLI_REV3) { 5188 /* 5189 * The VPI bitmask and physical ID array are allocated 5190 * and initialized once only - at driver load. A port 5191 * reset doesn't need to reinitialize this memory. 5192 */ 5193 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) { 5194 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG; 5195 phba->vpi_bmask = kcalloc(longs, 5196 sizeof(unsigned long), 5197 GFP_KERNEL); 5198 if (!phba->vpi_bmask) { 5199 rc = -ENOMEM; 5200 goto lpfc_sli_hba_setup_error; 5201 } 5202 5203 phba->vpi_ids = kcalloc(phba->max_vpi + 1, 5204 sizeof(uint16_t), 5205 GFP_KERNEL); 5206 if (!phba->vpi_ids) { 5207 kfree(phba->vpi_bmask); 5208 rc = -ENOMEM; 5209 goto lpfc_sli_hba_setup_error; 5210 } 5211 for (i = 0; i < phba->max_vpi; i++) 5212 phba->vpi_ids[i] = i; 5213 } 5214 } 5215 5216 /* Init HBQs */ 5217 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 5218 rc = lpfc_sli_hbq_setup(phba); 5219 if (rc) 5220 goto lpfc_sli_hba_setup_error; 5221 } 5222 spin_lock_irq(&phba->hbalock); 5223 phba->sli.sli_flag |= LPFC_PROCESS_LA; 5224 spin_unlock_irq(&phba->hbalock); 5225 5226 rc = lpfc_config_port_post(phba); 5227 if (rc) 5228 goto lpfc_sli_hba_setup_error; 5229 5230 return rc; 5231 5232 lpfc_sli_hba_setup_error: 5233 phba->link_state = LPFC_HBA_ERROR; 5234 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5235 "0445 Firmware initialization failed\n"); 5236 return rc; 5237 } 5238 5239 /** 5240 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region 5241 * @phba: Pointer to HBA context object. 5242 * @mboxq: mailbox pointer. 5243 * This function issue a dump mailbox command to read config region 5244 * 23 and parse the records in the region and populate driver 5245 * data structure. 
5246 **/ 5247 static int 5248 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba) 5249 { 5250 LPFC_MBOXQ_t *mboxq; 5251 struct lpfc_dmabuf *mp; 5252 struct lpfc_mqe *mqe; 5253 uint32_t data_length; 5254 int rc; 5255 5256 /* Program the default value of vlan_id and fc_map */ 5257 phba->valid_vlan = 0; 5258 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 5259 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 5260 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 5261 5262 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5263 if (!mboxq) 5264 return -ENOMEM; 5265 5266 mqe = &mboxq->u.mqe; 5267 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) { 5268 rc = -ENOMEM; 5269 goto out_free_mboxq; 5270 } 5271 5272 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; 5273 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5274 5275 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5276 "(%d):2571 Mailbox cmd x%x Status x%x " 5277 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 5278 "x%x x%x x%x x%x x%x x%x x%x x%x x%x " 5279 "CQ: x%x x%x x%x x%x\n", 5280 mboxq->vport ? mboxq->vport->vpi : 0, 5281 bf_get(lpfc_mqe_command, mqe), 5282 bf_get(lpfc_mqe_status, mqe), 5283 mqe->un.mb_words[0], mqe->un.mb_words[1], 5284 mqe->un.mb_words[2], mqe->un.mb_words[3], 5285 mqe->un.mb_words[4], mqe->un.mb_words[5], 5286 mqe->un.mb_words[6], mqe->un.mb_words[7], 5287 mqe->un.mb_words[8], mqe->un.mb_words[9], 5288 mqe->un.mb_words[10], mqe->un.mb_words[11], 5289 mqe->un.mb_words[12], mqe->un.mb_words[13], 5290 mqe->un.mb_words[14], mqe->un.mb_words[15], 5291 mqe->un.mb_words[16], mqe->un.mb_words[50], 5292 mboxq->mcqe.word0, 5293 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 5294 mboxq->mcqe.trailer); 5295 5296 if (rc) { 5297 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5298 kfree(mp); 5299 rc = -EIO; 5300 goto out_free_mboxq; 5301 } 5302 data_length = mqe->un.mb_words[5]; 5303 if (data_length > DMP_RGN23_SIZE) { 5304 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5305 kfree(mp); 5306 rc = -EIO; 5307 goto out_free_mboxq; 5308 } 5309 5310 lpfc_parse_fcoe_conf(phba, mp->virt, data_length); 5311 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5312 kfree(mp); 5313 rc = 0; 5314 5315 out_free_mboxq: 5316 mempool_free(mboxq, phba->mbox_mem_pool); 5317 return rc; 5318 } 5319 5320 /** 5321 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data 5322 * @phba: pointer to lpfc hba data structure. 5323 * @mboxq: pointer to the LPFC_MBOXQ_t structure. 5324 * @vpd: pointer to the memory to hold resulting port vpd data. 5325 * @vpd_size: On input, the number of bytes allocated to @vpd. 5326 * On output, the number of data bytes in @vpd. 5327 * 5328 * This routine executes a READ_REV SLI4 mailbox command. In 5329 * addition, this routine gets the port vpd data. 5330 * 5331 * Return codes 5332 * 0 - successful 5333 * -ENOMEM - could not allocated memory. 5334 **/ 5335 static int 5336 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 5337 uint8_t *vpd, uint32_t *vpd_size) 5338 { 5339 int rc = 0; 5340 uint32_t dma_size; 5341 struct lpfc_dmabuf *dmabuf; 5342 struct lpfc_mqe *mqe; 5343 5344 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5345 if (!dmabuf) 5346 return -ENOMEM; 5347 5348 /* 5349 * Get a DMA buffer for the vpd data resulting from the READ_REV 5350 * mailbox command. 
5351 */ 5352 dma_size = *vpd_size; 5353 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size, 5354 &dmabuf->phys, GFP_KERNEL); 5355 if (!dmabuf->virt) { 5356 kfree(dmabuf); 5357 return -ENOMEM; 5358 } 5359 5360 /* 5361 * The SLI4 implementation of READ_REV conflicts at word1, 5362 * bits 31:16 and SLI4 adds vpd functionality not present 5363 * in SLI3. This code corrects the conflicts. 5364 */ 5365 lpfc_read_rev(phba, mboxq); 5366 mqe = &mboxq->u.mqe; 5367 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); 5368 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); 5369 mqe->un.read_rev.word1 &= 0x0000FFFF; 5370 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); 5371 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); 5372 5373 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5374 if (rc) { 5375 dma_free_coherent(&phba->pcidev->dev, dma_size, 5376 dmabuf->virt, dmabuf->phys); 5377 kfree(dmabuf); 5378 return -EIO; 5379 } 5380 5381 /* 5382 * The available vpd length cannot be bigger than the 5383 * DMA buffer passed to the port. Catch the less than 5384 * case and update the caller's size. 5385 */ 5386 if (mqe->un.read_rev.avail_vpd_len < *vpd_size) 5387 *vpd_size = mqe->un.read_rev.avail_vpd_len; 5388 5389 memcpy(vpd, dmabuf->virt, *vpd_size); 5390 5391 dma_free_coherent(&phba->pcidev->dev, dma_size, 5392 dmabuf->virt, dmabuf->phys); 5393 kfree(dmabuf); 5394 return 0; 5395 } 5396 5397 /** 5398 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name 5399 * @phba: pointer to lpfc hba data structure. 5400 * 5401 * This routine retrieves SLI4 device physical port name this PCI function 5402 * is attached to. 5403 * 5404 * Return codes 5405 * 0 - successful 5406 * otherwise - failed to retrieve physical port name 5407 **/ 5408 static int 5409 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba) 5410 { 5411 LPFC_MBOXQ_t *mboxq; 5412 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr; 5413 struct lpfc_controller_attribute *cntl_attr; 5414 struct lpfc_mbx_get_port_name *get_port_name; 5415 void *virtaddr = NULL; 5416 uint32_t alloclen, reqlen; 5417 uint32_t shdr_status, shdr_add_status; 5418 union lpfc_sli4_cfg_shdr *shdr; 5419 char cport_name = 0; 5420 int rc; 5421 5422 /* We assume nothing at this point */ 5423 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 5424 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON; 5425 5426 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5427 if (!mboxq) 5428 return -ENOMEM; 5429 /* obtain link type and link number via READ_CONFIG */ 5430 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 5431 lpfc_sli4_read_config(phba); 5432 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) 5433 goto retrieve_ppname; 5434 5435 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */ 5436 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes); 5437 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5438 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen, 5439 LPFC_SLI4_MBX_NEMBED); 5440 if (alloclen < reqlen) { 5441 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5442 "3084 Allocated DMA memory size (%d) is " 5443 "less than the requested DMA memory size " 5444 "(%d)\n", alloclen, reqlen); 5445 rc = -ENOMEM; 5446 goto out_free_mboxq; 5447 } 5448 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5449 virtaddr = mboxq->sge_array->addr[0]; 5450 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr; 5451 shdr = &mbx_cntl_attr->cfg_shdr; 5452 shdr_status = 
bf_get(lpfc_mbox_hdr_status, &shdr->response); 5453 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5454 if (shdr_status || shdr_add_status || rc) { 5455 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5456 "3085 Mailbox x%x (x%x/x%x) failed, " 5457 "rc:x%x, status:x%x, add_status:x%x\n", 5458 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 5459 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 5460 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 5461 rc, shdr_status, shdr_add_status); 5462 rc = -ENXIO; 5463 goto out_free_mboxq; 5464 } 5465 cntl_attr = &mbx_cntl_attr->cntl_attr; 5466 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 5467 phba->sli4_hba.lnk_info.lnk_tp = 5468 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr); 5469 phba->sli4_hba.lnk_info.lnk_no = 5470 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr); 5471 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5472 "3086 lnk_type:%d, lnk_numb:%d\n", 5473 phba->sli4_hba.lnk_info.lnk_tp, 5474 phba->sli4_hba.lnk_info.lnk_no); 5475 5476 retrieve_ppname: 5477 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5478 LPFC_MBOX_OPCODE_GET_PORT_NAME, 5479 sizeof(struct lpfc_mbx_get_port_name) - 5480 sizeof(struct lpfc_sli4_cfg_mhdr), 5481 LPFC_SLI4_MBX_EMBED); 5482 get_port_name = &mboxq->u.mqe.un.get_port_name; 5483 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr; 5484 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1); 5485 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request, 5486 phba->sli4_hba.lnk_info.lnk_tp); 5487 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5488 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 5489 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5490 if (shdr_status || shdr_add_status || rc) { 5491 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5492 "3087 Mailbox x%x (x%x/x%x) failed: " 5493 "rc:x%x, status:x%x, add_status:x%x\n", 5494 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 5495 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 5496 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 5497 rc, shdr_status, shdr_add_status); 5498 rc = -ENXIO; 5499 goto out_free_mboxq; 5500 } 5501 switch (phba->sli4_hba.lnk_info.lnk_no) { 5502 case LPFC_LINK_NUMBER_0: 5503 cport_name = bf_get(lpfc_mbx_get_port_name_name0, 5504 &get_port_name->u.response); 5505 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5506 break; 5507 case LPFC_LINK_NUMBER_1: 5508 cport_name = bf_get(lpfc_mbx_get_port_name_name1, 5509 &get_port_name->u.response); 5510 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5511 break; 5512 case LPFC_LINK_NUMBER_2: 5513 cport_name = bf_get(lpfc_mbx_get_port_name_name2, 5514 &get_port_name->u.response); 5515 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5516 break; 5517 case LPFC_LINK_NUMBER_3: 5518 cport_name = bf_get(lpfc_mbx_get_port_name_name3, 5519 &get_port_name->u.response); 5520 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5521 break; 5522 default: 5523 break; 5524 } 5525 5526 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) { 5527 phba->Port[0] = cport_name; 5528 phba->Port[1] = '\0'; 5529 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5530 "3091 SLI get port name: %s\n", phba->Port); 5531 } 5532 5533 out_free_mboxq: 5534 if (rc != MBX_TIMEOUT) { 5535 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) 5536 lpfc_sli4_mbox_cmd_free(phba, mboxq); 5537 else 5538 mempool_free(mboxq, phba->mbox_mem_pool); 5539 } 5540 return rc; 5541 } 5542 5543 /** 5544 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device 
completion and event queues 5545 * @phba: pointer to lpfc hba data structure. 5546 * 5547 * This routine is called to explicitly arm the SLI4 device's completion and 5548 * event queues 5549 **/ 5550 static void 5551 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) 5552 { 5553 int qidx; 5554 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; 5555 struct lpfc_sli4_hdw_queue *qp; 5556 5557 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM); 5558 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM); 5559 if (sli4_hba->nvmels_cq) 5560 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0, 5561 LPFC_QUEUE_REARM); 5562 5563 qp = sli4_hba->hdwq; 5564 if (sli4_hba->hdwq) { 5565 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 5566 sli4_hba->sli4_write_cq_db(phba, qp[qidx].fcp_cq, 0, 5567 LPFC_QUEUE_REARM); 5568 sli4_hba->sli4_write_cq_db(phba, qp[qidx].nvme_cq, 0, 5569 LPFC_QUEUE_REARM); 5570 } 5571 5572 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) 5573 sli4_hba->sli4_write_eq_db(phba, qp[qidx].hba_eq, 5574 0, LPFC_QUEUE_REARM); 5575 } 5576 5577 if (phba->nvmet_support) { 5578 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) { 5579 sli4_hba->sli4_write_cq_db(phba, 5580 sli4_hba->nvmet_cqset[qidx], 0, 5581 LPFC_QUEUE_REARM); 5582 } 5583 } 5584 } 5585 5586 /** 5587 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count. 5588 * @phba: Pointer to HBA context object. 5589 * @type: The resource extent type. 5590 * @extnt_count: buffer to hold port available extent count. 5591 * @extnt_size: buffer to hold element count per extent. 5592 * 5593 * This function calls the port and retrievs the number of available 5594 * extents and their size for a particular extent type. 5595 * 5596 * Returns: 0 if successful. Nonzero otherwise. 5597 **/ 5598 int 5599 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type, 5600 uint16_t *extnt_count, uint16_t *extnt_size) 5601 { 5602 int rc = 0; 5603 uint32_t length; 5604 uint32_t mbox_tmo; 5605 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info; 5606 LPFC_MBOXQ_t *mbox; 5607 5608 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5609 if (!mbox) 5610 return -ENOMEM; 5611 5612 /* Find out how many extents are available for this resource type */ 5613 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) - 5614 sizeof(struct lpfc_sli4_cfg_mhdr)); 5615 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5616 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO, 5617 length, LPFC_SLI4_MBX_EMBED); 5618 5619 /* Send an extents count of 0 - the GET doesn't use it. 
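 * The request body carries only the resource type; the count and size
 * for that type come back in the response and are parsed below.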
*/ 5620 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 5621 LPFC_SLI4_MBX_EMBED); 5622 if (unlikely(rc)) { 5623 rc = -EIO; 5624 goto err_exit; 5625 } 5626 5627 if (!phba->sli4_hba.intr_enable) 5628 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5629 else { 5630 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5631 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5632 } 5633 if (unlikely(rc)) { 5634 rc = -EIO; 5635 goto err_exit; 5636 } 5637 5638 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info; 5639 if (bf_get(lpfc_mbox_hdr_status, 5640 &rsrc_info->header.cfg_shdr.response)) { 5641 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5642 "2930 Failed to get resource extents " 5643 "Status 0x%x Add'l Status 0x%x\n", 5644 bf_get(lpfc_mbox_hdr_status, 5645 &rsrc_info->header.cfg_shdr.response), 5646 bf_get(lpfc_mbox_hdr_add_status, 5647 &rsrc_info->header.cfg_shdr.response)); 5648 rc = -EIO; 5649 goto err_exit; 5650 } 5651 5652 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt, 5653 &rsrc_info->u.rsp); 5654 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size, 5655 &rsrc_info->u.rsp); 5656 5657 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5658 "3162 Retrieved extents type-%d from port: count:%d, " 5659 "size:%d\n", type, *extnt_count, *extnt_size); 5660 5661 err_exit: 5662 mempool_free(mbox, phba->mbox_mem_pool); 5663 return rc; 5664 } 5665 5666 /** 5667 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents. 5668 * @phba: Pointer to HBA context object. 5669 * @type: The extent type to check. 5670 * 5671 * This function reads the current available extents from the port and checks 5672 * if the extent count or extent size has changed since the last access. 5673 * Callers use this routine post port reset to understand if there is a 5674 * extent reprovisioning requirement. 5675 * 5676 * Returns: 5677 * -Error: error indicates problem. 5678 * 1: Extent count or size has changed. 5679 * 0: No changes. 5680 **/ 5681 static int 5682 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type) 5683 { 5684 uint16_t curr_ext_cnt, rsrc_ext_cnt; 5685 uint16_t size_diff, rsrc_ext_size; 5686 int rc = 0; 5687 struct lpfc_rsrc_blks *rsrc_entry; 5688 struct list_head *rsrc_blk_list = NULL; 5689 5690 size_diff = 0; 5691 curr_ext_cnt = 0; 5692 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5693 &rsrc_ext_cnt, 5694 &rsrc_ext_size); 5695 if (unlikely(rc)) 5696 return -EIO; 5697 5698 switch (type) { 5699 case LPFC_RSC_TYPE_FCOE_RPI: 5700 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5701 break; 5702 case LPFC_RSC_TYPE_FCOE_VPI: 5703 rsrc_blk_list = &phba->lpfc_vpi_blk_list; 5704 break; 5705 case LPFC_RSC_TYPE_FCOE_XRI: 5706 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5707 break; 5708 case LPFC_RSC_TYPE_FCOE_VFI: 5709 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5710 break; 5711 default: 5712 break; 5713 } 5714 5715 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) { 5716 curr_ext_cnt++; 5717 if (rsrc_entry->rsrc_size != rsrc_ext_size) 5718 size_diff++; 5719 } 5720 5721 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0) 5722 rc = 1; 5723 5724 return rc; 5725 } 5726 5727 /** 5728 * lpfc_sli4_cfg_post_extnts - 5729 * @phba: Pointer to HBA context object. 5730 * @extnt_cnt - number of available extents. 5731 * @type - the extent type (rpi, xri, vfi, vpi). 5732 * @emb - buffer to hold either MBX_EMBED or MBX_NEMBED operation. 5733 * @mbox - pointer to the caller's allocated mailbox structure. 
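 * On return, @emb reports whether the command was built as an embedded
 * (LPFC_SLI4_MBX_EMBED) or non-embedded (LPFC_SLI4_MBX_NEMBED) mailbox,
 * depending on whether the requested extent count fits in the embedded
 * payload.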
5734 * 5735 * This function executes the extents allocation request. It also 5736 * takes care of the amount of memory needed to allocate or get the 5737 * allocated extents. It is the caller's responsibility to evaluate 5738 * the response. 5739 * 5740 * Returns: 5741 * -Error: Error value describes the condition found. 5742 * 0: if successful 5743 **/ 5744 static int 5745 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt, 5746 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox) 5747 { 5748 int rc = 0; 5749 uint32_t req_len; 5750 uint32_t emb_len; 5751 uint32_t alloc_len, mbox_tmo; 5752 5753 /* Calculate the total requested length of the dma memory */ 5754 req_len = extnt_cnt * sizeof(uint16_t); 5755 5756 /* 5757 * Calculate the size of an embedded mailbox. The uint32_t 5758 * accounts for extents-specific word. 5759 */ 5760 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 5761 sizeof(uint32_t); 5762 5763 /* 5764 * Presume the allocation and response will fit into an embedded 5765 * mailbox. If not true, reconfigure to a non-embedded mailbox. 5766 */ 5767 *emb = LPFC_SLI4_MBX_EMBED; 5768 if (req_len > emb_len) { 5769 req_len = extnt_cnt * sizeof(uint16_t) + 5770 sizeof(union lpfc_sli4_cfg_shdr) + 5771 sizeof(uint32_t); 5772 *emb = LPFC_SLI4_MBX_NEMBED; 5773 } 5774 5775 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5776 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT, 5777 req_len, *emb); 5778 if (alloc_len < req_len) { 5779 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5780 "2982 Allocated DMA memory size (x%x) is " 5781 "less than the requested DMA memory " 5782 "size (x%x)\n", alloc_len, req_len); 5783 return -ENOMEM; 5784 } 5785 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb); 5786 if (unlikely(rc)) 5787 return -EIO; 5788 5789 if (!phba->sli4_hba.intr_enable) 5790 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5791 else { 5792 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5793 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5794 } 5795 5796 if (unlikely(rc)) 5797 rc = -EIO; 5798 return rc; 5799 } 5800 5801 /** 5802 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent. 5803 * @phba: Pointer to HBA context object. 5804 * @type: The resource extent type to allocate. 5805 * 5806 * This function allocates the number of elements for the specified 5807 * resource type. 
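 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - no extents are available for this type or a bookkeeping
 *             allocation failed.
 *   -EIO - a mailbox command failed.
 *
 * A caller provisions one extent type at a time, for example (this
 * mirrors the resource-identifier setup later in this file):
 *
 *   rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
 *   if (unlikely(rc))
 *       goto err_exit;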
5808 **/ 5809 static int 5810 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) 5811 { 5812 bool emb = false; 5813 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size; 5814 uint16_t rsrc_id, rsrc_start, j, k; 5815 uint16_t *ids; 5816 int i, rc; 5817 unsigned long longs; 5818 unsigned long *bmask; 5819 struct lpfc_rsrc_blks *rsrc_blks; 5820 LPFC_MBOXQ_t *mbox; 5821 uint32_t length; 5822 struct lpfc_id_range *id_array = NULL; 5823 void *virtaddr = NULL; 5824 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 5825 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 5826 struct list_head *ext_blk_list; 5827 5828 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5829 &rsrc_cnt, 5830 &rsrc_size); 5831 if (unlikely(rc)) 5832 return -EIO; 5833 5834 if ((rsrc_cnt == 0) || (rsrc_size == 0)) { 5835 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5836 "3009 No available Resource Extents " 5837 "for resource type 0x%x: Count: 0x%x, " 5838 "Size 0x%x\n", type, rsrc_cnt, 5839 rsrc_size); 5840 return -ENOMEM; 5841 } 5842 5843 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI, 5844 "2903 Post resource extents type-0x%x: " 5845 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size); 5846 5847 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5848 if (!mbox) 5849 return -ENOMEM; 5850 5851 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox); 5852 if (unlikely(rc)) { 5853 rc = -EIO; 5854 goto err_exit; 5855 } 5856 5857 /* 5858 * Figure out where the response is located. Then get local pointers 5859 * to the response data. The port does not guarantee to respond to 5860 * all extents counts request so update the local variable with the 5861 * allocated count from the port. 5862 */ 5863 if (emb == LPFC_SLI4_MBX_EMBED) { 5864 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 5865 id_array = &rsrc_ext->u.rsp.id[0]; 5866 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 5867 } else { 5868 virtaddr = mbox->sge_array->addr[0]; 5869 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 5870 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 5871 id_array = &n_rsrc->id; 5872 } 5873 5874 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG; 5875 rsrc_id_cnt = rsrc_cnt * rsrc_size; 5876 5877 /* 5878 * Based on the resource size and count, correct the base and max 5879 * resource values. 5880 */ 5881 length = sizeof(struct lpfc_rsrc_blks); 5882 switch (type) { 5883 case LPFC_RSC_TYPE_FCOE_RPI: 5884 phba->sli4_hba.rpi_bmask = kcalloc(longs, 5885 sizeof(unsigned long), 5886 GFP_KERNEL); 5887 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 5888 rc = -ENOMEM; 5889 goto err_exit; 5890 } 5891 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt, 5892 sizeof(uint16_t), 5893 GFP_KERNEL); 5894 if (unlikely(!phba->sli4_hba.rpi_ids)) { 5895 kfree(phba->sli4_hba.rpi_bmask); 5896 rc = -ENOMEM; 5897 goto err_exit; 5898 } 5899 5900 /* 5901 * The next_rpi was initialized with the maximum available 5902 * count but the port may allocate a smaller number. Catch 5903 * that case and update the next_rpi. 5904 */ 5905 phba->sli4_hba.next_rpi = rsrc_id_cnt; 5906 5907 /* Initialize local ptrs for common extent processing later. 
*/ 5908 bmask = phba->sli4_hba.rpi_bmask; 5909 ids = phba->sli4_hba.rpi_ids; 5910 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5911 break; 5912 case LPFC_RSC_TYPE_FCOE_VPI: 5913 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), 5914 GFP_KERNEL); 5915 if (unlikely(!phba->vpi_bmask)) { 5916 rc = -ENOMEM; 5917 goto err_exit; 5918 } 5919 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t), 5920 GFP_KERNEL); 5921 if (unlikely(!phba->vpi_ids)) { 5922 kfree(phba->vpi_bmask); 5923 rc = -ENOMEM; 5924 goto err_exit; 5925 } 5926 5927 /* Initialize local ptrs for common extent processing later. */ 5928 bmask = phba->vpi_bmask; 5929 ids = phba->vpi_ids; 5930 ext_blk_list = &phba->lpfc_vpi_blk_list; 5931 break; 5932 case LPFC_RSC_TYPE_FCOE_XRI: 5933 phba->sli4_hba.xri_bmask = kcalloc(longs, 5934 sizeof(unsigned long), 5935 GFP_KERNEL); 5936 if (unlikely(!phba->sli4_hba.xri_bmask)) { 5937 rc = -ENOMEM; 5938 goto err_exit; 5939 } 5940 phba->sli4_hba.max_cfg_param.xri_used = 0; 5941 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt, 5942 sizeof(uint16_t), 5943 GFP_KERNEL); 5944 if (unlikely(!phba->sli4_hba.xri_ids)) { 5945 kfree(phba->sli4_hba.xri_bmask); 5946 rc = -ENOMEM; 5947 goto err_exit; 5948 } 5949 5950 /* Initialize local ptrs for common extent processing later. */ 5951 bmask = phba->sli4_hba.xri_bmask; 5952 ids = phba->sli4_hba.xri_ids; 5953 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5954 break; 5955 case LPFC_RSC_TYPE_FCOE_VFI: 5956 phba->sli4_hba.vfi_bmask = kcalloc(longs, 5957 sizeof(unsigned long), 5958 GFP_KERNEL); 5959 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 5960 rc = -ENOMEM; 5961 goto err_exit; 5962 } 5963 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt, 5964 sizeof(uint16_t), 5965 GFP_KERNEL); 5966 if (unlikely(!phba->sli4_hba.vfi_ids)) { 5967 kfree(phba->sli4_hba.vfi_bmask); 5968 rc = -ENOMEM; 5969 goto err_exit; 5970 } 5971 5972 /* Initialize local ptrs for common extent processing later. */ 5973 bmask = phba->sli4_hba.vfi_bmask; 5974 ids = phba->sli4_hba.vfi_ids; 5975 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5976 break; 5977 default: 5978 /* Unsupported Opcode. Fail call. */ 5979 id_array = NULL; 5980 bmask = NULL; 5981 ids = NULL; 5982 ext_blk_list = NULL; 5983 goto err_exit; 5984 } 5985 5986 /* 5987 * Complete initializing the extent configuration with the 5988 * allocated ids assigned to this function. The bitmask serves 5989 * as an index into the array and manages the available ids. The 5990 * array just stores the ids communicated to the port via the wqes. 5991 */ 5992 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) { 5993 if ((i % 2) == 0) 5994 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0, 5995 &id_array[k]); 5996 else 5997 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1, 5998 &id_array[k]); 5999 6000 rsrc_blks = kzalloc(length, GFP_KERNEL); 6001 if (unlikely(!rsrc_blks)) { 6002 rc = -ENOMEM; 6003 kfree(bmask); 6004 kfree(ids); 6005 goto err_exit; 6006 } 6007 rsrc_blks->rsrc_start = rsrc_id; 6008 rsrc_blks->rsrc_size = rsrc_size; 6009 list_add_tail(&rsrc_blks->list, ext_blk_list); 6010 rsrc_start = rsrc_id; 6011 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) { 6012 phba->sli4_hba.io_xri_start = rsrc_start + 6013 lpfc_sli4_get_iocb_cnt(phba); 6014 } 6015 6016 while (rsrc_id < (rsrc_start + rsrc_size)) { 6017 ids[j] = rsrc_id; 6018 rsrc_id++; 6019 j++; 6020 } 6021 /* Entire word processed. 
Get next word.*/ 6022 if ((i % 2) == 1) 6023 k++; 6024 } 6025 err_exit: 6026 lpfc_sli4_mbox_cmd_free(phba, mbox); 6027 return rc; 6028 } 6029 6030 6031 6032 /** 6033 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent. 6034 * @phba: Pointer to HBA context object. 6035 * @type: the extent's type. 6036 * 6037 * This function deallocates all extents of a particular resource type. 6038 * SLI4 does not allow for deallocating a particular extent range. It 6039 * is the caller's responsibility to release all kernel memory resources. 6040 **/ 6041 static int 6042 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) 6043 { 6044 int rc; 6045 uint32_t length, mbox_tmo = 0; 6046 LPFC_MBOXQ_t *mbox; 6047 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc; 6048 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next; 6049 6050 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6051 if (!mbox) 6052 return -ENOMEM; 6053 6054 /* 6055 * This function sends an embedded mailbox because it only sends the 6056 * the resource type. All extents of this type are released by the 6057 * port. 6058 */ 6059 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) - 6060 sizeof(struct lpfc_sli4_cfg_mhdr)); 6061 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6062 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT, 6063 length, LPFC_SLI4_MBX_EMBED); 6064 6065 /* Send an extents count of 0 - the dealloc doesn't use it. */ 6066 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 6067 LPFC_SLI4_MBX_EMBED); 6068 if (unlikely(rc)) { 6069 rc = -EIO; 6070 goto out_free_mbox; 6071 } 6072 if (!phba->sli4_hba.intr_enable) 6073 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 6074 else { 6075 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 6076 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 6077 } 6078 if (unlikely(rc)) { 6079 rc = -EIO; 6080 goto out_free_mbox; 6081 } 6082 6083 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents; 6084 if (bf_get(lpfc_mbox_hdr_status, 6085 &dealloc_rsrc->header.cfg_shdr.response)) { 6086 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 6087 "2919 Failed to release resource extents " 6088 "for type %d - Status 0x%x Add'l Status 0x%x. " 6089 "Resource memory not released.\n", 6090 type, 6091 bf_get(lpfc_mbox_hdr_status, 6092 &dealloc_rsrc->header.cfg_shdr.response), 6093 bf_get(lpfc_mbox_hdr_add_status, 6094 &dealloc_rsrc->header.cfg_shdr.response)); 6095 rc = -EIO; 6096 goto out_free_mbox; 6097 } 6098 6099 /* Release kernel memory resources for the specific type. 
*/ 6100 switch (type) { 6101 case LPFC_RSC_TYPE_FCOE_VPI: 6102 kfree(phba->vpi_bmask); 6103 kfree(phba->vpi_ids); 6104 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6105 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6106 &phba->lpfc_vpi_blk_list, list) { 6107 list_del_init(&rsrc_blk->list); 6108 kfree(rsrc_blk); 6109 } 6110 phba->sli4_hba.max_cfg_param.vpi_used = 0; 6111 break; 6112 case LPFC_RSC_TYPE_FCOE_XRI: 6113 kfree(phba->sli4_hba.xri_bmask); 6114 kfree(phba->sli4_hba.xri_ids); 6115 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6116 &phba->sli4_hba.lpfc_xri_blk_list, list) { 6117 list_del_init(&rsrc_blk->list); 6118 kfree(rsrc_blk); 6119 } 6120 break; 6121 case LPFC_RSC_TYPE_FCOE_VFI: 6122 kfree(phba->sli4_hba.vfi_bmask); 6123 kfree(phba->sli4_hba.vfi_ids); 6124 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6125 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6126 &phba->sli4_hba.lpfc_vfi_blk_list, list) { 6127 list_del_init(&rsrc_blk->list); 6128 kfree(rsrc_blk); 6129 } 6130 break; 6131 case LPFC_RSC_TYPE_FCOE_RPI: 6132 /* RPI bitmask and physical id array are cleaned up earlier. */ 6133 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6134 &phba->sli4_hba.lpfc_rpi_blk_list, list) { 6135 list_del_init(&rsrc_blk->list); 6136 kfree(rsrc_blk); 6137 } 6138 break; 6139 default: 6140 break; 6141 } 6142 6143 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6144 6145 out_free_mbox: 6146 mempool_free(mbox, phba->mbox_mem_pool); 6147 return rc; 6148 } 6149 6150 static void 6151 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, 6152 uint32_t feature) 6153 { 6154 uint32_t len; 6155 6156 len = sizeof(struct lpfc_mbx_set_feature) - 6157 sizeof(struct lpfc_sli4_cfg_mhdr); 6158 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6159 LPFC_MBOX_OPCODE_SET_FEATURES, len, 6160 LPFC_SLI4_MBX_EMBED); 6161 6162 switch (feature) { 6163 case LPFC_SET_UE_RECOVERY: 6164 bf_set(lpfc_mbx_set_feature_UER, 6165 &mbox->u.mqe.un.set_feature, 1); 6166 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY; 6167 mbox->u.mqe.un.set_feature.param_len = 8; 6168 break; 6169 case LPFC_SET_MDS_DIAGS: 6170 bf_set(lpfc_mbx_set_feature_mds, 6171 &mbox->u.mqe.un.set_feature, 1); 6172 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk, 6173 &mbox->u.mqe.un.set_feature, 1); 6174 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS; 6175 mbox->u.mqe.un.set_feature.param_len = 8; 6176 break; 6177 } 6178 6179 return; 6180 } 6181 6182 /** 6183 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter 6184 * @phba: Pointer to HBA context object. 6185 * 6186 * Disable FW logging into host memory on the adapter. To 6187 * be done before reading logs from the host memory. 6188 **/ 6189 void 6190 lpfc_ras_stop_fwlog(struct lpfc_hba *phba) 6191 { 6192 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6193 6194 ras_fwlog->ras_active = false; 6195 6196 /* Disable FW logging to host memory */ 6197 writel(LPFC_CTL_PDEV_CTL_DDL_RAS, 6198 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); 6199 } 6200 6201 /** 6202 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging. 6203 * @phba: Pointer to HBA context object. 6204 * 6205 * This function is called to free memory allocated for RAS FW logging 6206 * support in the driver. 
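 *
 * Both the firmware log buffers on the fwlog_buff_list and the LWPD
 * buffer are released, and ras_active is cleared.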
6207 **/ 6208 void 6209 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba) 6210 { 6211 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6212 struct lpfc_dmabuf *dmabuf, *next; 6213 6214 if (!list_empty(&ras_fwlog->fwlog_buff_list)) { 6215 list_for_each_entry_safe(dmabuf, next, 6216 &ras_fwlog->fwlog_buff_list, 6217 list) { 6218 list_del(&dmabuf->list); 6219 dma_free_coherent(&phba->pcidev->dev, 6220 LPFC_RAS_MAX_ENTRY_SIZE, 6221 dmabuf->virt, dmabuf->phys); 6222 kfree(dmabuf); 6223 } 6224 } 6225 6226 if (ras_fwlog->lwpd.virt) { 6227 dma_free_coherent(&phba->pcidev->dev, 6228 sizeof(uint32_t) * 2, 6229 ras_fwlog->lwpd.virt, 6230 ras_fwlog->lwpd.phys); 6231 ras_fwlog->lwpd.virt = NULL; 6232 } 6233 6234 ras_fwlog->ras_active = false; 6235 } 6236 6237 /** 6238 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW support 6239 * @phba: Pointer to HBA context object. 6240 * @fwlog_buff_count: Count of buffers to be created. 6241 * 6242 * This routine DMA memory for Log Write Position Data[LPWD] and buffer 6243 * to update FW log is posted to the adapter. 6244 * Buffer count is calculated based on module param ras_fwlog_buffsize 6245 * Size of each buffer posted to FW is 64K. 6246 **/ 6247 6248 static int 6249 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba, 6250 uint32_t fwlog_buff_count) 6251 { 6252 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6253 struct lpfc_dmabuf *dmabuf; 6254 int rc = 0, i = 0; 6255 6256 /* Initialize List */ 6257 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list); 6258 6259 /* Allocate memory for the LWPD */ 6260 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev, 6261 sizeof(uint32_t) * 2, 6262 &ras_fwlog->lwpd.phys, 6263 GFP_KERNEL); 6264 if (!ras_fwlog->lwpd.virt) { 6265 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6266 "6185 LWPD Memory Alloc Failed\n"); 6267 6268 return -ENOMEM; 6269 } 6270 6271 ras_fwlog->fw_buffcount = fwlog_buff_count; 6272 for (i = 0; i < ras_fwlog->fw_buffcount; i++) { 6273 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 6274 GFP_KERNEL); 6275 if (!dmabuf) { 6276 rc = -ENOMEM; 6277 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6278 "6186 Memory Alloc failed FW logging"); 6279 goto free_mem; 6280 } 6281 6282 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 6283 LPFC_RAS_MAX_ENTRY_SIZE, 6284 &dmabuf->phys, GFP_KERNEL); 6285 if (!dmabuf->virt) { 6286 kfree(dmabuf); 6287 rc = -ENOMEM; 6288 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6289 "6187 DMA Alloc Failed FW logging"); 6290 goto free_mem; 6291 } 6292 dmabuf->buffer_tag = i; 6293 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list); 6294 } 6295 6296 free_mem: 6297 if (rc) 6298 lpfc_sli4_ras_dma_free(phba); 6299 6300 return rc; 6301 } 6302 6303 /** 6304 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command 6305 * @phba: pointer to lpfc hba data structure. 6306 * @pmboxq: pointer to the driver internal queue element for mailbox command. 6307 * 6308 * Completion handler for driver's RAS MBX command to the device. 
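 *
 * On success, ras_active is set so the FW logging path is usable; on a
 * mailbox or header status error, the RAS DMA buffers are freed and
 * ras_hwsupport is cleared. The mailbox is returned to the mem_pool in
 * either case.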
6309 **/ 6310 static void 6311 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 6312 { 6313 MAILBOX_t *mb; 6314 union lpfc_sli4_cfg_shdr *shdr; 6315 uint32_t shdr_status, shdr_add_status; 6316 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6317 6318 mb = &pmb->u.mb; 6319 6320 shdr = (union lpfc_sli4_cfg_shdr *) 6321 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr; 6322 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6323 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6324 6325 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) { 6326 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 6327 "6188 FW LOG mailbox " 6328 "completed with status x%x add_status x%x," 6329 " mbx status x%x\n", 6330 shdr_status, shdr_add_status, mb->mbxStatus); 6331 6332 ras_fwlog->ras_hwsupport = false; 6333 goto disable_ras; 6334 } 6335 6336 ras_fwlog->ras_active = true; 6337 mempool_free(pmb, phba->mbox_mem_pool); 6338 6339 return; 6340 6341 disable_ras: 6342 /* Free RAS DMA memory */ 6343 lpfc_sli4_ras_dma_free(phba); 6344 mempool_free(pmb, phba->mbox_mem_pool); 6345 } 6346 6347 /** 6348 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command 6349 * @phba: pointer to lpfc hba data structure. 6350 * @fwlog_level: Logging verbosity level. 6351 * @fwlog_enable: Enable/Disable logging. 6352 * 6353 * Initialize memory and post mailbox command to enable FW logging in host 6354 * memory. 6355 **/ 6356 int 6357 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, 6358 uint32_t fwlog_level, 6359 uint32_t fwlog_enable) 6360 { 6361 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6362 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL; 6363 struct lpfc_dmabuf *dmabuf; 6364 LPFC_MBOXQ_t *mbox; 6365 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count; 6366 int rc = 0; 6367 6368 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE * 6369 phba->cfg_ras_fwlog_buffsize); 6370 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE); 6371 6372 /* 6373 * If re-enabling FW logging support use earlier allocated 6374 * DMA buffers while posting MBX command. 
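 * Each buffer posted to the firmware is LPFC_RAS_MAX_ENTRY_SIZE bytes;
 * the buffer count (fwlog_entry_count computed above) is derived from
 * the ras_fwlog_buffsize module parameter.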
6375 **/ 6376 if (!ras_fwlog->lwpd.virt) { 6377 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count); 6378 if (rc) { 6379 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6380 "6189 FW Log Memory Allocation Failed"); 6381 return rc; 6382 } 6383 } 6384 6385 /* Setup Mailbox command */ 6386 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6387 if (!mbox) { 6388 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6389 "6190 RAS MBX Alloc Failed"); 6390 rc = -ENOMEM; 6391 goto mem_free; 6392 } 6393 6394 ras_fwlog->fw_loglevel = fwlog_level; 6395 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) - 6396 sizeof(struct lpfc_sli4_cfg_mhdr)); 6397 6398 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL, 6399 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION, 6400 len, LPFC_SLI4_MBX_EMBED); 6401 6402 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog; 6403 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request, 6404 fwlog_enable); 6405 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request, 6406 ras_fwlog->fw_loglevel); 6407 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request, 6408 ras_fwlog->fw_buffcount); 6409 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request, 6410 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE); 6411 6412 /* Update DMA buffer address */ 6413 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) { 6414 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE); 6415 6416 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo = 6417 putPaddrLow(dmabuf->phys); 6418 6419 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi = 6420 putPaddrHigh(dmabuf->phys); 6421 } 6422 6423 /* Update LPWD address */ 6424 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys); 6425 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys); 6426 6427 mbox->vport = phba->pport; 6428 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl; 6429 6430 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 6431 6432 if (rc == MBX_NOT_FINISHED) { 6433 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6434 "6191 FW-Log Mailbox failed. " 6435 "status %d mbxStatus : x%x", rc, 6436 bf_get(lpfc_mqe_status, &mbox->u.mqe)); 6437 mempool_free(mbox, phba->mbox_mem_pool); 6438 rc = -EIO; 6439 goto mem_free; 6440 } else 6441 rc = 0; 6442 mem_free: 6443 if (rc) 6444 lpfc_sli4_ras_dma_free(phba); 6445 6446 return rc; 6447 } 6448 6449 /** 6450 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter 6451 * @phba: Pointer to HBA context object. 6452 * 6453 * Check if RAS is supported on the adapter and initialize it. 6454 **/ 6455 void 6456 lpfc_sli4_ras_setup(struct lpfc_hba *phba) 6457 { 6458 /* Check RAS FW Log needs to be enabled or not */ 6459 if (lpfc_check_fwlog_support(phba)) 6460 return; 6461 6462 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level, 6463 LPFC_RAS_ENABLE_LOGGING); 6464 } 6465 6466 /** 6467 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents. 6468 * @phba: Pointer to HBA context object. 6469 * 6470 * This function allocates all SLI4 resource identifiers. 6471 **/ 6472 int 6473 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) 6474 { 6475 int i, rc, error = 0; 6476 uint16_t count, base; 6477 unsigned long longs; 6478 6479 if (!phba->sli4_hba.rpi_hdrs_in_use) 6480 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 6481 if (phba->sli4_hba.extents_in_use) { 6482 /* 6483 * The port supports resource extents. The XRI, VPI, VFI, RPI 6484 * resource extent count must be read and allocated before 6485 * provisioning the resource id arrays. 
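 * If the extents were already provisioned (a port reset path), the
 * counts are re-checked below and, when they differ, every extent
 * type is deallocated and reallocated.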
6486 */ 6487 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 6488 LPFC_IDX_RSRC_RDY) { 6489 /* 6490 * Extent-based resources are set - the driver could 6491 * be in a port reset. Figure out if any corrective 6492 * actions need to be taken. 6493 */ 6494 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6495 LPFC_RSC_TYPE_FCOE_VFI); 6496 if (rc != 0) 6497 error++; 6498 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6499 LPFC_RSC_TYPE_FCOE_VPI); 6500 if (rc != 0) 6501 error++; 6502 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6503 LPFC_RSC_TYPE_FCOE_XRI); 6504 if (rc != 0) 6505 error++; 6506 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6507 LPFC_RSC_TYPE_FCOE_RPI); 6508 if (rc != 0) 6509 error++; 6510 6511 /* 6512 * It's possible that the number of resources 6513 * provided to this port instance changed between 6514 * resets. Detect this condition and reallocate 6515 * resources. Otherwise, there is no action. 6516 */ 6517 if (error) { 6518 lpfc_printf_log(phba, KERN_INFO, 6519 LOG_MBOX | LOG_INIT, 6520 "2931 Detected extent resource " 6521 "change. Reallocating all " 6522 "extents.\n"); 6523 rc = lpfc_sli4_dealloc_extent(phba, 6524 LPFC_RSC_TYPE_FCOE_VFI); 6525 rc = lpfc_sli4_dealloc_extent(phba, 6526 LPFC_RSC_TYPE_FCOE_VPI); 6527 rc = lpfc_sli4_dealloc_extent(phba, 6528 LPFC_RSC_TYPE_FCOE_XRI); 6529 rc = lpfc_sli4_dealloc_extent(phba, 6530 LPFC_RSC_TYPE_FCOE_RPI); 6531 } else 6532 return 0; 6533 } 6534 6535 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 6536 if (unlikely(rc)) 6537 goto err_exit; 6538 6539 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 6540 if (unlikely(rc)) 6541 goto err_exit; 6542 6543 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 6544 if (unlikely(rc)) 6545 goto err_exit; 6546 6547 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 6548 if (unlikely(rc)) 6549 goto err_exit; 6550 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 6551 LPFC_IDX_RSRC_RDY); 6552 return rc; 6553 } else { 6554 /* 6555 * The port does not support resource extents. The XRI, VPI, 6556 * VFI, RPI resource ids were determined from READ_CONFIG. 6557 * Just allocate the bitmasks and provision the resource id 6558 * arrays. If a port reset is active, the resources don't 6559 * need any action - just exit. 6560 */ 6561 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 6562 LPFC_IDX_RSRC_RDY) { 6563 lpfc_sli4_dealloc_resource_identifiers(phba); 6564 lpfc_sli4_remove_rpis(phba); 6565 } 6566 /* RPIs. */ 6567 count = phba->sli4_hba.max_cfg_param.max_rpi; 6568 if (count <= 0) { 6569 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6570 "3279 Invalid provisioning of " 6571 "rpi:%d\n", count); 6572 rc = -EINVAL; 6573 goto err_exit; 6574 } 6575 base = phba->sli4_hba.max_cfg_param.rpi_base; 6576 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6577 phba->sli4_hba.rpi_bmask = kcalloc(longs, 6578 sizeof(unsigned long), 6579 GFP_KERNEL); 6580 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 6581 rc = -ENOMEM; 6582 goto err_exit; 6583 } 6584 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t), 6585 GFP_KERNEL); 6586 if (unlikely(!phba->sli4_hba.rpi_ids)) { 6587 rc = -ENOMEM; 6588 goto free_rpi_bmask; 6589 } 6590 6591 for (i = 0; i < count; i++) 6592 phba->sli4_hba.rpi_ids[i] = base + i; 6593 6594 /* VPIs. 
*/ 6595 count = phba->sli4_hba.max_cfg_param.max_vpi; 6596 if (count <= 0) { 6597 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6598 "3280 Invalid provisioning of " 6599 "vpi:%d\n", count); 6600 rc = -EINVAL; 6601 goto free_rpi_ids; 6602 } 6603 base = phba->sli4_hba.max_cfg_param.vpi_base; 6604 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6605 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), 6606 GFP_KERNEL); 6607 if (unlikely(!phba->vpi_bmask)) { 6608 rc = -ENOMEM; 6609 goto free_rpi_ids; 6610 } 6611 phba->vpi_ids = kcalloc(count, sizeof(uint16_t), 6612 GFP_KERNEL); 6613 if (unlikely(!phba->vpi_ids)) { 6614 rc = -ENOMEM; 6615 goto free_vpi_bmask; 6616 } 6617 6618 for (i = 0; i < count; i++) 6619 phba->vpi_ids[i] = base + i; 6620 6621 /* XRIs. */ 6622 count = phba->sli4_hba.max_cfg_param.max_xri; 6623 if (count <= 0) { 6624 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6625 "3281 Invalid provisioning of " 6626 "xri:%d\n", count); 6627 rc = -EINVAL; 6628 goto free_vpi_ids; 6629 } 6630 base = phba->sli4_hba.max_cfg_param.xri_base; 6631 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6632 phba->sli4_hba.xri_bmask = kcalloc(longs, 6633 sizeof(unsigned long), 6634 GFP_KERNEL); 6635 if (unlikely(!phba->sli4_hba.xri_bmask)) { 6636 rc = -ENOMEM; 6637 goto free_vpi_ids; 6638 } 6639 phba->sli4_hba.max_cfg_param.xri_used = 0; 6640 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t), 6641 GFP_KERNEL); 6642 if (unlikely(!phba->sli4_hba.xri_ids)) { 6643 rc = -ENOMEM; 6644 goto free_xri_bmask; 6645 } 6646 6647 for (i = 0; i < count; i++) 6648 phba->sli4_hba.xri_ids[i] = base + i; 6649 6650 /* VFIs. */ 6651 count = phba->sli4_hba.max_cfg_param.max_vfi; 6652 if (count <= 0) { 6653 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6654 "3282 Invalid provisioning of " 6655 "vfi:%d\n", count); 6656 rc = -EINVAL; 6657 goto free_xri_ids; 6658 } 6659 base = phba->sli4_hba.max_cfg_param.vfi_base; 6660 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6661 phba->sli4_hba.vfi_bmask = kcalloc(longs, 6662 sizeof(unsigned long), 6663 GFP_KERNEL); 6664 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 6665 rc = -ENOMEM; 6666 goto free_xri_ids; 6667 } 6668 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t), 6669 GFP_KERNEL); 6670 if (unlikely(!phba->sli4_hba.vfi_ids)) { 6671 rc = -ENOMEM; 6672 goto free_vfi_bmask; 6673 } 6674 6675 for (i = 0; i < count; i++) 6676 phba->sli4_hba.vfi_ids[i] = base + i; 6677 6678 /* 6679 * Mark all resources ready. An HBA reset doesn't need 6680 * to reset the initialization. 6681 */ 6682 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 6683 LPFC_IDX_RSRC_RDY); 6684 return 0; 6685 } 6686 6687 free_vfi_bmask: 6688 kfree(phba->sli4_hba.vfi_bmask); 6689 phba->sli4_hba.vfi_bmask = NULL; 6690 free_xri_ids: 6691 kfree(phba->sli4_hba.xri_ids); 6692 phba->sli4_hba.xri_ids = NULL; 6693 free_xri_bmask: 6694 kfree(phba->sli4_hba.xri_bmask); 6695 phba->sli4_hba.xri_bmask = NULL; 6696 free_vpi_ids: 6697 kfree(phba->vpi_ids); 6698 phba->vpi_ids = NULL; 6699 free_vpi_bmask: 6700 kfree(phba->vpi_bmask); 6701 phba->vpi_bmask = NULL; 6702 free_rpi_ids: 6703 kfree(phba->sli4_hba.rpi_ids); 6704 phba->sli4_hba.rpi_ids = NULL; 6705 free_rpi_bmask: 6706 kfree(phba->sli4_hba.rpi_bmask); 6707 phba->sli4_hba.rpi_bmask = NULL; 6708 err_exit: 6709 return rc; 6710 } 6711 6712 /** 6713 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents. 6714 * @phba: Pointer to HBA context object. 
6715 * 6716 * This function allocates the number of elements for the specified 6717 * resource type. 6718 **/ 6719 int 6720 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba) 6721 { 6722 if (phba->sli4_hba.extents_in_use) { 6723 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 6724 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 6725 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 6726 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 6727 } else { 6728 kfree(phba->vpi_bmask); 6729 phba->sli4_hba.max_cfg_param.vpi_used = 0; 6730 kfree(phba->vpi_ids); 6731 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6732 kfree(phba->sli4_hba.xri_bmask); 6733 kfree(phba->sli4_hba.xri_ids); 6734 kfree(phba->sli4_hba.vfi_bmask); 6735 kfree(phba->sli4_hba.vfi_ids); 6736 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6737 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6738 } 6739 6740 return 0; 6741 } 6742 6743 /** 6744 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents. 6745 * @phba: Pointer to HBA context object. 6746 * @type: The resource extent type. 6747 * @extnt_count: buffer to hold port extent count response 6748 * @extnt_size: buffer to hold port extent size response. 6749 * 6750 * This function calls the port to read the host allocated extents 6751 * for a particular type. 6752 **/ 6753 int 6754 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type, 6755 uint16_t *extnt_cnt, uint16_t *extnt_size) 6756 { 6757 bool emb; 6758 int rc = 0; 6759 uint16_t curr_blks = 0; 6760 uint32_t req_len, emb_len; 6761 uint32_t alloc_len, mbox_tmo; 6762 struct list_head *blk_list_head; 6763 struct lpfc_rsrc_blks *rsrc_blk; 6764 LPFC_MBOXQ_t *mbox; 6765 void *virtaddr = NULL; 6766 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 6767 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 6768 union lpfc_sli4_cfg_shdr *shdr; 6769 6770 switch (type) { 6771 case LPFC_RSC_TYPE_FCOE_VPI: 6772 blk_list_head = &phba->lpfc_vpi_blk_list; 6773 break; 6774 case LPFC_RSC_TYPE_FCOE_XRI: 6775 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list; 6776 break; 6777 case LPFC_RSC_TYPE_FCOE_VFI: 6778 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list; 6779 break; 6780 case LPFC_RSC_TYPE_FCOE_RPI: 6781 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list; 6782 break; 6783 default: 6784 return -EIO; 6785 } 6786 6787 /* Count the number of extents currently allocatd for this type. */ 6788 list_for_each_entry(rsrc_blk, blk_list_head, list) { 6789 if (curr_blks == 0) { 6790 /* 6791 * The GET_ALLOCATED mailbox does not return the size, 6792 * just the count. The size should be just the size 6793 * stored in the current allocated block and all sizes 6794 * for an extent type are the same so set the return 6795 * value now. 6796 */ 6797 *extnt_size = rsrc_blk->rsrc_size; 6798 } 6799 curr_blks++; 6800 } 6801 6802 /* 6803 * Calculate the size of an embedded mailbox. The uint32_t 6804 * accounts for extents-specific word. 6805 */ 6806 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 6807 sizeof(uint32_t); 6808 6809 /* 6810 * Presume the allocation and response will fit into an embedded 6811 * mailbox. If not true, reconfigure to a non-embedded mailbox. 
6812 */ 6813 emb = LPFC_SLI4_MBX_EMBED; 6814 req_len = emb_len; 6815 if (req_len > emb_len) { 6816 req_len = curr_blks * sizeof(uint16_t) + 6817 sizeof(union lpfc_sli4_cfg_shdr) + 6818 sizeof(uint32_t); 6819 emb = LPFC_SLI4_MBX_NEMBED; 6820 } 6821 6822 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6823 if (!mbox) 6824 return -ENOMEM; 6825 memset(mbox, 0, sizeof(LPFC_MBOXQ_t)); 6826 6827 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6828 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT, 6829 req_len, emb); 6830 if (alloc_len < req_len) { 6831 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6832 "2983 Allocated DMA memory size (x%x) is " 6833 "less than the requested DMA memory " 6834 "size (x%x)\n", alloc_len, req_len); 6835 rc = -ENOMEM; 6836 goto err_exit; 6837 } 6838 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb); 6839 if (unlikely(rc)) { 6840 rc = -EIO; 6841 goto err_exit; 6842 } 6843 6844 if (!phba->sli4_hba.intr_enable) 6845 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 6846 else { 6847 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 6848 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 6849 } 6850 6851 if (unlikely(rc)) { 6852 rc = -EIO; 6853 goto err_exit; 6854 } 6855 6856 /* 6857 * Figure out where the response is located. Then get local pointers 6858 * to the response data. The port does not guarantee to respond to 6859 * all extents counts request so update the local variable with the 6860 * allocated count from the port. 6861 */ 6862 if (emb == LPFC_SLI4_MBX_EMBED) { 6863 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 6864 shdr = &rsrc_ext->header.cfg_shdr; 6865 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 6866 } else { 6867 virtaddr = mbox->sge_array->addr[0]; 6868 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 6869 shdr = &n_rsrc->cfg_shdr; 6870 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 6871 } 6872 6873 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) { 6874 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 6875 "2984 Failed to read allocated resources " 6876 "for type %d - Status 0x%x Add'l Status 0x%x.\n", 6877 type, 6878 bf_get(lpfc_mbox_hdr_status, &shdr->response), 6879 bf_get(lpfc_mbox_hdr_add_status, &shdr->response)); 6880 rc = -EIO; 6881 goto err_exit; 6882 } 6883 err_exit: 6884 lpfc_sli4_mbox_cmd_free(phba, mbox); 6885 return rc; 6886 } 6887 6888 /** 6889 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block 6890 * @phba: pointer to lpfc hba data structure. 6891 * @pring: Pointer to driver SLI ring object. 6892 * @sgl_list: linked link of sgl buffers to post 6893 * @cnt: number of linked list buffers 6894 * 6895 * This routine walks the list of buffers that have been allocated and 6896 * repost them to the port by using SGL block post. This is needed after a 6897 * pci_function_reset/warm_start or start. It attempts to construct blocks 6898 * of buffer sgls which contains contiguous xris and uses the non-embedded 6899 * SGL block post mailbox commands to post them to the port. For single 6900 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post 6901 * mailbox command for posting. 6902 * 6903 * Returns: 0 = success, non-zero failure. 
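 * (More precisely, the routine returns the number of XRIs actually
 * posted when at least one sgl was accepted by the port, or -EIO when
 * nothing could be posted.)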
6904 **/ 6905 static int 6906 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba, 6907 struct list_head *sgl_list, int cnt) 6908 { 6909 struct lpfc_sglq *sglq_entry = NULL; 6910 struct lpfc_sglq *sglq_entry_next = NULL; 6911 struct lpfc_sglq *sglq_entry_first = NULL; 6912 int status, total_cnt; 6913 int post_cnt = 0, num_posted = 0, block_cnt = 0; 6914 int last_xritag = NO_XRI; 6915 LIST_HEAD(prep_sgl_list); 6916 LIST_HEAD(blck_sgl_list); 6917 LIST_HEAD(allc_sgl_list); 6918 LIST_HEAD(post_sgl_list); 6919 LIST_HEAD(free_sgl_list); 6920 6921 spin_lock_irq(&phba->hbalock); 6922 spin_lock(&phba->sli4_hba.sgl_list_lock); 6923 list_splice_init(sgl_list, &allc_sgl_list); 6924 spin_unlock(&phba->sli4_hba.sgl_list_lock); 6925 spin_unlock_irq(&phba->hbalock); 6926 6927 total_cnt = cnt; 6928 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 6929 &allc_sgl_list, list) { 6930 list_del_init(&sglq_entry->list); 6931 block_cnt++; 6932 if ((last_xritag != NO_XRI) && 6933 (sglq_entry->sli4_xritag != last_xritag + 1)) { 6934 /* a hole in xri block, form a sgl posting block */ 6935 list_splice_init(&prep_sgl_list, &blck_sgl_list); 6936 post_cnt = block_cnt - 1; 6937 /* prepare list for next posting block */ 6938 list_add_tail(&sglq_entry->list, &prep_sgl_list); 6939 block_cnt = 1; 6940 } else { 6941 /* prepare list for next posting block */ 6942 list_add_tail(&sglq_entry->list, &prep_sgl_list); 6943 /* enough sgls for non-embed sgl mbox command */ 6944 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { 6945 list_splice_init(&prep_sgl_list, 6946 &blck_sgl_list); 6947 post_cnt = block_cnt; 6948 block_cnt = 0; 6949 } 6950 } 6951 num_posted++; 6952 6953 /* keep track of last sgl's xritag */ 6954 last_xritag = sglq_entry->sli4_xritag; 6955 6956 /* end of repost sgl list condition for buffers */ 6957 if (num_posted == total_cnt) { 6958 if (post_cnt == 0) { 6959 list_splice_init(&prep_sgl_list, 6960 &blck_sgl_list); 6961 post_cnt = block_cnt; 6962 } else if (block_cnt == 1) { 6963 status = lpfc_sli4_post_sgl(phba, 6964 sglq_entry->phys, 0, 6965 sglq_entry->sli4_xritag); 6966 if (!status) { 6967 /* successful, put sgl to posted list */ 6968 list_add_tail(&sglq_entry->list, 6969 &post_sgl_list); 6970 } else { 6971 /* Failure, put sgl to free list */ 6972 lpfc_printf_log(phba, KERN_WARNING, 6973 LOG_SLI, 6974 "3159 Failed to post " 6975 "sgl, xritag:x%x\n", 6976 sglq_entry->sli4_xritag); 6977 list_add_tail(&sglq_entry->list, 6978 &free_sgl_list); 6979 total_cnt--; 6980 } 6981 } 6982 } 6983 6984 /* continue until a nembed page worth of sgls */ 6985 if (post_cnt == 0) 6986 continue; 6987 6988 /* post the buffer list sgls as a block */ 6989 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list, 6990 post_cnt); 6991 6992 if (!status) { 6993 /* success, put sgl list to posted sgl list */ 6994 list_splice_init(&blck_sgl_list, &post_sgl_list); 6995 } else { 6996 /* Failure, put sgl list to free sgl list */ 6997 sglq_entry_first = list_first_entry(&blck_sgl_list, 6998 struct lpfc_sglq, 6999 list); 7000 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 7001 "3160 Failed to post sgl-list, " 7002 "xritag:x%x-x%x\n", 7003 sglq_entry_first->sli4_xritag, 7004 (sglq_entry_first->sli4_xritag + 7005 post_cnt - 1)); 7006 list_splice_init(&blck_sgl_list, &free_sgl_list); 7007 total_cnt -= post_cnt; 7008 } 7009 7010 /* don't reset xirtag due to hole in xri block */ 7011 if (block_cnt == 0) 7012 last_xritag = NO_XRI; 7013 7014 /* reset sgl post count for next round of posting */ 7015 post_cnt = 0; 7016 } 7017 7018 /* free the sgls failed to post */ 
7019 lpfc_free_sgl_list(phba, &free_sgl_list); 7020 7021 /* push sgls posted to the available list */ 7022 if (!list_empty(&post_sgl_list)) { 7023 spin_lock_irq(&phba->hbalock); 7024 spin_lock(&phba->sli4_hba.sgl_list_lock); 7025 list_splice_init(&post_sgl_list, sgl_list); 7026 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7027 spin_unlock_irq(&phba->hbalock); 7028 } else { 7029 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7030 "3161 Failure to post sgl to port.\n"); 7031 return -EIO; 7032 } 7033 7034 /* return the number of XRIs actually posted */ 7035 return total_cnt; 7036 } 7037 7038 /** 7039 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls 7040 * @phba: pointer to lpfc hba data structure. 7041 * 7042 * This routine walks the list of nvme buffers that have been allocated and 7043 * repost them to the port by using SGL block post. This is needed after a 7044 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine 7045 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list 7046 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers. 7047 * 7048 * Returns: 0 = success, non-zero failure. 7049 **/ 7050 int 7051 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba) 7052 { 7053 LIST_HEAD(post_nblist); 7054 int num_posted, rc = 0; 7055 7056 /* get all NVME buffers need to repost to a local list */ 7057 lpfc_io_buf_flush(phba, &post_nblist); 7058 7059 /* post the list of nvme buffer sgls to port if available */ 7060 if (!list_empty(&post_nblist)) { 7061 num_posted = lpfc_sli4_post_io_sgl_list( 7062 phba, &post_nblist, phba->sli4_hba.io_xri_cnt); 7063 /* failed to post any nvme buffer, return error */ 7064 if (num_posted == 0) 7065 rc = -EIO; 7066 } 7067 return rc; 7068 } 7069 7070 void 7071 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 7072 { 7073 uint32_t len; 7074 7075 len = sizeof(struct lpfc_mbx_set_host_data) - 7076 sizeof(struct lpfc_sli4_cfg_mhdr); 7077 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 7078 LPFC_MBOX_OPCODE_SET_HOST_DATA, len, 7079 LPFC_SLI4_MBX_EMBED); 7080 7081 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION; 7082 mbox->u.mqe.un.set_host_data.param_len = 7083 LPFC_HOST_OS_DRIVER_VERSION_SIZE; 7084 snprintf(mbox->u.mqe.un.set_host_data.data, 7085 LPFC_HOST_OS_DRIVER_VERSION_SIZE, 7086 "Linux %s v"LPFC_DRIVER_VERSION, 7087 (phba->hba_flag & HBA_FCOE_MODE) ? 
"FCoE" : "FC"); 7088 } 7089 7090 int 7091 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, 7092 struct lpfc_queue *drq, int count, int idx) 7093 { 7094 int rc, i; 7095 struct lpfc_rqe hrqe; 7096 struct lpfc_rqe drqe; 7097 struct lpfc_rqb *rqbp; 7098 unsigned long flags; 7099 struct rqb_dmabuf *rqb_buffer; 7100 LIST_HEAD(rqb_buf_list); 7101 7102 spin_lock_irqsave(&phba->hbalock, flags); 7103 rqbp = hrq->rqbp; 7104 for (i = 0; i < count; i++) { 7105 /* IF RQ is already full, don't bother */ 7106 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) 7107 break; 7108 rqb_buffer = rqbp->rqb_alloc_buffer(phba); 7109 if (!rqb_buffer) 7110 break; 7111 rqb_buffer->hrq = hrq; 7112 rqb_buffer->drq = drq; 7113 rqb_buffer->idx = idx; 7114 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list); 7115 } 7116 while (!list_empty(&rqb_buf_list)) { 7117 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf, 7118 hbuf.list); 7119 7120 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys); 7121 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys); 7122 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys); 7123 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys); 7124 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); 7125 if (rc < 0) { 7126 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7127 "6421 Cannot post to HRQ %d: %x %x %x " 7128 "DRQ %x %x\n", 7129 hrq->queue_id, 7130 hrq->host_index, 7131 hrq->hba_index, 7132 hrq->entry_count, 7133 drq->host_index, 7134 drq->hba_index); 7135 rqbp->rqb_free_buffer(phba, rqb_buffer); 7136 } else { 7137 list_add_tail(&rqb_buffer->hbuf.list, 7138 &rqbp->rqb_buffer_list); 7139 rqbp->buffer_count++; 7140 } 7141 } 7142 spin_unlock_irqrestore(&phba->hbalock, flags); 7143 return 1; 7144 } 7145 7146 /** 7147 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function 7148 * @phba: Pointer to HBA context object. 7149 * 7150 * This function is the main SLI4 device initialization PCI function. This 7151 * function is called by the HBA initialization code, HBA reset code and 7152 * HBA error attention handler code. Caller is not required to hold any 7153 * locks. 7154 **/ 7155 int 7156 lpfc_sli4_hba_setup(struct lpfc_hba *phba) 7157 { 7158 int rc, i, cnt, len; 7159 LPFC_MBOXQ_t *mboxq; 7160 struct lpfc_mqe *mqe; 7161 uint8_t *vpd; 7162 uint32_t vpd_size; 7163 uint32_t ftr_rsp = 0; 7164 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); 7165 struct lpfc_vport *vport = phba->pport; 7166 struct lpfc_dmabuf *mp; 7167 struct lpfc_rqb *rqbp; 7168 7169 /* Perform a PCI function reset to start from clean */ 7170 rc = lpfc_pci_function_reset(phba); 7171 if (unlikely(rc)) 7172 return -ENODEV; 7173 7174 /* Check the HBA Host Status Register for readyness */ 7175 rc = lpfc_sli4_post_status_check(phba); 7176 if (unlikely(rc)) 7177 return -ENODEV; 7178 else { 7179 spin_lock_irq(&phba->hbalock); 7180 phba->sli.sli_flag |= LPFC_SLI_ACTIVE; 7181 spin_unlock_irq(&phba->hbalock); 7182 } 7183 7184 /* 7185 * Allocate a single mailbox container for initializing the 7186 * port. 7187 */ 7188 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7189 if (!mboxq) 7190 return -ENOMEM; 7191 7192 /* Issue READ_REV to collect vpd and FW information. 
*/ 7193 vpd_size = SLI4_PAGE_SIZE; 7194 vpd = kzalloc(vpd_size, GFP_KERNEL); 7195 if (!vpd) { 7196 rc = -ENOMEM; 7197 goto out_free_mbox; 7198 } 7199 7200 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); 7201 if (unlikely(rc)) { 7202 kfree(vpd); 7203 goto out_free_mbox; 7204 } 7205 7206 mqe = &mboxq->u.mqe; 7207 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 7208 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) { 7209 phba->hba_flag |= HBA_FCOE_MODE; 7210 phba->fcp_embed_io = 0; /* SLI4 FC support only */ 7211 } else { 7212 phba->hba_flag &= ~HBA_FCOE_MODE; 7213 } 7214 7215 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == 7216 LPFC_DCBX_CEE_MODE) 7217 phba->hba_flag |= HBA_FIP_SUPPORT; 7218 else 7219 phba->hba_flag &= ~HBA_FIP_SUPPORT; 7220 7221 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH; 7222 7223 if (phba->sli_rev != LPFC_SLI_REV4) { 7224 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7225 "0376 READ_REV Error. SLI Level %d " 7226 "FCoE enabled %d\n", 7227 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE); 7228 rc = -EIO; 7229 kfree(vpd); 7230 goto out_free_mbox; 7231 } 7232 7233 /* 7234 * Continue initialization with default values even if driver failed 7235 * to read FCoE param config regions, only read parameters if the 7236 * board is FCoE 7237 */ 7238 if (phba->hba_flag & HBA_FCOE_MODE && 7239 lpfc_sli4_read_fcoe_params(phba)) 7240 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT, 7241 "2570 Failed to read FCoE parameters\n"); 7242 7243 /* 7244 * Retrieve sli4 device physical port name, failure of doing it 7245 * is considered as non-fatal. 7246 */ 7247 rc = lpfc_sli4_retrieve_pport_name(phba); 7248 if (!rc) 7249 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7250 "3080 Successful retrieving SLI4 device " 7251 "physical port name: %s.\n", phba->Port); 7252 7253 /* 7254 * Evaluate the read rev and vpd data. Populate the driver 7255 * state with the results. If this routine fails, the failure 7256 * is not fatal as the driver will use generic values. 7257 */ 7258 rc = lpfc_parse_vpd(phba, vpd, vpd_size); 7259 if (unlikely(!rc)) { 7260 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7261 "0377 Error %d parsing vpd. 
" 7262 "Using defaults.\n", rc); 7263 rc = 0; 7264 } 7265 kfree(vpd); 7266 7267 /* Save information as VPD data */ 7268 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; 7269 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; 7270 7271 /* 7272 * This is because first G7 ASIC doesn't support the standard 7273 * 0x5a NVME cmd descriptor type/subtype 7274 */ 7275 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 7276 LPFC_SLI_INTF_IF_TYPE_6) && 7277 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) && 7278 (phba->vpd.rev.smRev == 0) && 7279 (phba->cfg_nvme_embed_cmd == 1)) 7280 phba->cfg_nvme_embed_cmd = 0; 7281 7282 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; 7283 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, 7284 &mqe->un.read_rev); 7285 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, 7286 &mqe->un.read_rev); 7287 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, 7288 &mqe->un.read_rev); 7289 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, 7290 &mqe->un.read_rev); 7291 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; 7292 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); 7293 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; 7294 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); 7295 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; 7296 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); 7297 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7298 "(%d):0380 READ_REV Status x%x " 7299 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", 7300 mboxq->vport ? mboxq->vport->vpi : 0, 7301 bf_get(lpfc_mqe_status, mqe), 7302 phba->vpd.rev.opFwName, 7303 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, 7304 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); 7305 7306 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ 7307 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3); 7308 if (phba->pport->cfg_lun_queue_depth > rc) { 7309 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7310 "3362 LUN queue depth changed from %d to %d\n", 7311 phba->pport->cfg_lun_queue_depth, rc); 7312 phba->pport->cfg_lun_queue_depth = rc; 7313 } 7314 7315 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 7316 LPFC_SLI_INTF_IF_TYPE_0) { 7317 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY); 7318 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7319 if (rc == MBX_SUCCESS) { 7320 phba->hba_flag |= HBA_RECOVERABLE_UE; 7321 /* Set 1Sec interval to detect UE */ 7322 phba->eratt_poll_interval = 1; 7323 phba->sli4_hba.ue_to_sr = bf_get( 7324 lpfc_mbx_set_feature_UESR, 7325 &mboxq->u.mqe.un.set_feature); 7326 phba->sli4_hba.ue_to_rp = bf_get( 7327 lpfc_mbx_set_feature_UERP, 7328 &mboxq->u.mqe.un.set_feature); 7329 } 7330 } 7331 7332 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) { 7333 /* Enable MDS Diagnostics only if the SLI Port supports it */ 7334 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS); 7335 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7336 if (rc != MBX_SUCCESS) 7337 phba->mds_diags_support = 0; 7338 } 7339 7340 /* 7341 * Discover the port's supported feature set and match it against the 7342 * hosts requests. 7343 */ 7344 lpfc_request_features(phba, mboxq); 7345 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7346 if (unlikely(rc)) { 7347 rc = -EIO; 7348 goto out_free_mbox; 7349 } 7350 7351 /* 7352 * The port must support FCP initiator mode as this is the 7353 * only mode running in the host. 
7354 */ 7355 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { 7356 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7357 "0378 No support for fcpi mode.\n"); 7358 ftr_rsp++; 7359 } 7360 7361 /* Performance Hints are ONLY for FCoE */ 7362 if (phba->hba_flag & HBA_FCOE_MODE) { 7363 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) 7364 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; 7365 else 7366 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED; 7367 } 7368 7369 /* 7370 * If the port cannot support the host's requested features 7371 * then turn off the global config parameters to disable the 7372 * feature in the driver. This is not a fatal error. 7373 */ 7374 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 7375 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) { 7376 phba->cfg_enable_bg = 0; 7377 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 7378 ftr_rsp++; 7379 } 7380 } 7381 7382 if (phba->max_vpi && phba->cfg_enable_npiv && 7383 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 7384 ftr_rsp++; 7385 7386 if (ftr_rsp) { 7387 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7388 "0379 Feature Mismatch Data: x%08x %08x " 7389 "x%x x%x x%x\n", mqe->un.req_ftrs.word2, 7390 mqe->un.req_ftrs.word3, phba->cfg_enable_bg, 7391 phba->cfg_enable_npiv, phba->max_vpi); 7392 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 7393 phba->cfg_enable_bg = 0; 7394 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 7395 phba->cfg_enable_npiv = 0; 7396 } 7397 7398 /* These SLI3 features are assumed in SLI4 */ 7399 spin_lock_irq(&phba->hbalock); 7400 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 7401 spin_unlock_irq(&phba->hbalock); 7402 7403 /* 7404 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent 7405 * calls depends on these resources to complete port setup. 7406 */ 7407 rc = lpfc_sli4_alloc_resource_identifiers(phba); 7408 if (rc) { 7409 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7410 "2920 Failed to alloc Resource IDs " 7411 "rc = x%x\n", rc); 7412 goto out_free_mbox; 7413 } 7414 7415 lpfc_set_host_data(phba, mboxq); 7416 7417 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7418 if (rc) { 7419 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7420 "2134 Failed to set host os driver version %x", 7421 rc); 7422 } 7423 7424 /* Read the port's service parameters. */ 7425 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 7426 if (rc) { 7427 phba->link_state = LPFC_HBA_ERROR; 7428 rc = -ENOMEM; 7429 goto out_free_mbox; 7430 } 7431 7432 mboxq->vport = vport; 7433 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7434 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; 7435 if (rc == MBX_SUCCESS) { 7436 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); 7437 rc = 0; 7438 } 7439 7440 /* 7441 * This memory was allocated by the lpfc_read_sparam routine. Release 7442 * it to the mbuf pool. 7443 */ 7444 lpfc_mbuf_free(phba, mp->virt, mp->phys); 7445 kfree(mp); 7446 mboxq->ctx_buf = NULL; 7447 if (unlikely(rc)) { 7448 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7449 "0382 READ_SPARAM command failed " 7450 "status %d, mbxStatus x%x\n", 7451 rc, bf_get(lpfc_mqe_status, mqe)); 7452 phba->link_state = LPFC_HBA_ERROR; 7453 rc = -EIO; 7454 goto out_free_mbox; 7455 } 7456 7457 lpfc_update_vport_wwn(vport); 7458 7459 /* Update the fc_host data structures with new wwn. 
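 *
 * Condensed flow (error handling omitted) from the READ_SPARAM
 * completion above to the transport-visible names published below:
 *
 *   memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
 *   lpfc_update_vport_wwn(vport);
 *   fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
 *   fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);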
*/ 7460 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 7461 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 7462 7463 /* Create all the SLI4 queues */ 7464 rc = lpfc_sli4_queue_create(phba); 7465 if (rc) { 7466 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7467 "3089 Failed to allocate queues\n"); 7468 rc = -ENODEV; 7469 goto out_free_mbox; 7470 } 7471 /* Set up all the queues to the device */ 7472 rc = lpfc_sli4_queue_setup(phba); 7473 if (unlikely(rc)) { 7474 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7475 "0381 Error %d during queue setup.\n ", rc); 7476 goto out_stop_timers; 7477 } 7478 /* Initialize the driver internal SLI layer lists. */ 7479 lpfc_sli4_setup(phba); 7480 lpfc_sli4_queue_init(phba); 7481 7482 /* update host els xri-sgl sizes and mappings */ 7483 rc = lpfc_sli4_els_sgl_update(phba); 7484 if (unlikely(rc)) { 7485 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7486 "1400 Failed to update xri-sgl size and " 7487 "mapping: %d\n", rc); 7488 goto out_destroy_queue; 7489 } 7490 7491 /* register the els sgl pool to the port */ 7492 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list, 7493 phba->sli4_hba.els_xri_cnt); 7494 if (unlikely(rc < 0)) { 7495 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7496 "0582 Error %d during els sgl post " 7497 "operation\n", rc); 7498 rc = -ENODEV; 7499 goto out_destroy_queue; 7500 } 7501 phba->sli4_hba.els_xri_cnt = rc; 7502 7503 if (phba->nvmet_support) { 7504 /* update host nvmet xri-sgl sizes and mappings */ 7505 rc = lpfc_sli4_nvmet_sgl_update(phba); 7506 if (unlikely(rc)) { 7507 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7508 "6308 Failed to update nvmet-sgl size " 7509 "and mapping: %d\n", rc); 7510 goto out_destroy_queue; 7511 } 7512 7513 /* register the nvmet sgl pool to the port */ 7514 rc = lpfc_sli4_repost_sgl_list( 7515 phba, 7516 &phba->sli4_hba.lpfc_nvmet_sgl_list, 7517 phba->sli4_hba.nvmet_xri_cnt); 7518 if (unlikely(rc < 0)) { 7519 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7520 "3117 Error %d during nvmet " 7521 "sgl post\n", rc); 7522 rc = -ENODEV; 7523 goto out_destroy_queue; 7524 } 7525 phba->sli4_hba.nvmet_xri_cnt = rc; 7526 7527 cnt = phba->cfg_iocb_cnt * 1024; 7528 /* We need 1 iocbq for every SGL, for IO processing */ 7529 cnt += phba->sli4_hba.nvmet_xri_cnt; 7530 } else { 7531 /* update host common xri-sgl sizes and mappings */ 7532 rc = lpfc_sli4_io_sgl_update(phba); 7533 if (unlikely(rc)) { 7534 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7535 "6082 Failed to update nvme-sgl size " 7536 "and mapping: %d\n", rc); 7537 goto out_destroy_queue; 7538 } 7539 7540 /* register the allocated common sgl pool to the port */ 7541 rc = lpfc_sli4_repost_io_sgl_list(phba); 7542 if (unlikely(rc)) { 7543 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7544 "6116 Error %d during nvme sgl post " 7545 "operation\n", rc); 7546 /* Some NVME buffers were moved to abort nvme list */ 7547 /* A pci function reset will repost them */ 7548 rc = -ENODEV; 7549 goto out_destroy_queue; 7550 } 7551 cnt = phba->cfg_iocb_cnt * 1024; 7552 } 7553 7554 if (!phba->sli.iocbq_lookup) { 7555 /* Initialize and populate the iocb list per host */ 7556 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7557 "2821 initialize iocb list %d total %d\n", 7558 phba->cfg_iocb_cnt, cnt); 7559 rc = lpfc_init_iocb_list(phba, cnt); 7560 if (rc) { 7561 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7562 "1413 Failed to init iocb list.\n"); 7563 goto out_destroy_queue; 7564 } 7565 } 7566 
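	/*
	 * Worked example of the iocbq sizing above (the numbers are
	 * illustrative, not driver defaults): with cfg_iocb_cnt = 2 and
	 * 512 NVMET XRIs in target mode,
	 *
	 *	cnt = 2 * 1024 + 512 = 2560
	 *
	 * and lpfc_init_iocb_list(phba, cnt) populates that many iocbq
	 * entries for the host.
	 */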
7567 if (phba->nvmet_support) 7568 lpfc_nvmet_create_targetport(phba); 7569 7570 if (phba->nvmet_support && phba->cfg_nvmet_mrq) { 7571 /* Post initial buffers to all RQs created */ 7572 for (i = 0; i < phba->cfg_nvmet_mrq; i++) { 7573 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp; 7574 INIT_LIST_HEAD(&rqbp->rqb_buffer_list); 7575 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc; 7576 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free; 7577 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT; 7578 rqbp->buffer_count = 0; 7579 7580 lpfc_post_rq_buffer( 7581 phba, phba->sli4_hba.nvmet_mrq_hdr[i], 7582 phba->sli4_hba.nvmet_mrq_data[i], 7583 phba->cfg_nvmet_mrq_post, i); 7584 } 7585 } 7586 7587 /* Post the rpi header region to the device. */ 7588 rc = lpfc_sli4_post_all_rpi_hdrs(phba); 7589 if (unlikely(rc)) { 7590 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7591 "0393 Error %d during rpi post operation\n", 7592 rc); 7593 rc = -ENODEV; 7594 goto out_destroy_queue; 7595 } 7596 lpfc_sli4_node_prep(phba); 7597 7598 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 7599 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) { 7600 /* 7601 * The FC Port needs to register FCFI (index 0) 7602 */ 7603 lpfc_reg_fcfi(phba, mboxq); 7604 mboxq->vport = phba->pport; 7605 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7606 if (rc != MBX_SUCCESS) 7607 goto out_unset_queue; 7608 rc = 0; 7609 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, 7610 &mboxq->u.mqe.un.reg_fcfi); 7611 } else { 7612 /* We are a NVME Target mode with MRQ > 1 */ 7613 7614 /* First register the FCFI */ 7615 lpfc_reg_fcfi_mrq(phba, mboxq, 0); 7616 mboxq->vport = phba->pport; 7617 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7618 if (rc != MBX_SUCCESS) 7619 goto out_unset_queue; 7620 rc = 0; 7621 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi, 7622 &mboxq->u.mqe.un.reg_fcfi_mrq); 7623 7624 /* Next register the MRQs */ 7625 lpfc_reg_fcfi_mrq(phba, mboxq, 1); 7626 mboxq->vport = phba->pport; 7627 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7628 if (rc != MBX_SUCCESS) 7629 goto out_unset_queue; 7630 rc = 0; 7631 } 7632 /* Check if the port is configured to be disabled */ 7633 lpfc_sli_read_link_ste(phba); 7634 } 7635 7636 /* Don't post more new bufs if repost already recovered 7637 * the nvme sgls. 
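 *
 * In outline (initiator mode only; the code below adds the error path
 * and the optional multi-XRI pool creation):
 *
 *   if (phba->sli4_hba.io_xri_cnt == 0)
 *           lpfc_new_io_buf(phba, phba->sli4_hba.io_xri_max);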
7638 */ 7639 if (phba->nvmet_support == 0) { 7640 if (phba->sli4_hba.io_xri_cnt == 0) { 7641 len = lpfc_new_io_buf( 7642 phba, phba->sli4_hba.io_xri_max); 7643 if (len == 0) { 7644 rc = -ENOMEM; 7645 goto out_unset_queue; 7646 } 7647 7648 if (phba->cfg_xri_rebalancing) 7649 lpfc_create_multixri_pools(phba); 7650 } 7651 } else { 7652 phba->cfg_xri_rebalancing = 0; 7653 } 7654 7655 /* Arm the CQs and then EQs on device */ 7656 lpfc_sli4_arm_cqeq_intr(phba); 7657 7658 /* Indicate device interrupt mode */ 7659 phba->sli4_hba.intr_enable = 1; 7660 7661 /* Allow asynchronous mailbox command to go through */ 7662 spin_lock_irq(&phba->hbalock); 7663 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 7664 spin_unlock_irq(&phba->hbalock); 7665 7666 /* Post receive buffers to the device */ 7667 lpfc_sli4_rb_setup(phba); 7668 7669 /* Reset HBA FCF states after HBA reset */ 7670 phba->fcf.fcf_flag = 0; 7671 phba->fcf.current_rec.flag = 0; 7672 7673 /* Start the ELS watchdog timer */ 7674 mod_timer(&vport->els_tmofunc, 7675 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2))); 7676 7677 /* Start heart beat timer */ 7678 mod_timer(&phba->hb_tmofunc, 7679 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 7680 phba->hb_outstanding = 0; 7681 phba->last_completion_time = jiffies; 7682 7683 /* start eq_delay heartbeat */ 7684 if (phba->cfg_auto_imax) 7685 queue_delayed_work(phba->wq, &phba->eq_delay_work, 7686 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); 7687 7688 /* Start error attention (ERATT) polling timer */ 7689 mod_timer(&phba->eratt_poll, 7690 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 7691 7692 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 7693 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 7694 rc = pci_enable_pcie_error_reporting(phba->pcidev); 7695 if (!rc) { 7696 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7697 "2829 This device supports " 7698 "Advanced Error Reporting (AER)\n"); 7699 spin_lock_irq(&phba->hbalock); 7700 phba->hba_flag |= HBA_AER_ENABLED; 7701 spin_unlock_irq(&phba->hbalock); 7702 } else { 7703 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7704 "2830 This device does not support " 7705 "Advanced Error Reporting (AER)\n"); 7706 phba->cfg_aer_support = 0; 7707 } 7708 rc = 0; 7709 } 7710 7711 /* 7712 * The port is ready, set the host's link state to LINK_DOWN 7713 * in preparation for link interrupts. 
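 *
 * Condensed view of the link bring-up decision further below (the
 * FCoE-mode check and the mailbox plumbing are omitted here):
 *
 *   if (phba->hba_flag & LINK_DISABLED)
 *           lpfc_down_link(phba, mboxq);
 *   else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK &&
 *            !(phba->link_flag & LS_LOOPBACK_MODE))
 *           phba->lpfc_hba_init_link(phba, MBX_NOWAIT);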
7714 */ 7715 spin_lock_irq(&phba->hbalock); 7716 phba->link_state = LPFC_LINK_DOWN; 7717 7718 /* Check if physical ports are trunked */ 7719 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba)) 7720 phba->trunk_link.link0.state = LPFC_LINK_DOWN; 7721 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba)) 7722 phba->trunk_link.link1.state = LPFC_LINK_DOWN; 7723 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba)) 7724 phba->trunk_link.link2.state = LPFC_LINK_DOWN; 7725 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba)) 7726 phba->trunk_link.link3.state = LPFC_LINK_DOWN; 7727 spin_unlock_irq(&phba->hbalock); 7728 7729 if (!(phba->hba_flag & HBA_FCOE_MODE) && 7730 (phba->hba_flag & LINK_DISABLED)) { 7731 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 7732 "3103 Adapter Link is disabled.\n"); 7733 lpfc_down_link(phba, mboxq); 7734 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7735 if (rc != MBX_SUCCESS) { 7736 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 7737 "3104 Adapter failed to issue " 7738 "DOWN_LINK mbox cmd, rc:x%x\n", rc); 7739 goto out_io_buff_free; 7740 } 7741 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 7742 /* don't perform init_link on SLI4 FC port loopback test */ 7743 if (!(phba->link_flag & LS_LOOPBACK_MODE)) { 7744 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 7745 if (rc) 7746 goto out_io_buff_free; 7747 } 7748 } 7749 mempool_free(mboxq, phba->mbox_mem_pool); 7750 return rc; 7751 out_io_buff_free: 7752 /* Free allocated IO Buffers */ 7753 lpfc_io_free(phba); 7754 out_unset_queue: 7755 /* Unset all the queues set up in this routine when error out */ 7756 lpfc_sli4_queue_unset(phba); 7757 out_destroy_queue: 7758 lpfc_free_iocb_list(phba); 7759 lpfc_sli4_queue_destroy(phba); 7760 out_stop_timers: 7761 lpfc_stop_hba_timers(phba); 7762 out_free_mbox: 7763 mempool_free(mboxq, phba->mbox_mem_pool); 7764 return rc; 7765 } 7766 7767 /** 7768 * lpfc_mbox_timeout - Timeout call back function for mbox timer 7769 * @ptr: context object - pointer to hba structure. 7770 * 7771 * This is the callback function for mailbox timer. The mailbox 7772 * timer is armed when a new mailbox command is issued and the timer 7773 * is deleted when the mailbox complete. The function is called by 7774 * the kernel timer code when a mailbox does not complete within 7775 * expected time. This function wakes up the worker thread to 7776 * process the mailbox timeout and returns. All the processing is 7777 * done by the worker thread function lpfc_mbox_timeout_handler. 7778 **/ 7779 void 7780 lpfc_mbox_timeout(struct timer_list *t) 7781 { 7782 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo); 7783 unsigned long iflag; 7784 uint32_t tmo_posted; 7785 7786 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 7787 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; 7788 if (!tmo_posted) 7789 phba->pport->work_port_events |= WORKER_MBOX_TMO; 7790 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 7791 7792 if (!tmo_posted) 7793 lpfc_worker_wake_up(phba); 7794 return; 7795 } 7796 7797 /** 7798 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions 7799 * are pending 7800 * @phba: Pointer to HBA context object. 7801 * 7802 * This function checks if any mailbox completions are present on the mailbox 7803 * completion queue. 
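 *
 * Return: true if an unprocessed, non-asynchronous mailbox completion
 * is found on the queue, false otherwise.
 *
 * Minimal caller sketch (mirrors
 * lpfc_sli4_process_missed_mbox_completions() below):
 *
 *   if (lpfc_sli4_mbox_completions_pending(phba))
 *           lpfc_sli4_process_eq(phba, fpeq);
 *   else
 *           sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);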
7804 **/ 7805 static bool 7806 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba) 7807 { 7808 7809 uint32_t idx; 7810 struct lpfc_queue *mcq; 7811 struct lpfc_mcqe *mcqe; 7812 bool pending_completions = false; 7813 uint8_t qe_valid; 7814 7815 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 7816 return false; 7817 7818 /* Check for completions on mailbox completion queue */ 7819 7820 mcq = phba->sli4_hba.mbx_cq; 7821 idx = mcq->hba_index; 7822 qe_valid = mcq->qe_valid; 7823 while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe) == qe_valid) { 7824 mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe; 7825 if (bf_get_le32(lpfc_trailer_completed, mcqe) && 7826 (!bf_get_le32(lpfc_trailer_async, mcqe))) { 7827 pending_completions = true; 7828 break; 7829 } 7830 idx = (idx + 1) % mcq->entry_count; 7831 if (mcq->hba_index == idx) 7832 break; 7833 7834 /* if the index wrapped around, toggle the valid bit */ 7835 if (phba->sli4_hba.pc_sli4_params.cqav && !idx) 7836 qe_valid = (qe_valid) ? 0 : 1; 7837 } 7838 return pending_completions; 7839 7840 } 7841 7842 /** 7843 * lpfc_sli4_process_missed_mbox_completions - process mbox completions 7844 * that were missed. 7845 * @phba: Pointer to HBA context object. 7846 * 7847 * For sli4, it is possible to miss an interrupt. As such mbox completions 7848 * maybe missed causing erroneous mailbox timeouts to occur. This function 7849 * checks to see if mbox completions are on the mailbox completion queue 7850 * and will process all the completions associated with the eq for the 7851 * mailbox completion queue. 7852 **/ 7853 bool 7854 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) 7855 { 7856 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; 7857 uint32_t eqidx; 7858 struct lpfc_queue *fpeq = NULL; 7859 bool mbox_pending; 7860 7861 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 7862 return false; 7863 7864 /* Find the eq associated with the mcq */ 7865 7866 if (sli4_hba->hdwq) 7867 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) 7868 if (sli4_hba->hdwq[eqidx].hba_eq->queue_id == 7869 sli4_hba->mbx_cq->assoc_qid) { 7870 fpeq = sli4_hba->hdwq[eqidx].hba_eq; 7871 break; 7872 } 7873 if (!fpeq) 7874 return false; 7875 7876 /* Turn off interrupts from this EQ */ 7877 7878 sli4_hba->sli4_eq_clr_intr(fpeq); 7879 7880 /* Check to see if a mbox completion is pending */ 7881 7882 mbox_pending = lpfc_sli4_mbox_completions_pending(phba); 7883 7884 /* 7885 * If a mbox completion is pending, process all the events on EQ 7886 * associated with the mbox completion queue (this could include 7887 * mailbox commands, async events, els commands, receive queue data 7888 * and fcp commands) 7889 */ 7890 7891 if (mbox_pending) 7892 /* process and rearm the EQ */ 7893 lpfc_sli4_process_eq(phba, fpeq); 7894 else 7895 /* Always clear and re-arm the EQ */ 7896 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM); 7897 7898 return mbox_pending; 7899 7900 } 7901 7902 /** 7903 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout 7904 * @phba: Pointer to HBA context object. 7905 * 7906 * This function is called from worker thread when a mailbox command times out. 7907 * The caller is not required to hold any locks. This function will reset the 7908 * HBA and recover all the pending commands. 
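 *
 * Condensed outline of the recovery sequence implemented below
 * (illustrative only, not a separate code path):
 *
 *   if (lpfc_sli4_process_missed_mbox_completions(phba))
 *           return;
 *   phba->link_state = LPFC_LINK_UNKNOWN;
 *   lpfc_sli_abort_fcp_rings(phba);
 *   lpfc_reset_hba(phba);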
7909 **/ 7910 void 7911 lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 7912 { 7913 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 7914 MAILBOX_t *mb = NULL; 7915 7916 struct lpfc_sli *psli = &phba->sli; 7917 7918 /* If the mailbox completed, process the completion and return */ 7919 if (lpfc_sli4_process_missed_mbox_completions(phba)) 7920 return; 7921 7922 if (pmbox != NULL) 7923 mb = &pmbox->u.mb; 7924 /* Check the pmbox pointer first. There is a race condition 7925 * between the mbox timeout handler getting executed in the 7926 * worklist and the mailbox actually completing. When this 7927 * race condition occurs, the mbox_active will be NULL. 7928 */ 7929 spin_lock_irq(&phba->hbalock); 7930 if (pmbox == NULL) { 7931 lpfc_printf_log(phba, KERN_WARNING, 7932 LOG_MBOX | LOG_SLI, 7933 "0353 Active Mailbox cleared - mailbox timeout " 7934 "exiting\n"); 7935 spin_unlock_irq(&phba->hbalock); 7936 return; 7937 } 7938 7939 /* Mbox cmd <mbxCommand> timeout */ 7940 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7941 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 7942 mb->mbxCommand, 7943 phba->pport->port_state, 7944 phba->sli.sli_flag, 7945 phba->sli.mbox_active); 7946 spin_unlock_irq(&phba->hbalock); 7947 7948 /* Setting state unknown so lpfc_sli_abort_iocb_ring 7949 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 7950 * it to fail all outstanding SCSI IO. 7951 */ 7952 spin_lock_irq(&phba->pport->work_port_lock); 7953 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 7954 spin_unlock_irq(&phba->pport->work_port_lock); 7955 spin_lock_irq(&phba->hbalock); 7956 phba->link_state = LPFC_LINK_UNKNOWN; 7957 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 7958 spin_unlock_irq(&phba->hbalock); 7959 7960 lpfc_sli_abort_fcp_rings(phba); 7961 7962 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7963 "0345 Resetting board due to mailbox timeout\n"); 7964 7965 /* Reset the HBA device */ 7966 lpfc_reset_hba(phba); 7967 } 7968 7969 /** 7970 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware 7971 * @phba: Pointer to HBA context object. 7972 * @pmbox: Pointer to mailbox object. 7973 * @flag: Flag indicating how the mailbox need to be processed. 7974 * 7975 * This function is called by discovery code and HBA management code 7976 * to submit a mailbox command to firmware with SLI-3 interface spec. This 7977 * function gets the hbalock to protect the data structures. 7978 * The mailbox command can be submitted in polling mode, in which case 7979 * this function will wait in a polling loop for the completion of the 7980 * mailbox. 7981 * If the mailbox is submitted in no_wait mode (not polling) the 7982 * function will submit the command and returns immediately without waiting 7983 * for the mailbox completion. The no_wait is supported only when HBA 7984 * is in SLI2/SLI3 mode - interrupts are enabled. 7985 * The SLI interface allows only one mailbox pending at a time. If the 7986 * mailbox is issued in polling mode and there is already a mailbox 7987 * pending, then the function will return an error. If the mailbox is issued 7988 * in NO_WAIT mode and there is a mailbox pending already, the function 7989 * will return MBX_BUSY after queuing the mailbox into mailbox queue. 7990 * The sli layer owns the mailbox object until the completion of mailbox 7991 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other 7992 * return codes the caller owns the mailbox command after the return of 7993 * the function. 
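 *
 * Hedged caller sketches (simplified; real callers go through the
 * lpfc_sli_issue_mbox() wrapper and add their own error handling).
 * Polling mode, then no-wait mode with a completion handler; on
 * MBX_NOT_FINISHED the caller owns the mailbox and must release it:
 *
 *   rc = lpfc_sli_issue_mbox(phba, pmbox, MBX_POLL);
 *
 *   pmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *   rc = lpfc_sli_issue_mbox(phba, pmbox, MBX_NOWAIT);
 *   if (rc == MBX_NOT_FINISHED)
 *           mempool_free(pmbox, phba->mbox_mem_pool);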
7994 **/ 7995 static int 7996 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, 7997 uint32_t flag) 7998 { 7999 MAILBOX_t *mbx; 8000 struct lpfc_sli *psli = &phba->sli; 8001 uint32_t status, evtctr; 8002 uint32_t ha_copy, hc_copy; 8003 int i; 8004 unsigned long timeout; 8005 unsigned long drvr_flag = 0; 8006 uint32_t word0, ldata; 8007 void __iomem *to_slim; 8008 int processing_queue = 0; 8009 8010 spin_lock_irqsave(&phba->hbalock, drvr_flag); 8011 if (!pmbox) { 8012 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8013 /* processing mbox queue from intr_handler */ 8014 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 8015 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8016 return MBX_SUCCESS; 8017 } 8018 processing_queue = 1; 8019 pmbox = lpfc_mbox_get(phba); 8020 if (!pmbox) { 8021 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8022 return MBX_SUCCESS; 8023 } 8024 } 8025 8026 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && 8027 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { 8028 if(!pmbox->vport) { 8029 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8030 lpfc_printf_log(phba, KERN_ERR, 8031 LOG_MBOX | LOG_VPORT, 8032 "1806 Mbox x%x failed. No vport\n", 8033 pmbox->u.mb.mbxCommand); 8034 dump_stack(); 8035 goto out_not_finished; 8036 } 8037 } 8038 8039 /* If the PCI channel is in offline state, do not post mbox. */ 8040 if (unlikely(pci_channel_offline(phba->pcidev))) { 8041 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8042 goto out_not_finished; 8043 } 8044 8045 /* If HBA has a deferred error attention, fail the iocb. */ 8046 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 8047 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8048 goto out_not_finished; 8049 } 8050 8051 psli = &phba->sli; 8052 8053 mbx = &pmbox->u.mb; 8054 status = MBX_SUCCESS; 8055 8056 if (phba->link_state == LPFC_HBA_ERROR) { 8057 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8058 8059 /* Mbox command <mbxCommand> cannot issue */ 8060 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8061 "(%d):0311 Mailbox command x%x cannot " 8062 "issue Data: x%x x%x\n", 8063 pmbox->vport ? pmbox->vport->vpi : 0, 8064 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 8065 goto out_not_finished; 8066 } 8067 8068 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { 8069 if (lpfc_readl(phba->HCregaddr, &hc_copy) || 8070 !(hc_copy & HC_MBINT_ENA)) { 8071 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8072 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8073 "(%d):2528 Mailbox command x%x cannot " 8074 "issue Data: x%x x%x\n", 8075 pmbox->vport ? pmbox->vport->vpi : 0, 8076 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 8077 goto out_not_finished; 8078 } 8079 } 8080 8081 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 8082 /* Polling for a mbox command when another one is already active 8083 * is not allowed in SLI. Also, the driver must have established 8084 * SLI2 mode to queue and process multiple mbox commands. 8085 */ 8086 8087 if (flag & MBX_POLL) { 8088 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8089 8090 /* Mbox command <mbxCommand> cannot issue */ 8091 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8092 "(%d):2529 Mailbox command x%x " 8093 "cannot issue Data: x%x x%x\n", 8094 pmbox->vport ? 
pmbox->vport->vpi : 0, 8095 pmbox->u.mb.mbxCommand, 8096 psli->sli_flag, flag); 8097 goto out_not_finished; 8098 } 8099 8100 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { 8101 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8102 /* Mbox command <mbxCommand> cannot issue */ 8103 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8104 "(%d):2530 Mailbox command x%x " 8105 "cannot issue Data: x%x x%x\n", 8106 pmbox->vport ? pmbox->vport->vpi : 0, 8107 pmbox->u.mb.mbxCommand, 8108 psli->sli_flag, flag); 8109 goto out_not_finished; 8110 } 8111 8112 /* Another mailbox command is still being processed, queue this 8113 * command to be processed later. 8114 */ 8115 lpfc_mbox_put(phba, pmbox); 8116 8117 /* Mbox cmd issue - BUSY */ 8118 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8119 "(%d):0308 Mbox cmd issue - BUSY Data: " 8120 "x%x x%x x%x x%x\n", 8121 pmbox->vport ? pmbox->vport->vpi : 0xffffff, 8122 mbx->mbxCommand, 8123 phba->pport ? phba->pport->port_state : 0xff, 8124 psli->sli_flag, flag); 8125 8126 psli->slistat.mbox_busy++; 8127 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8128 8129 if (pmbox->vport) { 8130 lpfc_debugfs_disc_trc(pmbox->vport, 8131 LPFC_DISC_TRC_MBOX_VPORT, 8132 "MBOX Bsy vport: cmd:x%x mb:x%x x%x", 8133 (uint32_t)mbx->mbxCommand, 8134 mbx->un.varWords[0], mbx->un.varWords[1]); 8135 } 8136 else { 8137 lpfc_debugfs_disc_trc(phba->pport, 8138 LPFC_DISC_TRC_MBOX, 8139 "MBOX Bsy: cmd:x%x mb:x%x x%x", 8140 (uint32_t)mbx->mbxCommand, 8141 mbx->un.varWords[0], mbx->un.varWords[1]); 8142 } 8143 8144 return MBX_BUSY; 8145 } 8146 8147 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 8148 8149 /* If we are not polling, we MUST be in SLI2 mode */ 8150 if (flag != MBX_POLL) { 8151 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && 8152 (mbx->mbxCommand != MBX_KILL_BOARD)) { 8153 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8154 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8155 /* Mbox command <mbxCommand> cannot issue */ 8156 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8157 "(%d):2531 Mailbox command x%x " 8158 "cannot issue Data: x%x x%x\n", 8159 pmbox->vport ? pmbox->vport->vpi : 0, 8160 pmbox->u.mb.mbxCommand, 8161 psli->sli_flag, flag); 8162 goto out_not_finished; 8163 } 8164 /* timeout active mbox command */ 8165 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 8166 1000); 8167 mod_timer(&psli->mbox_tmo, jiffies + timeout); 8168 } 8169 8170 /* Mailbox cmd <cmd> issue */ 8171 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8172 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " 8173 "x%x\n", 8174 pmbox->vport ? pmbox->vport->vpi : 0, 8175 mbx->mbxCommand, 8176 phba->pport ? phba->pport->port_state : 0xff, 8177 psli->sli_flag, flag); 8178 8179 if (mbx->mbxCommand != MBX_HEARTBEAT) { 8180 if (pmbox->vport) { 8181 lpfc_debugfs_disc_trc(pmbox->vport, 8182 LPFC_DISC_TRC_MBOX_VPORT, 8183 "MBOX Send vport: cmd:x%x mb:x%x x%x", 8184 (uint32_t)mbx->mbxCommand, 8185 mbx->un.varWords[0], mbx->un.varWords[1]); 8186 } 8187 else { 8188 lpfc_debugfs_disc_trc(phba->pport, 8189 LPFC_DISC_TRC_MBOX, 8190 "MBOX Send: cmd:x%x mb:x%x x%x", 8191 (uint32_t)mbx->mbxCommand, 8192 mbx->un.varWords[0], mbx->un.varWords[1]); 8193 } 8194 } 8195 8196 psli->slistat.mbox_cmd++; 8197 evtctr = psli->slistat.mbox_event; 8198 8199 /* next set own bit for the adapter and copy over command word */ 8200 mbx->mbxOwner = OWN_CHIP; 8201 8202 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8203 /* Populate mbox extension offset word. 
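		 * The value written is the byte offset of the extension
		 * area from the start of the host mailbox, i.e.
		 *
		 *   (uint8_t *)phba->mbox_ext - (uint8_t *)phba->mbox
		 *
		 * matching the pointer arithmetic in the assignment below.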
*/ 8204 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { 8205 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 8206 = (uint8_t *)phba->mbox_ext 8207 - (uint8_t *)phba->mbox; 8208 } 8209 8210 /* Copy the mailbox extension data */ 8211 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) { 8212 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf, 8213 (uint8_t *)phba->mbox_ext, 8214 pmbox->in_ext_byte_len); 8215 } 8216 /* Copy command data to host SLIM area */ 8217 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); 8218 } else { 8219 /* Populate mbox extension offset word. */ 8220 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) 8221 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 8222 = MAILBOX_HBA_EXT_OFFSET; 8223 8224 /* Copy the mailbox extension data */ 8225 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) 8226 lpfc_memcpy_to_slim(phba->MBslimaddr + 8227 MAILBOX_HBA_EXT_OFFSET, 8228 pmbox->ctx_buf, pmbox->in_ext_byte_len); 8229 8230 if (mbx->mbxCommand == MBX_CONFIG_PORT) 8231 /* copy command data into host mbox for cmpl */ 8232 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, 8233 MAILBOX_CMD_SIZE); 8234 8235 /* First copy mbox command data to HBA SLIM, skip past first 8236 word */ 8237 to_slim = phba->MBslimaddr + sizeof (uint32_t); 8238 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0], 8239 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 8240 8241 /* Next copy over first word, with mbxOwner set */ 8242 ldata = *((uint32_t *)mbx); 8243 to_slim = phba->MBslimaddr; 8244 writel(ldata, to_slim); 8245 readl(to_slim); /* flush */ 8246 8247 if (mbx->mbxCommand == MBX_CONFIG_PORT) 8248 /* switch over to host mailbox */ 8249 psli->sli_flag |= LPFC_SLI_ACTIVE; 8250 } 8251 8252 wmb(); 8253 8254 switch (flag) { 8255 case MBX_NOWAIT: 8256 /* Set up reference to mailbox command */ 8257 psli->mbox_active = pmbox; 8258 /* Interrupt board to do it */ 8259 writel(CA_MBATT, phba->CAregaddr); 8260 readl(phba->CAregaddr); /* flush */ 8261 /* Don't wait for it to finish, just return */ 8262 break; 8263 8264 case MBX_POLL: 8265 /* Set up null reference to mailbox command */ 8266 psli->mbox_active = NULL; 8267 /* Interrupt board to do it */ 8268 writel(CA_MBATT, phba->CAregaddr); 8269 readl(phba->CAregaddr); /* flush */ 8270 8271 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8272 /* First read mbox status word */ 8273 word0 = *((uint32_t *)phba->mbox); 8274 word0 = le32_to_cpu(word0); 8275 } else { 8276 /* First read mbox status word */ 8277 if (lpfc_readl(phba->MBslimaddr, &word0)) { 8278 spin_unlock_irqrestore(&phba->hbalock, 8279 drvr_flag); 8280 goto out_not_finished; 8281 } 8282 } 8283 8284 /* Read the HBA Host Attention Register */ 8285 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 8286 spin_unlock_irqrestore(&phba->hbalock, 8287 drvr_flag); 8288 goto out_not_finished; 8289 } 8290 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 8291 1000) + jiffies; 8292 i = 0; 8293 /* Wait for command to complete */ 8294 while (((word0 & OWN_CHIP) == OWN_CHIP) || 8295 (!(ha_copy & HA_MBATT) && 8296 (phba->link_state > LPFC_WARM_START))) { 8297 if (time_after(jiffies, timeout)) { 8298 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8299 spin_unlock_irqrestore(&phba->hbalock, 8300 drvr_flag); 8301 goto out_not_finished; 8302 } 8303 8304 /* Check if we took a mbox interrupt while we were 8305 polling */ 8306 if (((word0 & OWN_CHIP) != OWN_CHIP) 8307 && (evtctr != psli->slistat.mbox_event)) 8308 break; 8309 8310 if (i++ > 10) { 8311 spin_unlock_irqrestore(&phba->hbalock, 8312 drvr_flag); 8313 msleep(1); 8314 spin_lock_irqsave(&phba->hbalock, 
drvr_flag); 8315 } 8316 8317 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8318 /* First copy command data */ 8319 word0 = *((uint32_t *)phba->mbox); 8320 word0 = le32_to_cpu(word0); 8321 if (mbx->mbxCommand == MBX_CONFIG_PORT) { 8322 MAILBOX_t *slimmb; 8323 uint32_t slimword0; 8324 /* Check real SLIM for any errors */ 8325 slimword0 = readl(phba->MBslimaddr); 8326 slimmb = (MAILBOX_t *) & slimword0; 8327 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 8328 && slimmb->mbxStatus) { 8329 psli->sli_flag &= 8330 ~LPFC_SLI_ACTIVE; 8331 word0 = slimword0; 8332 } 8333 } 8334 } else { 8335 /* First copy command data */ 8336 word0 = readl(phba->MBslimaddr); 8337 } 8338 /* Read the HBA Host Attention Register */ 8339 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 8340 spin_unlock_irqrestore(&phba->hbalock, 8341 drvr_flag); 8342 goto out_not_finished; 8343 } 8344 } 8345 8346 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8347 /* copy results back to user */ 8348 lpfc_sli_pcimem_bcopy(phba->mbox, mbx, 8349 MAILBOX_CMD_SIZE); 8350 /* Copy the mailbox extension data */ 8351 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) { 8352 lpfc_sli_pcimem_bcopy(phba->mbox_ext, 8353 pmbox->ctx_buf, 8354 pmbox->out_ext_byte_len); 8355 } 8356 } else { 8357 /* First copy command data */ 8358 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr, 8359 MAILBOX_CMD_SIZE); 8360 /* Copy the mailbox extension data */ 8361 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) { 8362 lpfc_memcpy_from_slim( 8363 pmbox->ctx_buf, 8364 phba->MBslimaddr + 8365 MAILBOX_HBA_EXT_OFFSET, 8366 pmbox->out_ext_byte_len); 8367 } 8368 } 8369 8370 writel(HA_MBATT, phba->HAregaddr); 8371 readl(phba->HAregaddr); /* flush */ 8372 8373 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8374 status = mbx->mbxStatus; 8375 } 8376 8377 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8378 return status; 8379 8380 out_not_finished: 8381 if (processing_queue) { 8382 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; 8383 lpfc_mbox_cmpl_put(phba, pmbox); 8384 } 8385 return MBX_NOT_FINISHED; 8386 } 8387 8388 /** 8389 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command 8390 * @phba: Pointer to HBA context object. 8391 * 8392 * The function blocks the posting of SLI4 asynchronous mailbox commands from 8393 * the driver internal pending mailbox queue. It will then try to wait out the 8394 * possible outstanding mailbox command before return. 8395 * 8396 * Returns: 8397 * 0 - the outstanding mailbox command completed; otherwise, the wait for 8398 * the outstanding mailbox command timed out. 8399 **/ 8400 static int 8401 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) 8402 { 8403 struct lpfc_sli *psli = &phba->sli; 8404 int rc = 0; 8405 unsigned long timeout = 0; 8406 8407 /* Mark the asynchronous mailbox command posting as blocked */ 8408 spin_lock_irq(&phba->hbalock); 8409 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 8410 /* Determine how long we might wait for the active mailbox 8411 * command to be gracefully completed by firmware. 
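	 *
	 * The wait budget is the command-specific mailbox timeout,
	 * converted to jiffies and anchored to the current time, i.e.:
	 *
	 *   timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
	 *                              phba->sli.mbox_active) * 1000)
	 *             + jiffies;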
8412 */ 8413 if (phba->sli.mbox_active) 8414 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 8415 phba->sli.mbox_active) * 8416 1000) + jiffies; 8417 spin_unlock_irq(&phba->hbalock); 8418 8419 /* Make sure the mailbox is really active */ 8420 if (timeout) 8421 lpfc_sli4_process_missed_mbox_completions(phba); 8422 8423 /* Wait for the outstnading mailbox command to complete */ 8424 while (phba->sli.mbox_active) { 8425 /* Check active mailbox complete status every 2ms */ 8426 msleep(2); 8427 if (time_after(jiffies, timeout)) { 8428 /* Timeout, marked the outstanding cmd not complete */ 8429 rc = 1; 8430 break; 8431 } 8432 } 8433 8434 /* Can not cleanly block async mailbox command, fails it */ 8435 if (rc) { 8436 spin_lock_irq(&phba->hbalock); 8437 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 8438 spin_unlock_irq(&phba->hbalock); 8439 } 8440 return rc; 8441 } 8442 8443 /** 8444 * lpfc_sli4_async_mbox_unblock - Block posting SLI4 async mailbox command 8445 * @phba: Pointer to HBA context object. 8446 * 8447 * The function unblocks and resume posting of SLI4 asynchronous mailbox 8448 * commands from the driver internal pending mailbox queue. It makes sure 8449 * that there is no outstanding mailbox command before resuming posting 8450 * asynchronous mailbox commands. If, for any reason, there is outstanding 8451 * mailbox command, it will try to wait it out before resuming asynchronous 8452 * mailbox command posting. 8453 **/ 8454 static void 8455 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba) 8456 { 8457 struct lpfc_sli *psli = &phba->sli; 8458 8459 spin_lock_irq(&phba->hbalock); 8460 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 8461 /* Asynchronous mailbox posting is not blocked, do nothing */ 8462 spin_unlock_irq(&phba->hbalock); 8463 return; 8464 } 8465 8466 /* Outstanding synchronous mailbox command is guaranteed to be done, 8467 * successful or timeout, after timing-out the outstanding mailbox 8468 * command shall always be removed, so just unblock posting async 8469 * mailbox command and resume 8470 */ 8471 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 8472 spin_unlock_irq(&phba->hbalock); 8473 8474 /* wake up worker thread to post asynchronlous mailbox command */ 8475 lpfc_worker_wake_up(phba); 8476 } 8477 8478 /** 8479 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready 8480 * @phba: Pointer to HBA context object. 8481 * @mboxq: Pointer to mailbox object. 8482 * 8483 * The function waits for the bootstrap mailbox register ready bit from 8484 * port for twice the regular mailbox command timeout value. 8485 * 8486 * 0 - no timeout on waiting for bootstrap mailbox register ready. 8487 * MBXERR_ERROR - wait for bootstrap mailbox register timed out. 8488 **/ 8489 static int 8490 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 8491 { 8492 uint32_t db_ready; 8493 unsigned long timeout; 8494 struct lpfc_register bmbx_reg; 8495 8496 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) 8497 * 1000) + jiffies; 8498 8499 do { 8500 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 8501 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); 8502 if (!db_ready) 8503 msleep(2); 8504 8505 if (time_after(jiffies, timeout)) 8506 return MBXERR_ERROR; 8507 } while (!db_ready); 8508 8509 return 0; 8510 } 8511 8512 /** 8513 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox 8514 * @phba: Pointer to HBA context object. 8515 * @mboxq: Pointer to mailbox object. 8516 * 8517 * The function posts a mailbox to the port. 
The mailbox is expected 8518 * to be comletely filled in and ready for the port to operate on it. 8519 * This routine executes a synchronous completion operation on the 8520 * mailbox by polling for its completion. 8521 * 8522 * The caller must not be holding any locks when calling this routine. 8523 * 8524 * Returns: 8525 * MBX_SUCCESS - mailbox posted successfully 8526 * Any of the MBX error values. 8527 **/ 8528 static int 8529 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 8530 { 8531 int rc = MBX_SUCCESS; 8532 unsigned long iflag; 8533 uint32_t mcqe_status; 8534 uint32_t mbx_cmnd; 8535 struct lpfc_sli *psli = &phba->sli; 8536 struct lpfc_mqe *mb = &mboxq->u.mqe; 8537 struct lpfc_bmbx_create *mbox_rgn; 8538 struct dma_address *dma_address; 8539 8540 /* 8541 * Only one mailbox can be active to the bootstrap mailbox region 8542 * at a time and there is no queueing provided. 8543 */ 8544 spin_lock_irqsave(&phba->hbalock, iflag); 8545 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 8546 spin_unlock_irqrestore(&phba->hbalock, iflag); 8547 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8548 "(%d):2532 Mailbox command x%x (x%x/x%x) " 8549 "cannot issue Data: x%x x%x\n", 8550 mboxq->vport ? mboxq->vport->vpi : 0, 8551 mboxq->u.mb.mbxCommand, 8552 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8553 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8554 psli->sli_flag, MBX_POLL); 8555 return MBXERR_ERROR; 8556 } 8557 /* The server grabs the token and owns it until release */ 8558 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 8559 phba->sli.mbox_active = mboxq; 8560 spin_unlock_irqrestore(&phba->hbalock, iflag); 8561 8562 /* wait for bootstrap mbox register for readyness */ 8563 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 8564 if (rc) 8565 goto exit; 8566 /* 8567 * Initialize the bootstrap memory region to avoid stale data areas 8568 * in the mailbox post. Then copy the caller's mailbox contents to 8569 * the bmbx mailbox region. 8570 */ 8571 mbx_cmnd = bf_get(lpfc_mqe_command, mb); 8572 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create)); 8573 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt, 8574 sizeof(struct lpfc_mqe)); 8575 8576 /* Post the high mailbox dma address to the port and wait for ready. */ 8577 dma_address = &phba->sli4_hba.bmbx.dma_address; 8578 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); 8579 8580 /* wait for bootstrap mbox register for hi-address write done */ 8581 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 8582 if (rc) 8583 goto exit; 8584 8585 /* Post the low mailbox dma address to the port. */ 8586 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); 8587 8588 /* wait for bootstrap mbox register for low address write done */ 8589 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 8590 if (rc) 8591 goto exit; 8592 8593 /* 8594 * Read the CQ to ensure the mailbox has completed. 8595 * If so, update the mailbox status so that the upper layers 8596 * can complete the request normally. 8597 */ 8598 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, 8599 sizeof(struct lpfc_mqe)); 8600 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; 8601 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, 8602 sizeof(struct lpfc_mcqe)); 8603 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); 8604 /* 8605 * When the CQE status indicates a failure and the mailbox status 8606 * indicates success then copy the CQE status into the mailbox status 8607 * (and prefix it with x4000). 
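	 *
	 * In outline (LPFC_MBX_ERROR_RANGE supplies the 0x4000 prefix
	 * noted above):
	 *
	 *   if (mcqe_status != MB_CQE_STATUS_SUCCESS &&
	 *       bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
	 *           bf_set(lpfc_mqe_status, mb,
	 *                  LPFC_MBX_ERROR_RANGE | mcqe_status);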
8608 */ 8609 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 8610 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS) 8611 bf_set(lpfc_mqe_status, mb, 8612 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 8613 rc = MBXERR_ERROR; 8614 } else 8615 lpfc_sli4_swap_str(phba, mboxq); 8616 8617 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8618 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x " 8619 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" 8620 " x%x x%x CQ: x%x x%x x%x x%x\n", 8621 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 8622 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8623 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8624 bf_get(lpfc_mqe_status, mb), 8625 mb->un.mb_words[0], mb->un.mb_words[1], 8626 mb->un.mb_words[2], mb->un.mb_words[3], 8627 mb->un.mb_words[4], mb->un.mb_words[5], 8628 mb->un.mb_words[6], mb->un.mb_words[7], 8629 mb->un.mb_words[8], mb->un.mb_words[9], 8630 mb->un.mb_words[10], mb->un.mb_words[11], 8631 mb->un.mb_words[12], mboxq->mcqe.word0, 8632 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 8633 mboxq->mcqe.trailer); 8634 exit: 8635 /* We are holding the token, no needed for lock when release */ 8636 spin_lock_irqsave(&phba->hbalock, iflag); 8637 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8638 phba->sli.mbox_active = NULL; 8639 spin_unlock_irqrestore(&phba->hbalock, iflag); 8640 return rc; 8641 } 8642 8643 /** 8644 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware 8645 * @phba: Pointer to HBA context object. 8646 * @pmbox: Pointer to mailbox object. 8647 * @flag: Flag indicating how the mailbox need to be processed. 8648 * 8649 * This function is called by discovery code and HBA management code to submit 8650 * a mailbox command to firmware with SLI-4 interface spec. 8651 * 8652 * Return codes the caller owns the mailbox command after the return of the 8653 * function. 8654 **/ 8655 static int 8656 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 8657 uint32_t flag) 8658 { 8659 struct lpfc_sli *psli = &phba->sli; 8660 unsigned long iflags; 8661 int rc; 8662 8663 /* dump from issue mailbox command if setup */ 8664 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb); 8665 8666 rc = lpfc_mbox_dev_check(phba); 8667 if (unlikely(rc)) { 8668 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8669 "(%d):2544 Mailbox command x%x (x%x/x%x) " 8670 "cannot issue Data: x%x x%x\n", 8671 mboxq->vport ? mboxq->vport->vpi : 0, 8672 mboxq->u.mb.mbxCommand, 8673 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8674 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8675 psli->sli_flag, flag); 8676 goto out_not_finished; 8677 } 8678 8679 /* Detect polling mode and jump to a handler */ 8680 if (!phba->sli4_hba.intr_enable) { 8681 if (flag == MBX_POLL) 8682 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 8683 else 8684 rc = -EIO; 8685 if (rc != MBX_SUCCESS) 8686 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 8687 "(%d):2541 Mailbox command x%x " 8688 "(x%x/x%x) failure: " 8689 "mqe_sta: x%x mcqe_sta: x%x/x%x " 8690 "Data: x%x x%x\n,", 8691 mboxq->vport ? 
mboxq->vport->vpi : 0, 8692 mboxq->u.mb.mbxCommand, 8693 lpfc_sli_config_mbox_subsys_get(phba, 8694 mboxq), 8695 lpfc_sli_config_mbox_opcode_get(phba, 8696 mboxq), 8697 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 8698 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 8699 bf_get(lpfc_mcqe_ext_status, 8700 &mboxq->mcqe), 8701 psli->sli_flag, flag); 8702 return rc; 8703 } else if (flag == MBX_POLL) { 8704 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 8705 "(%d):2542 Try to issue mailbox command " 8706 "x%x (x%x/x%x) synchronously ahead of async " 8707 "mailbox command queue: x%x x%x\n", 8708 mboxq->vport ? mboxq->vport->vpi : 0, 8709 mboxq->u.mb.mbxCommand, 8710 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8711 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8712 psli->sli_flag, flag); 8713 /* Try to block the asynchronous mailbox posting */ 8714 rc = lpfc_sli4_async_mbox_block(phba); 8715 if (!rc) { 8716 /* Successfully blocked, now issue sync mbox cmd */ 8717 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 8718 if (rc != MBX_SUCCESS) 8719 lpfc_printf_log(phba, KERN_WARNING, 8720 LOG_MBOX | LOG_SLI, 8721 "(%d):2597 Sync Mailbox command " 8722 "x%x (x%x/x%x) failure: " 8723 "mqe_sta: x%x mcqe_sta: x%x/x%x " 8724 "Data: x%x x%x\n,", 8725 mboxq->vport ? mboxq->vport->vpi : 0, 8726 mboxq->u.mb.mbxCommand, 8727 lpfc_sli_config_mbox_subsys_get(phba, 8728 mboxq), 8729 lpfc_sli_config_mbox_opcode_get(phba, 8730 mboxq), 8731 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 8732 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 8733 bf_get(lpfc_mcqe_ext_status, 8734 &mboxq->mcqe), 8735 psli->sli_flag, flag); 8736 /* Unblock the async mailbox posting afterward */ 8737 lpfc_sli4_async_mbox_unblock(phba); 8738 } 8739 return rc; 8740 } 8741 8742 /* Now, interrupt mode asynchrous mailbox command */ 8743 rc = lpfc_mbox_cmd_check(phba, mboxq); 8744 if (rc) { 8745 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8746 "(%d):2543 Mailbox command x%x (x%x/x%x) " 8747 "cannot issue Data: x%x x%x\n", 8748 mboxq->vport ? mboxq->vport->vpi : 0, 8749 mboxq->u.mb.mbxCommand, 8750 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8751 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8752 psli->sli_flag, flag); 8753 goto out_not_finished; 8754 } 8755 8756 /* Put the mailbox command to the driver internal FIFO */ 8757 psli->slistat.mbox_busy++; 8758 spin_lock_irqsave(&phba->hbalock, iflags); 8759 lpfc_mbox_put(phba, mboxq); 8760 spin_unlock_irqrestore(&phba->hbalock, iflags); 8761 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8762 "(%d):0354 Mbox cmd issue - Enqueue Data: " 8763 "x%x (x%x/x%x) x%x x%x x%x\n", 8764 mboxq->vport ? mboxq->vport->vpi : 0xffffff, 8765 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 8766 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8767 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8768 phba->pport->port_state, 8769 psli->sli_flag, MBX_NOWAIT); 8770 /* Wake up worker thread to transport mailbox command from head */ 8771 lpfc_worker_wake_up(phba); 8772 8773 return MBX_BUSY; 8774 8775 out_not_finished: 8776 return MBX_NOT_FINISHED; 8777 } 8778 8779 /** 8780 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device 8781 * @phba: Pointer to HBA context object. 8782 * 8783 * This function is called by worker thread to send a mailbox command to 8784 * SLI4 HBA firmware. 
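 *
 * Return: MBX_SUCCESS if a command was posted to the port (or the
 * pending queue was empty); MBX_NOT_FINISHED if posting is blocked,
 * another command is already active, or the post itself failed.
 *
 * Hedged outline of the happy path below:
 *
 *   mboxq = lpfc_mbox_get(phba);
 *   mod_timer(&psli->mbox_tmo, jiffies +
 *             msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq)));
 *   rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, &mboxq->u.mqe);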
8785 * 8786 **/ 8787 int 8788 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) 8789 { 8790 struct lpfc_sli *psli = &phba->sli; 8791 LPFC_MBOXQ_t *mboxq; 8792 int rc = MBX_SUCCESS; 8793 unsigned long iflags; 8794 struct lpfc_mqe *mqe; 8795 uint32_t mbx_cmnd; 8796 8797 /* Check interrupt mode before post async mailbox command */ 8798 if (unlikely(!phba->sli4_hba.intr_enable)) 8799 return MBX_NOT_FINISHED; 8800 8801 /* Check for mailbox command service token */ 8802 spin_lock_irqsave(&phba->hbalock, iflags); 8803 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 8804 spin_unlock_irqrestore(&phba->hbalock, iflags); 8805 return MBX_NOT_FINISHED; 8806 } 8807 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 8808 spin_unlock_irqrestore(&phba->hbalock, iflags); 8809 return MBX_NOT_FINISHED; 8810 } 8811 if (unlikely(phba->sli.mbox_active)) { 8812 spin_unlock_irqrestore(&phba->hbalock, iflags); 8813 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8814 "0384 There is pending active mailbox cmd\n"); 8815 return MBX_NOT_FINISHED; 8816 } 8817 /* Take the mailbox command service token */ 8818 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 8819 8820 /* Get the next mailbox command from head of queue */ 8821 mboxq = lpfc_mbox_get(phba); 8822 8823 /* If no more mailbox command waiting for post, we're done */ 8824 if (!mboxq) { 8825 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8826 spin_unlock_irqrestore(&phba->hbalock, iflags); 8827 return MBX_SUCCESS; 8828 } 8829 phba->sli.mbox_active = mboxq; 8830 spin_unlock_irqrestore(&phba->hbalock, iflags); 8831 8832 /* Check device readiness for posting mailbox command */ 8833 rc = lpfc_mbox_dev_check(phba); 8834 if (unlikely(rc)) 8835 /* Driver clean routine will clean up pending mailbox */ 8836 goto out_not_finished; 8837 8838 /* Prepare the mbox command to be posted */ 8839 mqe = &mboxq->u.mqe; 8840 mbx_cmnd = bf_get(lpfc_mqe_command, mqe); 8841 8842 /* Start timer for the mbox_tmo and log some mailbox post messages */ 8843 mod_timer(&psli->mbox_tmo, (jiffies + 8844 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq)))); 8845 8846 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8847 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " 8848 "x%x x%x\n", 8849 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 8850 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8851 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8852 phba->pport->port_state, psli->sli_flag); 8853 8854 if (mbx_cmnd != MBX_HEARTBEAT) { 8855 if (mboxq->vport) { 8856 lpfc_debugfs_disc_trc(mboxq->vport, 8857 LPFC_DISC_TRC_MBOX_VPORT, 8858 "MBOX Send vport: cmd:x%x mb:x%x x%x", 8859 mbx_cmnd, mqe->un.mb_words[0], 8860 mqe->un.mb_words[1]); 8861 } else { 8862 lpfc_debugfs_disc_trc(phba->pport, 8863 LPFC_DISC_TRC_MBOX, 8864 "MBOX Send: cmd:x%x mb:x%x x%x", 8865 mbx_cmnd, mqe->un.mb_words[0], 8866 mqe->un.mb_words[1]); 8867 } 8868 } 8869 psli->slistat.mbox_cmd++; 8870 8871 /* Post the mailbox command to the port */ 8872 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); 8873 if (rc != MBX_SUCCESS) { 8874 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8875 "(%d):2533 Mailbox command x%x (x%x/x%x) " 8876 "cannot issue Data: x%x x%x\n", 8877 mboxq->vport ? 
mboxq->vport->vpi : 0, 8878 mboxq->u.mb.mbxCommand, 8879 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8880 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8881 psli->sli_flag, MBX_NOWAIT); 8882 goto out_not_finished; 8883 } 8884 8885 return rc; 8886 8887 out_not_finished: 8888 spin_lock_irqsave(&phba->hbalock, iflags); 8889 if (phba->sli.mbox_active) { 8890 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 8891 __lpfc_mbox_cmpl_put(phba, mboxq); 8892 /* Release the token */ 8893 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8894 phba->sli.mbox_active = NULL; 8895 } 8896 spin_unlock_irqrestore(&phba->hbalock, iflags); 8897 8898 return MBX_NOT_FINISHED; 8899 } 8900 8901 /** 8902 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command 8903 * @phba: Pointer to HBA context object. 8904 * @pmbox: Pointer to mailbox object. 8905 * @flag: Flag indicating how the mailbox need to be processed. 8906 * 8907 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from 8908 * the API jump table function pointer from the lpfc_hba struct. 8909 * 8910 * Return codes the caller owns the mailbox command after the return of the 8911 * function. 8912 **/ 8913 int 8914 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 8915 { 8916 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); 8917 } 8918 8919 /** 8920 * lpfc_mbox_api_table_setup - Set up mbox api function jump table 8921 * @phba: The hba struct for which this call is being executed. 8922 * @dev_grp: The HBA PCI-Device group number. 8923 * 8924 * This routine sets up the mbox interface API function jump table in @phba 8925 * struct. 8926 * Returns: 0 - success, -ENODEV - failure. 8927 **/ 8928 int 8929 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 8930 { 8931 8932 switch (dev_grp) { 8933 case LPFC_PCI_DEV_LP: 8934 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; 8935 phba->lpfc_sli_handle_slow_ring_event = 8936 lpfc_sli_handle_slow_ring_event_s3; 8937 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; 8938 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; 8939 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; 8940 break; 8941 case LPFC_PCI_DEV_OC: 8942 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; 8943 phba->lpfc_sli_handle_slow_ring_event = 8944 lpfc_sli_handle_slow_ring_event_s4; 8945 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; 8946 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; 8947 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; 8948 break; 8949 default: 8950 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8951 "1420 Invalid HBA PCI-device group: 0x%x\n", 8952 dev_grp); 8953 return -ENODEV; 8954 break; 8955 } 8956 return 0; 8957 } 8958 8959 /** 8960 * __lpfc_sli_ringtx_put - Add an iocb to the txq 8961 * @phba: Pointer to HBA context object. 8962 * @pring: Pointer to driver SLI ring object. 8963 * @piocb: Pointer to address of newly added command iocb. 8964 * 8965 * This function is called with hbalock held to add a command 8966 * iocb to the txq when SLI layer cannot submit the command iocb 8967 * to the ring. 8968 **/ 8969 void 8970 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8971 struct lpfc_iocbq *piocb) 8972 { 8973 lockdep_assert_held(&phba->hbalock); 8974 /* Insert the caller's iocb in the txq tail for later processing. */ 8975 list_add_tail(&piocb->list, &pring->txq); 8976 } 8977 8978 /** 8979 * lpfc_sli_next_iocb - Get the next iocb in the txq 8980 * @phba: Pointer to HBA context object. 
8981 * @pring: Pointer to driver SLI ring object. 8982 * @piocb: Pointer to address of newly added command iocb. 8983 * 8984 * This function is called with hbalock held before a new 8985 * iocb is submitted to the firmware. This function checks 8986 * txq to flush the iocbs in txq to Firmware before 8987 * submitting new iocbs to the Firmware. 8988 * If there are iocbs in the txq which need to be submitted 8989 * to firmware, lpfc_sli_next_iocb returns the first element 8990 * of the txq after dequeuing it from txq. 8991 * If there is no iocb in the txq then the function will return 8992 * *piocb and *piocb is set to NULL. Caller needs to check 8993 * *piocb to find if there are more commands in the txq. 8994 **/ 8995 static struct lpfc_iocbq * 8996 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8997 struct lpfc_iocbq **piocb) 8998 { 8999 struct lpfc_iocbq * nextiocb; 9000 9001 lockdep_assert_held(&phba->hbalock); 9002 9003 nextiocb = lpfc_sli_ringtx_get(phba, pring); 9004 if (!nextiocb) { 9005 nextiocb = *piocb; 9006 *piocb = NULL; 9007 } 9008 9009 return nextiocb; 9010 } 9011 9012 /** 9013 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb 9014 * @phba: Pointer to HBA context object. 9015 * @ring_number: SLI ring number to issue iocb on. 9016 * @piocb: Pointer to command iocb. 9017 * @flag: Flag indicating if this command can be put into txq. 9018 * 9019 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue 9020 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is 9021 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT 9022 * flag is turned on, the function returns IOCB_ERROR. When the link is down, 9023 * this function allows only iocbs for posting buffers. This function finds 9024 * next available slot in the command ring and posts the command to the 9025 * available slot and writes the port attention register to request HBA start 9026 * processing new iocb. If there is no slot available in the ring and 9027 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise 9028 * the function returns IOCB_BUSY. 9029 * 9030 * This function is called with hbalock held. The function will return success 9031 * after it successfully submit the iocb to firmware or after adding to the 9032 * txq. 9033 **/ 9034 static int 9035 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, 9036 struct lpfc_iocbq *piocb, uint32_t flag) 9037 { 9038 struct lpfc_iocbq *nextiocb; 9039 IOCB_t *iocb; 9040 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number]; 9041 9042 lockdep_assert_held(&phba->hbalock); 9043 9044 if (piocb->iocb_cmpl && (!piocb->vport) && 9045 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 9046 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 9047 lpfc_printf_log(phba, KERN_ERR, 9048 LOG_SLI | LOG_VPORT, 9049 "1807 IOCB x%x failed. No vport\n", 9050 piocb->iocb.ulpCommand); 9051 dump_stack(); 9052 return IOCB_ERROR; 9053 } 9054 9055 9056 /* If the PCI channel is in offline state, do not post iocbs. */ 9057 if (unlikely(pci_channel_offline(phba->pcidev))) 9058 return IOCB_ERROR; 9059 9060 /* If HBA has a deferred error attention, fail the iocb. 
*/ 9061 if (unlikely(phba->hba_flag & DEFER_ERATT)) 9062 return IOCB_ERROR; 9063 9064 /* 9065 * We should never get an IOCB if we are in a < LINK_DOWN state 9066 */ 9067 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 9068 return IOCB_ERROR; 9069 9070 /* 9071 * Check to see if we are blocking IOCB processing because of a 9072 * outstanding event. 9073 */ 9074 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) 9075 goto iocb_busy; 9076 9077 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { 9078 /* 9079 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF 9080 * can be issued if the link is not up. 9081 */ 9082 switch (piocb->iocb.ulpCommand) { 9083 case CMD_GEN_REQUEST64_CR: 9084 case CMD_GEN_REQUEST64_CX: 9085 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || 9086 (piocb->iocb.un.genreq64.w5.hcsw.Rctl != 9087 FC_RCTL_DD_UNSOL_CMD) || 9088 (piocb->iocb.un.genreq64.w5.hcsw.Type != 9089 MENLO_TRANSPORT_TYPE)) 9090 9091 goto iocb_busy; 9092 break; 9093 case CMD_QUE_RING_BUF_CN: 9094 case CMD_QUE_RING_BUF64_CN: 9095 /* 9096 * For IOCBs, like QUE_RING_BUF, that have no rsp ring 9097 * completion, iocb_cmpl MUST be 0. 9098 */ 9099 if (piocb->iocb_cmpl) 9100 piocb->iocb_cmpl = NULL; 9101 /*FALLTHROUGH*/ 9102 case CMD_CREATE_XRI_CR: 9103 case CMD_CLOSE_XRI_CN: 9104 case CMD_CLOSE_XRI_CX: 9105 break; 9106 default: 9107 goto iocb_busy; 9108 } 9109 9110 /* 9111 * For FCP commands, we must be in a state where we can process link 9112 * attention events. 9113 */ 9114 } else if (unlikely(pring->ringno == LPFC_FCP_RING && 9115 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { 9116 goto iocb_busy; 9117 } 9118 9119 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 9120 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 9121 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 9122 9123 if (iocb) 9124 lpfc_sli_update_ring(phba, pring); 9125 else 9126 lpfc_sli_update_full_ring(phba, pring); 9127 9128 if (!piocb) 9129 return IOCB_SUCCESS; 9130 9131 goto out_busy; 9132 9133 iocb_busy: 9134 pring->stats.iocb_cmd_delay++; 9135 9136 out_busy: 9137 9138 if (!(flag & SLI_IOCB_RET_IOCB)) { 9139 __lpfc_sli_ringtx_put(phba, pring, piocb); 9140 return IOCB_SUCCESS; 9141 } 9142 9143 return IOCB_BUSY; 9144 } 9145 9146 /** 9147 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl. 9148 * @phba: Pointer to HBA context object. 9149 * @piocb: Pointer to command iocb. 9150 * @sglq: Pointer to the scatter gather queue object. 9151 * 9152 * This routine converts the bpl or bde that is in the IOCB 9153 * to a sgl list for the sli4 hardware. The physical address 9154 * of the bpl/bde is converted back to a virtual address. 9155 * If the IOCB contains a BPL then the list of BDE's is 9156 * converted to sli4_sge's. If the IOCB contains a single 9157 * BDE then it is converted to a single sli_sge. 9158 * The IOCB is still in cpu endianess so the contents of 9159 * the bpl can be used without byte swapping. 9160 * 9161 * Returns valid XRI = Success, NO_XRI = Failure. 
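 *
 * Conceptually (illustrative sketch only, simplified from the loop in
 * this routine), each BPL entry is copied into one SGE and the final
 * entry is marked last:
 *
 *	for (i = 0; i < numBdes; i++, bpl++, sgl++) {
 *		sgl->addr_hi = bpl->addrHigh;
 *		sgl->addr_lo = bpl->addrLow;
 *		bde.tus.w = le32_to_cpu(bpl->tus.w);
 *		sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
 *		bf_set(lpfc_sli4_sge_last, sgl, ((i + 1) == numBdes));
 *	}
 *
 * The word2 endian swaps and the GEN_REQUEST64 reply-offset bookkeeping
 * performed by the real loop are omitted here for brevity.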
9162 **/ 9163 static uint16_t 9164 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, 9165 struct lpfc_sglq *sglq) 9166 { 9167 uint16_t xritag = NO_XRI; 9168 struct ulp_bde64 *bpl = NULL; 9169 struct ulp_bde64 bde; 9170 struct sli4_sge *sgl = NULL; 9171 struct lpfc_dmabuf *dmabuf; 9172 IOCB_t *icmd; 9173 int numBdes = 0; 9174 int i = 0; 9175 uint32_t offset = 0; /* accumulated offset in the sg request list */ 9176 int inbound = 0; /* number of sg reply entries inbound from firmware */ 9177 9178 if (!piocbq || !sglq) 9179 return xritag; 9180 9181 sgl = (struct sli4_sge *)sglq->sgl; 9182 icmd = &piocbq->iocb; 9183 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX) 9184 return sglq->sli4_xritag; 9185 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 9186 numBdes = icmd->un.genreq64.bdl.bdeSize / 9187 sizeof(struct ulp_bde64); 9188 /* The addrHigh and addrLow fields within the IOCB 9189 * have not been byteswapped yet so there is no 9190 * need to swap them back. 9191 */ 9192 if (piocbq->context3) 9193 dmabuf = (struct lpfc_dmabuf *)piocbq->context3; 9194 else 9195 return xritag; 9196 9197 bpl = (struct ulp_bde64 *)dmabuf->virt; 9198 if (!bpl) 9199 return xritag; 9200 9201 for (i = 0; i < numBdes; i++) { 9202 /* Should already be byte swapped. */ 9203 sgl->addr_hi = bpl->addrHigh; 9204 sgl->addr_lo = bpl->addrLow; 9205 9206 sgl->word2 = le32_to_cpu(sgl->word2); 9207 if ((i+1) == numBdes) 9208 bf_set(lpfc_sli4_sge_last, sgl, 1); 9209 else 9210 bf_set(lpfc_sli4_sge_last, sgl, 0); 9211 /* swap the size field back to the cpu so we 9212 * can assign it to the sgl. 9213 */ 9214 bde.tus.w = le32_to_cpu(bpl->tus.w); 9215 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 9216 /* The offsets in the sgl need to be accumulated 9217 * separately for the request and reply lists. 9218 * The request is always first, the reply follows. 9219 */ 9220 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) { 9221 /* add up the reply sg entries */ 9222 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 9223 inbound++; 9224 /* first inbound? reset the offset */ 9225 if (inbound == 1) 9226 offset = 0; 9227 bf_set(lpfc_sli4_sge_offset, sgl, offset); 9228 bf_set(lpfc_sli4_sge_type, sgl, 9229 LPFC_SGE_TYPE_DATA); 9230 offset += bde.tus.f.bdeSize; 9231 } 9232 sgl->word2 = cpu_to_le32(sgl->word2); 9233 bpl++; 9234 sgl++; 9235 } 9236 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) { 9237 /* The addrHigh and addrLow fields of the BDE have not 9238 * been byteswapped yet so they need to be swapped 9239 * before putting them in the sgl. 9240 */ 9241 sgl->addr_hi = 9242 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); 9243 sgl->addr_lo = 9244 cpu_to_le32(icmd->un.genreq64.bdl.addrLow); 9245 sgl->word2 = le32_to_cpu(sgl->word2); 9246 bf_set(lpfc_sli4_sge_last, sgl, 1); 9247 sgl->word2 = cpu_to_le32(sgl->word2); 9248 sgl->sge_len = 9249 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize); 9250 } 9251 return sglq->sli4_xritag; 9252 } 9253 9254 /** 9255 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry. 9256 * @phba: Pointer to HBA context object. 9257 * @piocb: Pointer to command iocb. 9258 * @wqe: Pointer to the work queue entry. 9259 * 9260 * This routine converts the iocb command to its Work Queue Entry 9261 * equivalent. The wqe pointer should not have any fields set when 9262 * this routine is called because it will memcpy over them. 9263 * This routine does not set the CQ_ID or the WQEC bits in the 9264 * wqe. 9265 * 9266 * Returns: 0 = Success, IOCB_ERROR = Failure. 
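 *
 * Whichever command-specific case is taken below, the translation always
 * finishes by stamping the fields common to every WQE (this mirrors the
 * code at the end of this routine and is shown here only as a summary):
 *
 *	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
 *	bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
 *	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
 *	bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
 *	bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
 *	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);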
9267 **/ 9268 static int 9269 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, 9270 union lpfc_wqe128 *wqe) 9271 { 9272 uint32_t xmit_len = 0, total_len = 0; 9273 uint8_t ct = 0; 9274 uint32_t fip; 9275 uint32_t abort_tag; 9276 uint8_t command_type = ELS_COMMAND_NON_FIP; 9277 uint8_t cmnd; 9278 uint16_t xritag; 9279 uint16_t abrt_iotag; 9280 struct lpfc_iocbq *abrtiocbq; 9281 struct ulp_bde64 *bpl = NULL; 9282 uint32_t els_id = LPFC_ELS_ID_DEFAULT; 9283 int numBdes, i; 9284 struct ulp_bde64 bde; 9285 struct lpfc_nodelist *ndlp; 9286 uint32_t *pcmd; 9287 uint32_t if_type; 9288 9289 fip = phba->hba_flag & HBA_FIP_SUPPORT; 9290 /* The fcp commands will set command type */ 9291 if (iocbq->iocb_flag & LPFC_IO_FCP) 9292 command_type = FCP_COMMAND; 9293 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)) 9294 command_type = ELS_COMMAND_FIP; 9295 else 9296 command_type = ELS_COMMAND_NON_FIP; 9297 9298 if (phba->fcp_embed_io) 9299 memset(wqe, 0, sizeof(union lpfc_wqe128)); 9300 /* Some of the fields are in the right position already */ 9301 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 9302 if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) { 9303 /* The ct field has moved so reset */ 9304 wqe->generic.wqe_com.word7 = 0; 9305 wqe->generic.wqe_com.word10 = 0; 9306 } 9307 9308 abort_tag = (uint32_t) iocbq->iotag; 9309 xritag = iocbq->sli4_xritag; 9310 /* words0-2 bpl convert bde */ 9311 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 9312 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 9313 sizeof(struct ulp_bde64); 9314 bpl = (struct ulp_bde64 *) 9315 ((struct lpfc_dmabuf *)iocbq->context3)->virt; 9316 if (!bpl) 9317 return IOCB_ERROR; 9318 9319 /* Should already be byte swapped. */ 9320 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh); 9321 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow); 9322 /* swap the size field back to the cpu so we 9323 * can assign it to the sgl. 
9324 */ 9325 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); 9326 xmit_len = wqe->generic.bde.tus.f.bdeSize; 9327 total_len = 0; 9328 for (i = 0; i < numBdes; i++) { 9329 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 9330 total_len += bde.tus.f.bdeSize; 9331 } 9332 } else 9333 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; 9334 9335 iocbq->iocb.ulpIoTag = iocbq->iotag; 9336 cmnd = iocbq->iocb.ulpCommand; 9337 9338 switch (iocbq->iocb.ulpCommand) { 9339 case CMD_ELS_REQUEST64_CR: 9340 if (iocbq->iocb_flag & LPFC_IO_LIBDFC) 9341 ndlp = iocbq->context_un.ndlp; 9342 else 9343 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9344 if (!iocbq->iocb.ulpLe) { 9345 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9346 "2007 Only Limited Edition cmd Format" 9347 " supported 0x%x\n", 9348 iocbq->iocb.ulpCommand); 9349 return IOCB_ERROR; 9350 } 9351 9352 wqe->els_req.payload_len = xmit_len; 9353 /* Els_reguest64 has a TMO */ 9354 bf_set(wqe_tmo, &wqe->els_req.wqe_com, 9355 iocbq->iocb.ulpTimeout); 9356 /* Need a VF for word 4 set the vf bit*/ 9357 bf_set(els_req64_vf, &wqe->els_req, 0); 9358 /* And a VFID for word 12 */ 9359 bf_set(els_req64_vfid, &wqe->els_req, 0); 9360 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 9361 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 9362 iocbq->iocb.ulpContext); 9363 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 9364 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0); 9365 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 9366 if (command_type == ELS_COMMAND_FIP) 9367 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 9368 >> LPFC_FIP_ELS_ID_SHIFT); 9369 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 9370 iocbq->context2)->virt); 9371 if_type = bf_get(lpfc_sli_intf_if_type, 9372 &phba->sli4_hba.sli_intf); 9373 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 9374 if (pcmd && (*pcmd == ELS_CMD_FLOGI || 9375 *pcmd == ELS_CMD_SCR || 9376 *pcmd == ELS_CMD_FDISC || 9377 *pcmd == ELS_CMD_LOGO || 9378 *pcmd == ELS_CMD_PLOGI)) { 9379 bf_set(els_req64_sp, &wqe->els_req, 1); 9380 bf_set(els_req64_sid, &wqe->els_req, 9381 iocbq->vport->fc_myDID); 9382 if ((*pcmd == ELS_CMD_FLOGI) && 9383 !(phba->fc_topology == 9384 LPFC_TOPOLOGY_LOOP)) 9385 bf_set(els_req64_sid, &wqe->els_req, 0); 9386 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); 9387 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 9388 phba->vpi_ids[iocbq->vport->vpi]); 9389 } else if (pcmd && iocbq->context1) { 9390 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0); 9391 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 9392 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 9393 } 9394 } 9395 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, 9396 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 9397 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 9398 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 9399 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 9400 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); 9401 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); 9402 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); 9403 wqe->els_req.max_response_payload_len = total_len - xmit_len; 9404 break; 9405 case CMD_XMIT_SEQUENCE64_CX: 9406 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, 9407 iocbq->iocb.un.ulpWord[3]); 9408 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, 9409 iocbq->iocb.unsli3.rcvsli3.ox_id); 9410 /* The entire sequence is transmitted for this IOCB */ 9411 xmit_len = total_len; 9412 cmnd = CMD_XMIT_SEQUENCE64_CR; 9413 if (phba->link_flag & LS_LOOPBACK_MODE) 9414 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); 9415 /* fall through */ 9416 case CMD_XMIT_SEQUENCE64_CR: 
9417 /* word3 iocb=io_tag32 wqe=reserved */ 9418 wqe->xmit_sequence.rsvd3 = 0; 9419 /* word4 relative_offset memcpy */ 9420 /* word5 r_ctl/df_ctl memcpy */ 9421 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); 9422 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); 9423 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, 9424 LPFC_WQE_IOD_WRITE); 9425 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, 9426 LPFC_WQE_LENLOC_WORD12); 9427 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 9428 wqe->xmit_sequence.xmit_len = xmit_len; 9429 command_type = OTHER_COMMAND; 9430 break; 9431 case CMD_XMIT_BCAST64_CN: 9432 /* word3 iocb=iotag32 wqe=seq_payload_len */ 9433 wqe->xmit_bcast64.seq_payload_len = xmit_len; 9434 /* word4 iocb=rsvd wqe=rsvd */ 9435 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ 9436 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ 9437 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com, 9438 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9439 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1); 9440 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE); 9441 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, 9442 LPFC_WQE_LENLOC_WORD3); 9443 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); 9444 break; 9445 case CMD_FCP_IWRITE64_CR: 9446 command_type = FCP_COMMAND_DATA_OUT; 9447 /* word3 iocb=iotag wqe=payload_offset_len */ 9448 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 9449 bf_set(payload_offset_len, &wqe->fcp_iwrite, 9450 xmit_len + sizeof(struct fcp_rsp)); 9451 bf_set(cmd_buff_len, &wqe->fcp_iwrite, 9452 0); 9453 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 9454 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 9455 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, 9456 iocbq->iocb.ulpFCP2Rcvy); 9457 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); 9458 /* Always open the exchange */ 9459 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); 9460 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, 9461 LPFC_WQE_LENLOC_WORD4); 9462 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); 9463 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); 9464 if (iocbq->iocb_flag & LPFC_IO_OAS) { 9465 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1); 9466 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); 9467 if (iocbq->priority) { 9468 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, 9469 (iocbq->priority << 1)); 9470 } else { 9471 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, 9472 (phba->cfg_XLanePriority << 1)); 9473 } 9474 } 9475 /* Note, word 10 is already initialized to 0 */ 9476 9477 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */ 9478 if (phba->cfg_enable_pbde) 9479 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1); 9480 else 9481 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0); 9482 9483 if (phba->fcp_embed_io) { 9484 struct lpfc_io_buf *lpfc_cmd; 9485 struct sli4_sge *sgl; 9486 struct fcp_cmnd *fcp_cmnd; 9487 uint32_t *ptr; 9488 9489 /* 128 byte wqe support here */ 9490 9491 lpfc_cmd = iocbq->context1; 9492 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 9493 fcp_cmnd = lpfc_cmd->fcp_cmnd; 9494 9495 /* Word 0-2 - FCP_CMND */ 9496 wqe->generic.bde.tus.f.bdeFlags = 9497 BUFF_TYPE_BDE_IMMED; 9498 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; 9499 wqe->generic.bde.addrHigh = 0; 9500 wqe->generic.bde.addrLow = 88; /* Word 22 */ 9501 9502 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1); 9503 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0); 9504 9505 /* Word 22-29 FCP CMND Payload */ 9506 ptr = &wqe->words[22]; 9507 memcpy(ptr, fcp_cmnd, 
sizeof(struct fcp_cmnd)); 9508 } 9509 break; 9510 case CMD_FCP_IREAD64_CR: 9511 /* word3 iocb=iotag wqe=payload_offset_len */ 9512 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 9513 bf_set(payload_offset_len, &wqe->fcp_iread, 9514 xmit_len + sizeof(struct fcp_rsp)); 9515 bf_set(cmd_buff_len, &wqe->fcp_iread, 9516 0); 9517 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 9518 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 9519 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, 9520 iocbq->iocb.ulpFCP2Rcvy); 9521 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); 9522 /* Always open the exchange */ 9523 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); 9524 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, 9525 LPFC_WQE_LENLOC_WORD4); 9526 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 9527 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); 9528 if (iocbq->iocb_flag & LPFC_IO_OAS) { 9529 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1); 9530 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1); 9531 if (iocbq->priority) { 9532 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, 9533 (iocbq->priority << 1)); 9534 } else { 9535 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, 9536 (phba->cfg_XLanePriority << 1)); 9537 } 9538 } 9539 /* Note, word 10 is already initialized to 0 */ 9540 9541 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */ 9542 if (phba->cfg_enable_pbde) 9543 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1); 9544 else 9545 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0); 9546 9547 if (phba->fcp_embed_io) { 9548 struct lpfc_io_buf *lpfc_cmd; 9549 struct sli4_sge *sgl; 9550 struct fcp_cmnd *fcp_cmnd; 9551 uint32_t *ptr; 9552 9553 /* 128 byte wqe support here */ 9554 9555 lpfc_cmd = iocbq->context1; 9556 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 9557 fcp_cmnd = lpfc_cmd->fcp_cmnd; 9558 9559 /* Word 0-2 - FCP_CMND */ 9560 wqe->generic.bde.tus.f.bdeFlags = 9561 BUFF_TYPE_BDE_IMMED; 9562 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; 9563 wqe->generic.bde.addrHigh = 0; 9564 wqe->generic.bde.addrLow = 88; /* Word 22 */ 9565 9566 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1); 9567 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0); 9568 9569 /* Word 22-29 FCP CMND Payload */ 9570 ptr = &wqe->words[22]; 9571 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 9572 } 9573 break; 9574 case CMD_FCP_ICMND64_CR: 9575 /* word3 iocb=iotag wqe=payload_offset_len */ 9576 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 9577 bf_set(payload_offset_len, &wqe->fcp_icmd, 9578 xmit_len + sizeof(struct fcp_rsp)); 9579 bf_set(cmd_buff_len, &wqe->fcp_icmd, 9580 0); 9581 /* word3 iocb=IO_TAG wqe=reserved */ 9582 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); 9583 /* Always open the exchange */ 9584 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); 9585 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); 9586 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); 9587 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 9588 LPFC_WQE_LENLOC_NONE); 9589 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, 9590 iocbq->iocb.ulpFCP2Rcvy); 9591 if (iocbq->iocb_flag & LPFC_IO_OAS) { 9592 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1); 9593 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1); 9594 if (iocbq->priority) { 9595 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, 9596 (iocbq->priority << 1)); 9597 } else { 9598 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, 9599 (phba->cfg_XLanePriority << 1)); 9600 } 9601 } 9602 /* Note, word 10 is already initialized to 0 */ 9603 9604 if (phba->fcp_embed_io) { 9605 struct lpfc_io_buf *lpfc_cmd; 9606 struct 
sli4_sge *sgl; 9607 struct fcp_cmnd *fcp_cmnd; 9608 uint32_t *ptr; 9609 9610 /* 128 byte wqe support here */ 9611 9612 lpfc_cmd = iocbq->context1; 9613 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 9614 fcp_cmnd = lpfc_cmd->fcp_cmnd; 9615 9616 /* Word 0-2 - FCP_CMND */ 9617 wqe->generic.bde.tus.f.bdeFlags = 9618 BUFF_TYPE_BDE_IMMED; 9619 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; 9620 wqe->generic.bde.addrHigh = 0; 9621 wqe->generic.bde.addrLow = 88; /* Word 22 */ 9622 9623 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1); 9624 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0); 9625 9626 /* Word 22-29 FCP CMND Payload */ 9627 ptr = &wqe->words[22]; 9628 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 9629 } 9630 break; 9631 case CMD_GEN_REQUEST64_CR: 9632 /* For this command calculate the xmit length of the 9633 * request bde. 9634 */ 9635 xmit_len = 0; 9636 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 9637 sizeof(struct ulp_bde64); 9638 for (i = 0; i < numBdes; i++) { 9639 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 9640 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 9641 break; 9642 xmit_len += bde.tus.f.bdeSize; 9643 } 9644 /* word3 iocb=IO_TAG wqe=request_payload_len */ 9645 wqe->gen_req.request_payload_len = xmit_len; 9646 /* word4 iocb=parameter wqe=relative_offset memcpy */ 9647 /* word5 [rctl, type, df_ctl, la] copied in memcpy */ 9648 /* word6 context tag copied in memcpy */ 9649 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { 9650 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 9651 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9652 "2015 Invalid CT %x command 0x%x\n", 9653 ct, iocbq->iocb.ulpCommand); 9654 return IOCB_ERROR; 9655 } 9656 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0); 9657 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout); 9658 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU); 9659 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); 9660 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); 9661 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); 9662 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 9663 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 9664 wqe->gen_req.max_response_payload_len = total_len - xmit_len; 9665 command_type = OTHER_COMMAND; 9666 break; 9667 case CMD_XMIT_ELS_RSP64_CX: 9668 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9669 /* words0-2 BDE memcpy */ 9670 /* word3 iocb=iotag32 wqe=response_payload_len */ 9671 wqe->xmit_els_rsp.response_payload_len = xmit_len; 9672 /* word4 */ 9673 wqe->xmit_els_rsp.word4 = 0; 9674 /* word5 iocb=rsvd wge=did */ 9675 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, 9676 iocbq->iocb.un.xseq64.xmit_els_remoteID); 9677 9678 if_type = bf_get(lpfc_sli_intf_if_type, 9679 &phba->sli4_hba.sli_intf); 9680 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 9681 if (iocbq->vport->fc_flag & FC_PT2PT) { 9682 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 9683 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 9684 iocbq->vport->fc_myDID); 9685 if (iocbq->vport->fc_myDID == Fabric_DID) { 9686 bf_set(wqe_els_did, 9687 &wqe->xmit_els_rsp.wqe_dest, 0); 9688 } 9689 } 9690 } 9691 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 9692 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9693 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); 9694 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 9695 iocbq->iocb.unsli3.rcvsli3.ox_id); 9696 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 9697 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 9698 phba->vpi_ids[iocbq->vport->vpi]); 9699 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 
1); 9700 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); 9701 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); 9702 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, 9703 LPFC_WQE_LENLOC_WORD3); 9704 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 9705 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, 9706 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 9707 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 9708 iocbq->context2)->virt); 9709 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 9710 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 9711 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 9712 iocbq->vport->fc_myDID); 9713 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1); 9714 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 9715 phba->vpi_ids[phba->pport->vpi]); 9716 } 9717 command_type = OTHER_COMMAND; 9718 break; 9719 case CMD_CLOSE_XRI_CN: 9720 case CMD_ABORT_XRI_CN: 9721 case CMD_ABORT_XRI_CX: 9722 /* words 0-2 memcpy should be 0 rserved */ 9723 /* port will send abts */ 9724 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag; 9725 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) { 9726 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag]; 9727 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK; 9728 } else 9729 fip = 0; 9730 9731 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip) 9732 /* 9733 * The link is down, or the command was ELS_FIP 9734 * so the fw does not need to send abts 9735 * on the wire. 9736 */ 9737 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); 9738 else 9739 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 9740 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 9741 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */ 9742 wqe->abort_cmd.rsrvd5 = 0; 9743 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com, 9744 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9745 abort_tag = iocbq->iocb.un.acxri.abortIoTag; 9746 /* 9747 * The abort handler will send us CMD_ABORT_XRI_CN or 9748 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX 9749 */ 9750 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 9751 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); 9752 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, 9753 LPFC_WQE_LENLOC_NONE); 9754 cmnd = CMD_ABORT_XRI_CX; 9755 command_type = OTHER_COMMAND; 9756 xritag = 0; 9757 break; 9758 case CMD_XMIT_BLS_RSP64_CX: 9759 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9760 /* As BLS ABTS RSP WQE is very different from other WQEs, 9761 * we re-construct this WQE here based on information in 9762 * iocbq from scratch. 9763 */ 9764 memset(wqe, 0, sizeof(union lpfc_wqe)); 9765 /* OX_ID is invariable to who sent ABTS to CT exchange */ 9766 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, 9767 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); 9768 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) == 9769 LPFC_ABTS_UNSOL_INT) { 9770 /* ABTS sent by initiator to CT exchange, the 9771 * RX_ID field will be filled with the newly 9772 * allocated responder XRI. 9773 */ 9774 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 9775 iocbq->sli4_xritag); 9776 } else { 9777 /* ABTS sent by responder to CT exchange, the 9778 * RX_ID field will be filled with the responder 9779 * RX_ID from ABTS. 
9780 */ 9781 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 9782 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp)); 9783 } 9784 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); 9785 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); 9786 9787 /* Use CT=VPI */ 9788 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest, 9789 ndlp->nlp_DID); 9790 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp, 9791 iocbq->iocb.ulpContext); 9792 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1); 9793 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, 9794 phba->vpi_ids[phba->pport->vpi]); 9795 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1); 9796 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com, 9797 LPFC_WQE_LENLOC_NONE); 9798 /* Overwrite the pre-set comnd type with OTHER_COMMAND */ 9799 command_type = OTHER_COMMAND; 9800 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) { 9801 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp, 9802 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp)); 9803 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp, 9804 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp)); 9805 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp, 9806 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); 9807 } 9808 9809 break; 9810 case CMD_SEND_FRAME: 9811 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 9812 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 9813 return 0; 9814 case CMD_XRI_ABORTED_CX: 9815 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 9816 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ 9817 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */ 9818 case CMD_FCP_TRSP64_CX: /* Target mode rcv */ 9819 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */ 9820 default: 9821 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9822 "2014 Invalid command 0x%x\n", 9823 iocbq->iocb.ulpCommand); 9824 return IOCB_ERROR; 9825 break; 9826 } 9827 9828 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS) 9829 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU); 9830 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP) 9831 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP); 9832 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT) 9833 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT); 9834 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP | 9835 LPFC_IO_DIF_INSERT); 9836 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 9837 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 9838 wqe->generic.wqe_com.abort_tag = abort_tag; 9839 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type); 9840 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd); 9841 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass); 9842 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 9843 return 0; 9844 } 9845 9846 /** 9847 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb 9848 * @phba: Pointer to HBA context object. 9849 * @ring_number: SLI ring number to issue iocb on. 9850 * @piocb: Pointer to command iocb. 9851 * @flag: Flag indicating if this command can be put into txq. 9852 * 9853 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue 9854 * an iocb command to an HBA with SLI-4 interface spec. 9855 * 9856 * This function is called with hbalock held. The function will return success 9857 * after it successfully submit the iocb to firmware or after adding to the 9858 * txq. 
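 *
 * Reduced to its essentials, the submit path below is (illustrative
 * sketch only; the real routine adds txq deferral, NO_XRI handling and
 * error checking):
 *
 *	sglq = __lpfc_sli_get_els_sglq(phba, piocb);
 *	piocb->sli4_lxritag = sglq->sli4_lxritag;
 *	piocb->sli4_xritag = sglq->sli4_xritag;
 *	lpfc_sli4_bpl2sgl(phba, piocb, sglq);
 *	lpfc_sli4_iocb2wqe(phba, piocb, &wqe);
 *	lpfc_sli4_wq_put(wq, &wqe);
 *	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);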
9859 **/
9860 static int
9861 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
9862 			 struct lpfc_iocbq *piocb, uint32_t flag)
9863 {
9864 	struct lpfc_sglq *sglq;
9865 	union lpfc_wqe128 wqe;
9866 	struct lpfc_queue *wq;
9867 	struct lpfc_sli_ring *pring;
9868 
9869 	/* Get the WQ */
9870 	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
9871 	    (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9872 		wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq;
9873 	} else {
9874 		wq = phba->sli4_hba.els_wq;
9875 	}
9876 
9877 	/* Get corresponding ring */
9878 	pring = wq->pring;
9879 
9880 	/*
9881 	 * The WQE can be either 64 or 128 bytes.
9882 	 */
9883 
9884 	lockdep_assert_held(&pring->ring_lock);
9885 
9886 	if (piocb->sli4_xritag == NO_XRI) {
9887 		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
9888 		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
9889 			sglq = NULL;
9890 		else {
9891 			if (!list_empty(&pring->txq)) {
9892 				if (!(flag & SLI_IOCB_RET_IOCB)) {
9893 					__lpfc_sli_ringtx_put(phba,
9894 							pring, piocb);
9895 					return IOCB_SUCCESS;
9896 				} else {
9897 					return IOCB_BUSY;
9898 				}
9899 			} else {
9900 				sglq = __lpfc_sli_get_els_sglq(phba, piocb);
9901 				if (!sglq) {
9902 					if (!(flag & SLI_IOCB_RET_IOCB)) {
9903 						__lpfc_sli_ringtx_put(phba,
9904 								pring,
9905 								piocb);
9906 						return IOCB_SUCCESS;
9907 					} else
9908 						return IOCB_BUSY;
9909 				}
9910 			}
9911 		}
9912 	} else if (piocb->iocb_flag & LPFC_IO_FCP)
9913 		/* These IO's already have an XRI and a mapped sgl. */
9914 		sglq = NULL;
9915 	else {
9916 		/*
9917 		 * This is a continuation of a command (CX), so this
9918 		 * sglq is on the active list
9919 		 */
9920 		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
9921 		if (!sglq)
9922 			return IOCB_ERROR;
9923 	}
9924 
9925 	if (sglq) {
9926 		piocb->sli4_lxritag = sglq->sli4_lxritag;
9927 		piocb->sli4_xritag = sglq->sli4_xritag;
9928 		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
9929 			return IOCB_ERROR;
9930 	}
9931 
9932 	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
9933 		return IOCB_ERROR;
9934 
9935 	if (lpfc_sli4_wq_put(wq, &wqe))
9936 		return IOCB_ERROR;
9937 	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
9938 
9939 	return 0;
9940 }
9941 
9942 /**
9943 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
9944 *
9945 * This routine wraps the actual lockless issue iocb function pointer
9946 * from the lpfc_hba struct.
9947 *
9948 * Return codes:
9949 * IOCB_ERROR - Error
9950 * IOCB_SUCCESS - Success
9951 * IOCB_BUSY - Busy
9952 **/
9953 int
9954 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9955 		struct lpfc_iocbq *piocb, uint32_t flag)
9956 {
9957 	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9958 }
9959 
9960 /**
9961 * lpfc_sli_api_table_setup - Set up sli api function jump table
9962 * @phba: The hba struct for which this call is being executed.
9963 * @dev_grp: The HBA PCI-Device group number.
9964 *
9965 * This routine sets up the SLI interface API function jump table in @phba
9966 * struct.
9967 * Returns: 0 - success, -ENODEV - failure.
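 *
 * Once the table is set up, callers dispatch through the per-device-group
 * pointers without caring which SLI revision is present (illustrative
 * sketch only):
 *
 *	if (lpfc_sli_api_table_setup(phba, dev_grp))
 *		return -ENODEV;
 *	rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);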
9968 **/
9969 int
9970 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9971 {
9972 
9973 	switch (dev_grp) {
9974 	case LPFC_PCI_DEV_LP:
9975 		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
9976 		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
9977 		break;
9978 	case LPFC_PCI_DEV_OC:
9979 		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
9980 		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
9981 		break;
9982 	default:
9983 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9984 				"1419 Invalid HBA PCI-device group: 0x%x\n",
9985 				dev_grp);
9986 		return -ENODEV;
9987 		break;
9988 	}
9989 	phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
9990 	return 0;
9991 }
9992 
9993 /**
9994 * lpfc_sli4_calc_ring - Calculates which ring to use
9995 * @phba: Pointer to HBA context object.
9996 * @piocb: Pointer to command iocb.
9997 *
9998 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
9999 * hba_wqidx, thus we need to calculate the corresponding ring.
10000 * Since ABORTS must go on the same WQ of the command they are
10001 * aborting, we use command's hba_wqidx.
10002 */
10003 struct lpfc_sli_ring *
10004 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10005 {
10006 	struct lpfc_io_buf *lpfc_cmd;
10007 
10008 	if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10009 		if (unlikely(!phba->sli4_hba.hdwq))
10010 			return NULL;
10011 		/*
10012 		 * for abort iocb hba_wqidx should already
10013 		 * be setup based on what work queue we used.
10014 		 */
10015 		if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10016 			lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
10017 			piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10018 		}
10019 		return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring;
10020 	} else {
10021 		if (unlikely(!phba->sli4_hba.els_wq))
10022 			return NULL;
10023 		piocb->hba_wqidx = 0;
10024 		return phba->sli4_hba.els_wq->pring;
10025 	}
10026 }
10027 
10028 /**
10029 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
10030 * @phba: Pointer to HBA context object.
10031 * @ring_number: SLI ring number to issue iocb on.
10032 * @piocb: Pointer to command iocb.
10033 * @flag: Flag indicating if this command can be put into txq.
10034 *
10035 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb. It
10036 * takes the appropriate lock (the ring_lock for SLI4, the hbalock for
10037 * SLI2/3), calls __lpfc_sli_issue_iocb, and returns the result of that
10038 * call. This wrapper is used by functions which do not hold the lock
10039 * themselves.
10040 **/
10041 int
10042 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10043 		    struct lpfc_iocbq *piocb, uint32_t flag)
10044 {
10045 	struct lpfc_sli_ring *pring;
10046 	unsigned long iflags;
10047 	int rc;
10048 
10049 	if (phba->sli_rev == LPFC_SLI_REV4) {
10050 		pring = lpfc_sli4_calc_ring(phba, piocb);
10051 		if (unlikely(pring == NULL))
10052 			return IOCB_ERROR;
10053 
10054 		spin_lock_irqsave(&pring->ring_lock, iflags);
10055 		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10056 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
10057 	} else {
10058 		/* For now, SLI2/3 will still use hbalock */
10059 		spin_lock_irqsave(&phba->hbalock, iflags);
10060 		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10061 		spin_unlock_irqrestore(&phba->hbalock, iflags);
10062 	}
10063 	return rc;
10064 }
10065 
10066 /**
10067 * lpfc_extra_ring_setup - Extra ring setup function
10068 * @phba: Pointer to HBA context object.
10069 * 10070 * This function is called while driver attaches with the 10071 * HBA to setup the extra ring. The extra ring is used 10072 * only when driver needs to support target mode functionality 10073 * or IP over FC functionalities. 10074 * 10075 * This function is called with no lock held. SLI3 only. 10076 **/ 10077 static int 10078 lpfc_extra_ring_setup( struct lpfc_hba *phba) 10079 { 10080 struct lpfc_sli *psli; 10081 struct lpfc_sli_ring *pring; 10082 10083 psli = &phba->sli; 10084 10085 /* Adjust cmd/rsp ring iocb entries more evenly */ 10086 10087 /* Take some away from the FCP ring */ 10088 pring = &psli->sli3_ring[LPFC_FCP_RING]; 10089 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 10090 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 10091 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 10092 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 10093 10094 /* and give them to the extra ring */ 10095 pring = &psli->sli3_ring[LPFC_EXTRA_RING]; 10096 10097 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 10098 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 10099 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 10100 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 10101 10102 /* Setup default profile for this ring */ 10103 pring->iotag_max = 4096; 10104 pring->num_mask = 1; 10105 pring->prt[0].profile = 0; /* Mask 0 */ 10106 pring->prt[0].rctl = phba->cfg_multi_ring_rctl; 10107 pring->prt[0].type = phba->cfg_multi_ring_type; 10108 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 10109 return 0; 10110 } 10111 10112 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port. 10113 * @phba: Pointer to HBA context object. 10114 * @iocbq: Pointer to iocb object. 10115 * 10116 * The async_event handler calls this routine when it receives 10117 * an ASYNC_STATUS_CN event from the port. The port generates 10118 * this event when an Abort Sequence request to an rport fails 10119 * twice in succession. The abort could be originated by the 10120 * driver or by the port. The ABTS could have been for an ELS 10121 * or FCP IO. The port only generates this event when an ABTS 10122 * fails to complete after one retry. 10123 */ 10124 static void 10125 lpfc_sli_abts_err_handler(struct lpfc_hba *phba, 10126 struct lpfc_iocbq *iocbq) 10127 { 10128 struct lpfc_nodelist *ndlp = NULL; 10129 uint16_t rpi = 0, vpi = 0; 10130 struct lpfc_vport *vport = NULL; 10131 10132 /* The rpi in the ulpContext is vport-sensitive. */ 10133 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag; 10134 rpi = iocbq->iocb.ulpContext; 10135 10136 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 10137 "3092 Port generated ABTS async event " 10138 "on vpi %d rpi %d status 0x%x\n", 10139 vpi, rpi, iocbq->iocb.ulpStatus); 10140 10141 vport = lpfc_find_vport_by_vpid(phba, vpi); 10142 if (!vport) 10143 goto err_exit; 10144 ndlp = lpfc_findnode_rpi(vport, rpi); 10145 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 10146 goto err_exit; 10147 10148 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) 10149 lpfc_sli_abts_recover_port(vport, ndlp); 10150 return; 10151 10152 err_exit: 10153 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10154 "3095 Event Context not found, no " 10155 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n", 10156 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus, 10157 vpi, rpi); 10158 } 10159 10160 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port. 10161 * @phba: pointer to HBA context object. 
10162 * @ndlp: nodelist pointer for the impacted rport. 10163 * @axri: pointer to the wcqe containing the failed exchange. 10164 * 10165 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the 10166 * port. The port generates this event when an abort exchange request to an 10167 * rport fails twice in succession with no reply. The abort could be originated 10168 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO. 10169 */ 10170 void 10171 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, 10172 struct lpfc_nodelist *ndlp, 10173 struct sli4_wcqe_xri_aborted *axri) 10174 { 10175 struct lpfc_vport *vport; 10176 uint32_t ext_status = 0; 10177 10178 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 10179 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10180 "3115 Node Context not found, driver " 10181 "ignoring abts err event\n"); 10182 return; 10183 } 10184 10185 vport = ndlp->vport; 10186 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 10187 "3116 Port generated FCP XRI ABORT event on " 10188 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n", 10189 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], 10190 bf_get(lpfc_wcqe_xa_xri, axri), 10191 bf_get(lpfc_wcqe_xa_status, axri), 10192 axri->parameter); 10193 10194 /* 10195 * Catch the ABTS protocol failure case. Older OCe FW releases returned 10196 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and 10197 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT. 10198 */ 10199 ext_status = axri->parameter & IOERR_PARAM_MASK; 10200 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && 10201 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) 10202 lpfc_sli_abts_recover_port(vport, ndlp); 10203 } 10204 10205 /** 10206 * lpfc_sli_async_event_handler - ASYNC iocb handler function 10207 * @phba: Pointer to HBA context object. 10208 * @pring: Pointer to driver SLI ring object. 10209 * @iocbq: Pointer to iocb object. 10210 * 10211 * This function is called by the slow ring event handler 10212 * function when there is an ASYNC event iocb in the ring. 10213 * This function is called with no lock held. 10214 * Currently this function handles only temperature related 10215 * ASYNC events. The function decodes the temperature sensor 10216 * event message and posts events for the management applications. 10217 **/ 10218 static void 10219 lpfc_sli_async_event_handler(struct lpfc_hba * phba, 10220 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) 10221 { 10222 IOCB_t *icmd; 10223 uint16_t evt_code; 10224 struct temp_event temp_event_data; 10225 struct Scsi_Host *shost; 10226 uint32_t *iocb_w; 10227 10228 icmd = &iocbq->iocb; 10229 evt_code = icmd->un.asyncstat.evt_code; 10230 10231 switch (evt_code) { 10232 case ASYNC_TEMP_WARN: 10233 case ASYNC_TEMP_SAFE: 10234 temp_event_data.data = (uint32_t) icmd->ulpContext; 10235 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 10236 if (evt_code == ASYNC_TEMP_WARN) { 10237 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 10238 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 10239 "0347 Adapter is very hot, please take " 10240 "corrective action. temperature : %d Celsius\n", 10241 (uint32_t) icmd->ulpContext); 10242 } else { 10243 temp_event_data.event_code = LPFC_NORMAL_TEMP; 10244 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 10245 "0340 Adapter temperature is OK now. 
" 10246 "temperature : %d Celsius\n", 10247 (uint32_t) icmd->ulpContext); 10248 } 10249 10250 /* Send temperature change event to applications */ 10251 shost = lpfc_shost_from_vport(phba->pport); 10252 fc_host_post_vendor_event(shost, fc_get_event_number(), 10253 sizeof(temp_event_data), (char *) &temp_event_data, 10254 LPFC_NL_VENDOR_ID); 10255 break; 10256 case ASYNC_STATUS_CN: 10257 lpfc_sli_abts_err_handler(phba, iocbq); 10258 break; 10259 default: 10260 iocb_w = (uint32_t *) icmd; 10261 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10262 "0346 Ring %d handler: unexpected ASYNC_STATUS" 10263 " evt_code 0x%x\n" 10264 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" 10265 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" 10266 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" 10267 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n", 10268 pring->ringno, icmd->un.asyncstat.evt_code, 10269 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3], 10270 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7], 10271 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11], 10272 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]); 10273 10274 break; 10275 } 10276 } 10277 10278 10279 /** 10280 * lpfc_sli4_setup - SLI ring setup function 10281 * @phba: Pointer to HBA context object. 10282 * 10283 * lpfc_sli_setup sets up rings of the SLI interface with 10284 * number of iocbs per ring and iotags. This function is 10285 * called while driver attach to the HBA and before the 10286 * interrupts are enabled. So there is no need for locking. 10287 * 10288 * This function always returns 0. 10289 **/ 10290 int 10291 lpfc_sli4_setup(struct lpfc_hba *phba) 10292 { 10293 struct lpfc_sli_ring *pring; 10294 10295 pring = phba->sli4_hba.els_wq->pring; 10296 pring->num_mask = LPFC_MAX_RING_MASK; 10297 pring->prt[0].profile = 0; /* Mask 0 */ 10298 pring->prt[0].rctl = FC_RCTL_ELS_REQ; 10299 pring->prt[0].type = FC_TYPE_ELS; 10300 pring->prt[0].lpfc_sli_rcv_unsol_event = 10301 lpfc_els_unsol_event; 10302 pring->prt[1].profile = 0; /* Mask 1 */ 10303 pring->prt[1].rctl = FC_RCTL_ELS_REP; 10304 pring->prt[1].type = FC_TYPE_ELS; 10305 pring->prt[1].lpfc_sli_rcv_unsol_event = 10306 lpfc_els_unsol_event; 10307 pring->prt[2].profile = 0; /* Mask 2 */ 10308 /* NameServer Inquiry */ 10309 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; 10310 /* NameServer */ 10311 pring->prt[2].type = FC_TYPE_CT; 10312 pring->prt[2].lpfc_sli_rcv_unsol_event = 10313 lpfc_ct_unsol_event; 10314 pring->prt[3].profile = 0; /* Mask 3 */ 10315 /* NameServer response */ 10316 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; 10317 /* NameServer */ 10318 pring->prt[3].type = FC_TYPE_CT; 10319 pring->prt[3].lpfc_sli_rcv_unsol_event = 10320 lpfc_ct_unsol_event; 10321 return 0; 10322 } 10323 10324 /** 10325 * lpfc_sli_setup - SLI ring setup function 10326 * @phba: Pointer to HBA context object. 10327 * 10328 * lpfc_sli_setup sets up rings of the SLI interface with 10329 * number of iocbs per ring and iotags. This function is 10330 * called while driver attach to the HBA and before the 10331 * interrupts are enabled. So there is no need for locking. 10332 * 10333 * This function always returns 0. SLI3 only. 
10334 **/ 10335 int 10336 lpfc_sli_setup(struct lpfc_hba *phba) 10337 { 10338 int i, totiocbsize = 0; 10339 struct lpfc_sli *psli = &phba->sli; 10340 struct lpfc_sli_ring *pring; 10341 10342 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS; 10343 psli->sli_flag = 0; 10344 10345 psli->iocbq_lookup = NULL; 10346 psli->iocbq_lookup_len = 0; 10347 psli->last_iotag = 0; 10348 10349 for (i = 0; i < psli->num_rings; i++) { 10350 pring = &psli->sli3_ring[i]; 10351 switch (i) { 10352 case LPFC_FCP_RING: /* ring 0 - FCP */ 10353 /* numCiocb and numRiocb are used in config_port */ 10354 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 10355 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 10356 pring->sli.sli3.numCiocb += 10357 SLI2_IOCB_CMD_R1XTRA_ENTRIES; 10358 pring->sli.sli3.numRiocb += 10359 SLI2_IOCB_RSP_R1XTRA_ENTRIES; 10360 pring->sli.sli3.numCiocb += 10361 SLI2_IOCB_CMD_R3XTRA_ENTRIES; 10362 pring->sli.sli3.numRiocb += 10363 SLI2_IOCB_RSP_R3XTRA_ENTRIES; 10364 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 10365 SLI3_IOCB_CMD_SIZE : 10366 SLI2_IOCB_CMD_SIZE; 10367 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 10368 SLI3_IOCB_RSP_SIZE : 10369 SLI2_IOCB_RSP_SIZE; 10370 pring->iotag_ctr = 0; 10371 pring->iotag_max = 10372 (phba->cfg_hba_queue_depth * 2); 10373 pring->fast_iotag = pring->iotag_max; 10374 pring->num_mask = 0; 10375 break; 10376 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ 10377 /* numCiocb and numRiocb are used in config_port */ 10378 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 10379 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 10380 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 10381 SLI3_IOCB_CMD_SIZE : 10382 SLI2_IOCB_CMD_SIZE; 10383 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 10384 SLI3_IOCB_RSP_SIZE : 10385 SLI2_IOCB_RSP_SIZE; 10386 pring->iotag_max = phba->cfg_hba_queue_depth; 10387 pring->num_mask = 0; 10388 break; 10389 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 10390 /* numCiocb and numRiocb are used in config_port */ 10391 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 10392 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 10393 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 10394 SLI3_IOCB_CMD_SIZE : 10395 SLI2_IOCB_CMD_SIZE; 10396 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 
10397 SLI3_IOCB_RSP_SIZE : 10398 SLI2_IOCB_RSP_SIZE; 10399 pring->fast_iotag = 0; 10400 pring->iotag_ctr = 0; 10401 pring->iotag_max = 4096; 10402 pring->lpfc_sli_rcv_async_status = 10403 lpfc_sli_async_event_handler; 10404 pring->num_mask = LPFC_MAX_RING_MASK; 10405 pring->prt[0].profile = 0; /* Mask 0 */ 10406 pring->prt[0].rctl = FC_RCTL_ELS_REQ; 10407 pring->prt[0].type = FC_TYPE_ELS; 10408 pring->prt[0].lpfc_sli_rcv_unsol_event = 10409 lpfc_els_unsol_event; 10410 pring->prt[1].profile = 0; /* Mask 1 */ 10411 pring->prt[1].rctl = FC_RCTL_ELS_REP; 10412 pring->prt[1].type = FC_TYPE_ELS; 10413 pring->prt[1].lpfc_sli_rcv_unsol_event = 10414 lpfc_els_unsol_event; 10415 pring->prt[2].profile = 0; /* Mask 2 */ 10416 /* NameServer Inquiry */ 10417 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; 10418 /* NameServer */ 10419 pring->prt[2].type = FC_TYPE_CT; 10420 pring->prt[2].lpfc_sli_rcv_unsol_event = 10421 lpfc_ct_unsol_event; 10422 pring->prt[3].profile = 0; /* Mask 3 */ 10423 /* NameServer response */ 10424 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; 10425 /* NameServer */ 10426 pring->prt[3].type = FC_TYPE_CT; 10427 pring->prt[3].lpfc_sli_rcv_unsol_event = 10428 lpfc_ct_unsol_event; 10429 break; 10430 } 10431 totiocbsize += (pring->sli.sli3.numCiocb * 10432 pring->sli.sli3.sizeCiocb) + 10433 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb); 10434 } 10435 if (totiocbsize > MAX_SLIM_IOCB_SIZE) { 10436 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 10437 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in " 10438 "SLI2 SLIM Data: x%x x%lx\n", 10439 phba->brd_no, totiocbsize, 10440 (unsigned long) MAX_SLIM_IOCB_SIZE); 10441 } 10442 if (phba->cfg_multi_ring_support == 2) 10443 lpfc_extra_ring_setup(phba); 10444 10445 return 0; 10446 } 10447 10448 /** 10449 * lpfc_sli4_queue_init - Queue initialization function 10450 * @phba: Pointer to HBA context object. 10451 * 10452 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each 10453 * ring. This function also initializes ring indices of each ring. 10454 * This function is called during the initialization of the SLI 10455 * interface of an HBA. 10456 * This function is called with no lock held and always returns 10457 * 1. 
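 *
 * Every ring, whether it backs an FCP/NVME hardware queue or the ELS
 * work queue, is initialized with the same pattern (illustrative summary
 * of the loops below):
 *
 *	pring->flag = 0;
 *	pring->txcmplq_cnt = 0;
 *	INIT_LIST_HEAD(&pring->txq);
 *	INIT_LIST_HEAD(&pring->txcmplq);
 *	INIT_LIST_HEAD(&pring->iocb_continueq);
 *	spin_lock_init(&pring->ring_lock);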
10458 **/ 10459 void 10460 lpfc_sli4_queue_init(struct lpfc_hba *phba) 10461 { 10462 struct lpfc_sli *psli; 10463 struct lpfc_sli_ring *pring; 10464 int i; 10465 10466 psli = &phba->sli; 10467 spin_lock_irq(&phba->hbalock); 10468 INIT_LIST_HEAD(&psli->mboxq); 10469 INIT_LIST_HEAD(&psli->mboxq_cmpl); 10470 /* Initialize list headers for txq and txcmplq as double linked lists */ 10471 for (i = 0; i < phba->cfg_hdw_queue; i++) { 10472 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring; 10473 pring->flag = 0; 10474 pring->ringno = LPFC_FCP_RING; 10475 pring->txcmplq_cnt = 0; 10476 INIT_LIST_HEAD(&pring->txq); 10477 INIT_LIST_HEAD(&pring->txcmplq); 10478 INIT_LIST_HEAD(&pring->iocb_continueq); 10479 spin_lock_init(&pring->ring_lock); 10480 } 10481 pring = phba->sli4_hba.els_wq->pring; 10482 pring->flag = 0; 10483 pring->ringno = LPFC_ELS_RING; 10484 pring->txcmplq_cnt = 0; 10485 INIT_LIST_HEAD(&pring->txq); 10486 INIT_LIST_HEAD(&pring->txcmplq); 10487 INIT_LIST_HEAD(&pring->iocb_continueq); 10488 spin_lock_init(&pring->ring_lock); 10489 10490 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10491 for (i = 0; i < phba->cfg_hdw_queue; i++) { 10492 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring; 10493 pring->flag = 0; 10494 pring->ringno = LPFC_FCP_RING; 10495 pring->txcmplq_cnt = 0; 10496 INIT_LIST_HEAD(&pring->txq); 10497 INIT_LIST_HEAD(&pring->txcmplq); 10498 INIT_LIST_HEAD(&pring->iocb_continueq); 10499 spin_lock_init(&pring->ring_lock); 10500 } 10501 pring = phba->sli4_hba.nvmels_wq->pring; 10502 pring->flag = 0; 10503 pring->ringno = LPFC_ELS_RING; 10504 pring->txcmplq_cnt = 0; 10505 INIT_LIST_HEAD(&pring->txq); 10506 INIT_LIST_HEAD(&pring->txcmplq); 10507 INIT_LIST_HEAD(&pring->iocb_continueq); 10508 spin_lock_init(&pring->ring_lock); 10509 } 10510 10511 spin_unlock_irq(&phba->hbalock); 10512 } 10513 10514 /** 10515 * lpfc_sli_queue_init - Queue initialization function 10516 * @phba: Pointer to HBA context object. 10517 * 10518 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each 10519 * ring. This function also initializes ring indices of each ring. 10520 * This function is called during the initialization of the SLI 10521 * interface of an HBA. 10522 * This function is called with no lock held and always returns 10523 * 1. 10524 **/ 10525 void 10526 lpfc_sli_queue_init(struct lpfc_hba *phba) 10527 { 10528 struct lpfc_sli *psli; 10529 struct lpfc_sli_ring *pring; 10530 int i; 10531 10532 psli = &phba->sli; 10533 spin_lock_irq(&phba->hbalock); 10534 INIT_LIST_HEAD(&psli->mboxq); 10535 INIT_LIST_HEAD(&psli->mboxq_cmpl); 10536 /* Initialize list headers for txq and txcmplq as double linked lists */ 10537 for (i = 0; i < psli->num_rings; i++) { 10538 pring = &psli->sli3_ring[i]; 10539 pring->ringno = i; 10540 pring->sli.sli3.next_cmdidx = 0; 10541 pring->sli.sli3.local_getidx = 0; 10542 pring->sli.sli3.cmdidx = 0; 10543 INIT_LIST_HEAD(&pring->iocb_continueq); 10544 INIT_LIST_HEAD(&pring->iocb_continue_saveq); 10545 INIT_LIST_HEAD(&pring->postbufq); 10546 pring->flag = 0; 10547 INIT_LIST_HEAD(&pring->txq); 10548 INIT_LIST_HEAD(&pring->txcmplq); 10549 spin_lock_init(&pring->ring_lock); 10550 } 10551 spin_unlock_irq(&phba->hbalock); 10552 } 10553 10554 /** 10555 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system 10556 * @phba: Pointer to HBA context object. 10557 * 10558 * This routine flushes the mailbox command subsystem. 
It will unconditionally 10559 * flush all the mailbox commands in the three possible stages in the mailbox 10560 * command sub-system: pending mailbox command queue; the outstanding mailbox 10561 * command; and completed mailbox command queue. It is caller's responsibility 10562 * to make sure that the driver is in the proper state to flush the mailbox 10563 * command sub-system. Namely, the posting of mailbox commands into the 10564 * pending mailbox command queue from the various clients must be stopped; 10565 * either the HBA is in a state that it will never works on the outstanding 10566 * mailbox command (such as in EEH or ERATT conditions) or the outstanding 10567 * mailbox command has been completed. 10568 **/ 10569 static void 10570 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) 10571 { 10572 LIST_HEAD(completions); 10573 struct lpfc_sli *psli = &phba->sli; 10574 LPFC_MBOXQ_t *pmb; 10575 unsigned long iflag; 10576 10577 /* Disable softirqs, including timers from obtaining phba->hbalock */ 10578 local_bh_disable(); 10579 10580 /* Flush all the mailbox commands in the mbox system */ 10581 spin_lock_irqsave(&phba->hbalock, iflag); 10582 10583 /* The pending mailbox command queue */ 10584 list_splice_init(&phba->sli.mboxq, &completions); 10585 /* The outstanding active mailbox command */ 10586 if (psli->mbox_active) { 10587 list_add_tail(&psli->mbox_active->list, &completions); 10588 psli->mbox_active = NULL; 10589 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 10590 } 10591 /* The completed mailbox command queue */ 10592 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 10593 spin_unlock_irqrestore(&phba->hbalock, iflag); 10594 10595 /* Enable softirqs again, done with phba->hbalock */ 10596 local_bh_enable(); 10597 10598 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ 10599 while (!list_empty(&completions)) { 10600 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); 10601 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; 10602 if (pmb->mbox_cmpl) 10603 pmb->mbox_cmpl(phba, pmb); 10604 } 10605 } 10606 10607 /** 10608 * lpfc_sli_host_down - Vport cleanup function 10609 * @vport: Pointer to virtual port object. 10610 * 10611 * lpfc_sli_host_down is called to clean up the resources 10612 * associated with a vport before destroying virtual 10613 * port data structures. 10614 * This function does following operations: 10615 * - Free discovery resources associated with this virtual 10616 * port. 10617 * - Free iocbs associated with this virtual port in 10618 * the txq. 10619 * - Send abort for all iocb commands associated with this 10620 * vport in txcmplq. 10621 * 10622 * This function is called with no lock held and always returns 1. 10623 **/ 10624 int 10625 lpfc_sli_host_down(struct lpfc_vport *vport) 10626 { 10627 LIST_HEAD(completions); 10628 struct lpfc_hba *phba = vport->phba; 10629 struct lpfc_sli *psli = &phba->sli; 10630 struct lpfc_queue *qp = NULL; 10631 struct lpfc_sli_ring *pring; 10632 struct lpfc_iocbq *iocb, *next_iocb; 10633 int i; 10634 unsigned long flags = 0; 10635 uint16_t prev_pring_flag; 10636 10637 lpfc_cleanup_discovery_resources(vport); 10638 10639 spin_lock_irqsave(&phba->hbalock, flags); 10640 10641 /* 10642 * Error everything on the txq since these iocbs 10643 * have not been given to the FW yet. 
10644 * Also issue ABTS for everything on the txcmplq 10645 */ 10646 if (phba->sli_rev != LPFC_SLI_REV4) { 10647 for (i = 0; i < psli->num_rings; i++) { 10648 pring = &psli->sli3_ring[i]; 10649 prev_pring_flag = pring->flag; 10650 /* Only slow rings */ 10651 if (pring->ringno == LPFC_ELS_RING) { 10652 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10653 /* Set the lpfc data pending flag */ 10654 set_bit(LPFC_DATA_READY, &phba->data_flags); 10655 } 10656 list_for_each_entry_safe(iocb, next_iocb, 10657 &pring->txq, list) { 10658 if (iocb->vport != vport) 10659 continue; 10660 list_move_tail(&iocb->list, &completions); 10661 } 10662 list_for_each_entry_safe(iocb, next_iocb, 10663 &pring->txcmplq, list) { 10664 if (iocb->vport != vport) 10665 continue; 10666 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 10667 } 10668 pring->flag = prev_pring_flag; 10669 } 10670 } else { 10671 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 10672 pring = qp->pring; 10673 if (!pring) 10674 continue; 10675 if (pring == phba->sli4_hba.els_wq->pring) { 10676 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10677 /* Set the lpfc data pending flag */ 10678 set_bit(LPFC_DATA_READY, &phba->data_flags); 10679 } 10680 prev_pring_flag = pring->flag; 10681 spin_lock_irq(&pring->ring_lock); 10682 list_for_each_entry_safe(iocb, next_iocb, 10683 &pring->txq, list) { 10684 if (iocb->vport != vport) 10685 continue; 10686 list_move_tail(&iocb->list, &completions); 10687 } 10688 spin_unlock_irq(&pring->ring_lock); 10689 list_for_each_entry_safe(iocb, next_iocb, 10690 &pring->txcmplq, list) { 10691 if (iocb->vport != vport) 10692 continue; 10693 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 10694 } 10695 pring->flag = prev_pring_flag; 10696 } 10697 } 10698 spin_unlock_irqrestore(&phba->hbalock, flags); 10699 10700 /* Cancel all the IOCBs from the completions list */ 10701 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10702 IOERR_SLI_DOWN); 10703 return 1; 10704 } 10705 10706 /** 10707 * lpfc_sli_hba_down - Resource cleanup function for the HBA 10708 * @phba: Pointer to HBA context object. 10709 * 10710 * This function cleans up all iocb, buffers, mailbox commands 10711 * while shutting down the HBA. This function is called with no 10712 * lock held and always returns 1. 10713 * This function does the following to cleanup driver resources: 10714 * - Free discovery resources for each virtual port 10715 * - Cleanup any pending fabric iocbs 10716 * - Iterate through the iocb txq and free each entry 10717 * in the list. 10718 * - Free up any buffer posted to the HBA 10719 * - Free mailbox commands in the mailbox queue. 10720 **/ 10721 int 10722 lpfc_sli_hba_down(struct lpfc_hba *phba) 10723 { 10724 LIST_HEAD(completions); 10725 struct lpfc_sli *psli = &phba->sli; 10726 struct lpfc_queue *qp = NULL; 10727 struct lpfc_sli_ring *pring; 10728 struct lpfc_dmabuf *buf_ptr; 10729 unsigned long flags = 0; 10730 int i; 10731 10732 /* Shutdown the mailbox command sub-system */ 10733 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT); 10734 10735 lpfc_hba_down_prep(phba); 10736 10737 /* Disable softirqs, including timers from obtaining phba->hbalock */ 10738 local_bh_disable(); 10739 10740 lpfc_fabric_abort_hba(phba); 10741 10742 spin_lock_irqsave(&phba->hbalock, flags); 10743 10744 /* 10745 * Error everything on the txq since these iocbs 10746 * have not been given to the FW yet. 
10747 */ 10748 if (phba->sli_rev != LPFC_SLI_REV4) { 10749 for (i = 0; i < psli->num_rings; i++) { 10750 pring = &psli->sli3_ring[i]; 10751 /* Only slow rings */ 10752 if (pring->ringno == LPFC_ELS_RING) { 10753 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10754 /* Set the lpfc data pending flag */ 10755 set_bit(LPFC_DATA_READY, &phba->data_flags); 10756 } 10757 list_splice_init(&pring->txq, &completions); 10758 } 10759 } else { 10760 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 10761 pring = qp->pring; 10762 if (!pring) 10763 continue; 10764 spin_lock_irq(&pring->ring_lock); 10765 list_splice_init(&pring->txq, &completions); 10766 spin_unlock_irq(&pring->ring_lock); 10767 if (pring == phba->sli4_hba.els_wq->pring) { 10768 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10769 /* Set the lpfc data pending flag */ 10770 set_bit(LPFC_DATA_READY, &phba->data_flags); 10771 } 10772 } 10773 } 10774 spin_unlock_irqrestore(&phba->hbalock, flags); 10775 10776 /* Cancel all the IOCBs from the completions list */ 10777 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10778 IOERR_SLI_DOWN); 10779 10780 spin_lock_irqsave(&phba->hbalock, flags); 10781 list_splice_init(&phba->elsbuf, &completions); 10782 phba->elsbuf_cnt = 0; 10783 phba->elsbuf_prev_cnt = 0; 10784 spin_unlock_irqrestore(&phba->hbalock, flags); 10785 10786 while (!list_empty(&completions)) { 10787 list_remove_head(&completions, buf_ptr, 10788 struct lpfc_dmabuf, list); 10789 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 10790 kfree(buf_ptr); 10791 } 10792 10793 /* Enable softirqs again, done with phba->hbalock */ 10794 local_bh_enable(); 10795 10796 /* Return any active mbox cmds */ 10797 del_timer_sync(&psli->mbox_tmo); 10798 10799 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 10800 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 10801 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 10802 10803 return 1; 10804 } 10805 10806 /** 10807 * lpfc_sli_pcimem_bcopy - SLI memory copy function 10808 * @srcp: Source memory pointer. 10809 * @destp: Destination memory pointer. 10810 * @cnt: Number of words required to be copied. 10811 * 10812 * This function is used for copying data between driver memory 10813 * and the SLI memory. This function also changes the endianness 10814 * of each word if native endianness is different from SLI 10815 * endianness. This function can be called with or without 10816 * lock. 10817 **/ 10818 void 10819 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) 10820 { 10821 uint32_t *src = srcp; 10822 uint32_t *dest = destp; 10823 uint32_t ldata; 10824 int i; 10825 10826 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { 10827 ldata = *src; 10828 ldata = le32_to_cpu(ldata); 10829 *dest = ldata; 10830 src++; 10831 dest++; 10832 } 10833 } 10834 10835 10836 /** 10837 * lpfc_sli_bemem_bcopy - SLI memory copy function 10838 * @srcp: Source memory pointer. 10839 * @destp: Destination memory pointer. 10840 * @cnt: Number of words required to be copied. 10841 * 10842 * This function is used for copying data between a data structure 10843 * with big endian representation to local endianness. 10844 * This function can be called with or without lock. 
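 * Note that @cnt is consumed as a byte count: the copy loop below steps
 * through the area in sizeof(uint32_t) increments until @cnt bytes have
 * been converted. Illustrative use (a sketch; "be_area", "cpu_area" and
 * "len" are assumed caller-owned, with len a multiple of 4):
 *
 *	lpfc_sli_bemem_bcopy(be_area, cpu_area, len);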
 **/
void
lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = be32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}

/**
 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mp: Pointer to driver buffer object.
 *
 * This function is called with no lock held.
 * It always returns zero after adding the buffer to the postbufq
 * buffer list.
 **/
int
lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_dmabuf *mp)
{
	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it
	 * up later
	 */
	spin_lock_irq(&phba->hbalock);
	list_add_tail(&mp->list, &pring->postbufq);
	pring->postbufq_cnt++;
	spin_unlock_irq(&phba->hbalock);
	return 0;
}

/**
 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
 * @phba: Pointer to HBA context object.
 *
 * When HBQ is enabled, buffers are searched based on tags. This function
 * allocates a tag for a buffer posted using the CMD_QUE_XRI64_CX iocb. The
 * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
 * does not conflict with tags of buffers posted for unsolicited events.
 * The function returns the allocated tag. The function is called with
 * no locks held.
 **/
uint32_t
lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->buffer_tag_count++;
	/*
	 * Always set QUE_BUFTAG_BIT to distinguish this tag from one
	 * assigned by the HBQ.
	 */
	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
	spin_unlock_irq(&phba->hbalock);
	return phba->buffer_tag_count;
}

/**
 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: Buffer tag.
 *
 * Buffers posted using the CMD_QUE_XRI64_CX iocb are in the pring->postbufq
 * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
 * iocb is posted to the response ring with the tag of the buffer.
 * This function searches the pring->postbufq list using the tag
 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
 * iocb. If the buffer is found, the lpfc_dmabuf object of the
 * buffer is returned to the caller, else NULL is returned.
 * This function is called with no lock held.
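 *
 * A sketch of the lookup pattern in a response handler (illustrative;
 * how the tag is extracted from the CMD_IOCB_RET_XRI64_CX response and
 * how the buffer is released afterwards are handler specific):
 *
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
 *	if (!mp)
 *		return;
 *	... consume mp->virt / mp->phys, then release the lpfc_dmabuf ...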
10925 **/ 10926 struct lpfc_dmabuf * 10927 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10928 uint32_t tag) 10929 { 10930 struct lpfc_dmabuf *mp, *next_mp; 10931 struct list_head *slp = &pring->postbufq; 10932 10933 /* Search postbufq, from the beginning, looking for a match on tag */ 10934 spin_lock_irq(&phba->hbalock); 10935 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 10936 if (mp->buffer_tag == tag) { 10937 list_del_init(&mp->list); 10938 pring->postbufq_cnt--; 10939 spin_unlock_irq(&phba->hbalock); 10940 return mp; 10941 } 10942 } 10943 10944 spin_unlock_irq(&phba->hbalock); 10945 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10946 "0402 Cannot find virtual addr for buffer tag on " 10947 "ring %d Data x%lx x%p x%p x%x\n", 10948 pring->ringno, (unsigned long) tag, 10949 slp->next, slp->prev, pring->postbufq_cnt); 10950 10951 return NULL; 10952 } 10953 10954 /** 10955 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events 10956 * @phba: Pointer to HBA context object. 10957 * @pring: Pointer to driver SLI ring object. 10958 * @phys: DMA address of the buffer. 10959 * 10960 * This function searches the buffer list using the dma_address 10961 * of unsolicited event to find the driver's lpfc_dmabuf object 10962 * corresponding to the dma_address. The function returns the 10963 * lpfc_dmabuf object if a buffer is found else it returns NULL. 10964 * This function is called by the ct and els unsolicited event 10965 * handlers to get the buffer associated with the unsolicited 10966 * event. 10967 * 10968 * This function is called with no lock held. 10969 **/ 10970 struct lpfc_dmabuf * 10971 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10972 dma_addr_t phys) 10973 { 10974 struct lpfc_dmabuf *mp, *next_mp; 10975 struct list_head *slp = &pring->postbufq; 10976 10977 /* Search postbufq, from the beginning, looking for a match on phys */ 10978 spin_lock_irq(&phba->hbalock); 10979 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 10980 if (mp->phys == phys) { 10981 list_del_init(&mp->list); 10982 pring->postbufq_cnt--; 10983 spin_unlock_irq(&phba->hbalock); 10984 return mp; 10985 } 10986 } 10987 10988 spin_unlock_irq(&phba->hbalock); 10989 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10990 "0410 Cannot find virtual addr for mapped buf on " 10991 "ring %d Data x%llx x%p x%p x%x\n", 10992 pring->ringno, (unsigned long long)phys, 10993 slp->next, slp->prev, pring->postbufq_cnt); 10994 return NULL; 10995 } 10996 10997 /** 10998 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs 10999 * @phba: Pointer to HBA context object. 11000 * @cmdiocb: Pointer to driver command iocb object. 11001 * @rspiocb: Pointer to driver response iocb object. 11002 * 11003 * This function is the completion handler for the abort iocbs for 11004 * ELS commands. This function is called from the ELS ring event 11005 * handler with no lock held. This function frees memory resources 11006 * associated with the abort iocb. 11007 **/ 11008 static void 11009 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11010 struct lpfc_iocbq *rspiocb) 11011 { 11012 IOCB_t *irsp = &rspiocb->iocb; 11013 uint16_t abort_iotag, abort_context; 11014 struct lpfc_iocbq *abort_iocb = NULL; 11015 11016 if (irsp->ulpStatus) { 11017 11018 /* 11019 * Assume that the port already completed and returned, or 11020 * will return the iocb. Just Log the message. 
11021 */ 11022 abort_context = cmdiocb->iocb.un.acxri.abortContextTag; 11023 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 11024 11025 spin_lock_irq(&phba->hbalock); 11026 if (phba->sli_rev < LPFC_SLI_REV4) { 11027 if (irsp->ulpCommand == CMD_ABORT_XRI_CX && 11028 irsp->ulpStatus == IOSTAT_LOCAL_REJECT && 11029 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) { 11030 spin_unlock_irq(&phba->hbalock); 11031 goto release_iocb; 11032 } 11033 if (abort_iotag != 0 && 11034 abort_iotag <= phba->sli.last_iotag) 11035 abort_iocb = 11036 phba->sli.iocbq_lookup[abort_iotag]; 11037 } else 11038 /* For sli4 the abort_tag is the XRI, 11039 * so the abort routine puts the iotag of the iocb 11040 * being aborted in the context field of the abort 11041 * IOCB. 11042 */ 11043 abort_iocb = phba->sli.iocbq_lookup[abort_context]; 11044 11045 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, 11046 "0327 Cannot abort els iocb %p " 11047 "with tag %x context %x, abort status %x, " 11048 "abort code %x\n", 11049 abort_iocb, abort_iotag, abort_context, 11050 irsp->ulpStatus, irsp->un.ulpWord[4]); 11051 11052 spin_unlock_irq(&phba->hbalock); 11053 } 11054 release_iocb: 11055 lpfc_sli_release_iocbq(phba, cmdiocb); 11056 return; 11057 } 11058 11059 /** 11060 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command 11061 * @phba: Pointer to HBA context object. 11062 * @cmdiocb: Pointer to driver command iocb object. 11063 * @rspiocb: Pointer to driver response iocb object. 11064 * 11065 * The function is called from SLI ring event handler with no 11066 * lock held. This function is the completion handler for ELS commands 11067 * which are aborted. The function frees memory resources used for 11068 * the aborted ELS commands. 11069 **/ 11070 static void 11071 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11072 struct lpfc_iocbq *rspiocb) 11073 { 11074 IOCB_t *irsp = &rspiocb->iocb; 11075 11076 /* ELS cmd tag <ulpIoTag> completes */ 11077 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 11078 "0139 Ignoring ELS cmd tag x%x completion Data: " 11079 "x%x x%x x%x\n", 11080 irsp->ulpIoTag, irsp->ulpStatus, 11081 irsp->un.ulpWord[4], irsp->ulpTimeout); 11082 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) 11083 lpfc_ct_free_iocb(phba, cmdiocb); 11084 else 11085 lpfc_els_free_iocb(phba, cmdiocb); 11086 return; 11087 } 11088 11089 /** 11090 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb 11091 * @phba: Pointer to HBA context object. 11092 * @pring: Pointer to driver SLI ring object. 11093 * @cmdiocb: Pointer to driver command iocb object. 11094 * 11095 * This function issues an abort iocb for the provided command iocb down to 11096 * the port. Other than the case the outstanding command iocb is an abort 11097 * request, this function issues abort out unconditionally. This function is 11098 * called with hbalock held. The function returns 0 when it fails due to 11099 * memory allocation failure or when the command iocb is an abort request. 11100 **/ 11101 static int 11102 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 11103 struct lpfc_iocbq *cmdiocb) 11104 { 11105 struct lpfc_vport *vport = cmdiocb->vport; 11106 struct lpfc_iocbq *abtsiocbp; 11107 IOCB_t *icmd = NULL; 11108 IOCB_t *iabt = NULL; 11109 int retval; 11110 unsigned long iflags; 11111 struct lpfc_nodelist *ndlp; 11112 11113 lockdep_assert_held(&phba->hbalock); 11114 11115 /* 11116 * There are certain command types we don't want to abort. 
And we 11117 * don't want to abort commands that are already in the process of 11118 * being aborted. 11119 */ 11120 icmd = &cmdiocb->iocb; 11121 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 11122 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 11123 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 11124 return 0; 11125 11126 /* issue ABTS for this IOCB based on iotag */ 11127 abtsiocbp = __lpfc_sli_get_iocbq(phba); 11128 if (abtsiocbp == NULL) 11129 return 0; 11130 11131 /* This signals the response to set the correct status 11132 * before calling the completion handler 11133 */ 11134 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 11135 11136 iabt = &abtsiocbp->iocb; 11137 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 11138 iabt->un.acxri.abortContextTag = icmd->ulpContext; 11139 if (phba->sli_rev == LPFC_SLI_REV4) { 11140 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; 11141 iabt->un.acxri.abortContextTag = cmdiocb->iotag; 11142 } else { 11143 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 11144 if (pring->ringno == LPFC_ELS_RING) { 11145 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1); 11146 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi; 11147 } 11148 } 11149 iabt->ulpLe = 1; 11150 iabt->ulpClass = icmd->ulpClass; 11151 11152 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11153 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx; 11154 if (cmdiocb->iocb_flag & LPFC_IO_FCP) 11155 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX; 11156 if (cmdiocb->iocb_flag & LPFC_IO_FOF) 11157 abtsiocbp->iocb_flag |= LPFC_IO_FOF; 11158 11159 if (phba->link_state >= LPFC_LINK_UP) 11160 iabt->ulpCommand = CMD_ABORT_XRI_CN; 11161 else 11162 iabt->ulpCommand = CMD_CLOSE_XRI_CN; 11163 11164 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; 11165 abtsiocbp->vport = vport; 11166 11167 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 11168 "0339 Abort xri x%x, original iotag x%x, " 11169 "abort cmd iotag x%x\n", 11170 iabt->un.acxri.abortIoTag, 11171 iabt->un.acxri.abortContextTag, 11172 abtsiocbp->iotag); 11173 11174 if (phba->sli_rev == LPFC_SLI_REV4) { 11175 pring = lpfc_sli4_calc_ring(phba, abtsiocbp); 11176 if (unlikely(pring == NULL)) 11177 return 0; 11178 /* Note: both hbalock and ring_lock need to be set here */ 11179 spin_lock_irqsave(&pring->ring_lock, iflags); 11180 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, 11181 abtsiocbp, 0); 11182 spin_unlock_irqrestore(&pring->ring_lock, iflags); 11183 } else { 11184 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, 11185 abtsiocbp, 0); 11186 } 11187 11188 if (retval) 11189 __lpfc_sli_release_iocbq(phba, abtsiocbp); 11190 11191 /* 11192 * Caller to this routine should check for IOCB_ERROR 11193 * and handle it properly. This routine no longer removes 11194 * iocb off txcmplq and call compl in case of IOCB_ERROR. 11195 */ 11196 return retval; 11197 } 11198 11199 /** 11200 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb 11201 * @phba: Pointer to HBA context object. 11202 * @pring: Pointer to driver SLI ring object. 11203 * @cmdiocb: Pointer to driver command iocb object. 11204 * 11205 * This function issues an abort iocb for the provided command iocb. In case 11206 * of unloading, the abort iocb will not be issued to commands on the ELS 11207 * ring. Instead, the callback function shall be changed to those commands 11208 * so that nothing happens when them finishes. This function is called with 11209 * hbalock held. The function returns 0 when the command iocb is an abort 11210 * request. 
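 *
 * Typical caller pattern, modeled on the txcmplq walks earlier in this
 * file (a sketch, not a verbatim excerpt):
 *
 *	spin_lock_irqsave(&phba->hbalock, flags);
 *	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
 *		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
 *	spin_unlock_irqrestore(&phba->hbalock, flags);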
11211 **/ 11212 int 11213 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 11214 struct lpfc_iocbq *cmdiocb) 11215 { 11216 struct lpfc_vport *vport = cmdiocb->vport; 11217 int retval = IOCB_ERROR; 11218 IOCB_t *icmd = NULL; 11219 11220 lockdep_assert_held(&phba->hbalock); 11221 11222 /* 11223 * There are certain command types we don't want to abort. And we 11224 * don't want to abort commands that are already in the process of 11225 * being aborted. 11226 */ 11227 icmd = &cmdiocb->iocb; 11228 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 11229 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 11230 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 11231 return 0; 11232 11233 if (!pring) { 11234 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 11235 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 11236 else 11237 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 11238 goto abort_iotag_exit; 11239 } 11240 11241 /* 11242 * If we're unloading, don't abort iocb on the ELS ring, but change 11243 * the callback so that nothing happens when it finishes. 11244 */ 11245 if ((vport->load_flag & FC_UNLOADING) && 11246 (pring->ringno == LPFC_ELS_RING)) { 11247 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 11248 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 11249 else 11250 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 11251 goto abort_iotag_exit; 11252 } 11253 11254 /* Now, we try to issue the abort to the cmdiocb out */ 11255 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb); 11256 11257 abort_iotag_exit: 11258 /* 11259 * Caller to this routine should check for IOCB_ERROR 11260 * and handle it properly. This routine no longer removes 11261 * iocb off txcmplq and call compl in case of IOCB_ERROR. 11262 */ 11263 return retval; 11264 } 11265 11266 /** 11267 * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb 11268 * @phba: Pointer to HBA context object. 11269 * @pring: Pointer to driver SLI ring object. 11270 * @cmdiocb: Pointer to driver command iocb object. 11271 * 11272 * This function issues an abort iocb for the provided command iocb down to 11273 * the port. Other than the case the outstanding command iocb is an abort 11274 * request, this function issues abort out unconditionally. This function is 11275 * called with hbalock held. The function returns 0 when it fails due to 11276 * memory allocation failure or when the command iocb is an abort request. 11277 **/ 11278 static int 11279 lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 11280 struct lpfc_iocbq *cmdiocb) 11281 { 11282 struct lpfc_vport *vport = cmdiocb->vport; 11283 struct lpfc_iocbq *abtsiocbp; 11284 union lpfc_wqe128 *abts_wqe; 11285 int retval; 11286 int idx = cmdiocb->hba_wqidx; 11287 11288 /* 11289 * There are certain command types we don't want to abort. And we 11290 * don't want to abort commands that are already in the process of 11291 * being aborted. 11292 */ 11293 if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 11294 cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN || 11295 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 11296 return 0; 11297 11298 /* issue ABTS for this io based on iotag */ 11299 abtsiocbp = __lpfc_sli_get_iocbq(phba); 11300 if (abtsiocbp == NULL) 11301 return 0; 11302 11303 /* This signals the response to set the correct status 11304 * before calling the completion handler 11305 */ 11306 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 11307 11308 /* Complete prepping the abort wqe and issue to the FW. 
*/ 11309 abts_wqe = &abtsiocbp->wqe; 11310 11311 /* Clear any stale WQE contents */ 11312 memset(abts_wqe, 0, sizeof(union lpfc_wqe)); 11313 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); 11314 11315 /* word 7 */ 11316 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 11317 bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com, 11318 cmdiocb->iocb.ulpClass); 11319 11320 /* word 8 - tell the FW to abort the IO associated with this 11321 * outstanding exchange ID. 11322 */ 11323 abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag; 11324 11325 /* word 9 - this is the iotag for the abts_wqe completion. */ 11326 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com, 11327 abtsiocbp->iotag); 11328 11329 /* word 10 */ 11330 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); 11331 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); 11332 11333 /* word 11 */ 11334 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND); 11335 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1); 11336 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 11337 11338 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11339 abtsiocbp->iocb_flag |= LPFC_IO_NVME; 11340 abtsiocbp->vport = vport; 11341 abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl; 11342 retval = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[idx], 11343 abtsiocbp); 11344 if (retval) { 11345 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, 11346 "6147 Failed abts issue_wqe with status x%x " 11347 "for oxid x%x\n", 11348 retval, cmdiocb->sli4_xritag); 11349 lpfc_sli_release_iocbq(phba, abtsiocbp); 11350 return retval; 11351 } 11352 11353 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, 11354 "6148 Drv Abort NVME Request Issued for " 11355 "ox_id x%x on reqtag x%x\n", 11356 cmdiocb->sli4_xritag, 11357 abtsiocbp->iotag); 11358 11359 return retval; 11360 } 11361 11362 /** 11363 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 11364 * @phba: pointer to lpfc HBA data structure. 11365 * 11366 * This routine will abort all pending and outstanding iocbs to an HBA. 11367 **/ 11368 void 11369 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) 11370 { 11371 struct lpfc_sli *psli = &phba->sli; 11372 struct lpfc_sli_ring *pring; 11373 struct lpfc_queue *qp = NULL; 11374 int i; 11375 11376 if (phba->sli_rev != LPFC_SLI_REV4) { 11377 for (i = 0; i < psli->num_rings; i++) { 11378 pring = &psli->sli3_ring[i]; 11379 lpfc_sli_abort_iocb_ring(phba, pring); 11380 } 11381 return; 11382 } 11383 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 11384 pring = qp->pring; 11385 if (!pring) 11386 continue; 11387 lpfc_sli_abort_iocb_ring(phba, pring); 11388 } 11389 } 11390 11391 /** 11392 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN 11393 * @iocbq: Pointer to driver iocb object. 11394 * @vport: Pointer to driver virtual port object. 11395 * @tgt_id: SCSI ID of the target. 11396 * @lun_id: LUN ID of the scsi device. 11397 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST 11398 * 11399 * This function acts as an iocb filter for functions which abort or count 11400 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return 11401 * 0 if the filtering criteria is met for the given iocb and will return 11402 * 1 if the filtering criteria is not met. 11403 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the 11404 * given iocb is for the SCSI device specified by vport, tgt_id and 11405 * lun_id parameter. 
11406 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the 11407 * given iocb is for the SCSI target specified by vport and tgt_id 11408 * parameters. 11409 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the 11410 * given iocb is for the SCSI host associated with the given vport. 11411 * This function is called with no locks held. 11412 **/ 11413 static int 11414 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, 11415 uint16_t tgt_id, uint64_t lun_id, 11416 lpfc_ctx_cmd ctx_cmd) 11417 { 11418 struct lpfc_io_buf *lpfc_cmd; 11419 int rc = 1; 11420 11421 if (iocbq->vport != vport) 11422 return rc; 11423 11424 if (!(iocbq->iocb_flag & LPFC_IO_FCP) || 11425 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) 11426 return rc; 11427 11428 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); 11429 11430 if (lpfc_cmd->pCmd == NULL) 11431 return rc; 11432 11433 switch (ctx_cmd) { 11434 case LPFC_CTX_LUN: 11435 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && 11436 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && 11437 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) 11438 rc = 0; 11439 break; 11440 case LPFC_CTX_TGT: 11441 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && 11442 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) 11443 rc = 0; 11444 break; 11445 case LPFC_CTX_HOST: 11446 rc = 0; 11447 break; 11448 default: 11449 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 11450 __func__, ctx_cmd); 11451 break; 11452 } 11453 11454 return rc; 11455 } 11456 11457 /** 11458 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending 11459 * @vport: Pointer to virtual port. 11460 * @tgt_id: SCSI ID of the target. 11461 * @lun_id: LUN ID of the scsi device. 11462 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 11463 * 11464 * This function returns number of FCP commands pending for the vport. 11465 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP 11466 * commands pending on the vport associated with SCSI device specified 11467 * by tgt_id and lun_id parameters. 11468 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP 11469 * commands pending on the vport associated with SCSI target specified 11470 * by tgt_id parameter. 11471 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP 11472 * commands pending on the vport. 11473 * This function returns the number of iocbs which satisfy the filter. 11474 * This function is called without any lock held. 11475 **/ 11476 int 11477 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 11478 lpfc_ctx_cmd ctx_cmd) 11479 { 11480 struct lpfc_hba *phba = vport->phba; 11481 struct lpfc_iocbq *iocbq; 11482 int sum, i; 11483 11484 spin_lock_irq(&phba->hbalock); 11485 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 11486 iocbq = phba->sli.iocbq_lookup[i]; 11487 11488 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id, 11489 ctx_cmd) == 0) 11490 sum++; 11491 } 11492 spin_unlock_irq(&phba->hbalock); 11493 11494 return sum; 11495 } 11496 11497 /** 11498 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs 11499 * @phba: Pointer to HBA context object 11500 * @cmdiocb: Pointer to command iocb object. 11501 * @rspiocb: Pointer to response iocb object. 11502 * 11503 * This function is called when an aborted FCP iocb completes. This 11504 * function is called by the ring event handler with no lock held. 11505 * This function frees the iocb. 
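 *
 * The abort paths below install this handler on the ABTS iocb before it
 * is issued, along the lines of (sketch of the pattern used in
 * lpfc_sli_abort_iocb):
 *
 *	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
 *	ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocb, 0);
 *	if (ret_val == IOCB_ERROR)
 *		lpfc_sli_release_iocbq(phba, abtsiocb);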
11506 **/ 11507 void 11508 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11509 struct lpfc_iocbq *rspiocb) 11510 { 11511 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11512 "3096 ABORT_XRI_CN completing on rpi x%x " 11513 "original iotag x%x, abort cmd iotag x%x " 11514 "status 0x%x, reason 0x%x\n", 11515 cmdiocb->iocb.un.acxri.abortContextTag, 11516 cmdiocb->iocb.un.acxri.abortIoTag, 11517 cmdiocb->iotag, rspiocb->iocb.ulpStatus, 11518 rspiocb->iocb.un.ulpWord[4]); 11519 lpfc_sli_release_iocbq(phba, cmdiocb); 11520 return; 11521 } 11522 11523 /** 11524 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN 11525 * @vport: Pointer to virtual port. 11526 * @pring: Pointer to driver SLI ring object. 11527 * @tgt_id: SCSI ID of the target. 11528 * @lun_id: LUN ID of the scsi device. 11529 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 11530 * 11531 * This function sends an abort command for every SCSI command 11532 * associated with the given virtual port pending on the ring 11533 * filtered by lpfc_sli_validate_fcp_iocb function. 11534 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the 11535 * FCP iocbs associated with lun specified by tgt_id and lun_id 11536 * parameters 11537 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the 11538 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 11539 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all 11540 * FCP iocbs associated with virtual port. 11541 * This function returns number of iocbs it failed to abort. 11542 * This function is called with no locks held. 11543 **/ 11544 int 11545 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 11546 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) 11547 { 11548 struct lpfc_hba *phba = vport->phba; 11549 struct lpfc_iocbq *iocbq; 11550 struct lpfc_iocbq *abtsiocb; 11551 struct lpfc_sli_ring *pring_s4; 11552 IOCB_t *cmd = NULL; 11553 int errcnt = 0, ret_val = 0; 11554 int i; 11555 11556 /* all I/Os are in process of being flushed */ 11557 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) 11558 return errcnt; 11559 11560 for (i = 1; i <= phba->sli.last_iotag; i++) { 11561 iocbq = phba->sli.iocbq_lookup[i]; 11562 11563 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 11564 abort_cmd) != 0) 11565 continue; 11566 11567 /* 11568 * If the iocbq is already being aborted, don't take a second 11569 * action, but do count it. 11570 */ 11571 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) 11572 continue; 11573 11574 /* issue ABTS for this IOCB based on iotag */ 11575 abtsiocb = lpfc_sli_get_iocbq(phba); 11576 if (abtsiocb == NULL) { 11577 errcnt++; 11578 continue; 11579 } 11580 11581 /* indicate the IO is being aborted by the driver. 
*/ 11582 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; 11583 11584 cmd = &iocbq->iocb; 11585 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 11586 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 11587 if (phba->sli_rev == LPFC_SLI_REV4) 11588 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag; 11589 else 11590 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 11591 abtsiocb->iocb.ulpLe = 1; 11592 abtsiocb->iocb.ulpClass = cmd->ulpClass; 11593 abtsiocb->vport = vport; 11594 11595 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11596 abtsiocb->hba_wqidx = iocbq->hba_wqidx; 11597 if (iocbq->iocb_flag & LPFC_IO_FCP) 11598 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; 11599 if (iocbq->iocb_flag & LPFC_IO_FOF) 11600 abtsiocb->iocb_flag |= LPFC_IO_FOF; 11601 11602 if (lpfc_is_link_up(phba)) 11603 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 11604 else 11605 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 11606 11607 /* Setup callback routine and issue the command. */ 11608 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 11609 if (phba->sli_rev == LPFC_SLI_REV4) { 11610 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq); 11611 if (!pring_s4) 11612 continue; 11613 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno, 11614 abtsiocb, 0); 11615 } else 11616 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, 11617 abtsiocb, 0); 11618 if (ret_val == IOCB_ERROR) { 11619 lpfc_sli_release_iocbq(phba, abtsiocb); 11620 errcnt++; 11621 continue; 11622 } 11623 } 11624 11625 return errcnt; 11626 } 11627 11628 /** 11629 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN 11630 * @vport: Pointer to virtual port. 11631 * @pring: Pointer to driver SLI ring object. 11632 * @tgt_id: SCSI ID of the target. 11633 * @lun_id: LUN ID of the scsi device. 11634 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 11635 * 11636 * This function sends an abort command for every SCSI command 11637 * associated with the given virtual port pending on the ring 11638 * filtered by lpfc_sli_validate_fcp_iocb function. 11639 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the 11640 * FCP iocbs associated with lun specified by tgt_id and lun_id 11641 * parameters 11642 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the 11643 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 11644 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all 11645 * FCP iocbs associated with virtual port. 11646 * This function returns number of iocbs it aborted . 11647 * This function is called with no locks held right after a taskmgmt 11648 * command is sent. 
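 *
 * Illustrative call from a LUN reset path (a sketch; the reset handler
 * and its tgt_id/lun_id locals are assumed, not quoted from the driver):
 *
 *	cnt = lpfc_sli_abort_taskmgmt(vport,
 *				      &phba->sli.sli3_ring[LPFC_FCP_RING],
 *				      tgt_id, lun_id, LPFC_CTX_LUN);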
11649 **/ 11650 int 11651 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 11652 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd) 11653 { 11654 struct lpfc_hba *phba = vport->phba; 11655 struct lpfc_io_buf *lpfc_cmd; 11656 struct lpfc_iocbq *abtsiocbq; 11657 struct lpfc_nodelist *ndlp; 11658 struct lpfc_iocbq *iocbq; 11659 IOCB_t *icmd; 11660 int sum, i, ret_val; 11661 unsigned long iflags; 11662 struct lpfc_sli_ring *pring_s4 = NULL; 11663 11664 spin_lock_irqsave(&phba->hbalock, iflags); 11665 11666 /* all I/Os are in process of being flushed */ 11667 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { 11668 spin_unlock_irqrestore(&phba->hbalock, iflags); 11669 return 0; 11670 } 11671 sum = 0; 11672 11673 for (i = 1; i <= phba->sli.last_iotag; i++) { 11674 iocbq = phba->sli.iocbq_lookup[i]; 11675 11676 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 11677 cmd) != 0) 11678 continue; 11679 11680 /* Guard against IO completion being called at same time */ 11681 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); 11682 spin_lock(&lpfc_cmd->buf_lock); 11683 11684 if (!lpfc_cmd->pCmd) { 11685 spin_unlock(&lpfc_cmd->buf_lock); 11686 continue; 11687 } 11688 11689 if (phba->sli_rev == LPFC_SLI_REV4) { 11690 pring_s4 = 11691 phba->sli4_hba.hdwq[iocbq->hba_wqidx].fcp_wq->pring; 11692 if (!pring_s4) { 11693 spin_unlock(&lpfc_cmd->buf_lock); 11694 continue; 11695 } 11696 /* Note: both hbalock and ring_lock must be set here */ 11697 spin_lock(&pring_s4->ring_lock); 11698 } 11699 11700 /* 11701 * If the iocbq is already being aborted, don't take a second 11702 * action, but do count it. 11703 */ 11704 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) || 11705 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { 11706 if (phba->sli_rev == LPFC_SLI_REV4) 11707 spin_unlock(&pring_s4->ring_lock); 11708 spin_unlock(&lpfc_cmd->buf_lock); 11709 continue; 11710 } 11711 11712 /* issue ABTS for this IOCB based on iotag */ 11713 abtsiocbq = __lpfc_sli_get_iocbq(phba); 11714 if (!abtsiocbq) { 11715 if (phba->sli_rev == LPFC_SLI_REV4) 11716 spin_unlock(&pring_s4->ring_lock); 11717 spin_unlock(&lpfc_cmd->buf_lock); 11718 continue; 11719 } 11720 11721 icmd = &iocbq->iocb; 11722 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 11723 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext; 11724 if (phba->sli_rev == LPFC_SLI_REV4) 11725 abtsiocbq->iocb.un.acxri.abortIoTag = 11726 iocbq->sli4_xritag; 11727 else 11728 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag; 11729 abtsiocbq->iocb.ulpLe = 1; 11730 abtsiocbq->iocb.ulpClass = icmd->ulpClass; 11731 abtsiocbq->vport = vport; 11732 11733 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11734 abtsiocbq->hba_wqidx = iocbq->hba_wqidx; 11735 if (iocbq->iocb_flag & LPFC_IO_FCP) 11736 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX; 11737 if (iocbq->iocb_flag & LPFC_IO_FOF) 11738 abtsiocbq->iocb_flag |= LPFC_IO_FOF; 11739 11740 ndlp = lpfc_cmd->rdata->pnode; 11741 11742 if (lpfc_is_link_up(phba) && 11743 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 11744 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN; 11745 else 11746 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 11747 11748 /* Setup callback routine and issue the command. */ 11749 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 11750 11751 /* 11752 * Indicate the IO is being aborted by the driver and set 11753 * the caller's flag into the aborted IO. 
11754 */ 11755 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; 11756 11757 if (phba->sli_rev == LPFC_SLI_REV4) { 11758 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, 11759 abtsiocbq, 0); 11760 spin_unlock(&pring_s4->ring_lock); 11761 } else { 11762 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno, 11763 abtsiocbq, 0); 11764 } 11765 11766 spin_unlock(&lpfc_cmd->buf_lock); 11767 11768 if (ret_val == IOCB_ERROR) 11769 __lpfc_sli_release_iocbq(phba, abtsiocbq); 11770 else 11771 sum++; 11772 } 11773 spin_unlock_irqrestore(&phba->hbalock, iflags); 11774 return sum; 11775 } 11776 11777 /** 11778 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler 11779 * @phba: Pointer to HBA context object. 11780 * @cmdiocbq: Pointer to command iocb. 11781 * @rspiocbq: Pointer to response iocb. 11782 * 11783 * This function is the completion handler for iocbs issued using 11784 * lpfc_sli_issue_iocb_wait function. This function is called by the 11785 * ring event handler function without any lock held. This function 11786 * can be called from both worker thread context and interrupt 11787 * context. This function also can be called from other thread which 11788 * cleans up the SLI layer objects. 11789 * This function copy the contents of the response iocb to the 11790 * response iocb memory object provided by the caller of 11791 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 11792 * sleeps for the iocb completion. 11793 **/ 11794 static void 11795 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, 11796 struct lpfc_iocbq *cmdiocbq, 11797 struct lpfc_iocbq *rspiocbq) 11798 { 11799 wait_queue_head_t *pdone_q; 11800 unsigned long iflags; 11801 struct lpfc_io_buf *lpfc_cmd; 11802 11803 spin_lock_irqsave(&phba->hbalock, iflags); 11804 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) { 11805 11806 /* 11807 * A time out has occurred for the iocb. If a time out 11808 * completion handler has been supplied, call it. Otherwise, 11809 * just free the iocbq. 11810 */ 11811 11812 spin_unlock_irqrestore(&phba->hbalock, iflags); 11813 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl; 11814 cmdiocbq->wait_iocb_cmpl = NULL; 11815 if (cmdiocbq->iocb_cmpl) 11816 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL); 11817 else 11818 lpfc_sli_release_iocbq(phba, cmdiocbq); 11819 return; 11820 } 11821 11822 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 11823 if (cmdiocbq->context2 && rspiocbq) 11824 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 11825 &rspiocbq->iocb, sizeof(IOCB_t)); 11826 11827 /* Set the exchange busy flag for task management commands */ 11828 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && 11829 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { 11830 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf, 11831 cur_iocbq); 11832 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; 11833 } 11834 11835 pdone_q = cmdiocbq->context_un.wait_queue; 11836 if (pdone_q) 11837 wake_up(pdone_q); 11838 spin_unlock_irqrestore(&phba->hbalock, iflags); 11839 return; 11840 } 11841 11842 /** 11843 * lpfc_chk_iocb_flg - Test IOCB flag with lock held. 11844 * @phba: Pointer to HBA context object.. 11845 * @piocbq: Pointer to command iocb. 11846 * @flag: Flag to test. 11847 * 11848 * This routine grabs the hbalock and then test the iocb_flag to 11849 * see if the passed in flag is set. 11850 * Returns: 11851 * 1 if flag is set. 11852 * 0 if flag is not set. 
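 *
 * It exists so that the sleeper in lpfc_sli_issue_iocb_wait() can test
 * the flag under hbalock, as in the pattern used below:
 *
 *	timeleft = wait_event_timeout(done_q,
 *			lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
 *			timeout_req);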
11853 **/ 11854 static int 11855 lpfc_chk_iocb_flg(struct lpfc_hba *phba, 11856 struct lpfc_iocbq *piocbq, uint32_t flag) 11857 { 11858 unsigned long iflags; 11859 int ret; 11860 11861 spin_lock_irqsave(&phba->hbalock, iflags); 11862 ret = piocbq->iocb_flag & flag; 11863 spin_unlock_irqrestore(&phba->hbalock, iflags); 11864 return ret; 11865 11866 } 11867 11868 /** 11869 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands 11870 * @phba: Pointer to HBA context object.. 11871 * @pring: Pointer to sli ring. 11872 * @piocb: Pointer to command iocb. 11873 * @prspiocbq: Pointer to response iocb. 11874 * @timeout: Timeout in number of seconds. 11875 * 11876 * This function issues the iocb to firmware and waits for the 11877 * iocb to complete. The iocb_cmpl field of the shall be used 11878 * to handle iocbs which time out. If the field is NULL, the 11879 * function shall free the iocbq structure. If more clean up is 11880 * needed, the caller is expected to provide a completion function 11881 * that will provide the needed clean up. If the iocb command is 11882 * not completed within timeout seconds, the function will either 11883 * free the iocbq structure (if iocb_cmpl == NULL) or execute the 11884 * completion function set in the iocb_cmpl field and then return 11885 * a status of IOCB_TIMEDOUT. The caller should not free the iocb 11886 * resources if this function returns IOCB_TIMEDOUT. 11887 * The function waits for the iocb completion using an 11888 * non-interruptible wait. 11889 * This function will sleep while waiting for iocb completion. 11890 * So, this function should not be called from any context which 11891 * does not allow sleeping. Due to the same reason, this function 11892 * cannot be called with interrupt disabled. 11893 * This function assumes that the iocb completions occur while 11894 * this function sleep. So, this function cannot be called from 11895 * the thread which process iocb completion for this ring. 11896 * This function clears the iocb_flag of the iocb object before 11897 * issuing the iocb and the iocb completion handler sets this 11898 * flag and wakes this thread when the iocb completes. 11899 * The contents of the response iocb will be copied to prspiocbq 11900 * by the completion handler when the command completes. 11901 * This function returns IOCB_SUCCESS when success. 11902 * This function is called with no lock held. 11903 **/ 11904 int 11905 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, 11906 uint32_t ring_number, 11907 struct lpfc_iocbq *piocb, 11908 struct lpfc_iocbq *prspiocbq, 11909 uint32_t timeout) 11910 { 11911 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 11912 long timeleft, timeout_req = 0; 11913 int retval = IOCB_SUCCESS; 11914 uint32_t creg_val; 11915 struct lpfc_iocbq *iocb; 11916 int txq_cnt = 0; 11917 int txcmplq_cnt = 0; 11918 struct lpfc_sli_ring *pring; 11919 unsigned long iflags; 11920 bool iocb_completed = true; 11921 11922 if (phba->sli_rev >= LPFC_SLI_REV4) 11923 pring = lpfc_sli4_calc_ring(phba, piocb); 11924 else 11925 pring = &phba->sli.sli3_ring[ring_number]; 11926 /* 11927 * If the caller has provided a response iocbq buffer, then context2 11928 * is NULL or its an error. 
11929 */ 11930 if (prspiocbq) { 11931 if (piocb->context2) 11932 return IOCB_ERROR; 11933 piocb->context2 = prspiocbq; 11934 } 11935 11936 piocb->wait_iocb_cmpl = piocb->iocb_cmpl; 11937 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; 11938 piocb->context_un.wait_queue = &done_q; 11939 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO); 11940 11941 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 11942 if (lpfc_readl(phba->HCregaddr, &creg_val)) 11943 return IOCB_ERROR; 11944 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 11945 writel(creg_val, phba->HCregaddr); 11946 readl(phba->HCregaddr); /* flush */ 11947 } 11948 11949 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 11950 SLI_IOCB_RET_IOCB); 11951 if (retval == IOCB_SUCCESS) { 11952 timeout_req = msecs_to_jiffies(timeout * 1000); 11953 timeleft = wait_event_timeout(done_q, 11954 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), 11955 timeout_req); 11956 spin_lock_irqsave(&phba->hbalock, iflags); 11957 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) { 11958 11959 /* 11960 * IOCB timed out. Inform the wake iocb wait 11961 * completion function and set local status 11962 */ 11963 11964 iocb_completed = false; 11965 piocb->iocb_flag |= LPFC_IO_WAKE_TMO; 11966 } 11967 spin_unlock_irqrestore(&phba->hbalock, iflags); 11968 if (iocb_completed) { 11969 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11970 "0331 IOCB wake signaled\n"); 11971 /* Note: we are not indicating if the IOCB has a success 11972 * status or not - that's for the caller to check. 11973 * IOCB_SUCCESS means just that the command was sent and 11974 * completed. Not that it completed successfully. 11975 * */ 11976 } else if (timeleft == 0) { 11977 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11978 "0338 IOCB wait timeout error - no " 11979 "wake response Data x%x\n", timeout); 11980 retval = IOCB_TIMEDOUT; 11981 } else { 11982 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11983 "0330 IOCB wake NOT set, " 11984 "Data x%x x%lx\n", 11985 timeout, (timeleft / jiffies)); 11986 retval = IOCB_TIMEDOUT; 11987 } 11988 } else if (retval == IOCB_BUSY) { 11989 if (phba->cfg_log_verbose & LOG_SLI) { 11990 list_for_each_entry(iocb, &pring->txq, list) { 11991 txq_cnt++; 11992 } 11993 list_for_each_entry(iocb, &pring->txcmplq, list) { 11994 txcmplq_cnt++; 11995 } 11996 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11997 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", 11998 phba->iocb_cnt, txq_cnt, txcmplq_cnt); 11999 } 12000 return retval; 12001 } else { 12002 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 12003 "0332 IOCB wait issue failed, Data x%x\n", 12004 retval); 12005 retval = IOCB_ERROR; 12006 } 12007 12008 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 12009 if (lpfc_readl(phba->HCregaddr, &creg_val)) 12010 return IOCB_ERROR; 12011 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 12012 writel(creg_val, phba->HCregaddr); 12013 readl(phba->HCregaddr); /* flush */ 12014 } 12015 12016 if (prspiocbq) 12017 piocb->context2 = NULL; 12018 12019 piocb->context_un.wait_queue = NULL; 12020 piocb->iocb_cmpl = NULL; 12021 return retval; 12022 } 12023 12024 /** 12025 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox 12026 * @phba: Pointer to HBA context object. 12027 * @pmboxq: Pointer to driver mailbox object. 12028 * @timeout: Timeout in number of seconds. 12029 * 12030 * This function issues the mailbox to firmware and waits for the 12031 * mailbox command to complete. If the mailbox command is not 12032 * completed within timeout seconds, it returns MBX_TIMEOUT. 
 * The function waits for the mailbox completion using a
 * non-interruptible wait (wait_for_completion_timeout). The caller
 * should not free the mailbox resources if this function returns
 * MBX_TIMEOUT.
 * This function will sleep while waiting for mailbox completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. Due to the same reason, this function
 * cannot be called with interrupts disabled.
 * This function assumes that the mailbox completion occurs while
 * this function sleeps. So, this function cannot be called from
 * the worker thread which processes mailbox completion.
 * This function is called in the context of HBA management
 * applications.
 * This function returns MBX_SUCCESS when successful.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
			 uint32_t timeout)
{
	struct completion mbox_done;
	int retval;
	unsigned long flag;

	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	/* set up the wake routine as the mailbox callback */
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;

	/* set up context3 to pass the completion to the wake function */
	init_completion(&mbox_done);
	pmboxq->context3 = &mbox_done;
	/* now issue the command */
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
		wait_for_completion_timeout(&mbox_done,
					    msecs_to_jiffies(timeout * 1000));

		spin_lock_irqsave(&phba->hbalock, flag);
		pmboxq->context3 = NULL;
		/*
		 * if LPFC_MBX_WAKE flag is set the mailbox is completed
		 * else do not free the resources.
		 */
		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
			retval = MBX_SUCCESS;
		} else {
			retval = MBX_TIMEOUT;
			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
		spin_unlock_irqrestore(&phba->hbalock, flag);
	}
	return retval;
}

/**
 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
 * @phba: Pointer to HBA context.
 * @mbx_action: Mailbox shutdown option (LPFC_MBX_WAIT or LPFC_MBX_NO_WAIT).
 *
 * This function is called to shut down the driver's mailbox sub-system.
 * It first marks the mailbox sub-system as blocked to prevent further
 * asynchronous mailbox commands from being issued from the pending mailbox
 * command queue. If the mailbox command sub-system shutdown is due to an
 * HBA error condition such as EEH or ERATT, this routine shall invoke
 * the mailbox sub-system flush routine to forcefully bring down the
 * mailbox sub-system. Otherwise, if it is due to a normal condition (such
 * as offline or HBA function reset), this routine will wait for the
 * outstanding mailbox command to complete before invoking the mailbox
 * sub-system flush routine to gracefully bring down the mailbox sub-system.
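 *
 * Callers select the behavior through @mbx_action, for example (sketch
 * of the call made from lpfc_sli_hba_down() above):
 *
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);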
12102 **/ 12103 void 12104 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action) 12105 { 12106 struct lpfc_sli *psli = &phba->sli; 12107 unsigned long timeout; 12108 12109 if (mbx_action == LPFC_MBX_NO_WAIT) { 12110 /* delay 100ms for port state */ 12111 msleep(100); 12112 lpfc_sli_mbox_sys_flush(phba); 12113 return; 12114 } 12115 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 12116 12117 /* Disable softirqs, including timers from obtaining phba->hbalock */ 12118 local_bh_disable(); 12119 12120 spin_lock_irq(&phba->hbalock); 12121 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 12122 12123 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 12124 /* Determine how long we might wait for the active mailbox 12125 * command to be gracefully completed by firmware. 12126 */ 12127 if (phba->sli.mbox_active) 12128 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 12129 phba->sli.mbox_active) * 12130 1000) + jiffies; 12131 spin_unlock_irq(&phba->hbalock); 12132 12133 /* Enable softirqs again, done with phba->hbalock */ 12134 local_bh_enable(); 12135 12136 while (phba->sli.mbox_active) { 12137 /* Check active mailbox complete status every 2ms */ 12138 msleep(2); 12139 if (time_after(jiffies, timeout)) 12140 /* Timeout, let the mailbox flush routine to 12141 * forcefully release active mailbox command 12142 */ 12143 break; 12144 } 12145 } else { 12146 spin_unlock_irq(&phba->hbalock); 12147 12148 /* Enable softirqs again, done with phba->hbalock */ 12149 local_bh_enable(); 12150 } 12151 12152 lpfc_sli_mbox_sys_flush(phba); 12153 } 12154 12155 /** 12156 * lpfc_sli_eratt_read - read sli-3 error attention events 12157 * @phba: Pointer to HBA context. 12158 * 12159 * This function is called to read the SLI3 device error attention registers 12160 * for possible error attention events. The caller must hold the hostlock 12161 * with spin_lock_irq(). 12162 * 12163 * This function returns 1 when there is Error Attention in the Host Attention 12164 * Register and returns 0 otherwise. 12165 **/ 12166 static int 12167 lpfc_sli_eratt_read(struct lpfc_hba *phba) 12168 { 12169 uint32_t ha_copy; 12170 12171 /* Read chip Host Attention (HA) register */ 12172 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 12173 goto unplug_err; 12174 12175 if (ha_copy & HA_ERATT) { 12176 /* Read host status register to retrieve error event */ 12177 if (lpfc_sli_read_hs(phba)) 12178 goto unplug_err; 12179 12180 /* Check if there is a deferred error condition is active */ 12181 if ((HS_FFER1 & phba->work_hs) && 12182 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 12183 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { 12184 phba->hba_flag |= DEFER_ERATT; 12185 /* Clear all interrupt enable conditions */ 12186 writel(0, phba->HCregaddr); 12187 readl(phba->HCregaddr); 12188 } 12189 12190 /* Set the driver HA work bitmap */ 12191 phba->work_ha |= HA_ERATT; 12192 /* Indicate polling handles this ERATT */ 12193 phba->hba_flag |= HBA_ERATT_HANDLED; 12194 return 1; 12195 } 12196 return 0; 12197 12198 unplug_err: 12199 /* Set the driver HS work bitmap */ 12200 phba->work_hs |= UNPLUG_ERR; 12201 /* Set the driver HA work bitmap */ 12202 phba->work_ha |= HA_ERATT; 12203 /* Indicate polling handles this ERATT */ 12204 phba->hba_flag |= HBA_ERATT_HANDLED; 12205 return 1; 12206 } 12207 12208 /** 12209 * lpfc_sli4_eratt_read - read sli-4 error attention events 12210 * @phba: Pointer to HBA context. 12211 * 12212 * This function is called to read the SLI4 device error attention registers 12213 * for possible error attention events. 
The caller must hold the hostlock 12214 * with spin_lock_irq(). 12215 * 12216 * This function returns 1 when there is Error Attention in the Host Attention 12217 * Register and returns 0 otherwise. 12218 **/ 12219 static int 12220 lpfc_sli4_eratt_read(struct lpfc_hba *phba) 12221 { 12222 uint32_t uerr_sta_hi, uerr_sta_lo; 12223 uint32_t if_type, portsmphr; 12224 struct lpfc_register portstat_reg; 12225 12226 /* 12227 * For now, use the SLI4 device internal unrecoverable error 12228 * registers for error attention. This can be changed later. 12229 */ 12230 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 12231 switch (if_type) { 12232 case LPFC_SLI_INTF_IF_TYPE_0: 12233 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, 12234 &uerr_sta_lo) || 12235 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, 12236 &uerr_sta_hi)) { 12237 phba->work_hs |= UNPLUG_ERR; 12238 phba->work_ha |= HA_ERATT; 12239 phba->hba_flag |= HBA_ERATT_HANDLED; 12240 return 1; 12241 } 12242 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 12243 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 12244 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12245 "1423 HBA Unrecoverable error: " 12246 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 12247 "ue_mask_lo_reg=0x%x, " 12248 "ue_mask_hi_reg=0x%x\n", 12249 uerr_sta_lo, uerr_sta_hi, 12250 phba->sli4_hba.ue_mask_lo, 12251 phba->sli4_hba.ue_mask_hi); 12252 phba->work_status[0] = uerr_sta_lo; 12253 phba->work_status[1] = uerr_sta_hi; 12254 phba->work_ha |= HA_ERATT; 12255 phba->hba_flag |= HBA_ERATT_HANDLED; 12256 return 1; 12257 } 12258 break; 12259 case LPFC_SLI_INTF_IF_TYPE_2: 12260 case LPFC_SLI_INTF_IF_TYPE_6: 12261 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 12262 &portstat_reg.word0) || 12263 lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 12264 &portsmphr)){ 12265 phba->work_hs |= UNPLUG_ERR; 12266 phba->work_ha |= HA_ERATT; 12267 phba->hba_flag |= HBA_ERATT_HANDLED; 12268 return 1; 12269 } 12270 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { 12271 phba->work_status[0] = 12272 readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 12273 phba->work_status[1] = 12274 readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 12275 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12276 "2885 Port Status Event: " 12277 "port status reg 0x%x, " 12278 "port smphr reg 0x%x, " 12279 "error 1=0x%x, error 2=0x%x\n", 12280 portstat_reg.word0, 12281 portsmphr, 12282 phba->work_status[0], 12283 phba->work_status[1]); 12284 phba->work_ha |= HA_ERATT; 12285 phba->hba_flag |= HBA_ERATT_HANDLED; 12286 return 1; 12287 } 12288 break; 12289 case LPFC_SLI_INTF_IF_TYPE_1: 12290 default: 12291 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12292 "2886 HBA Error Attention on unsupported " 12293 "if type %d.", if_type); 12294 return 1; 12295 } 12296 12297 return 0; 12298 } 12299 12300 /** 12301 * lpfc_sli_check_eratt - check error attention events 12302 * @phba: Pointer to HBA context. 12303 * 12304 * This function is called from timer soft interrupt context to check HBA's 12305 * error attention register bit for error attention events. 12306 * 12307 * This function returns 1 when there is Error Attention in the Host Attention 12308 * Register and returns 0 otherwise. 12309 **/ 12310 int 12311 lpfc_sli_check_eratt(struct lpfc_hba *phba) 12312 { 12313 uint32_t ha_copy; 12314 12315 /* If somebody is waiting to handle an eratt, don't process it 12316 * here. The brdkill function will do this. 
12317 */ 12318 if (phba->link_flag & LS_IGNORE_ERATT) 12319 return 0; 12320 12321 /* Check if interrupt handler handles this ERATT */ 12322 spin_lock_irq(&phba->hbalock); 12323 if (phba->hba_flag & HBA_ERATT_HANDLED) { 12324 /* Interrupt handler has handled ERATT */ 12325 spin_unlock_irq(&phba->hbalock); 12326 return 0; 12327 } 12328 12329 /* 12330 * If there is deferred error attention, do not check for error 12331 * attention 12332 */ 12333 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12334 spin_unlock_irq(&phba->hbalock); 12335 return 0; 12336 } 12337 12338 /* If PCI channel is offline, don't process it */ 12339 if (unlikely(pci_channel_offline(phba->pcidev))) { 12340 spin_unlock_irq(&phba->hbalock); 12341 return 0; 12342 } 12343 12344 switch (phba->sli_rev) { 12345 case LPFC_SLI_REV2: 12346 case LPFC_SLI_REV3: 12347 /* Read chip Host Attention (HA) register */ 12348 ha_copy = lpfc_sli_eratt_read(phba); 12349 break; 12350 case LPFC_SLI_REV4: 12351 /* Read device Uncoverable Error (UERR) registers */ 12352 ha_copy = lpfc_sli4_eratt_read(phba); 12353 break; 12354 default: 12355 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12356 "0299 Invalid SLI revision (%d)\n", 12357 phba->sli_rev); 12358 ha_copy = 0; 12359 break; 12360 } 12361 spin_unlock_irq(&phba->hbalock); 12362 12363 return ha_copy; 12364 } 12365 12366 /** 12367 * lpfc_intr_state_check - Check device state for interrupt handling 12368 * @phba: Pointer to HBA context. 12369 * 12370 * This inline routine checks whether a device or its PCI slot is in a state 12371 * that the interrupt should be handled. 12372 * 12373 * This function returns 0 if the device or the PCI slot is in a state that 12374 * interrupt should be handled, otherwise -EIO. 12375 */ 12376 static inline int 12377 lpfc_intr_state_check(struct lpfc_hba *phba) 12378 { 12379 /* If the pci channel is offline, ignore all the interrupts */ 12380 if (unlikely(pci_channel_offline(phba->pcidev))) 12381 return -EIO; 12382 12383 /* Update device level interrupt statistics */ 12384 phba->sli.slistat.sli_intr++; 12385 12386 /* Ignore all interrupts during initialization. */ 12387 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 12388 return -EIO; 12389 12390 return 0; 12391 } 12392 12393 /** 12394 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device 12395 * @irq: Interrupt number. 12396 * @dev_id: The device context pointer. 12397 * 12398 * This function is directly called from the PCI layer as an interrupt 12399 * service routine when device with SLI-3 interface spec is enabled with 12400 * MSI-X multi-message interrupt mode and there are slow-path events in 12401 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 12402 * interrupt mode, this function is called as part of the device-level 12403 * interrupt handler. When the PCI slot is in error recovery or the HBA 12404 * is undergoing initialization, the interrupt handler will not process 12405 * the interrupt. The link attention and ELS ring attention events are 12406 * handled by the worker thread. The interrupt handler signals the worker 12407 * thread and returns for these events. This function is called without 12408 * any lock held. It gets the hbalock to access and update SLI data 12409 * structures. 12410 * 12411 * This function returns IRQ_HANDLED when interrupt is handled else it 12412 * returns IRQ_NONE. 
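 *
 * Note: only the slow-path attention sources are examined here: mailbox
 * completion (HA_MBATT), link attention (HA_LATT), error attention
 * (HA_ERATT) and the ELS ring. FCP ring events are left to the
 * fast-path handler.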
12413 **/ 12414 irqreturn_t 12415 lpfc_sli_sp_intr_handler(int irq, void *dev_id) 12416 { 12417 struct lpfc_hba *phba; 12418 uint32_t ha_copy, hc_copy; 12419 uint32_t work_ha_copy; 12420 unsigned long status; 12421 unsigned long iflag; 12422 uint32_t control; 12423 12424 MAILBOX_t *mbox, *pmbox; 12425 struct lpfc_vport *vport; 12426 struct lpfc_nodelist *ndlp; 12427 struct lpfc_dmabuf *mp; 12428 LPFC_MBOXQ_t *pmb; 12429 int rc; 12430 12431 /* 12432 * Get the driver's phba structure from the dev_id and 12433 * assume the HBA is not interrupting. 12434 */ 12435 phba = (struct lpfc_hba *)dev_id; 12436 12437 if (unlikely(!phba)) 12438 return IRQ_NONE; 12439 12440 /* 12441 * Stuff needs to be attented to when this function is invoked as an 12442 * individual interrupt handler in MSI-X multi-message interrupt mode 12443 */ 12444 if (phba->intr_type == MSIX) { 12445 /* Check device state for handling interrupt */ 12446 if (lpfc_intr_state_check(phba)) 12447 return IRQ_NONE; 12448 /* Need to read HA REG for slow-path events */ 12449 spin_lock_irqsave(&phba->hbalock, iflag); 12450 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 12451 goto unplug_error; 12452 /* If somebody is waiting to handle an eratt don't process it 12453 * here. The brdkill function will do this. 12454 */ 12455 if (phba->link_flag & LS_IGNORE_ERATT) 12456 ha_copy &= ~HA_ERATT; 12457 /* Check the need for handling ERATT in interrupt handler */ 12458 if (ha_copy & HA_ERATT) { 12459 if (phba->hba_flag & HBA_ERATT_HANDLED) 12460 /* ERATT polling has handled ERATT */ 12461 ha_copy &= ~HA_ERATT; 12462 else 12463 /* Indicate interrupt handler handles ERATT */ 12464 phba->hba_flag |= HBA_ERATT_HANDLED; 12465 } 12466 12467 /* 12468 * If there is deferred error attention, do not check for any 12469 * interrupt. 12470 */ 12471 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12472 spin_unlock_irqrestore(&phba->hbalock, iflag); 12473 return IRQ_NONE; 12474 } 12475 12476 /* Clear up only attention source related to slow-path */ 12477 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 12478 goto unplug_error; 12479 12480 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | 12481 HC_LAINT_ENA | HC_ERINT_ENA), 12482 phba->HCregaddr); 12483 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 12484 phba->HAregaddr); 12485 writel(hc_copy, phba->HCregaddr); 12486 readl(phba->HAregaddr); /* flush */ 12487 spin_unlock_irqrestore(&phba->hbalock, iflag); 12488 } else 12489 ha_copy = phba->ha_copy; 12490 12491 work_ha_copy = ha_copy & phba->work_ha_mask; 12492 12493 if (work_ha_copy) { 12494 if (work_ha_copy & HA_LATT) { 12495 if (phba->sli.sli_flag & LPFC_PROCESS_LA) { 12496 /* 12497 * Turn off Link Attention interrupts 12498 * until CLEAR_LA done 12499 */ 12500 spin_lock_irqsave(&phba->hbalock, iflag); 12501 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 12502 if (lpfc_readl(phba->HCregaddr, &control)) 12503 goto unplug_error; 12504 control &= ~HC_LAINT_ENA; 12505 writel(control, phba->HCregaddr); 12506 readl(phba->HCregaddr); /* flush */ 12507 spin_unlock_irqrestore(&phba->hbalock, iflag); 12508 } 12509 else 12510 work_ha_copy &= ~HA_LATT; 12511 } 12512 12513 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) { 12514 /* 12515 * Turn off Slow Rings interrupts, LPFC_ELS_RING is 12516 * the only slow ring. 
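			 * Each ring owns a 4-bit attention field in the HA
			 * register, so shifting by (4 * LPFC_ELS_RING) and
			 * masking with HA_RXMASK below isolates the ELS
			 * ring's attention bits.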
12517 */ 12518 status = (work_ha_copy & 12519 (HA_RXMASK << (4*LPFC_ELS_RING))); 12520 status >>= (4*LPFC_ELS_RING); 12521 if (status & HA_RXMASK) { 12522 spin_lock_irqsave(&phba->hbalock, iflag); 12523 if (lpfc_readl(phba->HCregaddr, &control)) 12524 goto unplug_error; 12525 12526 lpfc_debugfs_slow_ring_trc(phba, 12527 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", 12528 control, status, 12529 (uint32_t)phba->sli.slistat.sli_intr); 12530 12531 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { 12532 lpfc_debugfs_slow_ring_trc(phba, 12533 "ISR Disable ring:" 12534 "pwork:x%x hawork:x%x wait:x%x", 12535 phba->work_ha, work_ha_copy, 12536 (uint32_t)((unsigned long) 12537 &phba->work_waitq)); 12538 12539 control &= 12540 ~(HC_R0INT_ENA << LPFC_ELS_RING); 12541 writel(control, phba->HCregaddr); 12542 readl(phba->HCregaddr); /* flush */ 12543 } 12544 else { 12545 lpfc_debugfs_slow_ring_trc(phba, 12546 "ISR slow ring: pwork:" 12547 "x%x hawork:x%x wait:x%x", 12548 phba->work_ha, work_ha_copy, 12549 (uint32_t)((unsigned long) 12550 &phba->work_waitq)); 12551 } 12552 spin_unlock_irqrestore(&phba->hbalock, iflag); 12553 } 12554 } 12555 spin_lock_irqsave(&phba->hbalock, iflag); 12556 if (work_ha_copy & HA_ERATT) { 12557 if (lpfc_sli_read_hs(phba)) 12558 goto unplug_error; 12559 /* 12560 * Check if there is a deferred error condition 12561 * is active 12562 */ 12563 if ((HS_FFER1 & phba->work_hs) && 12564 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 12565 HS_FFER6 | HS_FFER7 | HS_FFER8) & 12566 phba->work_hs)) { 12567 phba->hba_flag |= DEFER_ERATT; 12568 /* Clear all interrupt enable conditions */ 12569 writel(0, phba->HCregaddr); 12570 readl(phba->HCregaddr); 12571 } 12572 } 12573 12574 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 12575 pmb = phba->sli.mbox_active; 12576 pmbox = &pmb->u.mb; 12577 mbox = phba->mbox; 12578 vport = pmb->vport; 12579 12580 /* First check out the status word */ 12581 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); 12582 if (pmbox->mbxOwner != OWN_HOST) { 12583 spin_unlock_irqrestore(&phba->hbalock, iflag); 12584 /* 12585 * Stray Mailbox Interrupt, mbxCommand <cmd> 12586 * mbxStatus <status> 12587 */ 12588 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 12589 LOG_SLI, 12590 "(%d):0304 Stray Mailbox " 12591 "Interrupt mbxCommand x%x " 12592 "mbxStatus x%x\n", 12593 (vport ? vport->vpi : 0), 12594 pmbox->mbxCommand, 12595 pmbox->mbxStatus); 12596 /* clear mailbox attention bit */ 12597 work_ha_copy &= ~HA_MBATT; 12598 } else { 12599 phba->sli.mbox_active = NULL; 12600 spin_unlock_irqrestore(&phba->hbalock, iflag); 12601 phba->last_completion_time = jiffies; 12602 del_timer(&phba->sli.mbox_tmo); 12603 if (pmb->mbox_cmpl) { 12604 lpfc_sli_pcimem_bcopy(mbox, pmbox, 12605 MAILBOX_CMD_SIZE); 12606 if (pmb->out_ext_byte_len && 12607 pmb->ctx_buf) 12608 lpfc_sli_pcimem_bcopy( 12609 phba->mbox_ext, 12610 pmb->ctx_buf, 12611 pmb->out_ext_byte_len); 12612 } 12613 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 12614 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 12615 12616 lpfc_debugfs_disc_trc(vport, 12617 LPFC_DISC_TRC_MBOX_VPORT, 12618 "MBOX dflt rpi: : " 12619 "status:x%x rpi:x%x", 12620 (uint32_t)pmbox->mbxStatus, 12621 pmbox->un.varWords[0], 0); 12622 12623 if (!pmbox->mbxStatus) { 12624 mp = (struct lpfc_dmabuf *) 12625 (pmb->ctx_buf); 12626 ndlp = (struct lpfc_nodelist *) 12627 pmb->ctx_ndlp; 12628 12629 /* Reg_LOGIN of dflt RPI was 12630 * successful. new lets get 12631 * rid of the RPI using the 12632 * same mbox buffer. 
12633 */ 12634 lpfc_unreg_login(phba, 12635 vport->vpi, 12636 pmbox->un.varWords[0], 12637 pmb); 12638 pmb->mbox_cmpl = 12639 lpfc_mbx_cmpl_dflt_rpi; 12640 pmb->ctx_buf = mp; 12641 pmb->ctx_ndlp = ndlp; 12642 pmb->vport = vport; 12643 rc = lpfc_sli_issue_mbox(phba, 12644 pmb, 12645 MBX_NOWAIT); 12646 if (rc != MBX_BUSY) 12647 lpfc_printf_log(phba, 12648 KERN_ERR, 12649 LOG_MBOX | LOG_SLI, 12650 "0350 rc should have" 12651 "been MBX_BUSY\n"); 12652 if (rc != MBX_NOT_FINISHED) 12653 goto send_current_mbox; 12654 } 12655 } 12656 spin_lock_irqsave( 12657 &phba->pport->work_port_lock, 12658 iflag); 12659 phba->pport->work_port_events &= 12660 ~WORKER_MBOX_TMO; 12661 spin_unlock_irqrestore( 12662 &phba->pport->work_port_lock, 12663 iflag); 12664 lpfc_mbox_cmpl_put(phba, pmb); 12665 } 12666 } else 12667 spin_unlock_irqrestore(&phba->hbalock, iflag); 12668 12669 if ((work_ha_copy & HA_MBATT) && 12670 (phba->sli.mbox_active == NULL)) { 12671 send_current_mbox: 12672 /* Process next mailbox command if there is one */ 12673 do { 12674 rc = lpfc_sli_issue_mbox(phba, NULL, 12675 MBX_NOWAIT); 12676 } while (rc == MBX_NOT_FINISHED); 12677 if (rc != MBX_SUCCESS) 12678 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 12679 LOG_SLI, "0349 rc should be " 12680 "MBX_SUCCESS\n"); 12681 } 12682 12683 spin_lock_irqsave(&phba->hbalock, iflag); 12684 phba->work_ha |= work_ha_copy; 12685 spin_unlock_irqrestore(&phba->hbalock, iflag); 12686 lpfc_worker_wake_up(phba); 12687 } 12688 return IRQ_HANDLED; 12689 unplug_error: 12690 spin_unlock_irqrestore(&phba->hbalock, iflag); 12691 return IRQ_HANDLED; 12692 12693 } /* lpfc_sli_sp_intr_handler */ 12694 12695 /** 12696 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device. 12697 * @irq: Interrupt number. 12698 * @dev_id: The device context pointer. 12699 * 12700 * This function is directly called from the PCI layer as an interrupt 12701 * service routine when device with SLI-3 interface spec is enabled with 12702 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 12703 * ring event in the HBA. However, when the device is enabled with either 12704 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 12705 * device-level interrupt handler. When the PCI slot is in error recovery 12706 * or the HBA is undergoing initialization, the interrupt handler will not 12707 * process the interrupt. The SCSI FCP fast-path ring event are handled in 12708 * the intrrupt context. This function is called without any lock held. 12709 * It gets the hbalock to access and update SLI data structures. 12710 * 12711 * This function returns IRQ_HANDLED when interrupt is handled else it 12712 * returns IRQ_NONE. 12713 **/ 12714 irqreturn_t 12715 lpfc_sli_fp_intr_handler(int irq, void *dev_id) 12716 { 12717 struct lpfc_hba *phba; 12718 uint32_t ha_copy; 12719 unsigned long status; 12720 unsigned long iflag; 12721 struct lpfc_sli_ring *pring; 12722 12723 /* Get the driver's phba structure from the dev_id and 12724 * assume the HBA is not interrupting. 
12725 */ 12726 phba = (struct lpfc_hba *) dev_id; 12727 12728 if (unlikely(!phba)) 12729 return IRQ_NONE; 12730 12731 /* 12732 * Stuff needs to be attented to when this function is invoked as an 12733 * individual interrupt handler in MSI-X multi-message interrupt mode 12734 */ 12735 if (phba->intr_type == MSIX) { 12736 /* Check device state for handling interrupt */ 12737 if (lpfc_intr_state_check(phba)) 12738 return IRQ_NONE; 12739 /* Need to read HA REG for FCP ring and other ring events */ 12740 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 12741 return IRQ_HANDLED; 12742 /* Clear up only attention source related to fast-path */ 12743 spin_lock_irqsave(&phba->hbalock, iflag); 12744 /* 12745 * If there is deferred error attention, do not check for 12746 * any interrupt. 12747 */ 12748 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12749 spin_unlock_irqrestore(&phba->hbalock, iflag); 12750 return IRQ_NONE; 12751 } 12752 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 12753 phba->HAregaddr); 12754 readl(phba->HAregaddr); /* flush */ 12755 spin_unlock_irqrestore(&phba->hbalock, iflag); 12756 } else 12757 ha_copy = phba->ha_copy; 12758 12759 /* 12760 * Process all events on FCP ring. Take the optimized path for FCP IO. 12761 */ 12762 ha_copy &= ~(phba->work_ha_mask); 12763 12764 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 12765 status >>= (4*LPFC_FCP_RING); 12766 pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; 12767 if (status & HA_RXMASK) 12768 lpfc_sli_handle_fast_ring_event(phba, pring, status); 12769 12770 if (phba->cfg_multi_ring_support == 2) { 12771 /* 12772 * Process all events on extra ring. Take the optimized path 12773 * for extra ring IO. 12774 */ 12775 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 12776 status >>= (4*LPFC_EXTRA_RING); 12777 if (status & HA_RXMASK) { 12778 lpfc_sli_handle_fast_ring_event(phba, 12779 &phba->sli.sli3_ring[LPFC_EXTRA_RING], 12780 status); 12781 } 12782 } 12783 return IRQ_HANDLED; 12784 } /* lpfc_sli_fp_intr_handler */ 12785 12786 /** 12787 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device 12788 * @irq: Interrupt number. 12789 * @dev_id: The device context pointer. 12790 * 12791 * This function is the HBA device-level interrupt handler to device with 12792 * SLI-3 interface spec, called from the PCI layer when either MSI or 12793 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which 12794 * requires driver attention. This function invokes the slow-path interrupt 12795 * attention handling function and fast-path interrupt attention handling 12796 * function in turn to process the relevant HBA attention events. This 12797 * function is called without any lock held. It gets the hbalock to access 12798 * and update SLI data structures. 12799 * 12800 * This function returns IRQ_HANDLED when interrupt is handled, else it 12801 * returns IRQ_NONE. 12802 **/ 12803 irqreturn_t 12804 lpfc_sli_intr_handler(int irq, void *dev_id) 12805 { 12806 struct lpfc_hba *phba; 12807 irqreturn_t sp_irq_rc, fp_irq_rc; 12808 unsigned long status1, status2; 12809 uint32_t hc_copy; 12810 12811 /* 12812 * Get the driver's phba structure from the dev_id and 12813 * assume the HBA is not interrupting. 
12814 */ 12815 phba = (struct lpfc_hba *) dev_id; 12816 12817 if (unlikely(!phba)) 12818 return IRQ_NONE; 12819 12820 /* Check device state for handling interrupt */ 12821 if (lpfc_intr_state_check(phba)) 12822 return IRQ_NONE; 12823 12824 spin_lock(&phba->hbalock); 12825 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { 12826 spin_unlock(&phba->hbalock); 12827 return IRQ_HANDLED; 12828 } 12829 12830 if (unlikely(!phba->ha_copy)) { 12831 spin_unlock(&phba->hbalock); 12832 return IRQ_NONE; 12833 } else if (phba->ha_copy & HA_ERATT) { 12834 if (phba->hba_flag & HBA_ERATT_HANDLED) 12835 /* ERATT polling has handled ERATT */ 12836 phba->ha_copy &= ~HA_ERATT; 12837 else 12838 /* Indicate interrupt handler handles ERATT */ 12839 phba->hba_flag |= HBA_ERATT_HANDLED; 12840 } 12841 12842 /* 12843 * If there is deferred error attention, do not check for any interrupt. 12844 */ 12845 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12846 spin_unlock(&phba->hbalock); 12847 return IRQ_NONE; 12848 } 12849 12850 /* Clear attention sources except link and error attentions */ 12851 if (lpfc_readl(phba->HCregaddr, &hc_copy)) { 12852 spin_unlock(&phba->hbalock); 12853 return IRQ_HANDLED; 12854 } 12855 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA 12856 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), 12857 phba->HCregaddr); 12858 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 12859 writel(hc_copy, phba->HCregaddr); 12860 readl(phba->HAregaddr); /* flush */ 12861 spin_unlock(&phba->hbalock); 12862 12863 /* 12864 * Invokes slow-path host attention interrupt handling as appropriate. 12865 */ 12866 12867 /* status of events with mailbox and link attention */ 12868 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); 12869 12870 /* status of events with ELS ring */ 12871 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 12872 status2 >>= (4*LPFC_ELS_RING); 12873 12874 if (status1 || (status2 & HA_RXMASK)) 12875 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); 12876 else 12877 sp_irq_rc = IRQ_NONE; 12878 12879 /* 12880 * Invoke fast-path host attention interrupt handling as appropriate. 12881 */ 12882 12883 /* status of events with FCP ring */ 12884 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 12885 status1 >>= (4*LPFC_FCP_RING); 12886 12887 /* status of events with extra ring */ 12888 if (phba->cfg_multi_ring_support == 2) { 12889 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 12890 status2 >>= (4*LPFC_EXTRA_RING); 12891 } else 12892 status2 = 0; 12893 12894 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 12895 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); 12896 else 12897 fp_irq_rc = IRQ_NONE; 12898 12899 /* Return device-level interrupt handling status */ 12900 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 12901 } /* lpfc_sli_intr_handler */ 12902 12903 /** 12904 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event 12905 * @phba: pointer to lpfc hba data structure. 12906 * 12907 * This routine is invoked by the worker thread to process all the pending 12908 * SLI4 els abort xri events. 
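 *
 * Each queued event is removed from the work queue under hbalock, and the
 * lock is dropped before lpfc_sli4_els_xri_aborted() is invoked, so the
 * abort handler itself runs without hbalock held.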
12909 **/ 12910 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) 12911 { 12912 struct lpfc_cq_event *cq_event; 12913 12914 /* First, declare the els xri abort event has been handled */ 12915 spin_lock_irq(&phba->hbalock); 12916 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; 12917 spin_unlock_irq(&phba->hbalock); 12918 /* Now, handle all the els xri abort events */ 12919 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { 12920 /* Get the first event from the head of the event queue */ 12921 spin_lock_irq(&phba->hbalock); 12922 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 12923 cq_event, struct lpfc_cq_event, list); 12924 spin_unlock_irq(&phba->hbalock); 12925 /* Notify aborted XRI for ELS work queue */ 12926 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 12927 /* Free the event processed back to the free pool */ 12928 lpfc_sli4_cq_event_release(phba, cq_event); 12929 } 12930 } 12931 12932 /** 12933 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn 12934 * @phba: pointer to lpfc hba data structure 12935 * @pIocbIn: pointer to the rspiocbq 12936 * @pIocbOut: pointer to the cmdiocbq 12937 * @wcqe: pointer to the complete wcqe 12938 * 12939 * This routine transfers the fields of a command iocbq to a response iocbq 12940 * by copying all the IOCB fields from command iocbq and transferring the 12941 * completion status information from the complete wcqe. 12942 **/ 12943 static void 12944 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, 12945 struct lpfc_iocbq *pIocbIn, 12946 struct lpfc_iocbq *pIocbOut, 12947 struct lpfc_wcqe_complete *wcqe) 12948 { 12949 int numBdes, i; 12950 unsigned long iflags; 12951 uint32_t status, max_response; 12952 struct lpfc_dmabuf *dmabuf; 12953 struct ulp_bde64 *bpl, bde; 12954 size_t offset = offsetof(struct lpfc_iocbq, iocb); 12955 12956 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, 12957 sizeof(struct lpfc_iocbq) - offset); 12958 /* Map WCQE parameters into irspiocb parameters */ 12959 status = bf_get(lpfc_wcqe_c_status, wcqe); 12960 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK); 12961 if (pIocbOut->iocb_flag & LPFC_IO_FCP) 12962 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 12963 pIocbIn->iocb.un.fcpi.fcpi_parm = 12964 pIocbOut->iocb.un.fcpi.fcpi_parm - 12965 wcqe->total_data_placed; 12966 else 12967 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 12968 else { 12969 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 12970 switch (pIocbOut->iocb.ulpCommand) { 12971 case CMD_ELS_REQUEST64_CR: 12972 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 12973 bpl = (struct ulp_bde64 *)dmabuf->virt; 12974 bde.tus.w = le32_to_cpu(bpl[1].tus.w); 12975 max_response = bde.tus.f.bdeSize; 12976 break; 12977 case CMD_GEN_REQUEST64_CR: 12978 max_response = 0; 12979 if (!pIocbOut->context3) 12980 break; 12981 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/ 12982 sizeof(struct ulp_bde64); 12983 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 12984 bpl = (struct ulp_bde64 *)dmabuf->virt; 12985 for (i = 0; i < numBdes; i++) { 12986 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 12987 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 12988 max_response += bde.tus.f.bdeSize; 12989 } 12990 break; 12991 default: 12992 max_response = wcqe->total_data_placed; 12993 break; 12994 } 12995 if (max_response < wcqe->total_data_placed) 12996 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response; 12997 else 12998 pIocbIn->iocb.un.genreq64.bdl.bdeSize = 12999 wcqe->total_data_placed; 13000 } 
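	/*
	 * At this point the pseudo response carries the WCQE status and
	 * byte counts; the remainder of this routine maps BlockGuard (DIF)
	 * errors and the exchange-busy (XB) indication into the response.
	 */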
13001 
13002 	/* Convert BG errors for completion status */
13003 	if (status == CQE_STATUS_DI_ERROR) {
13004 		pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
13005 
13006 		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
13007 			pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
13008 		else
13009 			pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
13010 
13011 		pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
13012 		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
13013 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13014 				BGS_GUARD_ERR_MASK;
13015 		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
13016 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13017 				BGS_APPTAG_ERR_MASK;
13018 		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
13019 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13020 				BGS_REFTAG_ERR_MASK;
13021 
13022 		/* Check to see if there was any good data before the error */
13023 		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
13024 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13025 				BGS_HI_WATER_MARK_PRESENT_MASK;
13026 			pIocbIn->iocb.unsli3.sli3_bg.bghm =
13027 				wcqe->total_data_placed;
13028 		}
13029 
13030 		/*
13031 		 * Set ALL the error bits to indicate we don't know what
13032 		 * type of error it is.
13033 		 */
13034 		if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
13035 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13036 				(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
13037 				 BGS_GUARD_ERR_MASK);
13038 	}
13039 
13040 	/* Pick up HBA exchange busy condition */
13041 	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
13042 		spin_lock_irqsave(&phba->hbalock, iflags);
13043 		pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
13044 		spin_unlock_irqrestore(&phba->hbalock, iflags);
13045 	}
13046 }
13047 
13048 /**
13049 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
13050 * @phba: Pointer to HBA context object.
13051 * @irspiocbq: Pointer to the response IOCBQ carrying the ELS WCQE.
13052 *
13053 * This routine handles an ELS work-queue completion event and constructs
13054 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
13055 * discovery engine to handle.
13056 *
13057 * Return: Pointer to the response IOCBQ, NULL otherwise.
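*
* The command IOCB is looked up by the WCQE request tag and put back on
* the ELS txcmplq; the WCQE completion status is then copied into
* @irspiocbq, which doubles as the pseudo response IOCB.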
13058 **/ 13059 static struct lpfc_iocbq * 13060 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, 13061 struct lpfc_iocbq *irspiocbq) 13062 { 13063 struct lpfc_sli_ring *pring; 13064 struct lpfc_iocbq *cmdiocbq; 13065 struct lpfc_wcqe_complete *wcqe; 13066 unsigned long iflags; 13067 13068 pring = lpfc_phba_elsring(phba); 13069 if (unlikely(!pring)) 13070 return NULL; 13071 13072 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; 13073 spin_lock_irqsave(&pring->ring_lock, iflags); 13074 pring->stats.iocb_event++; 13075 /* Look up the ELS command IOCB and create pseudo response IOCB */ 13076 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 13077 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13078 if (unlikely(!cmdiocbq)) { 13079 spin_unlock_irqrestore(&pring->ring_lock, iflags); 13080 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13081 "0386 ELS complete with no corresponding " 13082 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n", 13083 wcqe->word0, wcqe->total_data_placed, 13084 wcqe->parameter, wcqe->word3); 13085 lpfc_sli_release_iocbq(phba, irspiocbq); 13086 return NULL; 13087 } 13088 13089 /* Put the iocb back on the txcmplq */ 13090 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq); 13091 spin_unlock_irqrestore(&pring->ring_lock, iflags); 13092 13093 /* Fake the irspiocbq and copy necessary response information */ 13094 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe); 13095 13096 return irspiocbq; 13097 } 13098 13099 inline struct lpfc_cq_event * 13100 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size) 13101 { 13102 struct lpfc_cq_event *cq_event; 13103 13104 /* Allocate a new internal CQ_EVENT entry */ 13105 cq_event = lpfc_sli4_cq_event_alloc(phba); 13106 if (!cq_event) { 13107 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13108 "0602 Failed to alloc CQ_EVENT entry\n"); 13109 return NULL; 13110 } 13111 13112 /* Move the CQE into the event */ 13113 memcpy(&cq_event->cqe, entry, size); 13114 return cq_event; 13115 } 13116 13117 /** 13118 * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event 13119 * @phba: Pointer to HBA context object. 13120 * @cqe: Pointer to mailbox completion queue entry. 13121 * 13122 * This routine process a mailbox completion queue entry with asynchrous 13123 * event. 13124 * 13125 * Return: true if work posted to worker thread, otherwise false. 13126 **/ 13127 static bool 13128 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 13129 { 13130 struct lpfc_cq_event *cq_event; 13131 unsigned long iflags; 13132 13133 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13134 "0392 Async Event: word0:x%x, word1:x%x, " 13135 "word2:x%x, word3:x%x\n", mcqe->word0, 13136 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); 13137 13138 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe)); 13139 if (!cq_event) 13140 return false; 13141 spin_lock_irqsave(&phba->hbalock, iflags); 13142 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); 13143 /* Set the async event flag */ 13144 phba->hba_flag |= ASYNC_EVENT; 13145 spin_unlock_irqrestore(&phba->hbalock, iflags); 13146 13147 return true; 13148 } 13149 13150 /** 13151 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event 13152 * @phba: Pointer to HBA context object. 13153 * @cqe: Pointer to mailbox completion queue entry. 13154 * 13155 * This routine process a mailbox completion queue entry with mailbox 13156 * completion event. 13157 * 13158 * Return: true if work posted to worker thread, otherwise false. 
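*
* On completion the mailbox data is copied back into the issuing
* context, the active-mailbox state and posting token are cleared, and
* the worker thread is woken to post the next pending mailbox command.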
13159 **/
13160 static bool
13161 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13162 {
13163 	uint32_t mcqe_status;
13164 	MAILBOX_t *mbox, *pmbox;
13165 	struct lpfc_mqe *mqe;
13166 	struct lpfc_vport *vport;
13167 	struct lpfc_nodelist *ndlp;
13168 	struct lpfc_dmabuf *mp;
13169 	unsigned long iflags;
13170 	LPFC_MBOXQ_t *pmb;
13171 	bool workposted = false;
13172 	int rc;
13173 
13174 	/* Not a mailbox completion MCQE; bail out via the consumed check */
13175 	if (!bf_get(lpfc_trailer_completed, mcqe))
13176 		goto out_no_mqe_complete;
13177 
13178 	/* Get the reference to the active mbox command */
13179 	spin_lock_irqsave(&phba->hbalock, iflags);
13180 	pmb = phba->sli.mbox_active;
13181 	if (unlikely(!pmb)) {
13182 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13183 				"1832 No pending MBOX command to handle\n");
13184 		spin_unlock_irqrestore(&phba->hbalock, iflags);
13185 		goto out_no_mqe_complete;
13186 	}
13187 	spin_unlock_irqrestore(&phba->hbalock, iflags);
13188 	mqe = &pmb->u.mqe;
13189 	pmbox = (MAILBOX_t *)&pmb->u.mqe;
13190 	mbox = phba->mbox;
13191 	vport = pmb->vport;
13192 
13193 	/* Reset heartbeat timer */
13194 	phba->last_completion_time = jiffies;
13195 	del_timer(&phba->sli.mbox_tmo);
13196 
13197 	/* Move mbox data to caller's mailbox region, do endian swapping */
13198 	if (pmb->mbox_cmpl && mbox)
13199 		lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
13200 
13201 	/*
13202 	 * For mcqe errors, conditionally move a modified error code to
13203 	 * the mbox so that the error will not be missed.
13204 	 */
13205 	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
13206 	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
13207 		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
13208 			bf_set(lpfc_mqe_status, mqe,
13209 			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
13210 	}
13211 	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13212 		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13213 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
13214 				      "MBOX dflt rpi: status:x%x rpi:x%x",
13215 				      mcqe_status,
13216 				      pmbox->un.varWords[0], 0);
13217 		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
13218 			mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
13219 			ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
13220 			/* Reg_LOGIN of dflt RPI was successful. Now let's get
13221 			 * rid of the RPI using the same mbox buffer.
13222 */ 13223 lpfc_unreg_login(phba, vport->vpi, 13224 pmbox->un.varWords[0], pmb); 13225 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 13226 pmb->ctx_buf = mp; 13227 pmb->ctx_ndlp = ndlp; 13228 pmb->vport = vport; 13229 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 13230 if (rc != MBX_BUSY) 13231 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 13232 LOG_SLI, "0385 rc should " 13233 "have been MBX_BUSY\n"); 13234 if (rc != MBX_NOT_FINISHED) 13235 goto send_current_mbox; 13236 } 13237 } 13238 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 13239 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 13240 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 13241 13242 /* There is mailbox completion work to do */ 13243 spin_lock_irqsave(&phba->hbalock, iflags); 13244 __lpfc_mbox_cmpl_put(phba, pmb); 13245 phba->work_ha |= HA_MBATT; 13246 spin_unlock_irqrestore(&phba->hbalock, iflags); 13247 workposted = true; 13248 13249 send_current_mbox: 13250 spin_lock_irqsave(&phba->hbalock, iflags); 13251 /* Release the mailbox command posting token */ 13252 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 13253 /* Setting active mailbox pointer need to be in sync to flag clear */ 13254 phba->sli.mbox_active = NULL; 13255 spin_unlock_irqrestore(&phba->hbalock, iflags); 13256 /* Wake up worker thread to post the next pending mailbox command */ 13257 lpfc_worker_wake_up(phba); 13258 out_no_mqe_complete: 13259 if (bf_get(lpfc_trailer_consumed, mcqe)) 13260 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); 13261 return workposted; 13262 } 13263 13264 /** 13265 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry 13266 * @phba: Pointer to HBA context object. 13267 * @cqe: Pointer to mailbox completion queue entry. 13268 * 13269 * This routine process a mailbox completion queue entry, it invokes the 13270 * proper mailbox complete handling or asynchrous event handling routine 13271 * according to the MCQE's async bit. 13272 * 13273 * Return: true if work posted to worker thread, otherwise false. 13274 **/ 13275 static bool 13276 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13277 struct lpfc_cqe *cqe) 13278 { 13279 struct lpfc_mcqe mcqe; 13280 bool workposted; 13281 13282 cq->CQ_mbox++; 13283 13284 /* Copy the mailbox MCQE and convert endian order as needed */ 13285 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); 13286 13287 /* Invoke the proper event handling routine */ 13288 if (!bf_get(lpfc_trailer_async, &mcqe)) 13289 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe); 13290 else 13291 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe); 13292 return workposted; 13293 } 13294 13295 /** 13296 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event 13297 * @phba: Pointer to HBA context object. 13298 * @cq: Pointer to associated CQ 13299 * @wcqe: Pointer to work-queue completion queue entry. 13300 * 13301 * This routine handles an ELS work-queue completion event. 13302 * 13303 * Return: true if work posted to worker thread, otherwise false. 
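 *
 * The WCQE is stashed in a newly allocated iocbq and queued on the
 * slow-path event list (sp_queue_event); the worker thread later turns
 * it into a pseudo response IOCB for the discovery engine.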
13304 **/ 13305 static bool 13306 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13307 struct lpfc_wcqe_complete *wcqe) 13308 { 13309 struct lpfc_iocbq *irspiocbq; 13310 unsigned long iflags; 13311 struct lpfc_sli_ring *pring = cq->pring; 13312 int txq_cnt = 0; 13313 int txcmplq_cnt = 0; 13314 int fcp_txcmplq_cnt = 0; 13315 13316 /* Check for response status */ 13317 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 13318 /* Log the error status */ 13319 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13320 "0357 ELS CQE error: status=x%x: " 13321 "CQE: %08x %08x %08x %08x\n", 13322 bf_get(lpfc_wcqe_c_status, wcqe), 13323 wcqe->word0, wcqe->total_data_placed, 13324 wcqe->parameter, wcqe->word3); 13325 } 13326 13327 /* Get an irspiocbq for later ELS response processing use */ 13328 irspiocbq = lpfc_sli_get_iocbq(phba); 13329 if (!irspiocbq) { 13330 if (!list_empty(&pring->txq)) 13331 txq_cnt++; 13332 if (!list_empty(&pring->txcmplq)) 13333 txcmplq_cnt++; 13334 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13335 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " 13336 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n", 13337 txq_cnt, phba->iocb_cnt, 13338 fcp_txcmplq_cnt, 13339 txcmplq_cnt); 13340 return false; 13341 } 13342 13343 /* Save off the slow-path queue event for work thread to process */ 13344 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe)); 13345 spin_lock_irqsave(&phba->hbalock, iflags); 13346 list_add_tail(&irspiocbq->cq_event.list, 13347 &phba->sli4_hba.sp_queue_event); 13348 phba->hba_flag |= HBA_SP_QUEUE_EVT; 13349 spin_unlock_irqrestore(&phba->hbalock, iflags); 13350 13351 return true; 13352 } 13353 13354 /** 13355 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event 13356 * @phba: Pointer to HBA context object. 13357 * @wcqe: Pointer to work-queue completion queue entry. 13358 * 13359 * This routine handles slow-path WQ entry consumed event by invoking the 13360 * proper WQ release routine to the slow-path WQ. 13361 **/ 13362 static void 13363 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, 13364 struct lpfc_wcqe_release *wcqe) 13365 { 13366 /* sanity check on queue memory */ 13367 if (unlikely(!phba->sli4_hba.els_wq)) 13368 return; 13369 /* Check for the slow-path ELS work queue */ 13370 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) 13371 lpfc_sli4_wq_release(phba->sli4_hba.els_wq, 13372 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 13373 else 13374 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13375 "2579 Slow-path wqe consume event carries " 13376 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n", 13377 bf_get(lpfc_wcqe_r_wqe_index, wcqe), 13378 phba->sli4_hba.els_wq->queue_id); 13379 } 13380 13381 /** 13382 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event 13383 * @phba: Pointer to HBA context object. 13384 * @cq: Pointer to a WQ completion queue. 13385 * @wcqe: Pointer to work-queue completion queue entry. 13386 * 13387 * This routine handles an XRI abort event. 13388 * 13389 * Return: true if work posted to worker thread, otherwise false. 
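 *
 * FCP and NVME XRI aborts are handled directly in this context; ELS and
 * NVME LS aborts are deferred to the worker thread via the els xri
 * abort work queue.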
13390 **/ 13391 static bool 13392 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, 13393 struct lpfc_queue *cq, 13394 struct sli4_wcqe_xri_aborted *wcqe) 13395 { 13396 bool workposted = false; 13397 struct lpfc_cq_event *cq_event; 13398 unsigned long iflags; 13399 13400 switch (cq->subtype) { 13401 case LPFC_FCP: 13402 lpfc_sli4_fcp_xri_aborted(phba, wcqe, cq->hdwq); 13403 workposted = false; 13404 break; 13405 case LPFC_NVME_LS: /* NVME LS uses ELS resources */ 13406 case LPFC_ELS: 13407 cq_event = lpfc_cq_event_setup( 13408 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); 13409 if (!cq_event) 13410 return false; 13411 cq_event->hdwq = cq->hdwq; 13412 spin_lock_irqsave(&phba->hbalock, iflags); 13413 list_add_tail(&cq_event->list, 13414 &phba->sli4_hba.sp_els_xri_aborted_work_queue); 13415 /* Set the els xri abort event flag */ 13416 phba->hba_flag |= ELS_XRI_ABORT_EVENT; 13417 spin_unlock_irqrestore(&phba->hbalock, iflags); 13418 workposted = true; 13419 break; 13420 case LPFC_NVME: 13421 /* Notify aborted XRI for NVME work queue */ 13422 if (phba->nvmet_support) 13423 lpfc_sli4_nvmet_xri_aborted(phba, wcqe); 13424 else 13425 lpfc_sli4_nvme_xri_aborted(phba, wcqe, cq->hdwq); 13426 13427 workposted = false; 13428 break; 13429 default: 13430 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13431 "0603 Invalid CQ subtype %d: " 13432 "%08x %08x %08x %08x\n", 13433 cq->subtype, wcqe->word0, wcqe->parameter, 13434 wcqe->word2, wcqe->word3); 13435 workposted = false; 13436 break; 13437 } 13438 return workposted; 13439 } 13440 13441 #define FC_RCTL_MDS_DIAGS 0xF4 13442 13443 /** 13444 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry 13445 * @phba: Pointer to HBA context object. 13446 * @rcqe: Pointer to receive-queue completion queue entry. 13447 * 13448 * This routine process a receive-queue completion queue entry. 13449 * 13450 * Return: true if work posted to worker thread, otherwise false. 
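 *
 * Received frames are pulled from the header/data RQ pair and queued on
 * sp_queue_event for the worker thread, except MDS diagnostic and
 * unsolicited data frames, which are handed to the MDS loopback handler
 * immediately.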
13451 **/ 13452 static bool 13453 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) 13454 { 13455 bool workposted = false; 13456 struct fc_frame_header *fc_hdr; 13457 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 13458 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 13459 struct lpfc_nvmet_tgtport *tgtp; 13460 struct hbq_dmabuf *dma_buf; 13461 uint32_t status, rq_id; 13462 unsigned long iflags; 13463 13464 /* sanity check on queue memory */ 13465 if (unlikely(!hrq) || unlikely(!drq)) 13466 return workposted; 13467 13468 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 13469 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 13470 else 13471 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); 13472 if (rq_id != hrq->queue_id) 13473 goto out; 13474 13475 status = bf_get(lpfc_rcqe_status, rcqe); 13476 switch (status) { 13477 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 13478 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13479 "2537 Receive Frame Truncated!!\n"); 13480 /* fall through */ 13481 case FC_STATUS_RQ_SUCCESS: 13482 spin_lock_irqsave(&phba->hbalock, iflags); 13483 lpfc_sli4_rq_release(hrq, drq); 13484 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 13485 if (!dma_buf) { 13486 hrq->RQ_no_buf_found++; 13487 spin_unlock_irqrestore(&phba->hbalock, iflags); 13488 goto out; 13489 } 13490 hrq->RQ_rcv_buf++; 13491 hrq->RQ_buf_posted--; 13492 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); 13493 13494 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; 13495 13496 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS || 13497 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { 13498 spin_unlock_irqrestore(&phba->hbalock, iflags); 13499 /* Handle MDS Loopback frames */ 13500 lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf); 13501 break; 13502 } 13503 13504 /* save off the frame for the work thread to process */ 13505 list_add_tail(&dma_buf->cq_event.list, 13506 &phba->sli4_hba.sp_queue_event); 13507 /* Frame received */ 13508 phba->hba_flag |= HBA_SP_QUEUE_EVT; 13509 spin_unlock_irqrestore(&phba->hbalock, iflags); 13510 workposted = true; 13511 break; 13512 case FC_STATUS_INSUFF_BUF_FRM_DISC: 13513 if (phba->nvmet_support) { 13514 tgtp = phba->targetport->private; 13515 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, 13516 "6402 RQE Error x%x, posted %d err_cnt " 13517 "%d: %x %x %x\n", 13518 status, hrq->RQ_buf_posted, 13519 hrq->RQ_no_posted_buf, 13520 atomic_read(&tgtp->rcv_fcp_cmd_in), 13521 atomic_read(&tgtp->rcv_fcp_cmd_out), 13522 atomic_read(&tgtp->xmt_fcp_release)); 13523 } 13524 /* fallthrough */ 13525 13526 case FC_STATUS_INSUFF_BUF_NEED_BUF: 13527 hrq->RQ_no_posted_buf++; 13528 /* Post more buffers if possible */ 13529 spin_lock_irqsave(&phba->hbalock, iflags); 13530 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; 13531 spin_unlock_irqrestore(&phba->hbalock, iflags); 13532 workposted = true; 13533 break; 13534 } 13535 out: 13536 return workposted; 13537 } 13538 13539 /** 13540 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry 13541 * @phba: Pointer to HBA context object. 13542 * @cq: Pointer to the completion queue. 13543 * @cqe: Pointer to a completion queue entry. 13544 * 13545 * This routine process a slow-path work-queue or receive queue completion queue 13546 * entry. 13547 * 13548 * Return: true if work posted to worker thread, otherwise false. 
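 *
 * The CQE is first copied into a local, endian-corrected copy; dispatch
 * is then keyed off its CQE code (WQ completion, WQ release, XRI abort,
 * or RQ event).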
13549 **/ 13550 static bool 13551 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13552 struct lpfc_cqe *cqe) 13553 { 13554 struct lpfc_cqe cqevt; 13555 bool workposted = false; 13556 13557 /* Copy the work queue CQE and convert endian order if needed */ 13558 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe)); 13559 13560 /* Check and process for different type of WCQE and dispatch */ 13561 switch (bf_get(lpfc_cqe_code, &cqevt)) { 13562 case CQE_CODE_COMPL_WQE: 13563 /* Process the WQ/RQ complete event */ 13564 phba->last_completion_time = jiffies; 13565 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq, 13566 (struct lpfc_wcqe_complete *)&cqevt); 13567 break; 13568 case CQE_CODE_RELEASE_WQE: 13569 /* Process the WQ release event */ 13570 lpfc_sli4_sp_handle_rel_wcqe(phba, 13571 (struct lpfc_wcqe_release *)&cqevt); 13572 break; 13573 case CQE_CODE_XRI_ABORTED: 13574 /* Process the WQ XRI abort event */ 13575 phba->last_completion_time = jiffies; 13576 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 13577 (struct sli4_wcqe_xri_aborted *)&cqevt); 13578 break; 13579 case CQE_CODE_RECEIVE: 13580 case CQE_CODE_RECEIVE_V1: 13581 /* Process the RQ event */ 13582 phba->last_completion_time = jiffies; 13583 workposted = lpfc_sli4_sp_handle_rcqe(phba, 13584 (struct lpfc_rcqe *)&cqevt); 13585 break; 13586 default: 13587 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13588 "0388 Not a valid WCQE code: x%x\n", 13589 bf_get(lpfc_cqe_code, &cqevt)); 13590 break; 13591 } 13592 return workposted; 13593 } 13594 13595 /** 13596 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry 13597 * @phba: Pointer to HBA context object. 13598 * @eqe: Pointer to fast-path event queue entry. 13599 * 13600 * This routine process a event queue entry from the slow-path event queue. 13601 * It will check the MajorCode and MinorCode to determine this is for a 13602 * completion event on a completion queue, if not, an error shall be logged 13603 * and just return. Otherwise, it will get to the corresponding completion 13604 * queue and process all the entries on that completion queue, rearm the 13605 * completion queue, and then return. 13606 * 13607 **/ 13608 static void 13609 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 13610 struct lpfc_queue *speq) 13611 { 13612 struct lpfc_queue *cq = NULL, *childq; 13613 uint16_t cqid; 13614 13615 /* Get the reference to the corresponding CQ */ 13616 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 13617 13618 list_for_each_entry(childq, &speq->child_list, list) { 13619 if (childq->queue_id == cqid) { 13620 cq = childq; 13621 break; 13622 } 13623 } 13624 if (unlikely(!cq)) { 13625 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 13626 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13627 "0365 Slow-path CQ identifier " 13628 "(%d) does not exist\n", cqid); 13629 return; 13630 } 13631 13632 /* Save EQ associated with this CQ */ 13633 cq->assoc_qp = speq; 13634 13635 if (!queue_work_on(cq->chann, phba->wq, &cq->spwork)) 13636 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13637 "0390 Cannot schedule soft IRQ " 13638 "for CQ eqcqid=%d, cqid=%d on CPU %d\n", 13639 cqid, cq->queue_id, smp_processor_id()); 13640 } 13641 13642 /** 13643 * __lpfc_sli4_process_cq - Process elements of a CQ 13644 * @phba: Pointer to HBA context object. 
13645 * @cq: Pointer to CQ to be processed 13646 * @handler: Routine to process each cqe 13647 * @delay: Pointer to usdelay to set in case of rescheduling of the handler 13648 * 13649 * This routine processes completion queue entries in a CQ. While a valid 13650 * queue element is found, the handler is called. During processing checks 13651 * are made for periodic doorbell writes to let the hardware know of 13652 * element consumption. 13653 * 13654 * If the max limit on cqes to process is hit, or there are no more valid 13655 * entries, the loop stops. If we processed a sufficient number of elements, 13656 * meaning there is sufficient load, rather than rearming and generating 13657 * another interrupt, a cq rescheduling delay will be set. A delay of 0 13658 * indicates no rescheduling. 13659 * 13660 * Returns True if work scheduled, False otherwise. 13661 **/ 13662 static bool 13663 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq, 13664 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *, 13665 struct lpfc_cqe *), unsigned long *delay) 13666 { 13667 struct lpfc_cqe *cqe; 13668 bool workposted = false; 13669 int count = 0, consumed = 0; 13670 bool arm = true; 13671 13672 /* default - no reschedule */ 13673 *delay = 0; 13674 13675 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0) 13676 goto rearm_and_exit; 13677 13678 /* Process all the entries to the CQ */ 13679 cqe = lpfc_sli4_cq_get(cq); 13680 while (cqe) { 13681 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS) && defined(BUILD_NVME) 13682 if (phba->ktime_on) 13683 cq->isr_timestamp = ktime_get_ns(); 13684 else 13685 cq->isr_timestamp = 0; 13686 #endif 13687 workposted |= handler(phba, cq, cqe); 13688 __lpfc_sli4_consume_cqe(phba, cq, cqe); 13689 13690 consumed++; 13691 if (!(++count % cq->max_proc_limit)) 13692 break; 13693 13694 if (!(count % cq->notify_interval)) { 13695 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, 13696 LPFC_QUEUE_NOARM); 13697 consumed = 0; 13698 } 13699 13700 cqe = lpfc_sli4_cq_get(cq); 13701 } 13702 if (count >= phba->cfg_cq_poll_threshold) { 13703 *delay = 1; 13704 arm = false; 13705 } 13706 13707 /* Track the max number of CQEs processed in 1 EQ */ 13708 if (count > cq->CQ_max_cqe) 13709 cq->CQ_max_cqe = count; 13710 13711 cq->assoc_qp->EQ_cqe_cnt += count; 13712 13713 /* Catch the no cq entry condition */ 13714 if (unlikely(count == 0)) 13715 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13716 "0369 No entry from completion queue " 13717 "qid=%d\n", cq->queue_id); 13718 13719 cq->queue_claimed = 0; 13720 13721 rearm_and_exit: 13722 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, 13723 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM); 13724 13725 return workposted; 13726 } 13727 13728 /** 13729 * lpfc_sli4_sp_process_cq - Process a slow-path event queue entry 13730 * @cq: pointer to CQ to process 13731 * 13732 * This routine calls the cq processing routine with a handler specific 13733 * to the type of queue bound to it. 13734 * 13735 * The CQ routine returns two values: the first is the calling status, 13736 * which indicates whether work was queued to the background discovery 13737 * thread. If true, the routine should wakeup the discovery thread; 13738 * the second is the delay parameter. If non-zero, rather than rearming 13739 * the CQ and yet another interrupt, the CQ handler should be queued so 13740 * that it is processed in a subsequent polling action. The value of 13741 * the delay indicates when to reschedule it. 
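 *
 * When the processing loop asks for a delay, the CQ is re-queued on the
 * same CPU through its delayed work item (sched_spwork) instead of
 * being rearmed immediately.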
13742 **/ 13743 static void 13744 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq) 13745 { 13746 struct lpfc_hba *phba = cq->phba; 13747 unsigned long delay; 13748 bool workposted = false; 13749 13750 /* Process and rearm the CQ */ 13751 switch (cq->type) { 13752 case LPFC_MCQ: 13753 workposted |= __lpfc_sli4_process_cq(phba, cq, 13754 lpfc_sli4_sp_handle_mcqe, 13755 &delay); 13756 break; 13757 case LPFC_WCQ: 13758 if (cq->subtype == LPFC_FCP || cq->subtype == LPFC_NVME) 13759 workposted |= __lpfc_sli4_process_cq(phba, cq, 13760 lpfc_sli4_fp_handle_cqe, 13761 &delay); 13762 else 13763 workposted |= __lpfc_sli4_process_cq(phba, cq, 13764 lpfc_sli4_sp_handle_cqe, 13765 &delay); 13766 break; 13767 default: 13768 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13769 "0370 Invalid completion queue type (%d)\n", 13770 cq->type); 13771 return; 13772 } 13773 13774 if (delay) { 13775 if (!queue_delayed_work_on(cq->chann, phba->wq, 13776 &cq->sched_spwork, delay)) 13777 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13778 "0394 Cannot schedule soft IRQ " 13779 "for cqid=%d on CPU %d\n", 13780 cq->queue_id, cq->chann); 13781 } 13782 13783 /* wake up worker thread if there are works to be done */ 13784 if (workposted) 13785 lpfc_worker_wake_up(phba); 13786 } 13787 13788 /** 13789 * lpfc_sli4_sp_process_cq - slow-path work handler when started by 13790 * interrupt 13791 * @work: pointer to work element 13792 * 13793 * translates from the work handler and calls the slow-path handler. 13794 **/ 13795 static void 13796 lpfc_sli4_sp_process_cq(struct work_struct *work) 13797 { 13798 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork); 13799 13800 __lpfc_sli4_sp_process_cq(cq); 13801 } 13802 13803 /** 13804 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer 13805 * @work: pointer to work element 13806 * 13807 * translates from the work handler and calls the slow-path handler. 13808 **/ 13809 static void 13810 lpfc_sli4_dly_sp_process_cq(struct work_struct *work) 13811 { 13812 struct lpfc_queue *cq = container_of(to_delayed_work(work), 13813 struct lpfc_queue, sched_spwork); 13814 13815 __lpfc_sli4_sp_process_cq(cq); 13816 } 13817 13818 /** 13819 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry 13820 * @phba: Pointer to HBA context object. 13821 * @cq: Pointer to associated CQ 13822 * @wcqe: Pointer to work-queue completion queue entry. 13823 * 13824 * This routine process a fast-path work queue completion entry from fast-path 13825 * event queue for FCP command response completion. 13826 **/ 13827 static void 13828 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13829 struct lpfc_wcqe_complete *wcqe) 13830 { 13831 struct lpfc_sli_ring *pring = cq->pring; 13832 struct lpfc_iocbq *cmdiocbq; 13833 struct lpfc_iocbq irspiocbq; 13834 unsigned long iflags; 13835 13836 /* Check for response status */ 13837 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 13838 /* If resource errors reported from HBA, reduce queue 13839 * depth of the SCSI device. 
13840 */ 13841 if (((bf_get(lpfc_wcqe_c_status, wcqe) == 13842 IOSTAT_LOCAL_REJECT)) && 13843 ((wcqe->parameter & IOERR_PARAM_MASK) == 13844 IOERR_NO_RESOURCES)) 13845 phba->lpfc_rampdown_queue_depth(phba); 13846 13847 /* Log the error status */ 13848 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13849 "0373 FCP CQE error: status=x%x: " 13850 "CQE: %08x %08x %08x %08x\n", 13851 bf_get(lpfc_wcqe_c_status, wcqe), 13852 wcqe->word0, wcqe->total_data_placed, 13853 wcqe->parameter, wcqe->word3); 13854 } 13855 13856 /* Look up the FCP command IOCB and create pseudo response IOCB */ 13857 spin_lock_irqsave(&pring->ring_lock, iflags); 13858 pring->stats.iocb_event++; 13859 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 13860 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13861 spin_unlock_irqrestore(&pring->ring_lock, iflags); 13862 if (unlikely(!cmdiocbq)) { 13863 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13864 "0374 FCP complete with no corresponding " 13865 "cmdiocb: iotag (%d)\n", 13866 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13867 return; 13868 } 13869 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 13870 cmdiocbq->isr_timestamp = cq->isr_timestamp; 13871 #endif 13872 if (cmdiocbq->iocb_cmpl == NULL) { 13873 if (cmdiocbq->wqe_cmpl) { 13874 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 13875 spin_lock_irqsave(&phba->hbalock, iflags); 13876 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 13877 spin_unlock_irqrestore(&phba->hbalock, iflags); 13878 } 13879 13880 /* Pass the cmd_iocb and the wcqe to the upper layer */ 13881 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe); 13882 return; 13883 } 13884 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13885 "0375 FCP cmdiocb not callback function " 13886 "iotag: (%d)\n", 13887 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13888 return; 13889 } 13890 13891 /* Fake the irspiocb and copy necessary response information */ 13892 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); 13893 13894 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 13895 spin_lock_irqsave(&phba->hbalock, iflags); 13896 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 13897 spin_unlock_irqrestore(&phba->hbalock, iflags); 13898 } 13899 13900 /* Pass the cmd_iocb and the rsp state to the upper layer */ 13901 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); 13902 } 13903 13904 /** 13905 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event 13906 * @phba: Pointer to HBA context object. 13907 * @cq: Pointer to completion queue. 13908 * @wcqe: Pointer to work-queue completion queue entry. 13909 * 13910 * This routine handles an fast-path WQ entry consumed event by invoking the 13911 * proper WQ release routine to the slow-path WQ. 
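 *
 * The release CQE reports the id and index of the last WQE the port has
 * consumed; lpfc_sli4_wq_release() uses them to advance the driver's view
 * of the matching child WQ, and a WQ flagged HBA_NVMET_WQFULL has its
 * backlog drained via lpfc_nvmet_wqfull_process().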
13912 **/ 13913 static void 13914 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13915 struct lpfc_wcqe_release *wcqe) 13916 { 13917 struct lpfc_queue *childwq; 13918 bool wqid_matched = false; 13919 uint16_t hba_wqid; 13920 13921 /* Check for fast-path FCP work queue release */ 13922 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); 13923 list_for_each_entry(childwq, &cq->child_list, list) { 13924 if (childwq->queue_id == hba_wqid) { 13925 lpfc_sli4_wq_release(childwq, 13926 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 13927 if (childwq->q_flag & HBA_NVMET_WQFULL) 13928 lpfc_nvmet_wqfull_process(phba, childwq); 13929 wqid_matched = true; 13930 break; 13931 } 13932 } 13933 /* Report warning log message if no match found */ 13934 if (wqid_matched != true) 13935 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13936 "2580 Fast-path wqe consume event carries " 13937 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid); 13938 } 13939 13940 /** 13941 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry 13942 * @phba: Pointer to HBA context object. 13943 * @rcqe: Pointer to receive-queue completion queue entry. 13944 * 13945 * This routine process a receive-queue completion queue entry. 13946 * 13947 * Return: true if work posted to worker thread, otherwise false. 13948 **/ 13949 static bool 13950 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13951 struct lpfc_rcqe *rcqe) 13952 { 13953 bool workposted = false; 13954 struct lpfc_queue *hrq; 13955 struct lpfc_queue *drq; 13956 struct rqb_dmabuf *dma_buf; 13957 struct fc_frame_header *fc_hdr; 13958 struct lpfc_nvmet_tgtport *tgtp; 13959 uint32_t status, rq_id; 13960 unsigned long iflags; 13961 uint32_t fctl, idx; 13962 13963 if ((phba->nvmet_support == 0) || 13964 (phba->sli4_hba.nvmet_cqset == NULL)) 13965 return workposted; 13966 13967 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id; 13968 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx]; 13969 drq = phba->sli4_hba.nvmet_mrq_data[idx]; 13970 13971 /* sanity check on queue memory */ 13972 if (unlikely(!hrq) || unlikely(!drq)) 13973 return workposted; 13974 13975 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 13976 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 13977 else 13978 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); 13979 13980 if ((phba->nvmet_support == 0) || 13981 (rq_id != hrq->queue_id)) 13982 return workposted; 13983 13984 status = bf_get(lpfc_rcqe_status, rcqe); 13985 switch (status) { 13986 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 13987 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13988 "6126 Receive Frame Truncated!!\n"); 13989 /* fall through */ 13990 case FC_STATUS_RQ_SUCCESS: 13991 spin_lock_irqsave(&phba->hbalock, iflags); 13992 lpfc_sli4_rq_release(hrq, drq); 13993 dma_buf = lpfc_sli_rqbuf_get(phba, hrq); 13994 if (!dma_buf) { 13995 hrq->RQ_no_buf_found++; 13996 spin_unlock_irqrestore(&phba->hbalock, iflags); 13997 goto out; 13998 } 13999 spin_unlock_irqrestore(&phba->hbalock, iflags); 14000 hrq->RQ_rcv_buf++; 14001 hrq->RQ_buf_posted--; 14002 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; 14003 14004 /* Just some basic sanity checks on FCP Command frame */ 14005 fctl = (fc_hdr->fh_f_ctl[0] << 16 | 14006 fc_hdr->fh_f_ctl[1] << 8 | 14007 fc_hdr->fh_f_ctl[2]); 14008 if (((fctl & 14009 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) != 14010 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) || 14011 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */ 14012 goto drop; 14013 14014 if (fc_hdr->fh_type == 
FC_TYPE_FCP) { 14015 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe); 14016 lpfc_nvmet_unsol_fcp_event( 14017 phba, idx, dma_buf, 14018 cq->isr_timestamp); 14019 return false; 14020 } 14021 drop: 14022 lpfc_in_buf_free(phba, &dma_buf->dbuf); 14023 break; 14024 case FC_STATUS_INSUFF_BUF_FRM_DISC: 14025 if (phba->nvmet_support) { 14026 tgtp = phba->targetport->private; 14027 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, 14028 "6401 RQE Error x%x, posted %d err_cnt " 14029 "%d: %x %x %x\n", 14030 status, hrq->RQ_buf_posted, 14031 hrq->RQ_no_posted_buf, 14032 atomic_read(&tgtp->rcv_fcp_cmd_in), 14033 atomic_read(&tgtp->rcv_fcp_cmd_out), 14034 atomic_read(&tgtp->xmt_fcp_release)); 14035 } 14036 /* fallthrough */ 14037 14038 case FC_STATUS_INSUFF_BUF_NEED_BUF: 14039 hrq->RQ_no_posted_buf++; 14040 /* Post more buffers if possible */ 14041 break; 14042 } 14043 out: 14044 return workposted; 14045 } 14046 14047 /** 14048 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry 14049 * @phba: adapter with cq 14050 * @cq: Pointer to the completion queue. 14051 * @eqe: Pointer to fast-path completion queue entry. 14052 * 14053 * This routine process a fast-path work queue completion entry from fast-path 14054 * event queue for FCP command response completion. 14055 * 14056 * Return: true if work posted to worker thread, otherwise false. 14057 **/ 14058 static bool 14059 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 14060 struct lpfc_cqe *cqe) 14061 { 14062 struct lpfc_wcqe_release wcqe; 14063 bool workposted = false; 14064 14065 /* Copy the work queue CQE and convert endian order if needed */ 14066 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); 14067 14068 /* Check and process for different type of WCQE and dispatch */ 14069 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { 14070 case CQE_CODE_COMPL_WQE: 14071 case CQE_CODE_NVME_ERSP: 14072 cq->CQ_wq++; 14073 /* Process the WQ complete event */ 14074 phba->last_completion_time = jiffies; 14075 if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME)) 14076 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, 14077 (struct lpfc_wcqe_complete *)&wcqe); 14078 if (cq->subtype == LPFC_NVME_LS) 14079 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, 14080 (struct lpfc_wcqe_complete *)&wcqe); 14081 break; 14082 case CQE_CODE_RELEASE_WQE: 14083 cq->CQ_release_wqe++; 14084 /* Process the WQ release event */ 14085 lpfc_sli4_fp_handle_rel_wcqe(phba, cq, 14086 (struct lpfc_wcqe_release *)&wcqe); 14087 break; 14088 case CQE_CODE_XRI_ABORTED: 14089 cq->CQ_xri_aborted++; 14090 /* Process the WQ XRI abort event */ 14091 phba->last_completion_time = jiffies; 14092 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 14093 (struct sli4_wcqe_xri_aborted *)&wcqe); 14094 break; 14095 case CQE_CODE_RECEIVE_V1: 14096 case CQE_CODE_RECEIVE: 14097 phba->last_completion_time = jiffies; 14098 if (cq->subtype == LPFC_NVMET) { 14099 workposted = lpfc_sli4_nvmet_handle_rcqe( 14100 phba, cq, (struct lpfc_rcqe *)&wcqe); 14101 } 14102 break; 14103 default: 14104 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14105 "0144 Not a valid CQE code: x%x\n", 14106 bf_get(lpfc_wcqe_c_code, &wcqe)); 14107 break; 14108 } 14109 return workposted; 14110 } 14111 14112 /** 14113 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry 14114 * @phba: Pointer to HBA context object. 14115 * @eqe: Pointer to fast-path event queue entry. 14116 * 14117 * This routine process a event queue entry from the fast-path event queue. 
14118 * It will check the MajorCode and MinorCode to determine this is for a 14119 * completion event on a completion queue, if not, an error shall be logged 14120 * and just return. Otherwise, it will get to the corresponding completion 14121 * queue and process all the entries on the completion queue, rearm the 14122 * completion queue, and then return. 14123 **/ 14124 static void 14125 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, 14126 struct lpfc_eqe *eqe) 14127 { 14128 struct lpfc_queue *cq = NULL; 14129 uint32_t qidx = eq->hdwq; 14130 uint16_t cqid, id; 14131 14132 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 14133 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14134 "0366 Not a valid completion " 14135 "event: majorcode=x%x, minorcode=x%x\n", 14136 bf_get_le32(lpfc_eqe_major_code, eqe), 14137 bf_get_le32(lpfc_eqe_minor_code, eqe)); 14138 return; 14139 } 14140 14141 /* Get the reference to the corresponding CQ */ 14142 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 14143 14144 /* Use the fast lookup method first */ 14145 if (cqid <= phba->sli4_hba.cq_max) { 14146 cq = phba->sli4_hba.cq_lookup[cqid]; 14147 if (cq) 14148 goto work_cq; 14149 } 14150 14151 /* Next check for NVMET completion */ 14152 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) { 14153 id = phba->sli4_hba.nvmet_cqset[0]->queue_id; 14154 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) { 14155 /* Process NVMET unsol rcv */ 14156 cq = phba->sli4_hba.nvmet_cqset[cqid - id]; 14157 goto process_cq; 14158 } 14159 } 14160 14161 if (phba->sli4_hba.nvmels_cq && 14162 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) { 14163 /* Process NVME unsol rcv */ 14164 cq = phba->sli4_hba.nvmels_cq; 14165 } 14166 14167 /* Otherwise this is a Slow path event */ 14168 if (cq == NULL) { 14169 lpfc_sli4_sp_handle_eqe(phba, eqe, 14170 phba->sli4_hba.hdwq[qidx].hba_eq); 14171 return; 14172 } 14173 14174 process_cq: 14175 if (unlikely(cqid != cq->queue_id)) { 14176 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14177 "0368 Miss-matched fast-path completion " 14178 "queue identifier: eqcqid=%d, fcpcqid=%d\n", 14179 cqid, cq->queue_id); 14180 return; 14181 } 14182 14183 work_cq: 14184 if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork)) 14185 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14186 "0363 Cannot schedule soft IRQ " 14187 "for CQ eqcqid=%d, cqid=%d on CPU %d\n", 14188 cqid, cq->queue_id, smp_processor_id()); 14189 } 14190 14191 /** 14192 * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry 14193 * @cq: Pointer to CQ to be processed 14194 * 14195 * This routine calls the cq processing routine with the handler for 14196 * fast path CQEs. 14197 * 14198 * The CQ routine returns two values: the first is the calling status, 14199 * which indicates whether work was queued to the background discovery 14200 * thread. If true, the routine should wakeup the discovery thread; 14201 * the second is the delay parameter. If non-zero, rather than rearming 14202 * the CQ and yet another interrupt, the CQ handler should be queued so 14203 * that it is processed in a subsequent polling action. The value of 14204 * the delay indicates when to reschedule it. 
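 *
 * As with the slow-path variant, a non-zero delay causes the handler to be
 * re-queued on cq->sched_irqwork via queue_delayed_work_on() instead of
 * rearming the CQ immediately.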
14205 **/ 14206 static void 14207 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq) 14208 { 14209 struct lpfc_hba *phba = cq->phba; 14210 unsigned long delay; 14211 bool workposted = false; 14212 14213 /* process and rearm the CQ */ 14214 workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe, 14215 &delay); 14216 14217 if (delay) { 14218 if (!queue_delayed_work_on(cq->chann, phba->wq, 14219 &cq->sched_irqwork, delay)) 14220 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14221 "0367 Cannot schedule soft IRQ " 14222 "for cqid=%d on CPU %d\n", 14223 cq->queue_id, cq->chann); 14224 } 14225 14226 /* wake up worker thread if there are works to be done */ 14227 if (workposted) 14228 lpfc_worker_wake_up(phba); 14229 } 14230 14231 /** 14232 * lpfc_sli4_hba_process_cq - fast-path work handler when started by 14233 * interrupt 14234 * @work: pointer to work element 14235 * 14236 * translates from the work handler and calls the fast-path handler. 14237 **/ 14238 static void 14239 lpfc_sli4_hba_process_cq(struct work_struct *work) 14240 { 14241 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork); 14242 14243 __lpfc_sli4_hba_process_cq(cq); 14244 } 14245 14246 /** 14247 * lpfc_sli4_hba_process_cq - fast-path work handler when started by timer 14248 * @work: pointer to work element 14249 * 14250 * translates from the work handler and calls the fast-path handler. 14251 **/ 14252 static void 14253 lpfc_sli4_dly_hba_process_cq(struct work_struct *work) 14254 { 14255 struct lpfc_queue *cq = container_of(to_delayed_work(work), 14256 struct lpfc_queue, sched_irqwork); 14257 14258 __lpfc_sli4_hba_process_cq(cq); 14259 } 14260 14261 /** 14262 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device 14263 * @irq: Interrupt number. 14264 * @dev_id: The device context pointer. 14265 * 14266 * This function is directly called from the PCI layer as an interrupt 14267 * service routine when device with SLI-4 interface spec is enabled with 14268 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 14269 * ring event in the HBA. However, when the device is enabled with either 14270 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 14271 * device-level interrupt handler. When the PCI slot is in error recovery 14272 * or the HBA is undergoing initialization, the interrupt handler will not 14273 * process the interrupt. The SCSI FCP fast-path ring event are handled in 14274 * the intrrupt context. This function is called without any lock held. 14275 * It gets the hbalock to access and update SLI data structures. Note that, 14276 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is 14277 * equal to that of FCP CQ index. 14278 * 14279 * The link attention and ELS ring attention events are handled 14280 * by the worker thread. The interrupt handler signals the worker thread 14281 * and returns for these events. This function is called without any lock 14282 * held. It gets the hbalock to access and update SLI data structures. 14283 * 14284 * This function returns IRQ_HANDLED when interrupt is handled else it 14285 * returns IRQ_NONE. 
14286 **/ 14287 irqreturn_t 14288 lpfc_sli4_hba_intr_handler(int irq, void *dev_id) 14289 { 14290 struct lpfc_hba *phba; 14291 struct lpfc_hba_eq_hdl *hba_eq_hdl; 14292 struct lpfc_queue *fpeq; 14293 unsigned long iflag; 14294 int ecount = 0; 14295 int hba_eqidx; 14296 struct lpfc_eq_intr_info *eqi; 14297 uint32_t icnt; 14298 14299 /* Get the driver's phba structure from the dev_id */ 14300 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; 14301 phba = hba_eq_hdl->phba; 14302 hba_eqidx = hba_eq_hdl->idx; 14303 14304 if (unlikely(!phba)) 14305 return IRQ_NONE; 14306 if (unlikely(!phba->sli4_hba.hdwq)) 14307 return IRQ_NONE; 14308 14309 /* Get to the EQ struct associated with this vector */ 14310 fpeq = phba->sli4_hba.hdwq[hba_eqidx].hba_eq; 14311 if (unlikely(!fpeq)) 14312 return IRQ_NONE; 14313 14314 /* Check device state for handling interrupt */ 14315 if (unlikely(lpfc_intr_state_check(phba))) { 14316 /* Check again for link_state with lock held */ 14317 spin_lock_irqsave(&phba->hbalock, iflag); 14318 if (phba->link_state < LPFC_LINK_DOWN) 14319 /* Flush, clear interrupt, and rearm the EQ */ 14320 lpfc_sli4_eq_flush(phba, fpeq); 14321 spin_unlock_irqrestore(&phba->hbalock, iflag); 14322 return IRQ_NONE; 14323 } 14324 14325 eqi = phba->sli4_hba.eq_info; 14326 icnt = this_cpu_inc_return(eqi->icnt); 14327 fpeq->last_cpu = smp_processor_id(); 14328 14329 if (icnt > LPFC_EQD_ISR_TRIGGER && 14330 phba->cfg_irq_chann == 1 && 14331 phba->cfg_auto_imax && 14332 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY && 14333 phba->sli.sli_flag & LPFC_SLI_USE_EQDR) 14334 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY); 14335 14336 /* process and rearm the EQ */ 14337 ecount = lpfc_sli4_process_eq(phba, fpeq); 14338 14339 if (unlikely(ecount == 0)) { 14340 fpeq->EQ_no_entry++; 14341 if (phba->intr_type == MSIX) 14342 /* MSI-X treated interrupt served as no EQ share INT */ 14343 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 14344 "0358 MSI-X interrupt with no EQE\n"); 14345 else 14346 /* Non MSI-X treated on interrupt as EQ share INT */ 14347 return IRQ_NONE; 14348 } 14349 14350 return IRQ_HANDLED; 14351 } /* lpfc_sli4_fp_intr_handler */ 14352 14353 /** 14354 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device 14355 * @irq: Interrupt number. 14356 * @dev_id: The device context pointer. 14357 * 14358 * This function is the device-level interrupt handler to device with SLI-4 14359 * interface spec, called from the PCI layer when either MSI or Pin-IRQ 14360 * interrupt mode is enabled and there is an event in the HBA which requires 14361 * driver attention. This function invokes the slow-path interrupt attention 14362 * handling function and fast-path interrupt attention handling function in 14363 * turn to process the relevant HBA attention events. This function is called 14364 * without any lock held. It gets the hbalock to access and update SLI data 14365 * structures. 14366 * 14367 * This function returns IRQ_HANDLED when interrupt is handled, else it 14368 * returns IRQ_NONE. 14369 **/ 14370 irqreturn_t 14371 lpfc_sli4_intr_handler(int irq, void *dev_id) 14372 { 14373 struct lpfc_hba *phba; 14374 irqreturn_t hba_irq_rc; 14375 bool hba_handled = false; 14376 int qidx; 14377 14378 /* Get the driver's phba structure from the dev_id */ 14379 phba = (struct lpfc_hba *)dev_id; 14380 14381 if (unlikely(!phba)) 14382 return IRQ_NONE; 14383 14384 /* 14385 * Invoke fast-path host attention interrupt handling as appropriate. 
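	 * In MSI or INTx mode every configured EQ hangs off this single
	 * vector, so each hba_eq_hdl is polled in turn and the interrupt is
	 * claimed if any of them reports IRQ_HANDLED.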
14386 */ 14387 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 14388 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq, 14389 &phba->sli4_hba.hba_eq_hdl[qidx]); 14390 if (hba_irq_rc == IRQ_HANDLED) 14391 hba_handled |= true; 14392 } 14393 14394 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE; 14395 } /* lpfc_sli4_intr_handler */ 14396 14397 /** 14398 * lpfc_sli4_queue_free - free a queue structure and associated memory 14399 * @queue: The queue structure to free. 14400 * 14401 * This function frees a queue structure and the DMAable memory used for 14402 * the host resident queue. This function must be called after destroying the 14403 * queue on the HBA. 14404 **/ 14405 void 14406 lpfc_sli4_queue_free(struct lpfc_queue *queue) 14407 { 14408 struct lpfc_dmabuf *dmabuf; 14409 14410 if (!queue) 14411 return; 14412 14413 while (!list_empty(&queue->page_list)) { 14414 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, 14415 list); 14416 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size, 14417 dmabuf->virt, dmabuf->phys); 14418 kfree(dmabuf); 14419 } 14420 if (queue->rqbp) { 14421 lpfc_free_rq_buffer(queue->phba, queue); 14422 kfree(queue->rqbp); 14423 } 14424 14425 if (!list_empty(&queue->cpu_list)) 14426 list_del(&queue->cpu_list); 14427 14428 if (!list_empty(&queue->wq_list)) 14429 list_del(&queue->wq_list); 14430 14431 kfree(queue); 14432 return; 14433 } 14434 14435 /** 14436 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure 14437 * @phba: The HBA that this queue is being created on. 14438 * @page_size: The size of a queue page 14439 * @entry_size: The size of each queue entry for this queue. 14440 * @entry count: The number of entries that this queue will handle. 14441 * 14442 * This function allocates a queue structure and the DMAable memory used for 14443 * the host resident queue. This function must be called before creating the 14444 * queue on the HBA. 14445 **/ 14446 struct lpfc_queue * 14447 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size, 14448 uint32_t entry_size, uint32_t entry_count) 14449 { 14450 struct lpfc_queue *queue; 14451 struct lpfc_dmabuf *dmabuf; 14452 int x, total_qe_count; 14453 void *dma_pointer; 14454 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14455 14456 if (!phba->sli4_hba.pc_sli4_params.supported) 14457 hw_page_size = page_size; 14458 14459 queue = kzalloc(sizeof(struct lpfc_queue) + 14460 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL); 14461 if (!queue) 14462 return NULL; 14463 queue->page_count = (ALIGN(entry_size * entry_count, 14464 hw_page_size))/hw_page_size; 14465 14466 /* If needed, Adjust page count to match the max the adapter supports */ 14467 if (phba->sli4_hba.pc_sli4_params.wqpcnt && 14468 (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt)) 14469 queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt; 14470 14471 INIT_LIST_HEAD(&queue->list); 14472 INIT_LIST_HEAD(&queue->wq_list); 14473 INIT_LIST_HEAD(&queue->wqfull_list); 14474 INIT_LIST_HEAD(&queue->page_list); 14475 INIT_LIST_HEAD(&queue->child_list); 14476 INIT_LIST_HEAD(&queue->cpu_list); 14477 14478 /* Set queue parameters now. If the system cannot provide memory 14479 * resources, the free routine needs to know what was allocated. 
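	 * (A partially built queue can then be handed to
	 * lpfc_sli4_queue_free(), which walks page_list and releases
	 * whatever pages were successfully allocated.)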
14480 */ 14481 queue->entry_size = entry_size; 14482 queue->entry_count = entry_count; 14483 queue->page_size = hw_page_size; 14484 queue->phba = phba; 14485 14486 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) { 14487 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 14488 if (!dmabuf) 14489 goto out_fail; 14490 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 14491 hw_page_size, &dmabuf->phys, 14492 GFP_KERNEL); 14493 if (!dmabuf->virt) { 14494 kfree(dmabuf); 14495 goto out_fail; 14496 } 14497 dmabuf->buffer_tag = x; 14498 list_add_tail(&dmabuf->list, &queue->page_list); 14499 /* initialize queue's entry array */ 14500 dma_pointer = dmabuf->virt; 14501 for (; total_qe_count < entry_count && 14502 dma_pointer < (hw_page_size + dmabuf->virt); 14503 total_qe_count++, dma_pointer += entry_size) { 14504 queue->qe[total_qe_count].address = dma_pointer; 14505 } 14506 } 14507 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq); 14508 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq); 14509 INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq); 14510 INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq); 14511 14512 /* notify_interval will be set during q creation */ 14513 14514 return queue; 14515 out_fail: 14516 lpfc_sli4_queue_free(queue); 14517 return NULL; 14518 } 14519 14520 /** 14521 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory 14522 * @phba: HBA structure that indicates port to create a queue on. 14523 * @pci_barset: PCI BAR set flag. 14524 * 14525 * This function shall perform iomap of the specified PCI BAR address to host 14526 * memory address if not already done so and return it. The returned host 14527 * memory address can be NULL. 14528 */ 14529 static void __iomem * 14530 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) 14531 { 14532 if (!phba->pcidev) 14533 return NULL; 14534 14535 switch (pci_barset) { 14536 case WQ_PCI_BAR_0_AND_1: 14537 return phba->pci_bar0_memmap_p; 14538 case WQ_PCI_BAR_2_AND_3: 14539 return phba->pci_bar2_memmap_p; 14540 case WQ_PCI_BAR_4_AND_5: 14541 return phba->pci_bar4_memmap_p; 14542 default: 14543 break; 14544 } 14545 return NULL; 14546 } 14547 14548 /** 14549 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs 14550 * @phba: HBA structure that EQs are on. 14551 * @startq: The starting EQ index to modify 14552 * @numq: The number of EQs (consecutive indexes) to modify 14553 * @usdelay: amount of delay 14554 * 14555 * This function revises the EQ delay on 1 or more EQs. The EQ delay 14556 * is set either by writing to a register (if supported by the SLI Port) 14557 * or by mailbox command. The mailbox command allows several EQs to be 14558 * updated at once. 14559 * 14560 * The @phba struct is used to send a mailbox command to HBA. The @startq 14561 * is used to get the starting EQ index to change. The @numq value is 14562 * used to specify how many consecutive EQ indexes, starting at EQ index, 14563 * are to be changed. This function is asynchronous and will wait for any 14564 * mailbox commands to finish before returning. 14565 * 14566 * On success this function will return a zero. If unable to allocate 14567 * enough memory this function will return -ENOMEM. If a mailbox command 14568 * fails this function will return -ENXIO. Note: on ENXIO, some EQs may 14569 * have had their delay multipler changed. 
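 *
 * A minimal usage sketch (the 16 usec value is illustrative only): to apply
 * the same coalescing delay to every EQ a caller could issue
 *
 *	lpfc_modify_hba_eq_delay(phba, 0, phba->cfg_irq_chann, 16);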
14570 **/ 14571 void 14572 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, 14573 uint32_t numq, uint32_t usdelay) 14574 { 14575 struct lpfc_mbx_modify_eq_delay *eq_delay; 14576 LPFC_MBOXQ_t *mbox; 14577 struct lpfc_queue *eq; 14578 int cnt = 0, rc, length; 14579 uint32_t shdr_status, shdr_add_status; 14580 uint32_t dmult; 14581 int qidx; 14582 union lpfc_sli4_cfg_shdr *shdr; 14583 14584 if (startq >= phba->cfg_irq_chann) 14585 return; 14586 14587 if (usdelay > 0xFFFF) { 14588 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME, 14589 "6429 usdelay %d too large. Scaled down to " 14590 "0xFFFF.\n", usdelay); 14591 usdelay = 0xFFFF; 14592 } 14593 14594 /* set values by EQ_DELAY register if supported */ 14595 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) { 14596 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) { 14597 eq = phba->sli4_hba.hdwq[qidx].hba_eq; 14598 if (!eq) 14599 continue; 14600 14601 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay); 14602 14603 if (++cnt >= numq) 14604 break; 14605 } 14606 14607 return; 14608 } 14609 14610 /* Otherwise, set values by mailbox cmd */ 14611 14612 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14613 if (!mbox) { 14614 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME, 14615 "6428 Failed allocating mailbox cmd buffer." 14616 " EQ delay was not set.\n"); 14617 return; 14618 } 14619 length = (sizeof(struct lpfc_mbx_modify_eq_delay) - 14620 sizeof(struct lpfc_sli4_cfg_mhdr)); 14621 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14622 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY, 14623 length, LPFC_SLI4_MBX_EMBED); 14624 eq_delay = &mbox->u.mqe.un.eq_delay; 14625 14626 /* Calculate delay multiper from maximum interrupt per second */ 14627 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC; 14628 if (dmult) 14629 dmult--; 14630 if (dmult > LPFC_DMULT_MAX) 14631 dmult = LPFC_DMULT_MAX; 14632 14633 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) { 14634 eq = phba->sli4_hba.hdwq[qidx].hba_eq; 14635 if (!eq) 14636 continue; 14637 eq->q_mode = usdelay; 14638 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; 14639 eq_delay->u.request.eq[cnt].phase = 0; 14640 eq_delay->u.request.eq[cnt].delay_multi = dmult; 14641 14642 if (++cnt >= numq) 14643 break; 14644 } 14645 eq_delay->u.request.num_eq = cnt; 14646 14647 mbox->vport = phba->pport; 14648 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 14649 mbox->ctx_buf = NULL; 14650 mbox->ctx_ndlp = NULL; 14651 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14652 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr; 14653 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14654 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14655 if (shdr_status || shdr_add_status || rc) { 14656 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14657 "2512 MODIFY_EQ_DELAY mailbox failed with " 14658 "status x%x add_status x%x, mbx status x%x\n", 14659 shdr_status, shdr_add_status, rc); 14660 } 14661 mempool_free(mbox, phba->mbox_mem_pool); 14662 return; 14663 } 14664 14665 /** 14666 * lpfc_eq_create - Create an Event Queue on the HBA 14667 * @phba: HBA structure that indicates port to create a queue on. 14668 * @eq: The queue structure to use to create the event queue. 14669 * @imax: The maximum interrupt per second limit. 14670 * 14671 * This function creates an event queue, as detailed in @eq, on a port, 14672 * described by @phba by sending an EQ_CREATE mailbox command to the HBA. 
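 *
 * Only entry counts of 256, 512, 1024, 2048 and 4096 map directly to an
 * EQ_CNT encoding; any other count of at least 256 is logged and falls
 * back to the smallest size, while a count below 256 fails with -EINVAL.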
14673 * 14674 * The @phba struct is used to send mailbox command to HBA. The @eq struct 14675 * is used to get the entry count and entry size that are necessary to 14676 * determine the number of pages to allocate and use for this queue. This 14677 * function will send the EQ_CREATE mailbox command to the HBA to setup the 14678 * event queue. This function is asynchronous and will wait for the mailbox 14679 * command to finish before continuing. 14680 * 14681 * On success this function will return a zero. If unable to allocate enough 14682 * memory this function will return -ENOMEM. If the queue create mailbox command 14683 * fails this function will return -ENXIO. 14684 **/ 14685 int 14686 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) 14687 { 14688 struct lpfc_mbx_eq_create *eq_create; 14689 LPFC_MBOXQ_t *mbox; 14690 int rc, length, status = 0; 14691 struct lpfc_dmabuf *dmabuf; 14692 uint32_t shdr_status, shdr_add_status; 14693 union lpfc_sli4_cfg_shdr *shdr; 14694 uint16_t dmult; 14695 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14696 14697 /* sanity check on queue memory */ 14698 if (!eq) 14699 return -ENODEV; 14700 if (!phba->sli4_hba.pc_sli4_params.supported) 14701 hw_page_size = SLI4_PAGE_SIZE; 14702 14703 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14704 if (!mbox) 14705 return -ENOMEM; 14706 length = (sizeof(struct lpfc_mbx_eq_create) - 14707 sizeof(struct lpfc_sli4_cfg_mhdr)); 14708 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14709 LPFC_MBOX_OPCODE_EQ_CREATE, 14710 length, LPFC_SLI4_MBX_EMBED); 14711 eq_create = &mbox->u.mqe.un.eq_create; 14712 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; 14713 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, 14714 eq->page_count); 14715 bf_set(lpfc_eq_context_size, &eq_create->u.request.context, 14716 LPFC_EQE_SIZE); 14717 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); 14718 14719 /* Use version 2 of CREATE_EQ if eqav is set */ 14720 if (phba->sli4_hba.pc_sli4_params.eqav) { 14721 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14722 LPFC_Q_CREATE_VERSION_2); 14723 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context, 14724 phba->sli4_hba.pc_sli4_params.eqav); 14725 } 14726 14727 /* don't setup delay multiplier using EQ_CREATE */ 14728 dmult = 0; 14729 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, 14730 dmult); 14731 switch (eq->entry_count) { 14732 default: 14733 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14734 "0360 Unsupported EQ count. 
(%d)\n", 14735 eq->entry_count); 14736 if (eq->entry_count < 256) 14737 return -EINVAL; 14738 /* fall through - otherwise default to smallest count */ 14739 case 256: 14740 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14741 LPFC_EQ_CNT_256); 14742 break; 14743 case 512: 14744 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14745 LPFC_EQ_CNT_512); 14746 break; 14747 case 1024: 14748 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14749 LPFC_EQ_CNT_1024); 14750 break; 14751 case 2048: 14752 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14753 LPFC_EQ_CNT_2048); 14754 break; 14755 case 4096: 14756 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14757 LPFC_EQ_CNT_4096); 14758 break; 14759 } 14760 list_for_each_entry(dmabuf, &eq->page_list, list) { 14761 memset(dmabuf->virt, 0, hw_page_size); 14762 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 14763 putPaddrLow(dmabuf->phys); 14764 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 14765 putPaddrHigh(dmabuf->phys); 14766 } 14767 mbox->vport = phba->pport; 14768 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 14769 mbox->ctx_buf = NULL; 14770 mbox->ctx_ndlp = NULL; 14771 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14772 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14773 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14774 if (shdr_status || shdr_add_status || rc) { 14775 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14776 "2500 EQ_CREATE mailbox failed with " 14777 "status x%x add_status x%x, mbx status x%x\n", 14778 shdr_status, shdr_add_status, rc); 14779 status = -ENXIO; 14780 } 14781 eq->type = LPFC_EQ; 14782 eq->subtype = LPFC_NONE; 14783 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); 14784 if (eq->queue_id == 0xFFFF) 14785 status = -ENXIO; 14786 eq->host_index = 0; 14787 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL; 14788 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT; 14789 14790 mempool_free(mbox, phba->mbox_mem_pool); 14791 return status; 14792 } 14793 14794 /** 14795 * lpfc_cq_create - Create a Completion Queue on the HBA 14796 * @phba: HBA structure that indicates port to create a queue on. 14797 * @cq: The queue structure to use to create the completion queue. 14798 * @eq: The event queue to bind this completion queue to. 14799 * 14800 * This function creates a completion queue, as detailed in @wq, on a port, 14801 * described by @phba by sending a CQ_CREATE mailbox command to the HBA. 14802 * 14803 * The @phba struct is used to send mailbox command to HBA. The @cq struct 14804 * is used to get the entry count and entry size that are necessary to 14805 * determine the number of pages to allocate and use for this queue. The @eq 14806 * is used to indicate which event queue to bind this completion queue to. This 14807 * function will send the CQ_CREATE mailbox command to the HBA to setup the 14808 * completion queue. This function is asynchronous and will wait for the mailbox 14809 * command to finish before continuing. 14810 * 14811 * On success this function will return a zero. If unable to allocate enough 14812 * memory this function will return -ENOMEM. If the queue create mailbox command 14813 * fails this function will return -ENXIO. 
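 *
 * Supported entry counts are 256, 512 and 1024; 2048 and 4096 are accepted
 * only when the port reports CQ create version 2. Any other count of at
 * least 256 falls back to 256, while smaller counts fail with -EINVAL.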
14814 **/ 14815 int 14816 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, 14817 struct lpfc_queue *eq, uint32_t type, uint32_t subtype) 14818 { 14819 struct lpfc_mbx_cq_create *cq_create; 14820 struct lpfc_dmabuf *dmabuf; 14821 LPFC_MBOXQ_t *mbox; 14822 int rc, length, status = 0; 14823 uint32_t shdr_status, shdr_add_status; 14824 union lpfc_sli4_cfg_shdr *shdr; 14825 14826 /* sanity check on queue memory */ 14827 if (!cq || !eq) 14828 return -ENODEV; 14829 14830 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14831 if (!mbox) 14832 return -ENOMEM; 14833 length = (sizeof(struct lpfc_mbx_cq_create) - 14834 sizeof(struct lpfc_sli4_cfg_mhdr)); 14835 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14836 LPFC_MBOX_OPCODE_CQ_CREATE, 14837 length, LPFC_SLI4_MBX_EMBED); 14838 cq_create = &mbox->u.mqe.un.cq_create; 14839 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; 14840 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, 14841 cq->page_count); 14842 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); 14843 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); 14844 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14845 phba->sli4_hba.pc_sli4_params.cqv); 14846 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { 14847 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 14848 (cq->page_size / SLI4_PAGE_SIZE)); 14849 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, 14850 eq->queue_id); 14851 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context, 14852 phba->sli4_hba.pc_sli4_params.cqav); 14853 } else { 14854 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, 14855 eq->queue_id); 14856 } 14857 switch (cq->entry_count) { 14858 case 2048: 14859 case 4096: 14860 if (phba->sli4_hba.pc_sli4_params.cqv == 14861 LPFC_Q_CREATE_VERSION_2) { 14862 cq_create->u.request.context.lpfc_cq_context_count = 14863 cq->entry_count; 14864 bf_set(lpfc_cq_context_count, 14865 &cq_create->u.request.context, 14866 LPFC_CQ_CNT_WORD7); 14867 break; 14868 } 14869 /* fall through */ 14870 default: 14871 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14872 "0361 Unsupported CQ count: " 14873 "entry cnt %d sz %d pg cnt %d\n", 14874 cq->entry_count, cq->entry_size, 14875 cq->page_count); 14876 if (cq->entry_count < 256) { 14877 status = -EINVAL; 14878 goto out; 14879 } 14880 /* fall through - otherwise default to smallest count */ 14881 case 256: 14882 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14883 LPFC_CQ_CNT_256); 14884 break; 14885 case 512: 14886 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14887 LPFC_CQ_CNT_512); 14888 break; 14889 case 1024: 14890 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14891 LPFC_CQ_CNT_1024); 14892 break; 14893 } 14894 list_for_each_entry(dmabuf, &cq->page_list, list) { 14895 memset(dmabuf->virt, 0, cq->page_size); 14896 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 14897 putPaddrLow(dmabuf->phys); 14898 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 14899 putPaddrHigh(dmabuf->phys); 14900 } 14901 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14902 14903 /* The IOCTL status is embedded in the mailbox subheader. 
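	 * Both shdr_status and shdr_add_status must be zero, and the mailbox
	 * itself must have returned MBX_SUCCESS, for the CQ to be usable.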
*/ 14904 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14905 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14906 if (shdr_status || shdr_add_status || rc) { 14907 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14908 "2501 CQ_CREATE mailbox failed with " 14909 "status x%x add_status x%x, mbx status x%x\n", 14910 shdr_status, shdr_add_status, rc); 14911 status = -ENXIO; 14912 goto out; 14913 } 14914 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 14915 if (cq->queue_id == 0xFFFF) { 14916 status = -ENXIO; 14917 goto out; 14918 } 14919 /* link the cq onto the parent eq child list */ 14920 list_add_tail(&cq->list, &eq->child_list); 14921 /* Set up completion queue's type and subtype */ 14922 cq->type = type; 14923 cq->subtype = subtype; 14924 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 14925 cq->assoc_qid = eq->queue_id; 14926 cq->assoc_qp = eq; 14927 cq->host_index = 0; 14928 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL; 14929 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count); 14930 14931 if (cq->queue_id > phba->sli4_hba.cq_max) 14932 phba->sli4_hba.cq_max = cq->queue_id; 14933 out: 14934 mempool_free(mbox, phba->mbox_mem_pool); 14935 return status; 14936 } 14937 14938 /** 14939 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ 14940 * @phba: HBA structure that indicates port to create a queue on. 14941 * @cqp: The queue structure array to use to create the completion queues. 14942 * @hdwq: The hardware queue array with the EQ to bind completion queues to. 14943 * 14944 * This function creates a set of completion queue, s to support MRQ 14945 * as detailed in @cqp, on a port, 14946 * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA. 14947 * 14948 * The @phba struct is used to send mailbox command to HBA. The @cq struct 14949 * is used to get the entry count and entry size that are necessary to 14950 * determine the number of pages to allocate and use for this queue. The @eq 14951 * is used to indicate which event queue to bind this completion queue to. This 14952 * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the 14953 * completion queue. This function is asynchronous and will wait for the mailbox 14954 * command to finish before continuing. 14955 * 14956 * On success this function will return a zero. If unable to allocate enough 14957 * memory this function will return -ENOMEM. If the queue create mailbox command 14958 * fails this function will return -ENXIO. 
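 *
 * The CREATE_CQ_SET request carries one eq_idN binding per queue for at
 * most 16 queues, so the number of MRQ completion queues created here is
 * assumed not to exceed that limit.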
14959 **/ 14960 int 14961 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, 14962 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type, 14963 uint32_t subtype) 14964 { 14965 struct lpfc_queue *cq; 14966 struct lpfc_queue *eq; 14967 struct lpfc_mbx_cq_create_set *cq_set; 14968 struct lpfc_dmabuf *dmabuf; 14969 LPFC_MBOXQ_t *mbox; 14970 int rc, length, alloclen, status = 0; 14971 int cnt, idx, numcq, page_idx = 0; 14972 uint32_t shdr_status, shdr_add_status; 14973 union lpfc_sli4_cfg_shdr *shdr; 14974 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14975 14976 /* sanity check on queue memory */ 14977 numcq = phba->cfg_nvmet_mrq; 14978 if (!cqp || !hdwq || !numcq) 14979 return -ENODEV; 14980 14981 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14982 if (!mbox) 14983 return -ENOMEM; 14984 14985 length = sizeof(struct lpfc_mbx_cq_create_set); 14986 length += ((numcq * cqp[0]->page_count) * 14987 sizeof(struct dma_address)); 14988 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 14989 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length, 14990 LPFC_SLI4_MBX_NEMBED); 14991 if (alloclen < length) { 14992 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14993 "3098 Allocated DMA memory size (%d) is " 14994 "less than the requested DMA memory size " 14995 "(%d)\n", alloclen, length); 14996 status = -ENOMEM; 14997 goto out; 14998 } 14999 cq_set = mbox->sge_array->addr[0]; 15000 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr; 15001 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0); 15002 15003 for (idx = 0; idx < numcq; idx++) { 15004 cq = cqp[idx]; 15005 eq = hdwq[idx].hba_eq; 15006 if (!cq || !eq) { 15007 status = -ENOMEM; 15008 goto out; 15009 } 15010 if (!phba->sli4_hba.pc_sli4_params.supported) 15011 hw_page_size = cq->page_size; 15012 15013 switch (idx) { 15014 case 0: 15015 bf_set(lpfc_mbx_cq_create_set_page_size, 15016 &cq_set->u.request, 15017 (hw_page_size / SLI4_PAGE_SIZE)); 15018 bf_set(lpfc_mbx_cq_create_set_num_pages, 15019 &cq_set->u.request, cq->page_count); 15020 bf_set(lpfc_mbx_cq_create_set_evt, 15021 &cq_set->u.request, 1); 15022 bf_set(lpfc_mbx_cq_create_set_valid, 15023 &cq_set->u.request, 1); 15024 bf_set(lpfc_mbx_cq_create_set_cqe_size, 15025 &cq_set->u.request, 0); 15026 bf_set(lpfc_mbx_cq_create_set_num_cq, 15027 &cq_set->u.request, numcq); 15028 bf_set(lpfc_mbx_cq_create_set_autovalid, 15029 &cq_set->u.request, 15030 phba->sli4_hba.pc_sli4_params.cqav); 15031 switch (cq->entry_count) { 15032 case 2048: 15033 case 4096: 15034 if (phba->sli4_hba.pc_sli4_params.cqv == 15035 LPFC_Q_CREATE_VERSION_2) { 15036 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 15037 &cq_set->u.request, 15038 cq->entry_count); 15039 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 15040 &cq_set->u.request, 15041 LPFC_CQ_CNT_WORD7); 15042 break; 15043 } 15044 /* fall through */ 15045 default: 15046 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15047 "3118 Bad CQ count. 
(%d)\n", 15048 cq->entry_count); 15049 if (cq->entry_count < 256) { 15050 status = -EINVAL; 15051 goto out; 15052 } 15053 /* fall through - otherwise default to smallest */ 15054 case 256: 15055 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 15056 &cq_set->u.request, LPFC_CQ_CNT_256); 15057 break; 15058 case 512: 15059 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 15060 &cq_set->u.request, LPFC_CQ_CNT_512); 15061 break; 15062 case 1024: 15063 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 15064 &cq_set->u.request, LPFC_CQ_CNT_1024); 15065 break; 15066 } 15067 bf_set(lpfc_mbx_cq_create_set_eq_id0, 15068 &cq_set->u.request, eq->queue_id); 15069 break; 15070 case 1: 15071 bf_set(lpfc_mbx_cq_create_set_eq_id1, 15072 &cq_set->u.request, eq->queue_id); 15073 break; 15074 case 2: 15075 bf_set(lpfc_mbx_cq_create_set_eq_id2, 15076 &cq_set->u.request, eq->queue_id); 15077 break; 15078 case 3: 15079 bf_set(lpfc_mbx_cq_create_set_eq_id3, 15080 &cq_set->u.request, eq->queue_id); 15081 break; 15082 case 4: 15083 bf_set(lpfc_mbx_cq_create_set_eq_id4, 15084 &cq_set->u.request, eq->queue_id); 15085 break; 15086 case 5: 15087 bf_set(lpfc_mbx_cq_create_set_eq_id5, 15088 &cq_set->u.request, eq->queue_id); 15089 break; 15090 case 6: 15091 bf_set(lpfc_mbx_cq_create_set_eq_id6, 15092 &cq_set->u.request, eq->queue_id); 15093 break; 15094 case 7: 15095 bf_set(lpfc_mbx_cq_create_set_eq_id7, 15096 &cq_set->u.request, eq->queue_id); 15097 break; 15098 case 8: 15099 bf_set(lpfc_mbx_cq_create_set_eq_id8, 15100 &cq_set->u.request, eq->queue_id); 15101 break; 15102 case 9: 15103 bf_set(lpfc_mbx_cq_create_set_eq_id9, 15104 &cq_set->u.request, eq->queue_id); 15105 break; 15106 case 10: 15107 bf_set(lpfc_mbx_cq_create_set_eq_id10, 15108 &cq_set->u.request, eq->queue_id); 15109 break; 15110 case 11: 15111 bf_set(lpfc_mbx_cq_create_set_eq_id11, 15112 &cq_set->u.request, eq->queue_id); 15113 break; 15114 case 12: 15115 bf_set(lpfc_mbx_cq_create_set_eq_id12, 15116 &cq_set->u.request, eq->queue_id); 15117 break; 15118 case 13: 15119 bf_set(lpfc_mbx_cq_create_set_eq_id13, 15120 &cq_set->u.request, eq->queue_id); 15121 break; 15122 case 14: 15123 bf_set(lpfc_mbx_cq_create_set_eq_id14, 15124 &cq_set->u.request, eq->queue_id); 15125 break; 15126 case 15: 15127 bf_set(lpfc_mbx_cq_create_set_eq_id15, 15128 &cq_set->u.request, eq->queue_id); 15129 break; 15130 } 15131 15132 /* link the cq onto the parent eq child list */ 15133 list_add_tail(&cq->list, &eq->child_list); 15134 /* Set up completion queue's type and subtype */ 15135 cq->type = type; 15136 cq->subtype = subtype; 15137 cq->assoc_qid = eq->queue_id; 15138 cq->assoc_qp = eq; 15139 cq->host_index = 0; 15140 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL; 15141 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, 15142 cq->entry_count); 15143 cq->chann = idx; 15144 15145 rc = 0; 15146 list_for_each_entry(dmabuf, &cq->page_list, list) { 15147 memset(dmabuf->virt, 0, hw_page_size); 15148 cnt = page_idx + dmabuf->buffer_tag; 15149 cq_set->u.request.page[cnt].addr_lo = 15150 putPaddrLow(dmabuf->phys); 15151 cq_set->u.request.page[cnt].addr_hi = 15152 putPaddrHigh(dmabuf->phys); 15153 rc++; 15154 } 15155 page_idx += rc; 15156 } 15157 15158 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15159 15160 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 15161 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15162 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15163 if (shdr_status || shdr_add_status || rc) { 15164 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15165 "3119 CQ_CREATE_SET mailbox failed with " 15166 "status x%x add_status x%x, mbx status x%x\n", 15167 shdr_status, shdr_add_status, rc); 15168 status = -ENXIO; 15169 goto out; 15170 } 15171 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response); 15172 if (rc == 0xFFFF) { 15173 status = -ENXIO; 15174 goto out; 15175 } 15176 15177 for (idx = 0; idx < numcq; idx++) { 15178 cq = cqp[idx]; 15179 cq->queue_id = rc + idx; 15180 if (cq->queue_id > phba->sli4_hba.cq_max) 15181 phba->sli4_hba.cq_max = cq->queue_id; 15182 } 15183 15184 out: 15185 lpfc_sli4_mbox_cmd_free(phba, mbox); 15186 return status; 15187 } 15188 15189 /** 15190 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration 15191 * @phba: HBA structure that indicates port to create a queue on. 15192 * @mq: The queue structure to use to create the mailbox queue. 15193 * @mbox: An allocated pointer to type LPFC_MBOXQ_t 15194 * @cq: The completion queue to associate with this cq. 15195 * 15196 * This function provides failback (fb) functionality when the 15197 * mq_create_ext fails on older FW generations. It's purpose is identical 15198 * to mq_create_ext otherwise. 15199 * 15200 * This routine cannot fail as all attributes were previously accessed and 15201 * initialized in mq_create_ext. 15202 **/ 15203 static void 15204 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, 15205 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq) 15206 { 15207 struct lpfc_mbx_mq_create *mq_create; 15208 struct lpfc_dmabuf *dmabuf; 15209 int length; 15210 15211 length = (sizeof(struct lpfc_mbx_mq_create) - 15212 sizeof(struct lpfc_sli4_cfg_mhdr)); 15213 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 15214 LPFC_MBOX_OPCODE_MQ_CREATE, 15215 length, LPFC_SLI4_MBX_EMBED); 15216 mq_create = &mbox->u.mqe.un.mq_create; 15217 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, 15218 mq->page_count); 15219 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, 15220 cq->queue_id); 15221 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); 15222 switch (mq->entry_count) { 15223 case 16: 15224 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 15225 LPFC_MQ_RING_SIZE_16); 15226 break; 15227 case 32: 15228 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 15229 LPFC_MQ_RING_SIZE_32); 15230 break; 15231 case 64: 15232 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 15233 LPFC_MQ_RING_SIZE_64); 15234 break; 15235 case 128: 15236 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 15237 LPFC_MQ_RING_SIZE_128); 15238 break; 15239 } 15240 list_for_each_entry(dmabuf, &mq->page_list, list) { 15241 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 15242 putPaddrLow(dmabuf->phys); 15243 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 15244 putPaddrHigh(dmabuf->phys); 15245 } 15246 } 15247 15248 /** 15249 * lpfc_mq_create - Create a mailbox Queue on the HBA 15250 * @phba: HBA structure that indicates port to create a queue on. 15251 * @mq: The queue structure to use to create the mailbox queue. 15252 * @cq: The completion queue to associate with this cq. 15253 * @subtype: The queue's subtype. 
15254 * 15255 * This function creates a mailbox queue, as detailed in @mq, on a port, 15256 * described by @phba by sending a MQ_CREATE mailbox command to the HBA. 15257 * 15258 * The @phba struct is used to send mailbox command to HBA. The @cq struct 15259 * is used to get the entry count and entry size that are necessary to 15260 * determine the number of pages to allocate and use for this queue. This 15261 * function will send the MQ_CREATE mailbox command to the HBA to setup the 15262 * mailbox queue. This function is asynchronous and will wait for the mailbox 15263 * command to finish before continuing. 15264 * 15265 * On success this function will return a zero. If unable to allocate enough 15266 * memory this function will return -ENOMEM. If the queue create mailbox command 15267 * fails this function will return -ENXIO. 15268 **/ 15269 int32_t 15270 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, 15271 struct lpfc_queue *cq, uint32_t subtype) 15272 { 15273 struct lpfc_mbx_mq_create *mq_create; 15274 struct lpfc_mbx_mq_create_ext *mq_create_ext; 15275 struct lpfc_dmabuf *dmabuf; 15276 LPFC_MBOXQ_t *mbox; 15277 int rc, length, status = 0; 15278 uint32_t shdr_status, shdr_add_status; 15279 union lpfc_sli4_cfg_shdr *shdr; 15280 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15281 15282 /* sanity check on queue memory */ 15283 if (!mq || !cq) 15284 return -ENODEV; 15285 if (!phba->sli4_hba.pc_sli4_params.supported) 15286 hw_page_size = SLI4_PAGE_SIZE; 15287 15288 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15289 if (!mbox) 15290 return -ENOMEM; 15291 length = (sizeof(struct lpfc_mbx_mq_create_ext) - 15292 sizeof(struct lpfc_sli4_cfg_mhdr)); 15293 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 15294 LPFC_MBOX_OPCODE_MQ_CREATE_EXT, 15295 length, LPFC_SLI4_MBX_EMBED); 15296 15297 mq_create_ext = &mbox->u.mqe.un.mq_create_ext; 15298 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; 15299 bf_set(lpfc_mbx_mq_create_ext_num_pages, 15300 &mq_create_ext->u.request, mq->page_count); 15301 bf_set(lpfc_mbx_mq_create_ext_async_evt_link, 15302 &mq_create_ext->u.request, 1); 15303 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip, 15304 &mq_create_ext->u.request, 1); 15305 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, 15306 &mq_create_ext->u.request, 1); 15307 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc, 15308 &mq_create_ext->u.request, 1); 15309 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, 15310 &mq_create_ext->u.request, 1); 15311 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); 15312 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15313 phba->sli4_hba.pc_sli4_params.mqv); 15314 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) 15315 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, 15316 cq->queue_id); 15317 else 15318 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, 15319 cq->queue_id); 15320 switch (mq->entry_count) { 15321 default: 15322 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15323 "0362 Unsupported MQ count. 
(%d)\n", 15324 mq->entry_count); 15325 if (mq->entry_count < 16) { 15326 status = -EINVAL; 15327 goto out; 15328 } 15329 /* fall through - otherwise default to smallest count */ 15330 case 16: 15331 bf_set(lpfc_mq_context_ring_size, 15332 &mq_create_ext->u.request.context, 15333 LPFC_MQ_RING_SIZE_16); 15334 break; 15335 case 32: 15336 bf_set(lpfc_mq_context_ring_size, 15337 &mq_create_ext->u.request.context, 15338 LPFC_MQ_RING_SIZE_32); 15339 break; 15340 case 64: 15341 bf_set(lpfc_mq_context_ring_size, 15342 &mq_create_ext->u.request.context, 15343 LPFC_MQ_RING_SIZE_64); 15344 break; 15345 case 128: 15346 bf_set(lpfc_mq_context_ring_size, 15347 &mq_create_ext->u.request.context, 15348 LPFC_MQ_RING_SIZE_128); 15349 break; 15350 } 15351 list_for_each_entry(dmabuf, &mq->page_list, list) { 15352 memset(dmabuf->virt, 0, hw_page_size); 15353 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = 15354 putPaddrLow(dmabuf->phys); 15355 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = 15356 putPaddrHigh(dmabuf->phys); 15357 } 15358 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15359 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 15360 &mq_create_ext->u.response); 15361 if (rc != MBX_SUCCESS) { 15362 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15363 "2795 MQ_CREATE_EXT failed with " 15364 "status x%x. Failback to MQ_CREATE.\n", 15365 rc); 15366 lpfc_mq_create_fb_init(phba, mq, mbox, cq); 15367 mq_create = &mbox->u.mqe.un.mq_create; 15368 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15369 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; 15370 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 15371 &mq_create->u.response); 15372 } 15373 15374 /* The IOCTL status is embedded in the mailbox subheader. */ 15375 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15376 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15377 if (shdr_status || shdr_add_status || rc) { 15378 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15379 "2502 MQ_CREATE mailbox failed with " 15380 "status x%x add_status x%x, mbx status x%x\n", 15381 shdr_status, shdr_add_status, rc); 15382 status = -ENXIO; 15383 goto out; 15384 } 15385 if (mq->queue_id == 0xFFFF) { 15386 status = -ENXIO; 15387 goto out; 15388 } 15389 mq->type = LPFC_MQ; 15390 mq->assoc_qid = cq->queue_id; 15391 mq->subtype = subtype; 15392 mq->host_index = 0; 15393 mq->hba_index = 0; 15394 15395 /* link the mq onto the parent cq child list */ 15396 list_add_tail(&mq->list, &cq->child_list); 15397 out: 15398 mempool_free(mbox, phba->mbox_mem_pool); 15399 return status; 15400 } 15401 15402 /** 15403 * lpfc_wq_create - Create a Work Queue on the HBA 15404 * @phba: HBA structure that indicates port to create a queue on. 15405 * @wq: The queue structure to use to create the work queue. 15406 * @cq: The completion queue to bind this work queue to. 15407 * @subtype: The subtype of the work queue indicating its functionality. 15408 * 15409 * This function creates a work queue, as detailed in @wq, on a port, described 15410 * by @phba by sending a WQ_CREATE mailbox command to the HBA. 15411 * 15412 * The @phba struct is used to send mailbox command to HBA. The @wq struct 15413 * is used to get the entry count and entry size that are necessary to 15414 * determine the number of pages to allocate and use for this queue. The @cq 15415 * is used to indicate which completion queue to bind this work queue to. This 15416 * function will send the WQ_CREATE mailbox command to the HBA to setup the 15417 * work queue. 
This function is synchronous and will wait for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int
lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_wq_create *wq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	struct dma_address *page;
	void __iomem *bar_memmap_p;
	uint32_t db_offset;
	uint16_t pci_barset;
	uint8_t dpp_barset;
	uint32_t dpp_offset;
	unsigned long pg_addr;
	uint8_t wq_create_version;

	/* sanity check on queue memory */
	if (!wq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = wq->page_size;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_wq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	wq_create = &mbox->u.mqe.un.wq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
	       wq->page_count);
	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
	       cq->queue_id);

	/* wqv is the earliest version supported, NOT the latest */
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.wqv);

	if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
	    (wq->page_size > SLI4_PAGE_SIZE))
		wq_create_version = LPFC_Q_CREATE_VERSION_1;
	else
		wq_create_version = LPFC_Q_CREATE_VERSION_0;

	switch (wq_create_version) {
	case LPFC_Q_CREATE_VERSION_1:
		bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
		       wq->entry_count);
		bf_set(lpfc_mbox_hdr_version, &shdr->request,
		       LPFC_Q_CREATE_VERSION_1);

		switch (wq->entry_size) {
		default:
		case 64:
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_64);
			break;
		case 128:
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_128);
			break;
		}
		/* Request DPP by default */
		bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
		bf_set(lpfc_mbx_wq_create_page_size,
		       &wq_create->u.request_1,
		       (wq->page_size / SLI4_PAGE_SIZE));
		page = wq_create->u.request_1.page;
		break;
	default:
		page = wq_create->u.request.page;
		break;
	}

	list_for_each_entry(dmabuf, &wq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
		page[dmabuf->buffer_tag].addr_hi =
putPaddrHigh(dmabuf->phys); 15517 } 15518 15519 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15520 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1); 15521 15522 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15523 /* The IOCTL status is embedded in the mailbox subheader. */ 15524 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15525 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15526 if (shdr_status || shdr_add_status || rc) { 15527 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15528 "2503 WQ_CREATE mailbox failed with " 15529 "status x%x add_status x%x, mbx status x%x\n", 15530 shdr_status, shdr_add_status, rc); 15531 status = -ENXIO; 15532 goto out; 15533 } 15534 15535 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) 15536 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, 15537 &wq_create->u.response); 15538 else 15539 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id, 15540 &wq_create->u.response_1); 15541 15542 if (wq->queue_id == 0xFFFF) { 15543 status = -ENXIO; 15544 goto out; 15545 } 15546 15547 wq->db_format = LPFC_DB_LIST_FORMAT; 15548 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) { 15549 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 15550 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format, 15551 &wq_create->u.response); 15552 if ((wq->db_format != LPFC_DB_LIST_FORMAT) && 15553 (wq->db_format != LPFC_DB_RING_FORMAT)) { 15554 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15555 "3265 WQ[%d] doorbell format " 15556 "not supported: x%x\n", 15557 wq->queue_id, wq->db_format); 15558 status = -EINVAL; 15559 goto out; 15560 } 15561 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set, 15562 &wq_create->u.response); 15563 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15564 pci_barset); 15565 if (!bar_memmap_p) { 15566 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15567 "3263 WQ[%d] failed to memmap " 15568 "pci barset:x%x\n", 15569 wq->queue_id, pci_barset); 15570 status = -ENOMEM; 15571 goto out; 15572 } 15573 db_offset = wq_create->u.response.doorbell_offset; 15574 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) && 15575 (db_offset != LPFC_ULP1_WQ_DOORBELL)) { 15576 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15577 "3252 WQ[%d] doorbell offset " 15578 "not supported: x%x\n", 15579 wq->queue_id, db_offset); 15580 status = -EINVAL; 15581 goto out; 15582 } 15583 wq->db_regaddr = bar_memmap_p + db_offset; 15584 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15585 "3264 WQ[%d]: barset:x%x, offset:x%x, " 15586 "format:x%x\n", wq->queue_id, 15587 pci_barset, db_offset, wq->db_format); 15588 } else 15589 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 15590 } else { 15591 /* Check if DPP was honored by the firmware */ 15592 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp, 15593 &wq_create->u.response_1); 15594 if (wq->dpp_enable) { 15595 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set, 15596 &wq_create->u.response_1); 15597 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15598 pci_barset); 15599 if (!bar_memmap_p) { 15600 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15601 "3267 WQ[%d] failed to memmap " 15602 "pci barset:x%x\n", 15603 wq->queue_id, pci_barset); 15604 status = -ENOMEM; 15605 goto out; 15606 } 15607 db_offset = wq_create->u.response_1.doorbell_offset; 15608 wq->db_regaddr = bar_memmap_p + db_offset; 15609 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id, 15610 &wq_create->u.response_1); 15611 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar, 15612 &wq_create->u.response_1); 15613 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15614 dpp_barset); 15615 if 
(!bar_memmap_p) { 15616 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15617 "3268 WQ[%d] failed to memmap " 15618 "pci barset:x%x\n", 15619 wq->queue_id, dpp_barset); 15620 status = -ENOMEM; 15621 goto out; 15622 } 15623 dpp_offset = wq_create->u.response_1.dpp_offset; 15624 wq->dpp_regaddr = bar_memmap_p + dpp_offset; 15625 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15626 "3271 WQ[%d]: barset:x%x, offset:x%x, " 15627 "dpp_id:x%x dpp_barset:x%x " 15628 "dpp_offset:x%x\n", 15629 wq->queue_id, pci_barset, db_offset, 15630 wq->dpp_id, dpp_barset, dpp_offset); 15631 15632 /* Enable combined writes for DPP aperture */ 15633 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK; 15634 #ifdef CONFIG_X86 15635 rc = set_memory_wc(pg_addr, 1); 15636 if (rc) { 15637 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15638 "3272 Cannot setup Combined " 15639 "Write on WQ[%d] - disable DPP\n", 15640 wq->queue_id); 15641 phba->cfg_enable_dpp = 0; 15642 } 15643 #else 15644 phba->cfg_enable_dpp = 0; 15645 #endif 15646 } else 15647 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 15648 } 15649 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL); 15650 if (wq->pring == NULL) { 15651 status = -ENOMEM; 15652 goto out; 15653 } 15654 wq->type = LPFC_WQ; 15655 wq->assoc_qid = cq->queue_id; 15656 wq->subtype = subtype; 15657 wq->host_index = 0; 15658 wq->hba_index = 0; 15659 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL; 15660 15661 /* link the wq onto the parent cq child list */ 15662 list_add_tail(&wq->list, &cq->child_list); 15663 out: 15664 mempool_free(mbox, phba->mbox_mem_pool); 15665 return status; 15666 } 15667 15668 /** 15669 * lpfc_rq_create - Create a Receive Queue on the HBA 15670 * @phba: HBA structure that indicates port to create a queue on. 15671 * @hrq: The queue structure to use to create the header receive queue. 15672 * @drq: The queue structure to use to create the data receive queue. 15673 * @cq: The completion queue to bind this work queue to. 15674 * 15675 * This function creates a receive buffer queue pair , as detailed in @hrq and 15676 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 15677 * to the HBA. 15678 * 15679 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 15680 * struct is used to get the entry count that is necessary to determine the 15681 * number of pages to use for this queue. The @cq is used to indicate which 15682 * completion queue to bind received buffers that are posted to these queues to. 15683 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 15684 * receive queue pair. This function is asynchronous and will wait for the 15685 * mailbox command to finish before continuing. 15686 * 15687 * On success this function will return a zero. If unable to allocate enough 15688 * memory this function will return -ENOMEM. If the queue create mailbox command 15689 * fails this function will return -ENXIO. 
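 *
 * Usage sketch (illustrative only, not taken from this file): a header/data
 * RQ pair is created together and bound to a single CQ. The queue objects
 * below are assumed to have been allocated earlier, e.g. by
 * lpfc_sli4_queue_alloc(), whose arguments are elided here:
 *
 *   rc = lpfc_rq_create(phba, hdr_rq, dat_rq, els_cq, LPFC_USOL);
 *   if (rc)
 *           goto out_destroy;
 *
 * where out_destroy unwinds any queues that were already created.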
15690 **/ 15691 int 15692 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, 15693 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) 15694 { 15695 struct lpfc_mbx_rq_create *rq_create; 15696 struct lpfc_dmabuf *dmabuf; 15697 LPFC_MBOXQ_t *mbox; 15698 int rc, length, status = 0; 15699 uint32_t shdr_status, shdr_add_status; 15700 union lpfc_sli4_cfg_shdr *shdr; 15701 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15702 void __iomem *bar_memmap_p; 15703 uint32_t db_offset; 15704 uint16_t pci_barset; 15705 15706 /* sanity check on queue memory */ 15707 if (!hrq || !drq || !cq) 15708 return -ENODEV; 15709 if (!phba->sli4_hba.pc_sli4_params.supported) 15710 hw_page_size = SLI4_PAGE_SIZE; 15711 15712 if (hrq->entry_count != drq->entry_count) 15713 return -EINVAL; 15714 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15715 if (!mbox) 15716 return -ENOMEM; 15717 length = (sizeof(struct lpfc_mbx_rq_create) - 15718 sizeof(struct lpfc_sli4_cfg_mhdr)); 15719 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15720 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 15721 length, LPFC_SLI4_MBX_EMBED); 15722 rq_create = &mbox->u.mqe.un.rq_create; 15723 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 15724 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15725 phba->sli4_hba.pc_sli4_params.rqv); 15726 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 15727 bf_set(lpfc_rq_context_rqe_count_1, 15728 &rq_create->u.request.context, 15729 hrq->entry_count); 15730 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; 15731 bf_set(lpfc_rq_context_rqe_size, 15732 &rq_create->u.request.context, 15733 LPFC_RQE_SIZE_8); 15734 bf_set(lpfc_rq_context_page_size, 15735 &rq_create->u.request.context, 15736 LPFC_RQ_PAGE_SIZE_4096); 15737 } else { 15738 switch (hrq->entry_count) { 15739 default: 15740 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15741 "2535 Unsupported RQ count. (%d)\n", 15742 hrq->entry_count); 15743 if (hrq->entry_count < 512) { 15744 status = -EINVAL; 15745 goto out; 15746 } 15747 /* fall through - otherwise default to smallest count */ 15748 case 512: 15749 bf_set(lpfc_rq_context_rqe_count, 15750 &rq_create->u.request.context, 15751 LPFC_RQ_RING_SIZE_512); 15752 break; 15753 case 1024: 15754 bf_set(lpfc_rq_context_rqe_count, 15755 &rq_create->u.request.context, 15756 LPFC_RQ_RING_SIZE_1024); 15757 break; 15758 case 2048: 15759 bf_set(lpfc_rq_context_rqe_count, 15760 &rq_create->u.request.context, 15761 LPFC_RQ_RING_SIZE_2048); 15762 break; 15763 case 4096: 15764 bf_set(lpfc_rq_context_rqe_count, 15765 &rq_create->u.request.context, 15766 LPFC_RQ_RING_SIZE_4096); 15767 break; 15768 } 15769 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 15770 LPFC_HDR_BUF_SIZE); 15771 } 15772 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 15773 cq->queue_id); 15774 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 15775 hrq->page_count); 15776 list_for_each_entry(dmabuf, &hrq->page_list, list) { 15777 memset(dmabuf->virt, 0, hw_page_size); 15778 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 15779 putPaddrLow(dmabuf->phys); 15780 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 15781 putPaddrHigh(dmabuf->phys); 15782 } 15783 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15784 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 15785 15786 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15787 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 15788 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15789 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15790 if (shdr_status || shdr_add_status || rc) { 15791 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15792 "2504 RQ_CREATE mailbox failed with " 15793 "status x%x add_status x%x, mbx status x%x\n", 15794 shdr_status, shdr_add_status, rc); 15795 status = -ENXIO; 15796 goto out; 15797 } 15798 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 15799 if (hrq->queue_id == 0xFFFF) { 15800 status = -ENXIO; 15801 goto out; 15802 } 15803 15804 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 15805 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format, 15806 &rq_create->u.response); 15807 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) && 15808 (hrq->db_format != LPFC_DB_RING_FORMAT)) { 15809 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15810 "3262 RQ [%d] doorbell format not " 15811 "supported: x%x\n", hrq->queue_id, 15812 hrq->db_format); 15813 status = -EINVAL; 15814 goto out; 15815 } 15816 15817 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set, 15818 &rq_create->u.response); 15819 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); 15820 if (!bar_memmap_p) { 15821 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15822 "3269 RQ[%d] failed to memmap pci " 15823 "barset:x%x\n", hrq->queue_id, 15824 pci_barset); 15825 status = -ENOMEM; 15826 goto out; 15827 } 15828 15829 db_offset = rq_create->u.response.doorbell_offset; 15830 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) && 15831 (db_offset != LPFC_ULP1_RQ_DOORBELL)) { 15832 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15833 "3270 RQ[%d] doorbell offset not " 15834 "supported: x%x\n", hrq->queue_id, 15835 db_offset); 15836 status = -EINVAL; 15837 goto out; 15838 } 15839 hrq->db_regaddr = bar_memmap_p + db_offset; 15840 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15841 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, " 15842 "format:x%x\n", hrq->queue_id, pci_barset, 15843 db_offset, hrq->db_format); 15844 } else { 15845 hrq->db_format = LPFC_DB_RING_FORMAT; 15846 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 15847 } 15848 hrq->type = LPFC_HRQ; 15849 hrq->assoc_qid = cq->queue_id; 15850 hrq->subtype = subtype; 15851 hrq->host_index = 0; 15852 hrq->hba_index = 0; 15853 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 15854 15855 /* now create the data queue */ 15856 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15857 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 15858 length, LPFC_SLI4_MBX_EMBED); 15859 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15860 phba->sli4_hba.pc_sli4_params.rqv); 15861 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 15862 bf_set(lpfc_rq_context_rqe_count_1, 15863 &rq_create->u.request.context, hrq->entry_count); 15864 if (subtype == LPFC_NVMET) 15865 rq_create->u.request.context.buffer_size = 15866 LPFC_NVMET_DATA_BUF_SIZE; 15867 else 15868 rq_create->u.request.context.buffer_size = 15869 LPFC_DATA_BUF_SIZE; 15870 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, 15871 LPFC_RQE_SIZE_8); 15872 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, 15873 (PAGE_SIZE/SLI4_PAGE_SIZE)); 15874 } else { 15875 switch (drq->entry_count) { 15876 default: 15877 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15878 "2536 Unsupported RQ count. 
(%d)\n", 15879 drq->entry_count); 15880 if (drq->entry_count < 512) { 15881 status = -EINVAL; 15882 goto out; 15883 } 15884 /* fall through - otherwise default to smallest count */ 15885 case 512: 15886 bf_set(lpfc_rq_context_rqe_count, 15887 &rq_create->u.request.context, 15888 LPFC_RQ_RING_SIZE_512); 15889 break; 15890 case 1024: 15891 bf_set(lpfc_rq_context_rqe_count, 15892 &rq_create->u.request.context, 15893 LPFC_RQ_RING_SIZE_1024); 15894 break; 15895 case 2048: 15896 bf_set(lpfc_rq_context_rqe_count, 15897 &rq_create->u.request.context, 15898 LPFC_RQ_RING_SIZE_2048); 15899 break; 15900 case 4096: 15901 bf_set(lpfc_rq_context_rqe_count, 15902 &rq_create->u.request.context, 15903 LPFC_RQ_RING_SIZE_4096); 15904 break; 15905 } 15906 if (subtype == LPFC_NVMET) 15907 bf_set(lpfc_rq_context_buf_size, 15908 &rq_create->u.request.context, 15909 LPFC_NVMET_DATA_BUF_SIZE); 15910 else 15911 bf_set(lpfc_rq_context_buf_size, 15912 &rq_create->u.request.context, 15913 LPFC_DATA_BUF_SIZE); 15914 } 15915 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 15916 cq->queue_id); 15917 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 15918 drq->page_count); 15919 list_for_each_entry(dmabuf, &drq->page_list, list) { 15920 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 15921 putPaddrLow(dmabuf->phys); 15922 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 15923 putPaddrHigh(dmabuf->phys); 15924 } 15925 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15926 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 15927 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15928 /* The IOCTL status is embedded in the mailbox subheader. */ 15929 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 15930 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15931 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15932 if (shdr_status || shdr_add_status || rc) { 15933 status = -ENXIO; 15934 goto out; 15935 } 15936 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 15937 if (drq->queue_id == 0xFFFF) { 15938 status = -ENXIO; 15939 goto out; 15940 } 15941 drq->type = LPFC_DRQ; 15942 drq->assoc_qid = cq->queue_id; 15943 drq->subtype = subtype; 15944 drq->host_index = 0; 15945 drq->hba_index = 0; 15946 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 15947 15948 /* link the header and data RQs onto the parent cq child list */ 15949 list_add_tail(&hrq->list, &cq->child_list); 15950 list_add_tail(&drq->list, &cq->child_list); 15951 15952 out: 15953 mempool_free(mbox, phba->mbox_mem_pool); 15954 return status; 15955 } 15956 15957 /** 15958 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA 15959 * @phba: HBA structure that indicates port to create a queue on. 15960 * @hrqp: The queue structure array to use to create the header receive queues. 15961 * @drqp: The queue structure array to use to create the data receive queues. 15962 * @cqp: The completion queue array to bind these receive queues to. 15963 * 15964 * This function creates a receive buffer queue pair , as detailed in @hrq and 15965 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 15966 * to the HBA. 15967 * 15968 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 15969 * struct is used to get the entry count that is necessary to determine the 15970 * number of pages to use for this queue. The @cq is used to indicate which 15971 * completion queue to bind received buffers that are posted to these queues to. 
15972 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 15973 * receive queue pair. This function is asynchronous and will wait for the 15974 * mailbox command to finish before continuing. 15975 * 15976 * On success this function will return a zero. If unable to allocate enough 15977 * memory this function will return -ENOMEM. If the queue create mailbox command 15978 * fails this function will return -ENXIO. 15979 **/ 15980 int 15981 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, 15982 struct lpfc_queue **drqp, struct lpfc_queue **cqp, 15983 uint32_t subtype) 15984 { 15985 struct lpfc_queue *hrq, *drq, *cq; 15986 struct lpfc_mbx_rq_create_v2 *rq_create; 15987 struct lpfc_dmabuf *dmabuf; 15988 LPFC_MBOXQ_t *mbox; 15989 int rc, length, alloclen, status = 0; 15990 int cnt, idx, numrq, page_idx = 0; 15991 uint32_t shdr_status, shdr_add_status; 15992 union lpfc_sli4_cfg_shdr *shdr; 15993 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15994 15995 numrq = phba->cfg_nvmet_mrq; 15996 /* sanity check on array memory */ 15997 if (!hrqp || !drqp || !cqp || !numrq) 15998 return -ENODEV; 15999 if (!phba->sli4_hba.pc_sli4_params.supported) 16000 hw_page_size = SLI4_PAGE_SIZE; 16001 16002 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16003 if (!mbox) 16004 return -ENOMEM; 16005 16006 length = sizeof(struct lpfc_mbx_rq_create_v2); 16007 length += ((2 * numrq * hrqp[0]->page_count) * 16008 sizeof(struct dma_address)); 16009 16010 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16011 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length, 16012 LPFC_SLI4_MBX_NEMBED); 16013 if (alloclen < length) { 16014 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16015 "3099 Allocated DMA memory size (%d) is " 16016 "less than the requested DMA memory size " 16017 "(%d)\n", alloclen, length); 16018 status = -ENOMEM; 16019 goto out; 16020 } 16021 16022 16023 16024 rq_create = mbox->sge_array->addr[0]; 16025 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr; 16026 16027 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2); 16028 cnt = 0; 16029 16030 for (idx = 0; idx < numrq; idx++) { 16031 hrq = hrqp[idx]; 16032 drq = drqp[idx]; 16033 cq = cqp[idx]; 16034 16035 /* sanity check on queue memory */ 16036 if (!hrq || !drq || !cq) { 16037 status = -ENODEV; 16038 goto out; 16039 } 16040 16041 if (hrq->entry_count != drq->entry_count) { 16042 status = -EINVAL; 16043 goto out; 16044 } 16045 16046 if (idx == 0) { 16047 bf_set(lpfc_mbx_rq_create_num_pages, 16048 &rq_create->u.request, 16049 hrq->page_count); 16050 bf_set(lpfc_mbx_rq_create_rq_cnt, 16051 &rq_create->u.request, (numrq * 2)); 16052 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request, 16053 1); 16054 bf_set(lpfc_rq_context_base_cq, 16055 &rq_create->u.request.context, 16056 cq->queue_id); 16057 bf_set(lpfc_rq_context_data_size, 16058 &rq_create->u.request.context, 16059 LPFC_NVMET_DATA_BUF_SIZE); 16060 bf_set(lpfc_rq_context_hdr_size, 16061 &rq_create->u.request.context, 16062 LPFC_HDR_BUF_SIZE); 16063 bf_set(lpfc_rq_context_rqe_count_1, 16064 &rq_create->u.request.context, 16065 hrq->entry_count); 16066 bf_set(lpfc_rq_context_rqe_size, 16067 &rq_create->u.request.context, 16068 LPFC_RQE_SIZE_8); 16069 bf_set(lpfc_rq_context_page_size, 16070 &rq_create->u.request.context, 16071 (PAGE_SIZE/SLI4_PAGE_SIZE)); 16072 } 16073 rc = 0; 16074 list_for_each_entry(dmabuf, &hrq->page_list, list) { 16075 memset(dmabuf->virt, 0, hw_page_size); 16076 cnt = page_idx + dmabuf->buffer_tag; 
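			/*
			 * All header and data RQ pages share one non-embedded
			 * payload, so index the page array by buffer_tag (the
			 * page's position within this queue) plus page_idx,
			 * the running count of pages already recorded for the
			 * preceding queues in this command.
			 */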
16077 rq_create->u.request.page[cnt].addr_lo = 16078 putPaddrLow(dmabuf->phys); 16079 rq_create->u.request.page[cnt].addr_hi = 16080 putPaddrHigh(dmabuf->phys); 16081 rc++; 16082 } 16083 page_idx += rc; 16084 16085 rc = 0; 16086 list_for_each_entry(dmabuf, &drq->page_list, list) { 16087 memset(dmabuf->virt, 0, hw_page_size); 16088 cnt = page_idx + dmabuf->buffer_tag; 16089 rq_create->u.request.page[cnt].addr_lo = 16090 putPaddrLow(dmabuf->phys); 16091 rq_create->u.request.page[cnt].addr_hi = 16092 putPaddrHigh(dmabuf->phys); 16093 rc++; 16094 } 16095 page_idx += rc; 16096 16097 hrq->db_format = LPFC_DB_RING_FORMAT; 16098 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 16099 hrq->type = LPFC_HRQ; 16100 hrq->assoc_qid = cq->queue_id; 16101 hrq->subtype = subtype; 16102 hrq->host_index = 0; 16103 hrq->hba_index = 0; 16104 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 16105 16106 drq->db_format = LPFC_DB_RING_FORMAT; 16107 drq->db_regaddr = phba->sli4_hba.RQDBregaddr; 16108 drq->type = LPFC_DRQ; 16109 drq->assoc_qid = cq->queue_id; 16110 drq->subtype = subtype; 16111 drq->host_index = 0; 16112 drq->hba_index = 0; 16113 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 16114 16115 list_add_tail(&hrq->list, &cq->child_list); 16116 list_add_tail(&drq->list, &cq->child_list); 16117 } 16118 16119 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16120 /* The IOCTL status is embedded in the mailbox subheader. */ 16121 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16122 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16123 if (shdr_status || shdr_add_status || rc) { 16124 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16125 "3120 RQ_CREATE mailbox failed with " 16126 "status x%x add_status x%x, mbx status x%x\n", 16127 shdr_status, shdr_add_status, rc); 16128 status = -ENXIO; 16129 goto out; 16130 } 16131 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 16132 if (rc == 0xFFFF) { 16133 status = -ENXIO; 16134 goto out; 16135 } 16136 16137 /* Initialize all RQs with associated queue id */ 16138 for (idx = 0; idx < numrq; idx++) { 16139 hrq = hrqp[idx]; 16140 hrq->queue_id = rc + (2 * idx); 16141 drq = drqp[idx]; 16142 drq->queue_id = rc + (2 * idx) + 1; 16143 } 16144 16145 out: 16146 lpfc_sli4_mbox_cmd_free(phba, mbox); 16147 return status; 16148 } 16149 16150 /** 16151 * lpfc_eq_destroy - Destroy an event Queue on the HBA 16152 * @eq: The queue structure associated with the queue to destroy. 16153 * 16154 * This function destroys a queue, as detailed in @eq by sending an mailbox 16155 * command, specific to the type of queue, to the HBA. 16156 * 16157 * The @eq struct is used to get the queue ID of the queue to destroy. 16158 * 16159 * On success this function will return a zero. If the queue destroy mailbox 16160 * command fails this function will return -ENXIO. 
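 *
 * Teardown sketch (illustrative only): queues are normally destroyed from
 * the leaves up, so the WQs/RQs hanging off a CQ go first, then the CQ,
 * and the parent EQ last, using the queue pointers returned by the
 * corresponding create calls:
 *
 *   lpfc_wq_destroy(phba, wq);
 *   lpfc_cq_destroy(phba, cq);
 *   lpfc_eq_destroy(phba, eq);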
16161 **/ 16162 int 16163 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) 16164 { 16165 LPFC_MBOXQ_t *mbox; 16166 int rc, length, status = 0; 16167 uint32_t shdr_status, shdr_add_status; 16168 union lpfc_sli4_cfg_shdr *shdr; 16169 16170 /* sanity check on queue memory */ 16171 if (!eq) 16172 return -ENODEV; 16173 16174 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); 16175 if (!mbox) 16176 return -ENOMEM; 16177 length = (sizeof(struct lpfc_mbx_eq_destroy) - 16178 sizeof(struct lpfc_sli4_cfg_mhdr)); 16179 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 16180 LPFC_MBOX_OPCODE_EQ_DESTROY, 16181 length, LPFC_SLI4_MBX_EMBED); 16182 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, 16183 eq->queue_id); 16184 mbox->vport = eq->phba->pport; 16185 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16186 16187 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); 16188 /* The IOCTL status is embedded in the mailbox subheader. */ 16189 shdr = (union lpfc_sli4_cfg_shdr *) 16190 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; 16191 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16192 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16193 if (shdr_status || shdr_add_status || rc) { 16194 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16195 "2505 EQ_DESTROY mailbox failed with " 16196 "status x%x add_status x%x, mbx status x%x\n", 16197 shdr_status, shdr_add_status, rc); 16198 status = -ENXIO; 16199 } 16200 16201 /* Remove eq from any list */ 16202 list_del_init(&eq->list); 16203 mempool_free(mbox, eq->phba->mbox_mem_pool); 16204 return status; 16205 } 16206 16207 /** 16208 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA 16209 * @cq: The queue structure associated with the queue to destroy. 16210 * 16211 * This function destroys a queue, as detailed in @cq by sending an mailbox 16212 * command, specific to the type of queue, to the HBA. 16213 * 16214 * The @cq struct is used to get the queue ID of the queue to destroy. 16215 * 16216 * On success this function will return a zero. If the queue destroy mailbox 16217 * command fails this function will return -ENXIO. 16218 **/ 16219 int 16220 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) 16221 { 16222 LPFC_MBOXQ_t *mbox; 16223 int rc, length, status = 0; 16224 uint32_t shdr_status, shdr_add_status; 16225 union lpfc_sli4_cfg_shdr *shdr; 16226 16227 /* sanity check on queue memory */ 16228 if (!cq) 16229 return -ENODEV; 16230 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); 16231 if (!mbox) 16232 return -ENOMEM; 16233 length = (sizeof(struct lpfc_mbx_cq_destroy) - 16234 sizeof(struct lpfc_sli4_cfg_mhdr)); 16235 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 16236 LPFC_MBOX_OPCODE_CQ_DESTROY, 16237 length, LPFC_SLI4_MBX_EMBED); 16238 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, 16239 cq->queue_id); 16240 mbox->vport = cq->phba->pport; 16241 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16242 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); 16243 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 16244 shdr = (union lpfc_sli4_cfg_shdr *) 16245 &mbox->u.mqe.un.wq_create.header.cfg_shdr; 16246 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16247 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16248 if (shdr_status || shdr_add_status || rc) { 16249 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16250 "2506 CQ_DESTROY mailbox failed with " 16251 "status x%x add_status x%x, mbx status x%x\n", 16252 shdr_status, shdr_add_status, rc); 16253 status = -ENXIO; 16254 } 16255 /* Remove cq from any list */ 16256 list_del_init(&cq->list); 16257 mempool_free(mbox, cq->phba->mbox_mem_pool); 16258 return status; 16259 } 16260 16261 /** 16262 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA 16263 * @qm: The queue structure associated with the queue to destroy. 16264 * 16265 * This function destroys a queue, as detailed in @mq by sending an mailbox 16266 * command, specific to the type of queue, to the HBA. 16267 * 16268 * The @mq struct is used to get the queue ID of the queue to destroy. 16269 * 16270 * On success this function will return a zero. If the queue destroy mailbox 16271 * command fails this function will return -ENXIO. 16272 **/ 16273 int 16274 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) 16275 { 16276 LPFC_MBOXQ_t *mbox; 16277 int rc, length, status = 0; 16278 uint32_t shdr_status, shdr_add_status; 16279 union lpfc_sli4_cfg_shdr *shdr; 16280 16281 /* sanity check on queue memory */ 16282 if (!mq) 16283 return -ENODEV; 16284 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); 16285 if (!mbox) 16286 return -ENOMEM; 16287 length = (sizeof(struct lpfc_mbx_mq_destroy) - 16288 sizeof(struct lpfc_sli4_cfg_mhdr)); 16289 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 16290 LPFC_MBOX_OPCODE_MQ_DESTROY, 16291 length, LPFC_SLI4_MBX_EMBED); 16292 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, 16293 mq->queue_id); 16294 mbox->vport = mq->phba->pport; 16295 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16296 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL); 16297 /* The IOCTL status is embedded in the mailbox subheader. */ 16298 shdr = (union lpfc_sli4_cfg_shdr *) 16299 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; 16300 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16301 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16302 if (shdr_status || shdr_add_status || rc) { 16303 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16304 "2507 MQ_DESTROY mailbox failed with " 16305 "status x%x add_status x%x, mbx status x%x\n", 16306 shdr_status, shdr_add_status, rc); 16307 status = -ENXIO; 16308 } 16309 /* Remove mq from any list */ 16310 list_del_init(&mq->list); 16311 mempool_free(mbox, mq->phba->mbox_mem_pool); 16312 return status; 16313 } 16314 16315 /** 16316 * lpfc_wq_destroy - Destroy a Work Queue on the HBA 16317 * @wq: The queue structure associated with the queue to destroy. 16318 * 16319 * This function destroys a queue, as detailed in @wq by sending an mailbox 16320 * command, specific to the type of queue, to the HBA. 16321 * 16322 * The @wq struct is used to get the queue ID of the queue to destroy. 16323 * 16324 * On success this function will return a zero. If the queue destroy mailbox 16325 * command fails this function will return -ENXIO. 
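 *
 * Note that besides issuing WQ_DESTROY this routine also frees the
 * driver-side ring (wq->pring) that lpfc_wq_create() allocated, so the
 * caller must not reference wq->pring once this returns. A typical
 * teardown pairs it with freeing the queue object itself (sketch only):
 *
 *   lpfc_wq_destroy(phba, wq);
 *   lpfc_sli4_queue_free(wq);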
16326 **/ 16327 int 16328 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) 16329 { 16330 LPFC_MBOXQ_t *mbox; 16331 int rc, length, status = 0; 16332 uint32_t shdr_status, shdr_add_status; 16333 union lpfc_sli4_cfg_shdr *shdr; 16334 16335 /* sanity check on queue memory */ 16336 if (!wq) 16337 return -ENODEV; 16338 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); 16339 if (!mbox) 16340 return -ENOMEM; 16341 length = (sizeof(struct lpfc_mbx_wq_destroy) - 16342 sizeof(struct lpfc_sli4_cfg_mhdr)); 16343 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16344 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, 16345 length, LPFC_SLI4_MBX_EMBED); 16346 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, 16347 wq->queue_id); 16348 mbox->vport = wq->phba->pport; 16349 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16350 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); 16351 shdr = (union lpfc_sli4_cfg_shdr *) 16352 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; 16353 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16354 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16355 if (shdr_status || shdr_add_status || rc) { 16356 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16357 "2508 WQ_DESTROY mailbox failed with " 16358 "status x%x add_status x%x, mbx status x%x\n", 16359 shdr_status, shdr_add_status, rc); 16360 status = -ENXIO; 16361 } 16362 /* Remove wq from any list */ 16363 list_del_init(&wq->list); 16364 kfree(wq->pring); 16365 wq->pring = NULL; 16366 mempool_free(mbox, wq->phba->mbox_mem_pool); 16367 return status; 16368 } 16369 16370 /** 16371 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA 16372 * @rq: The queue structure associated with the queue to destroy. 16373 * 16374 * This function destroys a queue, as detailed in @rq by sending an mailbox 16375 * command, specific to the type of queue, to the HBA. 16376 * 16377 * The @rq struct is used to get the queue ID of the queue to destroy. 16378 * 16379 * On success this function will return a zero. If the queue destroy mailbox 16380 * command fails this function will return -ENXIO. 16381 **/ 16382 int 16383 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, 16384 struct lpfc_queue *drq) 16385 { 16386 LPFC_MBOXQ_t *mbox; 16387 int rc, length, status = 0; 16388 uint32_t shdr_status, shdr_add_status; 16389 union lpfc_sli4_cfg_shdr *shdr; 16390 16391 /* sanity check on queue memory */ 16392 if (!hrq || !drq) 16393 return -ENODEV; 16394 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); 16395 if (!mbox) 16396 return -ENOMEM; 16397 length = (sizeof(struct lpfc_mbx_rq_destroy) - 16398 sizeof(struct lpfc_sli4_cfg_mhdr)); 16399 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16400 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, 16401 length, LPFC_SLI4_MBX_EMBED); 16402 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 16403 hrq->queue_id); 16404 mbox->vport = hrq->phba->pport; 16405 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16406 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); 16407 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 16408 shdr = (union lpfc_sli4_cfg_shdr *) 16409 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 16410 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16411 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16412 if (shdr_status || shdr_add_status || rc) { 16413 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16414 "2509 RQ_DESTROY mailbox failed with " 16415 "status x%x add_status x%x, mbx status x%x\n", 16416 shdr_status, shdr_add_status, rc); 16417 if (rc != MBX_TIMEOUT) 16418 mempool_free(mbox, hrq->phba->mbox_mem_pool); 16419 return -ENXIO; 16420 } 16421 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 16422 drq->queue_id); 16423 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL); 16424 shdr = (union lpfc_sli4_cfg_shdr *) 16425 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 16426 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16427 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16428 if (shdr_status || shdr_add_status || rc) { 16429 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16430 "2510 RQ_DESTROY mailbox failed with " 16431 "status x%x add_status x%x, mbx status x%x\n", 16432 shdr_status, shdr_add_status, rc); 16433 status = -ENXIO; 16434 } 16435 list_del_init(&hrq->list); 16436 list_del_init(&drq->list); 16437 mempool_free(mbox, hrq->phba->mbox_mem_pool); 16438 return status; 16439 } 16440 16441 /** 16442 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA 16443 * @phba: The virtual port for which this call being executed. 16444 * @pdma_phys_addr0: Physical address of the 1st SGL page. 16445 * @pdma_phys_addr1: Physical address of the 2nd SGL page. 16446 * @xritag: the xritag that ties this io to the SGL pages. 16447 * 16448 * This routine will post the sgl pages for the IO that has the xritag 16449 * that is in the iocbq structure. The xritag is assigned during iocbq 16450 * creation and persists for as long as the driver is loaded. 16451 * if the caller has fewer than 256 scatter gather segments to map then 16452 * pdma_phys_addr1 should be 0. 16453 * If the caller needs to map more than 256 scatter gather segment then 16454 * pdma_phys_addr1 should be a valid physical address. 16455 * physical address for SGLs must be 64 byte aligned. 16456 * If you are going to map 2 SGL's then the first one must have 256 entries 16457 * the second sgl can have between 1 and 256 entries. 
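 *
 * Illustrative call for a buffer whose SGL fits in a single page (the
 * field names follow the lpfc_io_buf usage later in this file; this is a
 * sketch, not a required calling convention):
 *
 *   rc = lpfc_sli4_post_sgl(phba, lpfc_ncmd->dma_phys_sgl, 0,
 *                           lpfc_ncmd->cur_iocbq.sli4_xritag);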
16458 * 16459 * Return codes: 16460 * 0 - Success 16461 * -ENXIO, -ENOMEM - Failure 16462 **/ 16463 int 16464 lpfc_sli4_post_sgl(struct lpfc_hba *phba, 16465 dma_addr_t pdma_phys_addr0, 16466 dma_addr_t pdma_phys_addr1, 16467 uint16_t xritag) 16468 { 16469 struct lpfc_mbx_post_sgl_pages *post_sgl_pages; 16470 LPFC_MBOXQ_t *mbox; 16471 int rc; 16472 uint32_t shdr_status, shdr_add_status; 16473 uint32_t mbox_tmo; 16474 union lpfc_sli4_cfg_shdr *shdr; 16475 16476 if (xritag == NO_XRI) { 16477 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16478 "0364 Invalid param:\n"); 16479 return -EINVAL; 16480 } 16481 16482 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16483 if (!mbox) 16484 return -ENOMEM; 16485 16486 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16487 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 16488 sizeof(struct lpfc_mbx_post_sgl_pages) - 16489 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); 16490 16491 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *) 16492 &mbox->u.mqe.un.post_sgl_pages; 16493 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag); 16494 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1); 16495 16496 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo = 16497 cpu_to_le32(putPaddrLow(pdma_phys_addr0)); 16498 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi = 16499 cpu_to_le32(putPaddrHigh(pdma_phys_addr0)); 16500 16501 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo = 16502 cpu_to_le32(putPaddrLow(pdma_phys_addr1)); 16503 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi = 16504 cpu_to_le32(putPaddrHigh(pdma_phys_addr1)); 16505 if (!phba->sli4_hba.intr_enable) 16506 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16507 else { 16508 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16509 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16510 } 16511 /* The IOCTL status is embedded in the mailbox subheader. */ 16512 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr; 16513 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16514 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16515 if (rc != MBX_TIMEOUT) 16516 mempool_free(mbox, phba->mbox_mem_pool); 16517 if (shdr_status || shdr_add_status || rc) { 16518 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16519 "2511 POST_SGL mailbox failed with " 16520 "status x%x add_status x%x, mbx status x%x\n", 16521 shdr_status, shdr_add_status, rc); 16522 } 16523 return 0; 16524 } 16525 16526 /** 16527 * lpfc_sli4_alloc_xri - Get an available rpi in the device's range 16528 * @phba: pointer to lpfc hba data structure. 16529 * 16530 * This routine is invoked to post rpi header templates to the 16531 * HBA consistent with the SLI-4 interface spec. This routine 16532 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 16533 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 16534 * 16535 * Returns 16536 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 16537 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 16538 **/ 16539 static uint16_t 16540 lpfc_sli4_alloc_xri(struct lpfc_hba *phba) 16541 { 16542 unsigned long xri; 16543 16544 /* 16545 * Fetch the next logical xri. Because this index is logical, 16546 * the driver starts at 0 each time. 
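	 * A logical XRI is simply the first clear bit in xri_bmask: the bit
	 * is set while hbalock is held so two allocators can never hand out
	 * the same index, and xri_used tracks the count for bookkeeping.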
16547 */ 16548 spin_lock_irq(&phba->hbalock); 16549 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask, 16550 phba->sli4_hba.max_cfg_param.max_xri, 0); 16551 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) { 16552 spin_unlock_irq(&phba->hbalock); 16553 return NO_XRI; 16554 } else { 16555 set_bit(xri, phba->sli4_hba.xri_bmask); 16556 phba->sli4_hba.max_cfg_param.xri_used++; 16557 } 16558 spin_unlock_irq(&phba->hbalock); 16559 return xri; 16560 } 16561 16562 /** 16563 * lpfc_sli4_free_xri - Release an xri for reuse. 16564 * @phba: pointer to lpfc hba data structure. 16565 * 16566 * This routine is invoked to release an xri to the pool of 16567 * available rpis maintained by the driver. 16568 **/ 16569 static void 16570 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) 16571 { 16572 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) { 16573 phba->sli4_hba.max_cfg_param.xri_used--; 16574 } 16575 } 16576 16577 /** 16578 * lpfc_sli4_free_xri - Release an xri for reuse. 16579 * @phba: pointer to lpfc hba data structure. 16580 * 16581 * This routine is invoked to release an xri to the pool of 16582 * available rpis maintained by the driver. 16583 **/ 16584 void 16585 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) 16586 { 16587 spin_lock_irq(&phba->hbalock); 16588 __lpfc_sli4_free_xri(phba, xri); 16589 spin_unlock_irq(&phba->hbalock); 16590 } 16591 16592 /** 16593 * lpfc_sli4_next_xritag - Get an xritag for the io 16594 * @phba: Pointer to HBA context object. 16595 * 16596 * This function gets an xritag for the iocb. If there is no unused xritag 16597 * it will return 0xffff. 16598 * The function returns the allocated xritag if successful, else returns zero. 16599 * Zero is not a valid xritag. 16600 * The caller is not required to hold any lock. 16601 **/ 16602 uint16_t 16603 lpfc_sli4_next_xritag(struct lpfc_hba *phba) 16604 { 16605 uint16_t xri_index; 16606 16607 xri_index = lpfc_sli4_alloc_xri(phba); 16608 if (xri_index == NO_XRI) 16609 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 16610 "2004 Failed to allocate XRI.last XRITAG is %d" 16611 " Max XRI is %d, Used XRI is %d\n", 16612 xri_index, 16613 phba->sli4_hba.max_cfg_param.max_xri, 16614 phba->sli4_hba.max_cfg_param.xri_used); 16615 return xri_index; 16616 } 16617 16618 /** 16619 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port. 16620 * @phba: pointer to lpfc hba data structure. 16621 * @post_sgl_list: pointer to els sgl entry list. 16622 * @count: number of els sgl entries on the list. 16623 * 16624 * This routine is invoked to post a block of driver's sgl pages to the 16625 * HBA using non-embedded mailbox command. No Lock is held. This routine 16626 * is only called when the driver is loading and after all IO has been 16627 * stopped. 
16628 **/ 16629 static int 16630 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba, 16631 struct list_head *post_sgl_list, 16632 int post_cnt) 16633 { 16634 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 16635 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 16636 struct sgl_page_pairs *sgl_pg_pairs; 16637 void *viraddr; 16638 LPFC_MBOXQ_t *mbox; 16639 uint32_t reqlen, alloclen, pg_pairs; 16640 uint32_t mbox_tmo; 16641 uint16_t xritag_start = 0; 16642 int rc = 0; 16643 uint32_t shdr_status, shdr_add_status; 16644 union lpfc_sli4_cfg_shdr *shdr; 16645 16646 reqlen = post_cnt * sizeof(struct sgl_page_pairs) + 16647 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 16648 if (reqlen > SLI4_PAGE_SIZE) { 16649 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16650 "2559 Block sgl registration required DMA " 16651 "size (%d) great than a page\n", reqlen); 16652 return -ENOMEM; 16653 } 16654 16655 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16656 if (!mbox) 16657 return -ENOMEM; 16658 16659 /* Allocate DMA memory and set up the non-embedded mailbox command */ 16660 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16661 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 16662 LPFC_SLI4_MBX_NEMBED); 16663 16664 if (alloclen < reqlen) { 16665 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16666 "0285 Allocated DMA memory size (%d) is " 16667 "less than the requested DMA memory " 16668 "size (%d)\n", alloclen, reqlen); 16669 lpfc_sli4_mbox_cmd_free(phba, mbox); 16670 return -ENOMEM; 16671 } 16672 /* Set up the SGL pages in the non-embedded DMA pages */ 16673 viraddr = mbox->sge_array->addr[0]; 16674 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 16675 sgl_pg_pairs = &sgl->sgl_pg_pairs; 16676 16677 pg_pairs = 0; 16678 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) { 16679 /* Set up the sge entry */ 16680 sgl_pg_pairs->sgl_pg0_addr_lo = 16681 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 16682 sgl_pg_pairs->sgl_pg0_addr_hi = 16683 cpu_to_le32(putPaddrHigh(sglq_entry->phys)); 16684 sgl_pg_pairs->sgl_pg1_addr_lo = 16685 cpu_to_le32(putPaddrLow(0)); 16686 sgl_pg_pairs->sgl_pg1_addr_hi = 16687 cpu_to_le32(putPaddrHigh(0)); 16688 16689 /* Keep the first xritag on the list */ 16690 if (pg_pairs == 0) 16691 xritag_start = sglq_entry->sli4_xritag; 16692 sgl_pg_pairs++; 16693 pg_pairs++; 16694 } 16695 16696 /* Complete initialization and perform endian conversion. */ 16697 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 16698 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt); 16699 sgl->word0 = cpu_to_le32(sgl->word0); 16700 16701 if (!phba->sli4_hba.intr_enable) 16702 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16703 else { 16704 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16705 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16706 } 16707 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 16708 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16709 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16710 if (rc != MBX_TIMEOUT) 16711 lpfc_sli4_mbox_cmd_free(phba, mbox); 16712 if (shdr_status || shdr_add_status || rc) { 16713 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16714 "2513 POST_SGL_BLOCK mailbox command failed " 16715 "status x%x add_status x%x mbx status x%x\n", 16716 shdr_status, shdr_add_status, rc); 16717 rc = -ENXIO; 16718 } 16719 return rc; 16720 } 16721 16722 /** 16723 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware 16724 * @phba: pointer to lpfc hba data structure. 
16725 * @nblist: pointer to nvme buffer list. 16726 * @count: number of scsi buffers on the list. 16727 * 16728 * This routine is invoked to post a block of @count scsi sgl pages from a 16729 * SCSI buffer list @nblist to the HBA using non-embedded mailbox command. 16730 * No Lock is held. 16731 * 16732 **/ 16733 static int 16734 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist, 16735 int count) 16736 { 16737 struct lpfc_io_buf *lpfc_ncmd; 16738 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 16739 struct sgl_page_pairs *sgl_pg_pairs; 16740 void *viraddr; 16741 LPFC_MBOXQ_t *mbox; 16742 uint32_t reqlen, alloclen, pg_pairs; 16743 uint32_t mbox_tmo; 16744 uint16_t xritag_start = 0; 16745 int rc = 0; 16746 uint32_t shdr_status, shdr_add_status; 16747 dma_addr_t pdma_phys_bpl1; 16748 union lpfc_sli4_cfg_shdr *shdr; 16749 16750 /* Calculate the requested length of the dma memory */ 16751 reqlen = count * sizeof(struct sgl_page_pairs) + 16752 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 16753 if (reqlen > SLI4_PAGE_SIZE) { 16754 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 16755 "6118 Block sgl registration required DMA " 16756 "size (%d) great than a page\n", reqlen); 16757 return -ENOMEM; 16758 } 16759 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16760 if (!mbox) { 16761 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16762 "6119 Failed to allocate mbox cmd memory\n"); 16763 return -ENOMEM; 16764 } 16765 16766 /* Allocate DMA memory and set up the non-embedded mailbox command */ 16767 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16768 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 16769 reqlen, LPFC_SLI4_MBX_NEMBED); 16770 16771 if (alloclen < reqlen) { 16772 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16773 "6120 Allocated DMA memory size (%d) is " 16774 "less than the requested DMA memory " 16775 "size (%d)\n", alloclen, reqlen); 16776 lpfc_sli4_mbox_cmd_free(phba, mbox); 16777 return -ENOMEM; 16778 } 16779 16780 /* Get the first SGE entry from the non-embedded DMA memory */ 16781 viraddr = mbox->sge_array->addr[0]; 16782 16783 /* Set up the SGL pages in the non-embedded DMA pages */ 16784 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 16785 sgl_pg_pairs = &sgl->sgl_pg_pairs; 16786 16787 pg_pairs = 0; 16788 list_for_each_entry(lpfc_ncmd, nblist, list) { 16789 /* Set up the sge entry */ 16790 sgl_pg_pairs->sgl_pg0_addr_lo = 16791 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl)); 16792 sgl_pg_pairs->sgl_pg0_addr_hi = 16793 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl)); 16794 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 16795 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl + 16796 SGL_PAGE_SIZE; 16797 else 16798 pdma_phys_bpl1 = 0; 16799 sgl_pg_pairs->sgl_pg1_addr_lo = 16800 cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 16801 sgl_pg_pairs->sgl_pg1_addr_hi = 16802 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 16803 /* Keep the first xritag on the list */ 16804 if (pg_pairs == 0) 16805 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag; 16806 sgl_pg_pairs++; 16807 pg_pairs++; 16808 } 16809 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 16810 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 16811 /* Perform endian conversion if necessary */ 16812 sgl->word0 = cpu_to_le32(sgl->word0); 16813 16814 if (!phba->sli4_hba.intr_enable) { 16815 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16816 } else { 16817 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16818 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16819 } 16820 shdr = (union 
lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr; 16821 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16822 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16823 if (rc != MBX_TIMEOUT) 16824 lpfc_sli4_mbox_cmd_free(phba, mbox); 16825 if (shdr_status || shdr_add_status || rc) { 16826 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16827 "6125 POST_SGL_BLOCK mailbox command failed " 16828 "status x%x add_status x%x mbx status x%x\n", 16829 shdr_status, shdr_add_status, rc); 16830 rc = -ENXIO; 16831 } 16832 return rc; 16833 } 16834 16835 /** 16836 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list 16837 * @phba: pointer to lpfc hba data structure. 16838 * @post_nblist: pointer to the nvme buffer list. 16839 * 16840 * This routine walks a list of nvme buffers that was passed in. It attempts 16841 * to construct blocks of nvme buffer sgls which contains contiguous xris and 16842 * uses the non-embedded SGL block post mailbox commands to post to the port. 16843 * For single NVME buffer sgl with non-contiguous xri, if any, it shall use 16844 * embedded SGL post mailbox command for posting. The @post_nblist passed in 16845 * must be local list, thus no lock is needed when manipulate the list. 16846 * 16847 * Returns: 0 = failure, non-zero number of successfully posted buffers. 16848 **/ 16849 int 16850 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba, 16851 struct list_head *post_nblist, int sb_count) 16852 { 16853 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; 16854 int status, sgl_size; 16855 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; 16856 dma_addr_t pdma_phys_sgl1; 16857 int last_xritag = NO_XRI; 16858 int cur_xritag; 16859 LIST_HEAD(prep_nblist); 16860 LIST_HEAD(blck_nblist); 16861 LIST_HEAD(nvme_nblist); 16862 16863 /* sanity check */ 16864 if (sb_count <= 0) 16865 return -EINVAL; 16866 16867 sgl_size = phba->cfg_sg_dma_buf_size; 16868 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) { 16869 list_del_init(&lpfc_ncmd->list); 16870 block_cnt++; 16871 if ((last_xritag != NO_XRI) && 16872 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) { 16873 /* a hole in xri block, form a sgl posting block */ 16874 list_splice_init(&prep_nblist, &blck_nblist); 16875 post_cnt = block_cnt - 1; 16876 /* prepare list for next posting block */ 16877 list_add_tail(&lpfc_ncmd->list, &prep_nblist); 16878 block_cnt = 1; 16879 } else { 16880 /* prepare list for next posting block */ 16881 list_add_tail(&lpfc_ncmd->list, &prep_nblist); 16882 /* enough sgls for non-embed sgl mbox command */ 16883 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { 16884 list_splice_init(&prep_nblist, &blck_nblist); 16885 post_cnt = block_cnt; 16886 block_cnt = 0; 16887 } 16888 } 16889 num_posting++; 16890 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; 16891 16892 /* end of repost sgl list condition for NVME buffers */ 16893 if (num_posting == sb_count) { 16894 if (post_cnt == 0) { 16895 /* last sgl posting block */ 16896 list_splice_init(&prep_nblist, &blck_nblist); 16897 post_cnt = block_cnt; 16898 } else if (block_cnt == 1) { 16899 /* last single sgl with non-contiguous xri */ 16900 if (sgl_size > SGL_PAGE_SIZE) 16901 pdma_phys_sgl1 = 16902 lpfc_ncmd->dma_phys_sgl + 16903 SGL_PAGE_SIZE; 16904 else 16905 pdma_phys_sgl1 = 0; 16906 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; 16907 status = lpfc_sli4_post_sgl( 16908 phba, lpfc_ncmd->dma_phys_sgl, 16909 pdma_phys_sgl1, cur_xritag); 16910 if (status) { 16911 /* Post error. Buffer unavailable. 
*/ 16912 lpfc_ncmd->flags |= 16913 LPFC_SBUF_NOT_POSTED; 16914 } else { 16915 /* Post success. Bffer available. */ 16916 lpfc_ncmd->flags &= 16917 ~LPFC_SBUF_NOT_POSTED; 16918 lpfc_ncmd->status = IOSTAT_SUCCESS; 16919 num_posted++; 16920 } 16921 /* success, put on NVME buffer sgl list */ 16922 list_add_tail(&lpfc_ncmd->list, &nvme_nblist); 16923 } 16924 } 16925 16926 /* continue until a nembed page worth of sgls */ 16927 if (post_cnt == 0) 16928 continue; 16929 16930 /* post block of NVME buffer list sgls */ 16931 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist, 16932 post_cnt); 16933 16934 /* don't reset xirtag due to hole in xri block */ 16935 if (block_cnt == 0) 16936 last_xritag = NO_XRI; 16937 16938 /* reset NVME buffer post count for next round of posting */ 16939 post_cnt = 0; 16940 16941 /* put posted NVME buffer-sgl posted on NVME buffer sgl list */ 16942 while (!list_empty(&blck_nblist)) { 16943 list_remove_head(&blck_nblist, lpfc_ncmd, 16944 struct lpfc_io_buf, list); 16945 if (status) { 16946 /* Post error. Mark buffer unavailable. */ 16947 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED; 16948 } else { 16949 /* Post success, Mark buffer available. */ 16950 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED; 16951 lpfc_ncmd->status = IOSTAT_SUCCESS; 16952 num_posted++; 16953 } 16954 list_add_tail(&lpfc_ncmd->list, &nvme_nblist); 16955 } 16956 } 16957 /* Push NVME buffers with sgl posted to the available list */ 16958 lpfc_io_buf_replenish(phba, &nvme_nblist); 16959 16960 return num_posted; 16961 } 16962 16963 /** 16964 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 16965 * @phba: pointer to lpfc_hba struct that the frame was received on 16966 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 16967 * 16968 * This function checks the fields in the @fc_hdr to see if the FC frame is a 16969 * valid type of frame that the LPFC driver will handle. This function will 16970 * return a zero if the frame is a valid frame or a non zero value when the 16971 * frame does not pass the check. 
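 *
 * Callers use it as a drop filter on unsolicited receive, roughly (sketch
 * only; the hbq_dmabuf field names mirror how the receive path maps the
 * header buffer elsewhere in the driver):
 *
 *   fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
 *   if (lpfc_fc_frame_check(phba, fc_hdr))
 *           goto drop_frame;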
16972 **/ 16973 static int 16974 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) 16975 { 16976 /* make rctl_names static to save stack space */ 16977 struct fc_vft_header *fc_vft_hdr; 16978 uint32_t *header = (uint32_t *) fc_hdr; 16979 16980 switch (fc_hdr->fh_r_ctl) { 16981 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 16982 case FC_RCTL_DD_SOL_DATA: /* solicited data */ 16983 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ 16984 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ 16985 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ 16986 case FC_RCTL_DD_DATA_DESC: /* data descriptor */ 16987 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ 16988 case FC_RCTL_DD_CMD_STATUS: /* command status */ 16989 case FC_RCTL_ELS_REQ: /* extended link services request */ 16990 case FC_RCTL_ELS_REP: /* extended link services reply */ 16991 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ 16992 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ 16993 case FC_RCTL_BA_NOP: /* basic link service NOP */ 16994 case FC_RCTL_BA_ABTS: /* basic link service abort */ 16995 case FC_RCTL_BA_RMC: /* remove connection */ 16996 case FC_RCTL_BA_ACC: /* basic accept */ 16997 case FC_RCTL_BA_RJT: /* basic reject */ 16998 case FC_RCTL_BA_PRMT: 16999 case FC_RCTL_ACK_1: /* acknowledge_1 */ 17000 case FC_RCTL_ACK_0: /* acknowledge_0 */ 17001 case FC_RCTL_P_RJT: /* port reject */ 17002 case FC_RCTL_F_RJT: /* fabric reject */ 17003 case FC_RCTL_P_BSY: /* port busy */ 17004 case FC_RCTL_F_BSY: /* fabric busy to data frame */ 17005 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ 17006 case FC_RCTL_LCR: /* link credit reset */ 17007 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */ 17008 case FC_RCTL_END: /* end */ 17009 break; 17010 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ 17011 fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 17012 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; 17013 return lpfc_fc_frame_check(phba, fc_hdr); 17014 default: 17015 goto drop; 17016 } 17017 17018 switch (fc_hdr->fh_type) { 17019 case FC_TYPE_BLS: 17020 case FC_TYPE_ELS: 17021 case FC_TYPE_FCP: 17022 case FC_TYPE_CT: 17023 case FC_TYPE_NVME: 17024 break; 17025 case FC_TYPE_IP: 17026 case FC_TYPE_ILS: 17027 default: 17028 goto drop; 17029 } 17030 17031 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 17032 "2538 Received frame rctl:x%x, type:x%x, " 17033 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", 17034 fc_hdr->fh_r_ctl, fc_hdr->fh_type, 17035 be32_to_cpu(header[0]), be32_to_cpu(header[1]), 17036 be32_to_cpu(header[2]), be32_to_cpu(header[3]), 17037 be32_to_cpu(header[4]), be32_to_cpu(header[5]), 17038 be32_to_cpu(header[6])); 17039 return 0; 17040 drop: 17041 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 17042 "2539 Dropped frame rctl:x%x type:x%x\n", 17043 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 17044 return 1; 17045 } 17046 17047 /** 17048 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame 17049 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 17050 * 17051 * This function processes the FC header to retrieve the VFI from the VF 17052 * header, if one exists. This function will return the VFI if one exists 17053 * or 0 if no VSAN Header exists. 
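 *
 * Usage sketch (taken from the vport match in lpfc_fc_frame_to_vport()
 * below; illustrative only):
 *
 *	if (phba->fcf.fcfi == fcfi &&
 *	    vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
 *	    vports[i]->fc_myDID == did)
 *		vport = vports[i];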
17054 **/ 17055 static uint32_t 17056 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) 17057 { 17058 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 17059 17060 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) 17061 return 0; 17062 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); 17063 } 17064 17065 /** 17066 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to 17067 * @phba: Pointer to the HBA structure to search for the vport on 17068 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 17069 * @fcfi: The FC Fabric ID that the frame came from 17070 * 17071 * This function searches the @phba for a vport that matches the content of the 17072 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the 17073 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function 17074 * returns the matching vport pointer or NULL if unable to match frame to a 17075 * vport. 17076 **/ 17077 static struct lpfc_vport * 17078 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, 17079 uint16_t fcfi, uint32_t did) 17080 { 17081 struct lpfc_vport **vports; 17082 struct lpfc_vport *vport = NULL; 17083 int i; 17084 17085 if (did == Fabric_DID) 17086 return phba->pport; 17087 if ((phba->pport->fc_flag & FC_PT2PT) && 17088 !(phba->link_state == LPFC_HBA_READY)) 17089 return phba->pport; 17090 17091 vports = lpfc_create_vport_work_array(phba); 17092 if (vports != NULL) { 17093 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 17094 if (phba->fcf.fcfi == fcfi && 17095 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && 17096 vports[i]->fc_myDID == did) { 17097 vport = vports[i]; 17098 break; 17099 } 17100 } 17101 } 17102 lpfc_destroy_vport_work_array(phba, vports); 17103 return vport; 17104 } 17105 17106 /** 17107 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp 17108 * @vport: The vport to work on. 17109 * 17110 * This function updates the receive sequence time stamp for this vport. The 17111 * receive sequence time stamp indicates the time that the last frame of the 17112 * the sequence that has been idle for the longest amount of time was received. 17113 * the driver uses this time stamp to indicate if any received sequences have 17114 * timed out. 17115 **/ 17116 static void 17117 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) 17118 { 17119 struct lpfc_dmabuf *h_buf; 17120 struct hbq_dmabuf *dmabuf = NULL; 17121 17122 /* get the oldest sequence on the rcv list */ 17123 h_buf = list_get_first(&vport->rcv_buffer_list, 17124 struct lpfc_dmabuf, list); 17125 if (!h_buf) 17126 return; 17127 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 17128 vport->rcv_buffer_time_stamp = dmabuf->time_stamp; 17129 } 17130 17131 /** 17132 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. 17133 * @vport: The vport that the received sequences were sent to. 17134 * 17135 * This function cleans up all outstanding received sequences. This is called 17136 * by the driver when a link event or user action invalidates all the received 17137 * sequences. 
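 *
 * Illustrative call (hedged sketch; the actual call sites live outside this
 * file, typically in link-event or vport teardown paths):
 *
 *	lpfc_cleanup_rcv_buffers(vport);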
17138 **/ 17139 void 17140 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) 17141 { 17142 struct lpfc_dmabuf *h_buf, *hnext; 17143 struct lpfc_dmabuf *d_buf, *dnext; 17144 struct hbq_dmabuf *dmabuf = NULL; 17145 17146 /* start with the oldest sequence on the rcv list */ 17147 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 17148 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 17149 list_del_init(&dmabuf->hbuf.list); 17150 list_for_each_entry_safe(d_buf, dnext, 17151 &dmabuf->dbuf.list, list) { 17152 list_del_init(&d_buf->list); 17153 lpfc_in_buf_free(vport->phba, d_buf); 17154 } 17155 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 17156 } 17157 } 17158 17159 /** 17160 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. 17161 * @vport: The vport that the received sequences were sent to. 17162 * 17163 * This function determines whether any received sequences have timed out by 17164 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp 17165 * indicates that there is at least one timed out sequence this routine will 17166 * go through the received sequences one at a time from most inactive to most 17167 * active to determine which ones need to be cleaned up. Once it has determined 17168 * that a sequence needs to be cleaned up it will simply free up the resources 17169 * without sending an abort. 17170 **/ 17171 void 17172 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) 17173 { 17174 struct lpfc_dmabuf *h_buf, *hnext; 17175 struct lpfc_dmabuf *d_buf, *dnext; 17176 struct hbq_dmabuf *dmabuf = NULL; 17177 unsigned long timeout; 17178 int abort_count = 0; 17179 17180 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 17181 vport->rcv_buffer_time_stamp); 17182 if (list_empty(&vport->rcv_buffer_list) || 17183 time_before(jiffies, timeout)) 17184 return; 17185 /* start with the oldest sequence on the rcv list */ 17186 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 17187 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 17188 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 17189 dmabuf->time_stamp); 17190 if (time_before(jiffies, timeout)) 17191 break; 17192 abort_count++; 17193 list_del_init(&dmabuf->hbuf.list); 17194 list_for_each_entry_safe(d_buf, dnext, 17195 &dmabuf->dbuf.list, list) { 17196 list_del_init(&d_buf->list); 17197 lpfc_in_buf_free(vport->phba, d_buf); 17198 } 17199 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 17200 } 17201 if (abort_count) 17202 lpfc_update_rcv_time_stamp(vport); 17203 } 17204 17205 /** 17206 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences 17207 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame 17208 * 17209 * This function searches through the existing incomplete sequences that have 17210 * been sent to this @vport. If the frame matches one of the incomplete 17211 * sequences then the dbuf in the @dmabuf is added to the list of frames that 17212 * make up that sequence. If no sequence is found that matches this frame then 17213 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list 17214 * This function returns a pointer to the first dmabuf in the sequence list that 17215 * the frame was linked to. 
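 *
 * Typical use, as in lpfc_sli4_handle_received_buffer() later in this file
 * (illustrative excerpt, not an additional API):
 *
 *	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
 *	if (!seq_dmabuf) {
 *		lpfc_in_buf_free(phba, &dmabuf->dbuf);
 *		return;
 *	}
 *	if (!lpfc_seq_complete(seq_dmabuf))
 *		return;
 *	lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);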
17216 **/
17217 static struct hbq_dmabuf *
17218 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17219 {
17220 struct fc_frame_header *new_hdr;
17221 struct fc_frame_header *temp_hdr;
17222 struct lpfc_dmabuf *d_buf;
17223 struct lpfc_dmabuf *h_buf;
17224 struct hbq_dmabuf *seq_dmabuf = NULL;
17225 struct hbq_dmabuf *temp_dmabuf = NULL;
17226 uint8_t found = 0;
17227 
17228 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17229 dmabuf->time_stamp = jiffies;
17230 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17231 
17232 /* Use the hdr_buf to find the sequence that this frame belongs to */
17233 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17234 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17235 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17236 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17237 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17238 continue;
17239 /* found a pending sequence that matches this frame */
17240 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17241 break;
17242 }
17243 if (!seq_dmabuf) {
17244 /*
17245 * This indicates first frame received for this sequence.
17246 * Queue the buffer on the vport's rcv_buffer_list.
17247 */
17248 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17249 lpfc_update_rcv_time_stamp(vport);
17250 return dmabuf;
17251 }
17252 temp_hdr = seq_dmabuf->hbuf.virt;
17253 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
17254 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17255 list_del_init(&seq_dmabuf->hbuf.list);
17256 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17257 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17258 lpfc_update_rcv_time_stamp(vport);
17259 return dmabuf;
17260 }
17261 /* move this sequence to the tail to indicate a young sequence */
17262 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
17263 seq_dmabuf->time_stamp = jiffies;
17264 lpfc_update_rcv_time_stamp(vport);
17265 if (list_empty(&seq_dmabuf->dbuf.list)) {
17266 temp_hdr = dmabuf->hbuf.virt;
17267 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17268 return seq_dmabuf;
17269 }
17270 /* find the correct place in the sequence to insert this frame */
17271 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
17272 while (!found) {
17273 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17274 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
17275 /*
17276 * If the frame's sequence count is greater than the frame on
17277 * the list then insert the frame right after this frame
17278 */
17279 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
17280 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17281 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
17282 found = 1;
17283 break;
17284 }
17285 
17286 if (&d_buf->list == &seq_dmabuf->dbuf.list)
17287 break;
17288 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
17289 }
17290 
17291 if (found)
17292 return seq_dmabuf;
17293 return NULL;
17294 }
17295 
17296 /**
17297 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
17298 * @vport: pointer to a virtual port
17299 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17300 *
17301 * This function tries to abort the partially assembled sequence described
17302 * by the information from the basic abort @dmabuf. It checks whether such a
17303 * partially assembled sequence is held by the driver. If so, it shall free up
17304 * all the frames from the partially assembled sequence.
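 *
 * Caller sketch (mirrors the chaining in lpfc_sli4_handle_unsol_abort()
 * below; shown only to illustrate how the two abort helpers combine):
 *
 *	aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
 *	if (aborted == false)
 *		aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);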
17305 *
17306 * Return
17307 * true -- if a matching partially assembled sequence is present and all
17308 * of its frames have been freed;
17309 * false -- if there is no matching partially assembled sequence, so
17310 * nothing was aborted in the lower layer driver
17311 **/
17312 static bool
17313 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
17314 struct hbq_dmabuf *dmabuf)
17315 {
17316 struct fc_frame_header *new_hdr;
17317 struct fc_frame_header *temp_hdr;
17318 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
17319 struct hbq_dmabuf *seq_dmabuf = NULL;
17320 
17321 /* Use the hdr_buf to find the sequence that matches this frame */
17322 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17323 INIT_LIST_HEAD(&dmabuf->hbuf.list);
17324 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17325 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17326 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17327 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17328 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17329 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17330 continue;
17331 /* found a pending sequence that matches this frame */
17332 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17333 break;
17334 }
17335 
17336 /* Free up all the frames from the partially assembled sequence */
17337 if (seq_dmabuf) {
17338 list_for_each_entry_safe(d_buf, n_buf,
17339 &seq_dmabuf->dbuf.list, list) {
17340 list_del_init(&d_buf->list);
17341 lpfc_in_buf_free(vport->phba, d_buf);
17342 }
17343 return true;
17344 }
17345 return false;
17346 }
17347 
17348 /**
17349 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
17350 * @vport: pointer to a virtual port
17351 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17352 *
17353 * This function tries to abort the assembled sequence at the upper level
17354 * protocol, described by the information from the basic abort @dmabuf. It
17355 * checks whether such a pending context exists at the upper level protocol.
17356 * If so, it shall clean up the pending context.
17357 *
17358 * Return
17359 * true -- if a matching pending context for the sequence was cleaned up
17360 * at the ulp;
17361 * false -- if no matching pending context for the sequence is present
17362 * at the ulp.
17363 **/
17364 static bool
17365 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17366 {
17367 struct lpfc_hba *phba = vport->phba;
17368 int handled;
17369 
17370 /* Accepting abort at ulp with SLI4 only */
17371 if (phba->sli_rev < LPFC_SLI_REV4)
17372 return false;
17373 
17374 /* Give all interested upper level protocols a chance to handle the abort */
17375 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
17376 if (handled)
17377 return true;
17378 
17379 return false;
17380 }
17381 
17382 /**
17383 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
17384 * @phba: Pointer to HBA context object.
17385 * @cmd_iocbq: pointer to the command iocbq structure.
17386 * @rsp_iocbq: pointer to the response iocbq structure.
17387 *
17388 * This function handles the sequence abort response iocb command complete
17389 * event. It properly releases the memory allocated to the sequence abort
17390 * accept iocb.
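 *
 * This handler is installed as the iocb completion routine when the BLS
 * response is built in lpfc_sli4_seq_abort_rsp() below (illustrative
 * excerpt of that wiring):
 *
 *	ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);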
17391 **/ 17392 static void 17393 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, 17394 struct lpfc_iocbq *cmd_iocbq, 17395 struct lpfc_iocbq *rsp_iocbq) 17396 { 17397 struct lpfc_nodelist *ndlp; 17398 17399 if (cmd_iocbq) { 17400 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1; 17401 lpfc_nlp_put(ndlp); 17402 lpfc_nlp_not_used(ndlp); 17403 lpfc_sli_release_iocbq(phba, cmd_iocbq); 17404 } 17405 17406 /* Failure means BLS ABORT RSP did not get delivered to remote node*/ 17407 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus) 17408 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17409 "3154 BLS ABORT RSP failed, data: x%x/x%x\n", 17410 rsp_iocbq->iocb.ulpStatus, 17411 rsp_iocbq->iocb.un.ulpWord[4]); 17412 } 17413 17414 /** 17415 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver. 17416 * @phba: Pointer to HBA context object. 17417 * @xri: xri id in transaction. 17418 * 17419 * This function validates the xri maps to the known range of XRIs allocated an 17420 * used by the driver. 17421 **/ 17422 uint16_t 17423 lpfc_sli4_xri_inrange(struct lpfc_hba *phba, 17424 uint16_t xri) 17425 { 17426 uint16_t i; 17427 17428 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) { 17429 if (xri == phba->sli4_hba.xri_ids[i]) 17430 return i; 17431 } 17432 return NO_XRI; 17433 } 17434 17435 /** 17436 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort 17437 * @phba: Pointer to HBA context object. 17438 * @fc_hdr: pointer to a FC frame header. 17439 * 17440 * This function sends a basic response to a previous unsol sequence abort 17441 * event after aborting the sequence handling. 17442 **/ 17443 void 17444 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, 17445 struct fc_frame_header *fc_hdr, bool aborted) 17446 { 17447 struct lpfc_hba *phba = vport->phba; 17448 struct lpfc_iocbq *ctiocb = NULL; 17449 struct lpfc_nodelist *ndlp; 17450 uint16_t oxid, rxid, xri, lxri; 17451 uint32_t sid, fctl; 17452 IOCB_t *icmd; 17453 int rc; 17454 17455 if (!lpfc_is_link_up(phba)) 17456 return; 17457 17458 sid = sli4_sid_from_fc_hdr(fc_hdr); 17459 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 17460 rxid = be16_to_cpu(fc_hdr->fh_rx_id); 17461 17462 ndlp = lpfc_findnode_did(vport, sid); 17463 if (!ndlp) { 17464 ndlp = lpfc_nlp_init(vport, sid); 17465 if (!ndlp) { 17466 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 17467 "1268 Failed to allocate ndlp for " 17468 "oxid:x%x SID:x%x\n", oxid, sid); 17469 return; 17470 } 17471 /* Put ndlp onto pport node list */ 17472 lpfc_enqueue_node(vport, ndlp); 17473 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 17474 /* re-setup ndlp without removing from node list */ 17475 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 17476 if (!ndlp) { 17477 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 17478 "3275 Failed to active ndlp found " 17479 "for oxid:x%x SID:x%x\n", oxid, sid); 17480 return; 17481 } 17482 } 17483 17484 /* Allocate buffer for rsp iocb */ 17485 ctiocb = lpfc_sli_get_iocbq(phba); 17486 if (!ctiocb) 17487 return; 17488 17489 /* Extract the F_CTL field from FC_HDR */ 17490 fctl = sli4_fctl_from_fc_hdr(fc_hdr); 17491 17492 icmd = &ctiocb->iocb; 17493 icmd->un.xseq64.bdl.bdeSize = 0; 17494 icmd->un.xseq64.bdl.ulpIoTag32 = 0; 17495 icmd->un.xseq64.w5.hcsw.Dfctl = 0; 17496 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC; 17497 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS; 17498 17499 /* Fill in the rest of iocb fields */ 17500 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX; 17501 icmd->ulpBdeCount = 0; 17502 icmd->ulpLe = 1; 17503 icmd->ulpClass = CLASS3; 17504 icmd->ulpContext = 
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 17505 ctiocb->context1 = lpfc_nlp_get(ndlp); 17506 17507 ctiocb->iocb_cmpl = NULL; 17508 ctiocb->vport = phba->pport; 17509 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; 17510 ctiocb->sli4_lxritag = NO_XRI; 17511 ctiocb->sli4_xritag = NO_XRI; 17512 17513 if (fctl & FC_FC_EX_CTX) 17514 /* Exchange responder sent the abort so we 17515 * own the oxid. 17516 */ 17517 xri = oxid; 17518 else 17519 xri = rxid; 17520 lxri = lpfc_sli4_xri_inrange(phba, xri); 17521 if (lxri != NO_XRI) 17522 lpfc_set_rrq_active(phba, ndlp, lxri, 17523 (xri == oxid) ? rxid : oxid, 0); 17524 /* For BA_ABTS from exchange responder, if the logical xri with 17525 * the oxid maps to the FCP XRI range, the port no longer has 17526 * that exchange context, send a BLS_RJT. Override the IOCB for 17527 * a BA_RJT. 17528 */ 17529 if ((fctl & FC_FC_EX_CTX) && 17530 (lxri > lpfc_sli4_get_iocb_cnt(phba))) { 17531 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 17532 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 17533 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 17534 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 17535 } 17536 17537 /* If BA_ABTS failed to abort a partially assembled receive sequence, 17538 * the driver no longer has that exchange, send a BLS_RJT. Override 17539 * the IOCB for a BA_RJT. 17540 */ 17541 if (aborted == false) { 17542 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 17543 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 17544 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 17545 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 17546 } 17547 17548 if (fctl & FC_FC_EX_CTX) { 17549 /* ABTS sent by responder to CT exchange, construction 17550 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG 17551 * field and RX_ID from ABTS for RX_ID field. 17552 */ 17553 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); 17554 } else { 17555 /* ABTS sent by initiator to CT exchange, construction 17556 * of BA_ACC will need to allocate a new XRI as for the 17557 * XRI_TAG field. 17558 */ 17559 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); 17560 } 17561 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); 17562 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); 17563 17564 /* Xmit CT abts response on exchange <xid> */ 17565 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 17566 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", 17567 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); 17568 17569 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 17570 if (rc == IOCB_ERROR) { 17571 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 17572 "2925 Failed to issue CT ABTS RSP x%x on " 17573 "xri x%x, Data x%x\n", 17574 icmd->un.xseq64.w5.hcsw.Rctl, oxid, 17575 phba->link_state); 17576 lpfc_nlp_put(ndlp); 17577 ctiocb->context1 = NULL; 17578 lpfc_sli_release_iocbq(phba, ctiocb); 17579 } 17580 } 17581 17582 /** 17583 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event 17584 * @vport: Pointer to the vport on which this sequence was received 17585 * @dmabuf: pointer to a dmabuf that describes the FC sequence 17586 * 17587 * This function handles an SLI-4 unsolicited abort event. If the unsolicited 17588 * receive sequence is only partially assembed by the driver, it shall abort 17589 * the partially assembled frames for the sequence. 
Otherwise, if the
17590 * unsolicited receive sequence has been completely assembled and passed to
17591 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to
17592 * indicate that the unsolicited sequence has been aborted. After that, it
17593 * will issue a basic accept for the abort.
17594 **/
17595 static void
17596 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
17597 struct hbq_dmabuf *dmabuf)
17598 {
17599 struct lpfc_hba *phba = vport->phba;
17600 struct fc_frame_header fc_hdr;
17601 uint32_t fctl;
17602 bool aborted;
17603 
17604 /* Make a copy of fc_hdr before the dmabuf is released */
17605 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
17606 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
17607 
17608 if (fctl & FC_FC_EX_CTX) {
17609 /* ABTS by responder to exchange, no cleanup needed */
17610 aborted = true;
17611 } else {
17612 /* ABTS by initiator to exchange, need to do cleanup */
17613 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
17614 if (aborted == false)
17615 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
17616 }
17617 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17618 
17619 if (phba->nvmet_support) {
17620 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
17621 return;
17622 }
17623 
17624 /* Respond with BA_ACC or BA_RJT accordingly */
17625 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
17626 }
17627 
17628 /**
17629 * lpfc_seq_complete - Indicates if a sequence is complete
17630 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17631 *
17632 * This function checks the sequence, starting with the frame described by
17633 * @dmabuf, to see if all the frames associated with this sequence are present.
17634 * The frames associated with this sequence are linked to the @dmabuf using the
17635 * dbuf list. This function looks for three things: 1) that the first frame
17636 * has a sequence count of zero; 2) that there is a frame with the last frame
17637 * of sequence bit set; 3) that there are no holes in the sequence count. The
17638 * function will return 1 when the sequence is complete, otherwise it will
17639 * return 0.
17640 **/
17641 static int
17642 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
17643 {
17644 struct fc_frame_header *hdr;
17645 struct lpfc_dmabuf *d_buf;
17646 struct hbq_dmabuf *seq_dmabuf;
17647 uint32_t fctl;
17648 int seq_count = 0;
17649 
17650 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17651 /* make sure first frame of sequence has a sequence count of zero */
17652 if (hdr->fh_seq_cnt != seq_count)
17653 return 0;
17654 fctl = (hdr->fh_f_ctl[0] << 16 |
17655 hdr->fh_f_ctl[1] << 8 |
17656 hdr->fh_f_ctl[2]);
17657 /* If last frame of sequence we can return success. */
17658 if (fctl & FC_FC_END_SEQ)
17659 return 1;
17660 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
17661 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17662 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17663 /* If there is a hole in the sequence count then fail. */
17664 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
17665 return 0;
17666 fctl = (hdr->fh_f_ctl[0] << 16 |
17667 hdr->fh_f_ctl[1] << 8 |
17668 hdr->fh_f_ctl[2]);
17669 /* If last frame of sequence we can return success.
*/
17670 if (fctl & FC_FC_END_SEQ)
17671 return 1;
17672 }
17673 return 0;
17674 }
17675 
17676 /**
17677 * lpfc_prep_seq - Prep sequence for ULP processing
17678 * @vport: Pointer to the vport on which this sequence was received
17679 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
17680 *
17681 * This function takes a sequence, described by a list of frames, and creates
17682 * a list of iocbq structures to describe the sequence. This iocbq list is then
17683 * issued to the generic unsolicited sequence handler. This routine
17684 * returns a pointer to the first iocbq in the list. If the function is unable
17685 * to allocate an iocbq, it throws out the received frames that could not
17686 * be described and returns a pointer to the first iocbq. If unable to
17687 * allocate any iocbqs (including the first) this function will return NULL.
17688 **/
17689 static struct lpfc_iocbq *
17690 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
17691 {
17692 struct hbq_dmabuf *hbq_buf;
17693 struct lpfc_dmabuf *d_buf, *n_buf;
17694 struct lpfc_iocbq *first_iocbq, *iocbq;
17695 struct fc_frame_header *fc_hdr;
17696 uint32_t sid;
17697 uint32_t len, tot_len;
17698 struct ulp_bde64 *pbde;
17699 
17700 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17701 /* remove from receive buffer list */
17702 list_del_init(&seq_dmabuf->hbuf.list);
17703 lpfc_update_rcv_time_stamp(vport);
17704 /* get the Remote Port's SID */
17705 sid = sli4_sid_from_fc_hdr(fc_hdr);
17706 tot_len = 0;
17707 /* Get an iocbq struct to fill in. */
17708 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
17709 if (first_iocbq) {
17710 /* Initialize the first IOCB. */
17711 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
17712 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
17713 first_iocbq->vport = vport;
17714 
17715 /* Check FC Header to see what TYPE of frame we are rcv'ing */
17716 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
17717 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
17718 first_iocbq->iocb.un.rcvels.parmRo =
17719 sli4_did_from_fc_hdr(fc_hdr);
17720 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
17721 } else
17722 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
17723 first_iocbq->iocb.ulpContext = NO_XRI;
17724 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
17725 be16_to_cpu(fc_hdr->fh_ox_id);
17726 /* iocbq is prepped for internal consumption. Physical vpi.
*/ 17726 first_iocbq->iocb.unsli3.rcvsli3.vpi = 17727 vport->phba->vpi_ids[vport->vpi]; 17728 /* put the first buffer into the first IOCBq */ 17729 tot_len = bf_get(lpfc_rcqe_length, 17730 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 17731 17732 first_iocbq->context2 = &seq_dmabuf->dbuf; 17733 first_iocbq->context3 = NULL; 17734 first_iocbq->iocb.ulpBdeCount = 1; 17735 if (tot_len > LPFC_DATA_BUF_SIZE) 17736 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 17737 LPFC_DATA_BUF_SIZE; 17738 else 17739 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len; 17740 17741 first_iocbq->iocb.un.rcvels.remoteID = sid; 17742 17743 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 17744 } 17745 iocbq = first_iocbq; 17746 /* 17747 * Each IOCBq can have two Buffers assigned, so go through the list 17748 * of buffers for this sequence and save two buffers in each IOCBq 17749 */ 17750 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { 17751 if (!iocbq) { 17752 lpfc_in_buf_free(vport->phba, d_buf); 17753 continue; 17754 } 17755 if (!iocbq->context3) { 17756 iocbq->context3 = d_buf; 17757 iocbq->iocb.ulpBdeCount++; 17758 /* We need to get the size out of the right CQE */ 17759 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 17760 len = bf_get(lpfc_rcqe_length, 17761 &hbq_buf->cq_event.cqe.rcqe_cmpl); 17762 pbde = (struct ulp_bde64 *) 17763 &iocbq->iocb.unsli3.sli3Words[4]; 17764 if (len > LPFC_DATA_BUF_SIZE) 17765 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; 17766 else 17767 pbde->tus.f.bdeSize = len; 17768 17769 iocbq->iocb.unsli3.rcvsli3.acc_len += len; 17770 tot_len += len; 17771 } else { 17772 iocbq = lpfc_sli_get_iocbq(vport->phba); 17773 if (!iocbq) { 17774 if (first_iocbq) { 17775 first_iocbq->iocb.ulpStatus = 17776 IOSTAT_FCP_RSP_ERROR; 17777 first_iocbq->iocb.un.ulpWord[4] = 17778 IOERR_NO_RESOURCES; 17779 } 17780 lpfc_in_buf_free(vport->phba, d_buf); 17781 continue; 17782 } 17783 /* We need to get the size out of the right CQE */ 17784 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 17785 len = bf_get(lpfc_rcqe_length, 17786 &hbq_buf->cq_event.cqe.rcqe_cmpl); 17787 iocbq->context2 = d_buf; 17788 iocbq->context3 = NULL; 17789 iocbq->iocb.ulpBdeCount = 1; 17790 if (len > LPFC_DATA_BUF_SIZE) 17791 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 17792 LPFC_DATA_BUF_SIZE; 17793 else 17794 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len; 17795 17796 tot_len += len; 17797 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 17798 17799 iocbq->iocb.un.rcvels.remoteID = sid; 17800 list_add_tail(&iocbq->list, &first_iocbq->list); 17801 } 17802 } 17803 return first_iocbq; 17804 } 17805 17806 static void 17807 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, 17808 struct hbq_dmabuf *seq_dmabuf) 17809 { 17810 struct fc_frame_header *fc_hdr; 17811 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb; 17812 struct lpfc_hba *phba = vport->phba; 17813 17814 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 17815 iocbq = lpfc_prep_seq(vport, seq_dmabuf); 17816 if (!iocbq) { 17817 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17818 "2707 Ring %d handler: Failed to allocate " 17819 "iocb Rctl x%x Type x%x received\n", 17820 LPFC_ELS_RING, 17821 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 17822 return; 17823 } 17824 if (!lpfc_complete_unsol_iocb(phba, 17825 phba->sli4_hba.els_wq->pring, 17826 iocbq, fc_hdr->fh_r_ctl, 17827 fc_hdr->fh_type)) 17828 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17829 "2540 Ring %d handler: unexpected Rctl " 17830 "x%x Type x%x received\n", 17831 LPFC_ELS_RING, 17832 fc_hdr->fh_r_ctl, 
fc_hdr->fh_type); 17833 17834 /* Free iocb created in lpfc_prep_seq */ 17835 list_for_each_entry_safe(curr_iocb, next_iocb, 17836 &iocbq->list, list) { 17837 list_del_init(&curr_iocb->list); 17838 lpfc_sli_release_iocbq(phba, curr_iocb); 17839 } 17840 lpfc_sli_release_iocbq(phba, iocbq); 17841 } 17842 17843 static void 17844 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 17845 struct lpfc_iocbq *rspiocb) 17846 { 17847 struct lpfc_dmabuf *pcmd = cmdiocb->context2; 17848 17849 if (pcmd && pcmd->virt) 17850 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); 17851 kfree(pcmd); 17852 lpfc_sli_release_iocbq(phba, cmdiocb); 17853 lpfc_drain_txq(phba); 17854 } 17855 17856 static void 17857 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, 17858 struct hbq_dmabuf *dmabuf) 17859 { 17860 struct fc_frame_header *fc_hdr; 17861 struct lpfc_hba *phba = vport->phba; 17862 struct lpfc_iocbq *iocbq = NULL; 17863 union lpfc_wqe *wqe; 17864 struct lpfc_dmabuf *pcmd = NULL; 17865 uint32_t frame_len; 17866 int rc; 17867 unsigned long iflags; 17868 17869 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17870 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl); 17871 17872 /* Send the received frame back */ 17873 iocbq = lpfc_sli_get_iocbq(phba); 17874 if (!iocbq) { 17875 /* Queue cq event and wakeup worker thread to process it */ 17876 spin_lock_irqsave(&phba->hbalock, iflags); 17877 list_add_tail(&dmabuf->cq_event.list, 17878 &phba->sli4_hba.sp_queue_event); 17879 phba->hba_flag |= HBA_SP_QUEUE_EVT; 17880 spin_unlock_irqrestore(&phba->hbalock, iflags); 17881 lpfc_worker_wake_up(phba); 17882 return; 17883 } 17884 17885 /* Allocate buffer for command payload */ 17886 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 17887 if (pcmd) 17888 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, 17889 &pcmd->phys); 17890 if (!pcmd || !pcmd->virt) 17891 goto exit; 17892 17893 INIT_LIST_HEAD(&pcmd->list); 17894 17895 /* copyin the payload */ 17896 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len); 17897 17898 /* fill in BDE's for command */ 17899 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys); 17900 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys); 17901 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64; 17902 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len; 17903 17904 iocbq->context2 = pcmd; 17905 iocbq->vport = vport; 17906 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK; 17907 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX; 17908 17909 /* 17910 * Setup rest of the iocb as though it were a WQE 17911 * Build the SEND_FRAME WQE 17912 */ 17913 wqe = (union lpfc_wqe *)&iocbq->iocb; 17914 17915 wqe->send_frame.frame_len = frame_len; 17916 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr)); 17917 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1)); 17918 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2)); 17919 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3)); 17920 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4)); 17921 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5)); 17922 17923 iocbq->iocb.ulpCommand = CMD_SEND_FRAME; 17924 iocbq->iocb.ulpLe = 1; 17925 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl; 17926 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0); 17927 if (rc == IOCB_ERROR) 17928 goto exit; 17929 17930 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17931 return; 17932 17933 exit: 17934 lpfc_printf_log(phba, 
KERN_WARNING, LOG_SLI, 17935 "2023 Unable to process MDS loopback frame\n"); 17936 if (pcmd && pcmd->virt) 17937 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); 17938 kfree(pcmd); 17939 if (iocbq) 17940 lpfc_sli_release_iocbq(phba, iocbq); 17941 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17942 } 17943 17944 /** 17945 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware 17946 * @phba: Pointer to HBA context object. 17947 * 17948 * This function is called with no lock held. This function processes all 17949 * the received buffers and gives it to upper layers when a received buffer 17950 * indicates that it is the final frame in the sequence. The interrupt 17951 * service routine processes received buffers at interrupt contexts. 17952 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the 17953 * appropriate receive function when the final frame in a sequence is received. 17954 **/ 17955 void 17956 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, 17957 struct hbq_dmabuf *dmabuf) 17958 { 17959 struct hbq_dmabuf *seq_dmabuf; 17960 struct fc_frame_header *fc_hdr; 17961 struct lpfc_vport *vport; 17962 uint32_t fcfi; 17963 uint32_t did; 17964 17965 /* Process each received buffer */ 17966 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17967 17968 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS || 17969 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { 17970 vport = phba->pport; 17971 /* Handle MDS Loopback frames */ 17972 lpfc_sli4_handle_mds_loopback(vport, dmabuf); 17973 return; 17974 } 17975 17976 /* check to see if this a valid type of frame */ 17977 if (lpfc_fc_frame_check(phba, fc_hdr)) { 17978 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17979 return; 17980 } 17981 17982 if ((bf_get(lpfc_cqe_code, 17983 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)) 17984 fcfi = bf_get(lpfc_rcqe_fcf_id_v1, 17985 &dmabuf->cq_event.cqe.rcqe_cmpl); 17986 else 17987 fcfi = bf_get(lpfc_rcqe_fcf_id, 17988 &dmabuf->cq_event.cqe.rcqe_cmpl); 17989 17990 /* d_id this frame is directed to */ 17991 did = sli4_did_from_fc_hdr(fc_hdr); 17992 17993 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did); 17994 if (!vport) { 17995 /* throw out the frame */ 17996 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17997 return; 17998 } 17999 18000 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */ 18001 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) && 18002 (did != Fabric_DID)) { 18003 /* 18004 * Throw out the frame if we are not pt2pt. 18005 * The pt2pt protocol allows for discovery frames 18006 * to be received without a registered VPI. 18007 */ 18008 if (!(vport->fc_flag & FC_PT2PT) || 18009 (phba->link_state == LPFC_HBA_READY)) { 18010 lpfc_in_buf_free(phba, &dmabuf->dbuf); 18011 return; 18012 } 18013 } 18014 18015 /* Handle the basic abort sequence (BA_ABTS) event */ 18016 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { 18017 lpfc_sli4_handle_unsol_abort(vport, dmabuf); 18018 return; 18019 } 18020 18021 /* Link this frame */ 18022 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); 18023 if (!seq_dmabuf) { 18024 /* unable to add frame to vport - throw it out */ 18025 lpfc_in_buf_free(phba, &dmabuf->dbuf); 18026 return; 18027 } 18028 /* If not last frame in sequence continue processing frames. 
*/ 18029 if (!lpfc_seq_complete(seq_dmabuf)) 18030 return; 18031 18032 /* Send the complete sequence to the upper layer protocol */ 18033 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); 18034 } 18035 18036 /** 18037 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port 18038 * @phba: pointer to lpfc hba data structure. 18039 * 18040 * This routine is invoked to post rpi header templates to the 18041 * HBA consistent with the SLI-4 interface spec. This routine 18042 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 18043 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 18044 * 18045 * This routine does not require any locks. It's usage is expected 18046 * to be driver load or reset recovery when the driver is 18047 * sequential. 18048 * 18049 * Return codes 18050 * 0 - successful 18051 * -EIO - The mailbox failed to complete successfully. 18052 * When this error occurs, the driver is not guaranteed 18053 * to have any rpi regions posted to the device and 18054 * must either attempt to repost the regions or take a 18055 * fatal error. 18056 **/ 18057 int 18058 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) 18059 { 18060 struct lpfc_rpi_hdr *rpi_page; 18061 uint32_t rc = 0; 18062 uint16_t lrpi = 0; 18063 18064 /* SLI4 ports that support extents do not require RPI headers. */ 18065 if (!phba->sli4_hba.rpi_hdrs_in_use) 18066 goto exit; 18067 if (phba->sli4_hba.extents_in_use) 18068 return -EIO; 18069 18070 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 18071 /* 18072 * Assign the rpi headers a physical rpi only if the driver 18073 * has not initialized those resources. A port reset only 18074 * needs the headers posted. 18075 */ 18076 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) != 18077 LPFC_RPI_RSRC_RDY) 18078 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 18079 18080 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); 18081 if (rc != MBX_SUCCESS) { 18082 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18083 "2008 Error %d posting all rpi " 18084 "headers\n", rc); 18085 rc = -EIO; 18086 break; 18087 } 18088 } 18089 18090 exit: 18091 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 18092 LPFC_RPI_RSRC_RDY); 18093 return rc; 18094 } 18095 18096 /** 18097 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port 18098 * @phba: pointer to lpfc hba data structure. 18099 * @rpi_page: pointer to the rpi memory region. 18100 * 18101 * This routine is invoked to post a single rpi header to the 18102 * HBA consistent with the SLI-4 interface spec. This memory region 18103 * maps up to 64 rpi context regions. 18104 * 18105 * Return codes 18106 * 0 - successful 18107 * -ENOMEM - No available memory 18108 * -EIO - The mailbox failed to complete successfully. 18109 **/ 18110 int 18111 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) 18112 { 18113 LPFC_MBOXQ_t *mboxq; 18114 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; 18115 uint32_t rc = 0; 18116 uint32_t shdr_status, shdr_add_status; 18117 union lpfc_sli4_cfg_shdr *shdr; 18118 18119 /* SLI4 ports that support extents do not require RPI headers. */ 18120 if (!phba->sli4_hba.rpi_hdrs_in_use) 18121 return rc; 18122 if (phba->sli4_hba.extents_in_use) 18123 return -EIO; 18124 18125 /* The port is notified of the header region via a mailbox command. 
*/ 18126 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18127 if (!mboxq) { 18128 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18129 "2001 Unable to allocate memory for issuing " 18130 "SLI_CONFIG_SPECIAL mailbox command\n"); 18131 return -ENOMEM; 18132 } 18133 18134 /* Post all rpi memory regions to the port. */ 18135 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl; 18136 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 18137 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, 18138 sizeof(struct lpfc_mbx_post_hdr_tmpl) - 18139 sizeof(struct lpfc_sli4_cfg_mhdr), 18140 LPFC_SLI4_MBX_EMBED); 18141 18142 18143 /* Post the physical rpi to the port for this rpi header. */ 18144 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, 18145 rpi_page->start_rpi); 18146 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, 18147 hdr_tmpl, rpi_page->page_count); 18148 18149 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); 18150 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); 18151 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 18152 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; 18153 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 18154 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 18155 if (rc != MBX_TIMEOUT) 18156 mempool_free(mboxq, phba->mbox_mem_pool); 18157 if (shdr_status || shdr_add_status || rc) { 18158 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18159 "2514 POST_RPI_HDR mailbox failed with " 18160 "status x%x add_status x%x, mbx status x%x\n", 18161 shdr_status, shdr_add_status, rc); 18162 rc = -ENXIO; 18163 } else { 18164 /* 18165 * The next_rpi stores the next logical module-64 rpi value used 18166 * to post physical rpis in subsequent rpi postings. 18167 */ 18168 spin_lock_irq(&phba->hbalock); 18169 phba->sli4_hba.next_rpi = rpi_page->next_rpi; 18170 spin_unlock_irq(&phba->hbalock); 18171 } 18172 return rc; 18173 } 18174 18175 /** 18176 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range 18177 * @phba: pointer to lpfc hba data structure. 18178 * 18179 * This routine is invoked to post rpi header templates to the 18180 * HBA consistent with the SLI-4 interface spec. This routine 18181 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 18182 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 18183 * 18184 * Returns 18185 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 18186 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 18187 **/ 18188 int 18189 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) 18190 { 18191 unsigned long rpi; 18192 uint16_t max_rpi, rpi_limit; 18193 uint16_t rpi_remaining, lrpi = 0; 18194 struct lpfc_rpi_hdr *rpi_hdr; 18195 unsigned long iflag; 18196 18197 /* 18198 * Fetch the next logical rpi. Because this index is logical, 18199 * the driver starts at 0 each time. 18200 */ 18201 spin_lock_irqsave(&phba->hbalock, iflag); 18202 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 18203 rpi_limit = phba->sli4_hba.next_rpi; 18204 18205 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0); 18206 if (rpi >= rpi_limit) 18207 rpi = LPFC_RPI_ALLOC_ERROR; 18208 else { 18209 set_bit(rpi, phba->sli4_hba.rpi_bmask); 18210 phba->sli4_hba.max_cfg_param.rpi_used++; 18211 phba->sli4_hba.rpi_count++; 18212 } 18213 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 18214 "0001 rpi:%x max:%x lim:%x\n", 18215 (int) rpi, max_rpi, rpi_limit); 18216 18217 /* 18218 * Don't try to allocate more rpi header regions if the device limit 18219 * has been exhausted. 
18220 */ 18221 if ((rpi == LPFC_RPI_ALLOC_ERROR) && 18222 (phba->sli4_hba.rpi_count >= max_rpi)) { 18223 spin_unlock_irqrestore(&phba->hbalock, iflag); 18224 return rpi; 18225 } 18226 18227 /* 18228 * RPI header postings are not required for SLI4 ports capable of 18229 * extents. 18230 */ 18231 if (!phba->sli4_hba.rpi_hdrs_in_use) { 18232 spin_unlock_irqrestore(&phba->hbalock, iflag); 18233 return rpi; 18234 } 18235 18236 /* 18237 * If the driver is running low on rpi resources, allocate another 18238 * page now. Note that the next_rpi value is used because 18239 * it represents how many are actually in use whereas max_rpi notes 18240 * how many are supported max by the device. 18241 */ 18242 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count; 18243 spin_unlock_irqrestore(&phba->hbalock, iflag); 18244 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { 18245 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 18246 if (!rpi_hdr) { 18247 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18248 "2002 Error Could not grow rpi " 18249 "count\n"); 18250 } else { 18251 lrpi = rpi_hdr->start_rpi; 18252 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 18253 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr); 18254 } 18255 } 18256 18257 return rpi; 18258 } 18259 18260 /** 18261 * lpfc_sli4_free_rpi - Release an rpi for reuse. 18262 * @phba: pointer to lpfc hba data structure. 18263 * 18264 * This routine is invoked to release an rpi to the pool of 18265 * available rpis maintained by the driver. 18266 **/ 18267 static void 18268 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 18269 { 18270 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) { 18271 phba->sli4_hba.rpi_count--; 18272 phba->sli4_hba.max_cfg_param.rpi_used--; 18273 } 18274 } 18275 18276 /** 18277 * lpfc_sli4_free_rpi - Release an rpi for reuse. 18278 * @phba: pointer to lpfc hba data structure. 18279 * 18280 * This routine is invoked to release an rpi to the pool of 18281 * available rpis maintained by the driver. 18282 **/ 18283 void 18284 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 18285 { 18286 spin_lock_irq(&phba->hbalock); 18287 __lpfc_sli4_free_rpi(phba, rpi); 18288 spin_unlock_irq(&phba->hbalock); 18289 } 18290 18291 /** 18292 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region 18293 * @phba: pointer to lpfc hba data structure. 18294 * 18295 * This routine is invoked to remove the memory region that 18296 * provided rpi via a bitmask. 18297 **/ 18298 void 18299 lpfc_sli4_remove_rpis(struct lpfc_hba *phba) 18300 { 18301 kfree(phba->sli4_hba.rpi_bmask); 18302 kfree(phba->sli4_hba.rpi_ids); 18303 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 18304 } 18305 18306 /** 18307 * lpfc_sli4_resume_rpi - Remove the rpi bitmask region 18308 * @phba: pointer to lpfc hba data structure. 18309 * 18310 * This routine is invoked to remove the memory region that 18311 * provided rpi via a bitmask. 18312 **/ 18313 int 18314 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp, 18315 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg) 18316 { 18317 LPFC_MBOXQ_t *mboxq; 18318 struct lpfc_hba *phba = ndlp->phba; 18319 int rc; 18320 18321 /* The port is notified of the header region via a mailbox command. */ 18322 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18323 if (!mboxq) 18324 return -ENOMEM; 18325 18326 /* Post all rpi memory regions to the port. 
*/ 18327 lpfc_resume_rpi(mboxq, ndlp); 18328 if (cmpl) { 18329 mboxq->mbox_cmpl = cmpl; 18330 mboxq->ctx_buf = arg; 18331 mboxq->ctx_ndlp = ndlp; 18332 } else 18333 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 18334 mboxq->vport = ndlp->vport; 18335 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18336 if (rc == MBX_NOT_FINISHED) { 18337 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18338 "2010 Resume RPI Mailbox failed " 18339 "status %d, mbxStatus x%x\n", rc, 18340 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 18341 mempool_free(mboxq, phba->mbox_mem_pool); 18342 return -EIO; 18343 } 18344 return 0; 18345 } 18346 18347 /** 18348 * lpfc_sli4_init_vpi - Initialize a vpi with the port 18349 * @vport: Pointer to the vport for which the vpi is being initialized 18350 * 18351 * This routine is invoked to activate a vpi with the port. 18352 * 18353 * Returns: 18354 * 0 success 18355 * -Evalue otherwise 18356 **/ 18357 int 18358 lpfc_sli4_init_vpi(struct lpfc_vport *vport) 18359 { 18360 LPFC_MBOXQ_t *mboxq; 18361 int rc = 0; 18362 int retval = MBX_SUCCESS; 18363 uint32_t mbox_tmo; 18364 struct lpfc_hba *phba = vport->phba; 18365 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18366 if (!mboxq) 18367 return -ENOMEM; 18368 lpfc_init_vpi(phba, mboxq, vport->vpi); 18369 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 18370 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 18371 if (rc != MBX_SUCCESS) { 18372 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 18373 "2022 INIT VPI Mailbox failed " 18374 "status %d, mbxStatus x%x\n", rc, 18375 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 18376 retval = -EIO; 18377 } 18378 if (rc != MBX_TIMEOUT) 18379 mempool_free(mboxq, vport->phba->mbox_mem_pool); 18380 18381 return retval; 18382 } 18383 18384 /** 18385 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. 18386 * @phba: pointer to lpfc hba data structure. 18387 * @mboxq: Pointer to mailbox object. 18388 * 18389 * This routine is invoked to manually add a single FCF record. The caller 18390 * must pass a completely initialized FCF_Record. This routine takes 18391 * care of the nonembedded mailbox operations. 18392 **/ 18393 static void 18394 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 18395 { 18396 void *virt_addr; 18397 union lpfc_sli4_cfg_shdr *shdr; 18398 uint32_t shdr_status, shdr_add_status; 18399 18400 virt_addr = mboxq->sge_array->addr[0]; 18401 /* The IOCTL status is embedded in the mailbox subheader. */ 18402 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; 18403 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 18404 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 18405 18406 if ((shdr_status || shdr_add_status) && 18407 (shdr_status != STATUS_FCF_IN_USE)) 18408 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18409 "2558 ADD_FCF_RECORD mailbox failed with " 18410 "status x%x add_status x%x\n", 18411 shdr_status, shdr_add_status); 18412 18413 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18414 } 18415 18416 /** 18417 * lpfc_sli4_add_fcf_record - Manually add an FCF Record. 18418 * @phba: pointer to lpfc hba data structure. 18419 * @fcf_record: pointer to the initialized fcf record to add. 18420 * 18421 * This routine is invoked to manually add a single FCF record. The caller 18422 * must pass a completely initialized FCF_Record. This routine takes 18423 * care of the nonembedded mailbox operations. 
18424 **/ 18425 int 18426 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) 18427 { 18428 int rc = 0; 18429 LPFC_MBOXQ_t *mboxq; 18430 uint8_t *bytep; 18431 void *virt_addr; 18432 struct lpfc_mbx_sge sge; 18433 uint32_t alloc_len, req_len; 18434 uint32_t fcfindex; 18435 18436 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18437 if (!mboxq) { 18438 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18439 "2009 Failed to allocate mbox for ADD_FCF cmd\n"); 18440 return -ENOMEM; 18441 } 18442 18443 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + 18444 sizeof(uint32_t); 18445 18446 /* Allocate DMA memory and set up the non-embedded mailbox command */ 18447 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 18448 LPFC_MBOX_OPCODE_FCOE_ADD_FCF, 18449 req_len, LPFC_SLI4_MBX_NEMBED); 18450 if (alloc_len < req_len) { 18451 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18452 "2523 Allocated DMA memory size (x%x) is " 18453 "less than the requested DMA memory " 18454 "size (x%x)\n", alloc_len, req_len); 18455 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18456 return -ENOMEM; 18457 } 18458 18459 /* 18460 * Get the first SGE entry from the non-embedded DMA memory. This 18461 * routine only uses a single SGE. 18462 */ 18463 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 18464 virt_addr = mboxq->sge_array->addr[0]; 18465 /* 18466 * Configure the FCF record for FCFI 0. This is the driver's 18467 * hardcoded default and gets used in nonFIP mode. 18468 */ 18469 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); 18470 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 18471 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); 18472 18473 /* 18474 * Copy the fcf_index and the FCF Record Data. The data starts after 18475 * the FCoE header plus word10. The data copy needs to be endian 18476 * correct. 18477 */ 18478 bytep += sizeof(uint32_t); 18479 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); 18480 mboxq->vport = phba->pport; 18481 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; 18482 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18483 if (rc == MBX_NOT_FINISHED) { 18484 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18485 "2515 ADD_FCF_RECORD mailbox failed with " 18486 "status 0x%x\n", rc); 18487 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18488 rc = -EIO; 18489 } else 18490 rc = 0; 18491 18492 return rc; 18493 } 18494 18495 /** 18496 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. 18497 * @phba: pointer to lpfc hba data structure. 18498 * @fcf_record: pointer to the fcf record to write the default data. 18499 * @fcf_index: FCF table entry index. 18500 * 18501 * This routine is invoked to build the driver's default FCF record. The 18502 * values used are hardcoded. This routine handles memory initialization. 
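 *
 * Hedged usage sketch (pairing this builder with lpfc_sli4_add_fcf_record()
 * above; the on-stack fcf_record here is illustrative only):
 *
 *	struct fcf_record fcf_record;
 *
 *	lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, fcf_index);
 *	rc = lpfc_sli4_add_fcf_record(phba, &fcf_record);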
18503 * 18504 **/ 18505 void 18506 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, 18507 struct fcf_record *fcf_record, 18508 uint16_t fcf_index) 18509 { 18510 memset(fcf_record, 0, sizeof(struct fcf_record)); 18511 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE; 18512 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER; 18513 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY; 18514 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]); 18515 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]); 18516 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]); 18517 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3); 18518 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4); 18519 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5); 18520 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]); 18521 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); 18522 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); 18523 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); 18524 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1); 18525 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); 18526 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, 18527 LPFC_FCF_FPMA | LPFC_FCF_SPMA); 18528 /* Set the VLAN bit map */ 18529 if (phba->valid_vlan) { 18530 fcf_record->vlan_bitmap[phba->vlan_id / 8] 18531 = 1 << (phba->vlan_id % 8); 18532 } 18533 } 18534 18535 /** 18536 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan. 18537 * @phba: pointer to lpfc hba data structure. 18538 * @fcf_index: FCF table entry offset. 18539 * 18540 * This routine is invoked to scan the entire FCF table by reading FCF 18541 * record and processing it one at a time starting from the @fcf_index 18542 * for initial FCF discovery or fast FCF failover rediscovery. 18543 * 18544 * Return 0 if the mailbox command is submitted successfully, none 0 18545 * otherwise. 
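 *
 * Illustrative invocation for an initial scan (hedged sketch;
 * LPFC_FCOE_FCF_GET_FIRST is the index this routine itself tests when
 * resetting the eligible FCF count):
 *
 *	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
 *	if (rc)
 *		return rc;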
18546 **/ 18547 int 18548 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 18549 { 18550 int rc = 0, error; 18551 LPFC_MBOXQ_t *mboxq; 18552 18553 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; 18554 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; 18555 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18556 if (!mboxq) { 18557 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18558 "2000 Failed to allocate mbox for " 18559 "READ_FCF cmd\n"); 18560 error = -ENOMEM; 18561 goto fail_fcf_scan; 18562 } 18563 /* Construct the read FCF record mailbox command */ 18564 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 18565 if (rc) { 18566 error = -EINVAL; 18567 goto fail_fcf_scan; 18568 } 18569 /* Issue the mailbox command asynchronously */ 18570 mboxq->vport = phba->pport; 18571 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 18572 18573 spin_lock_irq(&phba->hbalock); 18574 phba->hba_flag |= FCF_TS_INPROG; 18575 spin_unlock_irq(&phba->hbalock); 18576 18577 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18578 if (rc == MBX_NOT_FINISHED) 18579 error = -EIO; 18580 else { 18581 /* Reset eligible FCF count for new scan */ 18582 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 18583 phba->fcf.eligible_fcf_cnt = 0; 18584 error = 0; 18585 } 18586 fail_fcf_scan: 18587 if (error) { 18588 if (mboxq) 18589 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18590 /* FCF scan failed, clear FCF_TS_INPROG flag */ 18591 spin_lock_irq(&phba->hbalock); 18592 phba->hba_flag &= ~FCF_TS_INPROG; 18593 spin_unlock_irq(&phba->hbalock); 18594 } 18595 return error; 18596 } 18597 18598 /** 18599 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. 18600 * @phba: pointer to lpfc hba data structure. 18601 * @fcf_index: FCF table entry offset. 18602 * 18603 * This routine is invoked to read an FCF record indicated by @fcf_index 18604 * and to use it for FLOGI roundrobin FCF failover. 18605 * 18606 * Return 0 if the mailbox command is submitted successfully, none 0 18607 * otherwise. 18608 **/ 18609 int 18610 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 18611 { 18612 int rc = 0, error; 18613 LPFC_MBOXQ_t *mboxq; 18614 18615 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18616 if (!mboxq) { 18617 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 18618 "2763 Failed to allocate mbox for " 18619 "READ_FCF cmd\n"); 18620 error = -ENOMEM; 18621 goto fail_fcf_read; 18622 } 18623 /* Construct the read FCF record mailbox command */ 18624 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 18625 if (rc) { 18626 error = -EINVAL; 18627 goto fail_fcf_read; 18628 } 18629 /* Issue the mailbox command asynchronously */ 18630 mboxq->vport = phba->pport; 18631 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; 18632 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18633 if (rc == MBX_NOT_FINISHED) 18634 error = -EIO; 18635 else 18636 error = 0; 18637 18638 fail_fcf_read: 18639 if (error && mboxq) 18640 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18641 return error; 18642 } 18643 18644 /** 18645 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. 18646 * @phba: pointer to lpfc hba data structure. 18647 * @fcf_index: FCF table entry offset. 18648 * 18649 * This routine is invoked to read an FCF record indicated by @fcf_index to 18650 * determine whether it's eligible for FLOGI roundrobin failover list. 18651 * 18652 * Return 0 if the mailbox command is submitted successfully, none 0 18653 * otherwise. 
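 *
 * The mailbox construction here is the same as in
 * lpfc_sli4_fcf_scan_read_fcf_rec() and lpfc_sli4_fcf_rr_read_fcf_rec();
 * the routines differ mainly in the completion handler installed, in this
 * case lpfc_mbx_cmpl_read_fcf_rec, and this one leaves the FCF_TS_INPROG
 * flag alone.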
18654 **/ 18655 int 18656 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 18657 { 18658 int rc = 0, error; 18659 LPFC_MBOXQ_t *mboxq; 18660 18661 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18662 if (!mboxq) { 18663 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 18664 "2758 Failed to allocate mbox for " 18665 "READ_FCF cmd\n"); 18666 error = -ENOMEM; 18667 goto fail_fcf_read; 18668 } 18669 /* Construct the read FCF record mailbox command */ 18670 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 18671 if (rc) { 18672 error = -EINVAL; 18673 goto fail_fcf_read; 18674 } 18675 /* Issue the mailbox command asynchronously */ 18676 mboxq->vport = phba->pport; 18677 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec; 18678 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18679 if (rc == MBX_NOT_FINISHED) 18680 error = -EIO; 18681 else 18682 error = 0; 18683 18684 fail_fcf_read: 18685 if (error && mboxq) 18686 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18687 return error; 18688 } 18689 18690 /** 18691 * lpfc_check_next_fcf_pri_level 18692 * phba pointer to the lpfc_hba struct for this port. 18693 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get 18694 * routine when the rr_bmask is empty. The FCF indecies are put into the 18695 * rr_bmask based on their priority level. Starting from the highest priority 18696 * to the lowest. The most likely FCF candidate will be in the highest 18697 * priority group. When this routine is called it searches the fcf_pri list for 18698 * next lowest priority group and repopulates the rr_bmask with only those 18699 * fcf_indexes. 18700 * returns: 18701 * 1=success 0=failure 18702 **/ 18703 static int 18704 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba) 18705 { 18706 uint16_t next_fcf_pri; 18707 uint16_t last_index; 18708 struct lpfc_fcf_pri *fcf_pri; 18709 int rc; 18710 int ret = 0; 18711 18712 last_index = find_first_bit(phba->fcf.fcf_rr_bmask, 18713 LPFC_SLI4_FCF_TBL_INDX_MAX); 18714 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18715 "3060 Last IDX %d\n", last_index); 18716 18717 /* Verify the priority list has 2 or more entries */ 18718 spin_lock_irq(&phba->hbalock); 18719 if (list_empty(&phba->fcf.fcf_pri_list) || 18720 list_is_singular(&phba->fcf.fcf_pri_list)) { 18721 spin_unlock_irq(&phba->hbalock); 18722 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18723 "3061 Last IDX %d\n", last_index); 18724 return 0; /* Empty rr list */ 18725 } 18726 spin_unlock_irq(&phba->hbalock); 18727 18728 next_fcf_pri = 0; 18729 /* 18730 * Clear the rr_bmask and set all of the bits that are at this 18731 * priority. 18732 */ 18733 memset(phba->fcf.fcf_rr_bmask, 0, 18734 sizeof(*phba->fcf.fcf_rr_bmask)); 18735 spin_lock_irq(&phba->hbalock); 18736 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 18737 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED) 18738 continue; 18739 /* 18740 * the 1st priority that has not FLOGI failed 18741 * will be the highest. 18742 */ 18743 if (!next_fcf_pri) 18744 next_fcf_pri = fcf_pri->fcf_rec.priority; 18745 spin_unlock_irq(&phba->hbalock); 18746 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 18747 rc = lpfc_sli4_fcf_rr_index_set(phba, 18748 fcf_pri->fcf_rec.fcf_index); 18749 if (rc) 18750 return 0; 18751 } 18752 spin_lock_irq(&phba->hbalock); 18753 } 18754 /* 18755 * if next_fcf_pri was not set above and the list is not empty then 18756 * we have failed flogis on all of them. So reset flogi failed 18757 * and start at the beginning. 
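	 * In that case every remaining entry carries LPFC_FCF_FLOGI_FAILED,
	 * so the flag is cleared on each entry and rr_bmask is repopulated
	 * with the highest remaining priority group.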
18758 */ 18759 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) { 18760 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 18761 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED; 18762 /* 18763 * the 1st priority that has not FLOGI failed 18764 * will be the highest. 18765 */ 18766 if (!next_fcf_pri) 18767 next_fcf_pri = fcf_pri->fcf_rec.priority; 18768 spin_unlock_irq(&phba->hbalock); 18769 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 18770 rc = lpfc_sli4_fcf_rr_index_set(phba, 18771 fcf_pri->fcf_rec.fcf_index); 18772 if (rc) 18773 return 0; 18774 } 18775 spin_lock_irq(&phba->hbalock); 18776 } 18777 } else 18778 ret = 1; 18779 spin_unlock_irq(&phba->hbalock); 18780 18781 return ret; 18782 } 18783 /** 18784 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index 18785 * @phba: pointer to lpfc hba data structure. 18786 * 18787 * This routine is to get the next eligible FCF record index in a round 18788 * robin fashion. If the next eligible FCF record index equals to the 18789 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 18790 * shall be returned, otherwise, the next eligible FCF record's index 18791 * shall be returned. 18792 **/ 18793 uint16_t 18794 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) 18795 { 18796 uint16_t next_fcf_index; 18797 18798 initial_priority: 18799 /* Search start from next bit of currently registered FCF index */ 18800 next_fcf_index = phba->fcf.current_rec.fcf_indx; 18801 18802 next_priority: 18803 /* Determine the next fcf index to check */ 18804 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX; 18805 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 18806 LPFC_SLI4_FCF_TBL_INDX_MAX, 18807 next_fcf_index); 18808 18809 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 18810 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 18811 /* 18812 * If we have wrapped then we need to clear the bits that 18813 * have been tested so that we can detect when we should 18814 * change the priority level. 18815 */ 18816 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 18817 LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 18818 } 18819 18820 18821 /* Check roundrobin failover list empty condition */ 18822 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX || 18823 next_fcf_index == phba->fcf.current_rec.fcf_indx) { 18824 /* 18825 * If next fcf index is not found check if there are lower 18826 * Priority level fcf's in the fcf_priority list. 18827 * Set up the rr_bmask with all of the avaiable fcf bits 18828 * at that level and continue the selection process. 18829 */ 18830 if (lpfc_check_next_fcf_pri_level(phba)) 18831 goto initial_priority; 18832 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 18833 "2844 No roundrobin failover FCF available\n"); 18834 18835 return LPFC_FCOE_FCF_NEXT_NONE; 18836 } 18837 18838 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX && 18839 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag & 18840 LPFC_FCF_FLOGI_FAILED) { 18841 if (list_is_singular(&phba->fcf.fcf_pri_list)) 18842 return LPFC_FCOE_FCF_NEXT_NONE; 18843 18844 goto next_priority; 18845 } 18846 18847 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18848 "2845 Get next roundrobin failover FCF (x%x)\n", 18849 next_fcf_index); 18850 18851 return next_fcf_index; 18852 } 18853 18854 /** 18855 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index 18856 * @phba: pointer to lpfc hba data structure. 
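 * @fcf_index: index of the FCF record to mark eligible for roundrobin
 *             failover.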
18857 * 18858 * This routine sets the FCF record index in to the eligible bmask for 18859 * roundrobin failover search. It checks to make sure that the index 18860 * does not go beyond the range of the driver allocated bmask dimension 18861 * before setting the bit. 18862 * 18863 * Returns 0 if the index bit successfully set, otherwise, it returns 18864 * -EINVAL. 18865 **/ 18866 int 18867 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) 18868 { 18869 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 18870 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18871 "2610 FCF (x%x) reached driver's book " 18872 "keeping dimension:x%x\n", 18873 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 18874 return -EINVAL; 18875 } 18876 /* Set the eligible FCF record index bmask */ 18877 set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 18878 18879 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18880 "2790 Set FCF (x%x) to roundrobin FCF failover " 18881 "bmask\n", fcf_index); 18882 18883 return 0; 18884 } 18885 18886 /** 18887 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index 18888 * @phba: pointer to lpfc hba data structure. 18889 * 18890 * This routine clears the FCF record index from the eligible bmask for 18891 * roundrobin failover search. It checks to make sure that the index 18892 * does not go beyond the range of the driver allocated bmask dimension 18893 * before clearing the bit. 18894 **/ 18895 void 18896 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 18897 { 18898 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next; 18899 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 18900 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18901 "2762 FCF (x%x) reached driver's book " 18902 "keeping dimension:x%x\n", 18903 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 18904 return; 18905 } 18906 /* Clear the eligible FCF record index bmask */ 18907 spin_lock_irq(&phba->hbalock); 18908 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list, 18909 list) { 18910 if (fcf_pri->fcf_rec.fcf_index == fcf_index) { 18911 list_del_init(&fcf_pri->list); 18912 break; 18913 } 18914 } 18915 spin_unlock_irq(&phba->hbalock); 18916 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 18917 18918 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18919 "2791 Clear FCF (x%x) from roundrobin failover " 18920 "bmask\n", fcf_index); 18921 } 18922 18923 /** 18924 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table 18925 * @phba: pointer to lpfc hba data structure. 18926 * 18927 * This routine is the completion routine for the rediscover FCF table mailbox 18928 * command. If the mailbox command returned failure, it will try to stop the 18929 * FCF rediscover wait timer. 
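 * On success it starts the FCF rediscovery wait timer before the FCF
 * table is rescanned. The completed mailbox command (@mbox) is freed
 * back to the mailbox mempool in either case.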
18930 **/ 18931 static void 18932 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 18933 { 18934 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 18935 uint32_t shdr_status, shdr_add_status; 18936 18937 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 18938 18939 shdr_status = bf_get(lpfc_mbox_hdr_status, 18940 &redisc_fcf->header.cfg_shdr.response); 18941 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 18942 &redisc_fcf->header.cfg_shdr.response); 18943 if (shdr_status || shdr_add_status) { 18944 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18945 "2746 Requesting for FCF rediscovery failed " 18946 "status x%x add_status x%x\n", 18947 shdr_status, shdr_add_status); 18948 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { 18949 spin_lock_irq(&phba->hbalock); 18950 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 18951 spin_unlock_irq(&phba->hbalock); 18952 /* 18953 * CVL event triggered FCF rediscover request failed, 18954 * last resort to re-try current registered FCF entry. 18955 */ 18956 lpfc_retry_pport_discovery(phba); 18957 } else { 18958 spin_lock_irq(&phba->hbalock); 18959 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 18960 spin_unlock_irq(&phba->hbalock); 18961 /* 18962 * DEAD FCF event triggered FCF rediscover request 18963 * failed, last resort to fail over as a link down 18964 * to FCF registration. 18965 */ 18966 lpfc_sli4_fcf_dead_failthrough(phba); 18967 } 18968 } else { 18969 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18970 "2775 Start FCF rediscover quiescent timer\n"); 18971 /* 18972 * Start FCF rediscovery wait timer for pending FCF 18973 * before rescan FCF record table. 18974 */ 18975 lpfc_fcf_redisc_wait_start_timer(phba); 18976 } 18977 18978 mempool_free(mbox, phba->mbox_mem_pool); 18979 } 18980 18981 /** 18982 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port. 18983 * @phba: pointer to lpfc hba data structure. 18984 * 18985 * This routine is invoked to request for rediscovery of the entire FCF table 18986 * by the port. 18987 **/ 18988 int 18989 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) 18990 { 18991 LPFC_MBOXQ_t *mbox; 18992 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 18993 int rc, length; 18994 18995 /* Cancel retry delay timers to all vports before FCF rediscover */ 18996 lpfc_cancel_all_vport_retry_delay_timer(phba); 18997 18998 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18999 if (!mbox) { 19000 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 19001 "2745 Failed to allocate mbox for " 19002 "requesting FCF rediscover.\n"); 19003 return -ENOMEM; 19004 } 19005 19006 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) - 19007 sizeof(struct lpfc_sli4_cfg_mhdr)); 19008 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 19009 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF, 19010 length, LPFC_SLI4_MBX_EMBED); 19011 19012 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 19013 /* Set count to 0 for invalidating the entire FCF database */ 19014 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0); 19015 19016 /* Issue the mailbox command asynchronously */ 19017 mbox->vport = phba->pport; 19018 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table; 19019 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 19020 19021 if (rc == MBX_NOT_FINISHED) { 19022 mempool_free(mbox, phba->mbox_mem_pool); 19023 return -EIO; 19024 } 19025 return 0; 19026 } 19027 19028 /** 19029 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event 19030 * @phba: pointer to lpfc hba data structure. 
19031 * 19032 * This function is the failover routine as a last resort to the FCF DEAD 19033 * event when driver failed to perform fast FCF failover. 19034 **/ 19035 void 19036 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) 19037 { 19038 uint32_t link_state; 19039 19040 /* 19041 * Last resort as FCF DEAD event failover will treat this as 19042 * a link down, but save the link state because we don't want 19043 * it to be changed to Link Down unless it is already down. 19044 */ 19045 link_state = phba->link_state; 19046 lpfc_linkdown(phba); 19047 phba->link_state = link_state; 19048 19049 /* Unregister FCF if no devices connected to it */ 19050 lpfc_unregister_unused_fcf(phba); 19051 } 19052 19053 /** 19054 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data. 19055 * @phba: pointer to lpfc hba data structure. 19056 * @rgn23_data: pointer to configure region 23 data. 19057 * 19058 * This function gets SLI3 port configure region 23 data through memory dump 19059 * mailbox command. When it successfully retrieves data, the size of the data 19060 * will be returned, otherwise, 0 will be returned. 19061 **/ 19062 static uint32_t 19063 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 19064 { 19065 LPFC_MBOXQ_t *pmb = NULL; 19066 MAILBOX_t *mb; 19067 uint32_t offset = 0; 19068 int rc; 19069 19070 if (!rgn23_data) 19071 return 0; 19072 19073 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 19074 if (!pmb) { 19075 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19076 "2600 failed to allocate mailbox memory\n"); 19077 return 0; 19078 } 19079 mb = &pmb->u.mb; 19080 19081 do { 19082 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23); 19083 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 19084 19085 if (rc != MBX_SUCCESS) { 19086 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 19087 "2601 failed to read config " 19088 "region 23, rc 0x%x Status 0x%x\n", 19089 rc, mb->mbxStatus); 19090 mb->un.varDmp.word_cnt = 0; 19091 } 19092 /* 19093 * dump mem may return a zero when finished or we got a 19094 * mailbox error, either way we are done. 19095 */ 19096 if (mb->un.varDmp.word_cnt == 0) 19097 break; 19098 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset) 19099 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset; 19100 19101 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 19102 rgn23_data + offset, 19103 mb->un.varDmp.word_cnt); 19104 offset += mb->un.varDmp.word_cnt; 19105 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE); 19106 19107 mempool_free(pmb, phba->mbox_mem_pool); 19108 return offset; 19109 } 19110 19111 /** 19112 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data. 19113 * @phba: pointer to lpfc hba data structure. 19114 * @rgn23_data: pointer to configure region 23 data. 19115 * 19116 * This function gets SLI4 port configure region 23 data through memory dump 19117 * mailbox command. When it successfully retrieves data, the size of the data 19118 * will be returned, otherwise, 0 will be returned. 
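 * The region is read with repeated DUMP mailbox commands issued in
 * polling mode, and each returned chunk is accumulated into @rgn23_data
 * until the dump reports no more data or DMP_RGN23_SIZE is reached.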
19119 **/ 19120 static uint32_t 19121 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 19122 { 19123 LPFC_MBOXQ_t *mboxq = NULL; 19124 struct lpfc_dmabuf *mp = NULL; 19125 struct lpfc_mqe *mqe; 19126 uint32_t data_length = 0; 19127 int rc; 19128 19129 if (!rgn23_data) 19130 return 0; 19131 19132 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 19133 if (!mboxq) { 19134 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19135 "3105 failed to allocate mailbox memory\n"); 19136 return 0; 19137 } 19138 19139 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) 19140 goto out; 19141 mqe = &mboxq->u.mqe; 19142 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; 19143 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 19144 if (rc) 19145 goto out; 19146 data_length = mqe->un.mb_words[5]; 19147 if (data_length == 0) 19148 goto out; 19149 if (data_length > DMP_RGN23_SIZE) { 19150 data_length = 0; 19151 goto out; 19152 } 19153 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length); 19154 out: 19155 mempool_free(mboxq, phba->mbox_mem_pool); 19156 if (mp) { 19157 lpfc_mbuf_free(phba, mp->virt, mp->phys); 19158 kfree(mp); 19159 } 19160 return data_length; 19161 } 19162 19163 /** 19164 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. 19165 * @phba: pointer to lpfc hba data structure. 19166 * 19167 * This function read region 23 and parse TLV for port status to 19168 * decide if the user disaled the port. If the TLV indicates the 19169 * port is disabled, the hba_flag is set accordingly. 19170 **/ 19171 void 19172 lpfc_sli_read_link_ste(struct lpfc_hba *phba) 19173 { 19174 uint8_t *rgn23_data = NULL; 19175 uint32_t if_type, data_size, sub_tlv_len, tlv_offset; 19176 uint32_t offset = 0; 19177 19178 /* Get adapter Region 23 data */ 19179 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL); 19180 if (!rgn23_data) 19181 goto out; 19182 19183 if (phba->sli_rev < LPFC_SLI_REV4) 19184 data_size = lpfc_sli_get_config_region23(phba, rgn23_data); 19185 else { 19186 if_type = bf_get(lpfc_sli_intf_if_type, 19187 &phba->sli4_hba.sli_intf); 19188 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) 19189 goto out; 19190 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data); 19191 } 19192 19193 if (!data_size) 19194 goto out; 19195 19196 /* Check the region signature first */ 19197 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) { 19198 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19199 "2619 Config region 23 has bad signature\n"); 19200 goto out; 19201 } 19202 offset += 4; 19203 19204 /* Check the data structure version */ 19205 if (rgn23_data[offset] != LPFC_REGION23_VERSION) { 19206 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19207 "2620 Config region 23 has bad version\n"); 19208 goto out; 19209 } 19210 offset += 4; 19211 19212 /* Parse TLV entries in the region */ 19213 while (offset < data_size) { 19214 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) 19215 break; 19216 /* 19217 * If the TLV is not driver specific TLV or driver id is 19218 * not linux driver id, skip the record. 19219 */ 19220 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) || 19221 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) || 19222 (rgn23_data[offset + 3] != 0)) { 19223 offset += rgn23_data[offset + 1] * 4 + 4; 19224 continue; 19225 } 19226 19227 /* Driver found a driver specific TLV in the config region */ 19228 sub_tlv_len = rgn23_data[offset + 1] * 4; 19229 offset += 4; 19230 tlv_offset = 0; 19231 19232 /* 19233 * Search for configured port state sub-TLV. 
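		 * A matching record has type PORT_STE_TYPE; a zero in the
		 * third byte of that record means the user disabled the
		 * port, in which case LINK_DISABLED is set in hba_flag.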
19234 */ 19235 while ((offset < data_size) && 19236 (tlv_offset < sub_tlv_len)) { 19237 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) { 19238 offset += 4; 19239 tlv_offset += 4; 19240 break; 19241 } 19242 if (rgn23_data[offset] != PORT_STE_TYPE) { 19243 offset += rgn23_data[offset + 1] * 4 + 4; 19244 tlv_offset += rgn23_data[offset + 1] * 4 + 4; 19245 continue; 19246 } 19247 19248 /* This HBA contains PORT_STE configured */ 19249 if (!rgn23_data[offset + 2]) 19250 phba->hba_flag |= LINK_DISABLED; 19251 19252 goto out; 19253 } 19254 } 19255 19256 out: 19257 kfree(rgn23_data); 19258 return; 19259 } 19260 19261 /** 19262 * lpfc_wr_object - write an object to the firmware 19263 * @phba: HBA structure that indicates port to create a queue on. 19264 * @dmabuf_list: list of dmabufs to write to the port. 19265 * @size: the total byte value of the objects to write to the port. 19266 * @offset: the current offset to be used to start the transfer. 19267 * 19268 * This routine will create a wr_object mailbox command to send to the port. 19269 * the mailbox command will be constructed using the dma buffers described in 19270 * @dmabuf_list to create a list of BDEs. This routine will fill in as many 19271 * BDEs that the imbedded mailbox can support. The @offset variable will be 19272 * used to indicate the starting offset of the transfer and will also return 19273 * the offset after the write object mailbox has completed. @size is used to 19274 * determine the end of the object and whether the eof bit should be set. 19275 * 19276 * Return 0 is successful and offset will contain the the new offset to use 19277 * for the next write. 19278 * Return negative value for error cases. 19279 **/ 19280 int 19281 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, 19282 uint32_t size, uint32_t *offset) 19283 { 19284 struct lpfc_mbx_wr_object *wr_object; 19285 LPFC_MBOXQ_t *mbox; 19286 int rc = 0, i = 0; 19287 uint32_t shdr_status, shdr_add_status, shdr_change_status; 19288 uint32_t mbox_tmo; 19289 struct lpfc_dmabuf *dmabuf; 19290 uint32_t written = 0; 19291 bool check_change_status = false; 19292 19293 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 19294 if (!mbox) 19295 return -ENOMEM; 19296 19297 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 19298 LPFC_MBOX_OPCODE_WRITE_OBJECT, 19299 sizeof(struct lpfc_mbx_wr_object) - 19300 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); 19301 19302 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object; 19303 wr_object->u.request.write_offset = *offset; 19304 sprintf((uint8_t *)wr_object->u.request.object_name, "/"); 19305 wr_object->u.request.object_name[0] = 19306 cpu_to_le32(wr_object->u.request.object_name[0]); 19307 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0); 19308 list_for_each_entry(dmabuf, dmabuf_list, list) { 19309 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size) 19310 break; 19311 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys); 19312 wr_object->u.request.bde[i].addrHigh = 19313 putPaddrHigh(dmabuf->phys); 19314 if (written + SLI4_PAGE_SIZE >= size) { 19315 wr_object->u.request.bde[i].tus.f.bdeSize = 19316 (size - written); 19317 written += (size - written); 19318 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1); 19319 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1); 19320 check_change_status = true; 19321 } else { 19322 wr_object->u.request.bde[i].tus.f.bdeSize = 19323 SLI4_PAGE_SIZE; 19324 written += SLI4_PAGE_SIZE; 19325 } 19326 i++; 19327 } 19328 
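	/* All BDEs for this chunk are filled in; record how many were used
	 * and the number of bytes they describe before issuing the mailbox.
	 */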
wr_object->u.request.bde_count = i; 19329 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written); 19330 if (!phba->sli4_hba.intr_enable) 19331 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 19332 else { 19333 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 19334 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 19335 } 19336 /* The IOCTL status is embedded in the mailbox subheader. */ 19337 shdr_status = bf_get(lpfc_mbox_hdr_status, 19338 &wr_object->header.cfg_shdr.response); 19339 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 19340 &wr_object->header.cfg_shdr.response); 19341 if (check_change_status) { 19342 shdr_change_status = bf_get(lpfc_wr_object_change_status, 19343 &wr_object->u.response); 19344 switch (shdr_change_status) { 19345 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET): 19346 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 19347 "3198 Firmware write complete: System " 19348 "reboot required to instantiate\n"); 19349 break; 19350 case (LPFC_CHANGE_STATUS_FW_RESET): 19351 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 19352 "3199 Firmware write complete: Firmware" 19353 " reset required to instantiate\n"); 19354 break; 19355 case (LPFC_CHANGE_STATUS_PORT_MIGRATION): 19356 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 19357 "3200 Firmware write complete: Port " 19358 "Migration or PCI Reset required to " 19359 "instantiate\n"); 19360 break; 19361 case (LPFC_CHANGE_STATUS_PCI_RESET): 19362 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 19363 "3201 Firmware write complete: PCI " 19364 "Reset required to instantiate\n"); 19365 break; 19366 default: 19367 break; 19368 } 19369 } 19370 if (rc != MBX_TIMEOUT) 19371 mempool_free(mbox, phba->mbox_mem_pool); 19372 if (shdr_status || shdr_add_status || rc) { 19373 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19374 "3025 Write Object mailbox failed with " 19375 "status x%x add_status x%x, mbx status x%x\n", 19376 shdr_status, shdr_add_status, rc); 19377 rc = -ENXIO; 19378 *offset = shdr_add_status; 19379 } else 19380 *offset += wr_object->u.response.actual_write_length; 19381 return rc; 19382 } 19383 19384 /** 19385 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands. 19386 * @vport: pointer to vport data structure. 19387 * 19388 * This function iterate through the mailboxq and clean up all REG_LOGIN 19389 * and REG_VPI mailbox commands associated with the vport. This function 19390 * is called when driver want to restart discovery of the vport due to 19391 * a Clear Virtual Link event. 
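 *
 * Pending REG_LOGIN64/REG_VPI commands queued for the vport are freed
 * outright, while a command that is already active or already on the
 * completion queue is redirected to lpfc_sli_def_mbox_cmpl and flagged
 * with LPFC_MBX_IMED_UNREG so its RPI is unregistered on completion.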
19392 **/ 19393 void 19394 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) 19395 { 19396 struct lpfc_hba *phba = vport->phba; 19397 LPFC_MBOXQ_t *mb, *nextmb; 19398 struct lpfc_dmabuf *mp; 19399 struct lpfc_nodelist *ndlp; 19400 struct lpfc_nodelist *act_mbx_ndlp = NULL; 19401 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 19402 LIST_HEAD(mbox_cmd_list); 19403 uint8_t restart_loop; 19404 19405 /* Clean up internally queued mailbox commands with the vport */ 19406 spin_lock_irq(&phba->hbalock); 19407 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 19408 if (mb->vport != vport) 19409 continue; 19410 19411 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 19412 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 19413 continue; 19414 19415 list_del(&mb->list); 19416 list_add_tail(&mb->list, &mbox_cmd_list); 19417 } 19418 /* Clean up active mailbox command with the vport */ 19419 mb = phba->sli.mbox_active; 19420 if (mb && (mb->vport == vport)) { 19421 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || 19422 (mb->u.mb.mbxCommand == MBX_REG_VPI)) 19423 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 19424 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 19425 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; 19426 /* Put reference count for delayed processing */ 19427 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp); 19428 /* Unregister the RPI when mailbox complete */ 19429 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 19430 } 19431 } 19432 /* Cleanup any mailbox completions which are not yet processed */ 19433 do { 19434 restart_loop = 0; 19435 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { 19436 /* 19437 * If this mailox is already processed or it is 19438 * for another vport ignore it. 19439 */ 19440 if ((mb->vport != vport) || 19441 (mb->mbox_flag & LPFC_MBX_IMED_UNREG)) 19442 continue; 19443 19444 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 19445 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 19446 continue; 19447 19448 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 19449 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 19450 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; 19451 /* Unregister the RPI when mailbox complete */ 19452 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 19453 restart_loop = 1; 19454 spin_unlock_irq(&phba->hbalock); 19455 spin_lock(shost->host_lock); 19456 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 19457 spin_unlock(shost->host_lock); 19458 spin_lock_irq(&phba->hbalock); 19459 break; 19460 } 19461 } 19462 } while (restart_loop); 19463 19464 spin_unlock_irq(&phba->hbalock); 19465 19466 /* Release the cleaned-up mailbox commands */ 19467 while (!list_empty(&mbox_cmd_list)) { 19468 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list); 19469 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 19470 mp = (struct lpfc_dmabuf *)(mb->ctx_buf); 19471 if (mp) { 19472 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 19473 kfree(mp); 19474 } 19475 mb->ctx_buf = NULL; 19476 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; 19477 mb->ctx_ndlp = NULL; 19478 if (ndlp) { 19479 spin_lock(shost->host_lock); 19480 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 19481 spin_unlock(shost->host_lock); 19482 lpfc_nlp_put(ndlp); 19483 } 19484 } 19485 mempool_free(mb, phba->mbox_mem_pool); 19486 } 19487 19488 /* Release the ndlp with the cleaned-up active mailbox command */ 19489 if (act_mbx_ndlp) { 19490 spin_lock(shost->host_lock); 19491 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 19492 spin_unlock(shost->host_lock); 19493 lpfc_nlp_put(act_mbx_ndlp); 19494 } 19495 } 19496 19497 /** 19498 * lpfc_drain_txq - Drain the txq 19499 * @phba: Pointer to 
HBA context object. 19500 * 19501 * This function attempt to submit IOCBs on the txq 19502 * to the adapter. For SLI4 adapters, the txq contains 19503 * ELS IOCBs that have been deferred because the there 19504 * are no SGLs. This congestion can occur with large 19505 * vport counts during node discovery. 19506 **/ 19507 19508 uint32_t 19509 lpfc_drain_txq(struct lpfc_hba *phba) 19510 { 19511 LIST_HEAD(completions); 19512 struct lpfc_sli_ring *pring; 19513 struct lpfc_iocbq *piocbq = NULL; 19514 unsigned long iflags = 0; 19515 char *fail_msg = NULL; 19516 struct lpfc_sglq *sglq; 19517 union lpfc_wqe128 wqe; 19518 uint32_t txq_cnt = 0; 19519 struct lpfc_queue *wq; 19520 19521 if (phba->link_flag & LS_MDS_LOOPBACK) { 19522 /* MDS WQE are posted only to first WQ*/ 19523 wq = phba->sli4_hba.hdwq[0].fcp_wq; 19524 if (unlikely(!wq)) 19525 return 0; 19526 pring = wq->pring; 19527 } else { 19528 wq = phba->sli4_hba.els_wq; 19529 if (unlikely(!wq)) 19530 return 0; 19531 pring = lpfc_phba_elsring(phba); 19532 } 19533 19534 if (unlikely(!pring) || list_empty(&pring->txq)) 19535 return 0; 19536 19537 spin_lock_irqsave(&pring->ring_lock, iflags); 19538 list_for_each_entry(piocbq, &pring->txq, list) { 19539 txq_cnt++; 19540 } 19541 19542 if (txq_cnt > pring->txq_max) 19543 pring->txq_max = txq_cnt; 19544 19545 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19546 19547 while (!list_empty(&pring->txq)) { 19548 spin_lock_irqsave(&pring->ring_lock, iflags); 19549 19550 piocbq = lpfc_sli_ringtx_get(phba, pring); 19551 if (!piocbq) { 19552 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19553 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 19554 "2823 txq empty and txq_cnt is %d\n ", 19555 txq_cnt); 19556 break; 19557 } 19558 sglq = __lpfc_sli_get_els_sglq(phba, piocbq); 19559 if (!sglq) { 19560 __lpfc_sli_ringtx_put(phba, pring, piocbq); 19561 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19562 break; 19563 } 19564 txq_cnt--; 19565 19566 /* The xri and iocb resources secured, 19567 * attempt to issue request 19568 */ 19569 piocbq->sli4_lxritag = sglq->sli4_lxritag; 19570 piocbq->sli4_xritag = sglq->sli4_xritag; 19571 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq)) 19572 fail_msg = "to convert bpl to sgl"; 19573 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe)) 19574 fail_msg = "to convert iocb to wqe"; 19575 else if (lpfc_sli4_wq_put(wq, &wqe)) 19576 fail_msg = " - Wq is full"; 19577 else 19578 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq); 19579 19580 if (fail_msg) { 19581 /* Failed means we can't issue and need to cancel */ 19582 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 19583 "2822 IOCB failed %s iotag 0x%x " 19584 "xri 0x%x\n", 19585 fail_msg, 19586 piocbq->iotag, piocbq->sli4_xritag); 19587 list_add_tail(&piocbq->list, &completions); 19588 } 19589 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19590 } 19591 19592 /* Cancel all the IOCBs that cannot be issued */ 19593 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 19594 IOERR_SLI_ABORTED); 19595 19596 return txq_cnt; 19597 } 19598 19599 /** 19600 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl. 19601 * @phba: Pointer to HBA context object. 19602 * @pwqe: Pointer to command WQE. 19603 * @sglq: Pointer to the scatter gather queue object. 19604 * 19605 * This routine converts the bpl or bde that is in the WQE 19606 * to a sgl list for the sli4 hardware. The physical address 19607 * of the bpl/bde is converted back to a virtual address. 19608 * If the WQE contains a BPL then the list of BDE's is 19609 * converted to sli4_sge's. 
If the WQE contains a single 19610 * BDE then it is converted to a single sli_sge. 19611 * The WQE is still in cpu endianness so the contents of 19612 * the bpl can be used without byte swapping. 19613 * 19614 * Returns valid XRI = Success, NO_XRI = Failure. 19615 */ 19616 static uint16_t 19617 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq, 19618 struct lpfc_sglq *sglq) 19619 { 19620 uint16_t xritag = NO_XRI; 19621 struct ulp_bde64 *bpl = NULL; 19622 struct ulp_bde64 bde; 19623 struct sli4_sge *sgl = NULL; 19624 struct lpfc_dmabuf *dmabuf; 19625 union lpfc_wqe128 *wqe; 19626 int numBdes = 0; 19627 int i = 0; 19628 uint32_t offset = 0; /* accumulated offset in the sg request list */ 19629 int inbound = 0; /* number of sg reply entries inbound from firmware */ 19630 uint32_t cmd; 19631 19632 if (!pwqeq || !sglq) 19633 return xritag; 19634 19635 sgl = (struct sli4_sge *)sglq->sgl; 19636 wqe = &pwqeq->wqe; 19637 pwqeq->iocb.ulpIoTag = pwqeq->iotag; 19638 19639 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com); 19640 if (cmd == CMD_XMIT_BLS_RSP64_WQE) 19641 return sglq->sli4_xritag; 19642 numBdes = pwqeq->rsvd2; 19643 if (numBdes) { 19644 /* The addrHigh and addrLow fields within the WQE 19645 * have not been byteswapped yet so there is no 19646 * need to swap them back. 19647 */ 19648 if (pwqeq->context3) 19649 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3; 19650 else 19651 return xritag; 19652 19653 bpl = (struct ulp_bde64 *)dmabuf->virt; 19654 if (!bpl) 19655 return xritag; 19656 19657 for (i = 0; i < numBdes; i++) { 19658 /* Should already be byte swapped. */ 19659 sgl->addr_hi = bpl->addrHigh; 19660 sgl->addr_lo = bpl->addrLow; 19661 19662 sgl->word2 = le32_to_cpu(sgl->word2); 19663 if ((i+1) == numBdes) 19664 bf_set(lpfc_sli4_sge_last, sgl, 1); 19665 else 19666 bf_set(lpfc_sli4_sge_last, sgl, 0); 19667 /* swap the size field back to the cpu so we 19668 * can assign it to the sgl. 19669 */ 19670 bde.tus.w = le32_to_cpu(bpl->tus.w); 19671 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 19672 /* The offsets in the sgl need to be accumulated 19673 * separately for the request and reply lists. 19674 * The request is always first, the reply follows. 19675 */ 19676 switch (cmd) { 19677 case CMD_GEN_REQUEST64_WQE: 19678 /* add up the reply sg entries */ 19679 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 19680 inbound++; 19681 /* first inbound? reset the offset */ 19682 if (inbound == 1) 19683 offset = 0; 19684 bf_set(lpfc_sli4_sge_offset, sgl, offset); 19685 bf_set(lpfc_sli4_sge_type, sgl, 19686 LPFC_SGE_TYPE_DATA); 19687 offset += bde.tus.f.bdeSize; 19688 break; 19689 case CMD_FCP_TRSP64_WQE: 19690 bf_set(lpfc_sli4_sge_offset, sgl, 0); 19691 bf_set(lpfc_sli4_sge_type, sgl, 19692 LPFC_SGE_TYPE_DATA); 19693 break; 19694 case CMD_FCP_TSEND64_WQE: 19695 case CMD_FCP_TRECEIVE64_WQE: 19696 bf_set(lpfc_sli4_sge_type, sgl, 19697 bpl->tus.f.bdeFlags); 19698 if (i < 3) 19699 offset = 0; 19700 else 19701 offset += bde.tus.f.bdeSize; 19702 bf_set(lpfc_sli4_sge_offset, sgl, offset); 19703 break; 19704 } 19705 sgl->word2 = cpu_to_le32(sgl->word2); 19706 bpl++; 19707 sgl++; 19708 } 19709 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) { 19710 /* The addrHigh and addrLow fields of the BDE have not 19711 * been byteswapped yet so they need to be swapped 19712 * before putting them in the sgl. 
19713 */ 19714 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh); 19715 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow); 19716 sgl->word2 = le32_to_cpu(sgl->word2); 19717 bf_set(lpfc_sli4_sge_last, sgl, 1); 19718 sgl->word2 = cpu_to_le32(sgl->word2); 19719 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize); 19720 } 19721 return sglq->sli4_xritag; 19722 } 19723 19724 /** 19725 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE) 19726 * @phba: Pointer to HBA context object. 19727 * @ring_number: Base sli ring number 19728 * @pwqe: Pointer to command WQE. 19729 **/ 19730 int 19731 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, 19732 struct lpfc_iocbq *pwqe) 19733 { 19734 union lpfc_wqe128 *wqe = &pwqe->wqe; 19735 struct lpfc_nvmet_rcv_ctx *ctxp; 19736 struct lpfc_queue *wq; 19737 struct lpfc_sglq *sglq; 19738 struct lpfc_sli_ring *pring; 19739 unsigned long iflags; 19740 uint32_t ret = 0; 19741 19742 /* NVME_LS and NVME_LS ABTS requests. */ 19743 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) { 19744 pring = phba->sli4_hba.nvmels_wq->pring; 19745 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, 19746 qp, wq_access); 19747 sglq = __lpfc_sli_get_els_sglq(phba, pwqe); 19748 if (!sglq) { 19749 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19750 return WQE_BUSY; 19751 } 19752 pwqe->sli4_lxritag = sglq->sli4_lxritag; 19753 pwqe->sli4_xritag = sglq->sli4_xritag; 19754 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) { 19755 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19756 return WQE_ERROR; 19757 } 19758 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, 19759 pwqe->sli4_xritag); 19760 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe); 19761 if (ret) { 19762 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19763 return ret; 19764 } 19765 19766 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 19767 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19768 return 0; 19769 } 19770 19771 /* NVME_FCREQ and NVME_ABTS requests */ 19772 if (pwqe->iocb_flag & LPFC_IO_NVME) { 19773 /* Get the IO distribution (hba_wqidx) for WQ assignment. */ 19774 wq = qp->nvme_wq; 19775 pring = wq->pring; 19776 19777 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map); 19778 19779 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, 19780 qp, wq_access); 19781 ret = lpfc_sli4_wq_put(wq, wqe); 19782 if (ret) { 19783 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19784 return ret; 19785 } 19786 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 19787 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19788 return 0; 19789 } 19790 19791 /* NVMET requests */ 19792 if (pwqe->iocb_flag & LPFC_IO_NVMET) { 19793 /* Get the IO distribution (hba_wqidx) for WQ assignment. 
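		 * For NVMET the XRI comes from the receive context's
		 * pre-allocated sglq, so the WQE is tagged with that XRI
		 * before it is posted to the hardware WQ.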
*/ 19794 wq = qp->nvme_wq; 19795 pring = wq->pring; 19796 19797 ctxp = pwqe->context2; 19798 sglq = ctxp->ctxbuf->sglq; 19799 if (pwqe->sli4_xritag == NO_XRI) { 19800 pwqe->sli4_lxritag = sglq->sli4_lxritag; 19801 pwqe->sli4_xritag = sglq->sli4_xritag; 19802 } 19803 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, 19804 pwqe->sli4_xritag); 19805 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map); 19806 19807 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, 19808 qp, wq_access); 19809 ret = lpfc_sli4_wq_put(wq, wqe); 19810 if (ret) { 19811 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19812 return ret; 19813 } 19814 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 19815 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19816 return 0; 19817 } 19818 return WQE_ERROR; 19819 } 19820 19821 #ifdef LPFC_MXP_STAT 19822 /** 19823 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count 19824 * @phba: pointer to lpfc hba data structure. 19825 * @hwqid: belong to which HWQ. 19826 * 19827 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count 19828 * 15 seconds after a test case is running. 19829 * 19830 * The user should call lpfc_debugfs_multixripools_write before running a test 19831 * case to clear stat_snapshot_taken. Then the user starts a test case. During 19832 * test case is running, stat_snapshot_taken is incremented by 1 every time when 19833 * this routine is called from heartbeat timer. When stat_snapshot_taken is 19834 * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken. 19835 **/ 19836 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid) 19837 { 19838 struct lpfc_sli4_hdw_queue *qp; 19839 struct lpfc_multixri_pool *multixri_pool; 19840 struct lpfc_pvt_pool *pvt_pool; 19841 struct lpfc_pbl_pool *pbl_pool; 19842 u32 txcmplq_cnt; 19843 19844 qp = &phba->sli4_hba.hdwq[hwqid]; 19845 multixri_pool = qp->p_multixri_pool; 19846 if (!multixri_pool) 19847 return; 19848 19849 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) { 19850 pvt_pool = &qp->p_multixri_pool->pvt_pool; 19851 pbl_pool = &qp->p_multixri_pool->pbl_pool; 19852 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt; 19853 if (qp->nvme_wq) 19854 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt; 19855 19856 multixri_pool->stat_pbl_count = pbl_pool->count; 19857 multixri_pool->stat_pvt_count = pvt_pool->count; 19858 multixri_pool->stat_busy_count = txcmplq_cnt; 19859 } 19860 19861 multixri_pool->stat_snapshot_taken++; 19862 } 19863 #endif 19864 19865 /** 19866 * lpfc_adjust_pvt_pool_count - Adjust private pool count 19867 * @phba: pointer to lpfc hba data structure. 19868 * @hwqid: belong to which HWQ. 19869 * 19870 * This routine moves some XRIs from private to public pool when private pool 19871 * is not busy. 19872 **/ 19873 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid) 19874 { 19875 struct lpfc_multixri_pool *multixri_pool; 19876 u32 io_req_count; 19877 u32 prev_io_req_count; 19878 19879 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool; 19880 if (!multixri_pool) 19881 return; 19882 io_req_count = multixri_pool->io_req_count; 19883 prev_io_req_count = multixri_pool->prev_io_req_count; 19884 19885 if (prev_io_req_count != io_req_count) { 19886 /* Private pool is busy */ 19887 multixri_pool->prev_io_req_count = io_req_count; 19888 } else { 19889 /* Private pool is not busy. 19890 * Move XRIs from private to public pool. 
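		 * (io_req_count did not advance since the previous check,
		 * so the private pool is treated as idle.)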
19891 */ 19892 lpfc_move_xri_pvt_to_pbl(phba, hwqid); 19893 } 19894 } 19895 19896 /** 19897 * lpfc_adjust_high_watermark - Adjust high watermark 19898 * @phba: pointer to lpfc hba data structure. 19899 * @hwqid: belong to which HWQ. 19900 * 19901 * This routine sets high watermark as number of outstanding XRIs, 19902 * but make sure the new value is between xri_limit/2 and xri_limit. 19903 **/ 19904 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid) 19905 { 19906 u32 new_watermark; 19907 u32 watermark_max; 19908 u32 watermark_min; 19909 u32 xri_limit; 19910 u32 txcmplq_cnt; 19911 u32 abts_io_bufs; 19912 struct lpfc_multixri_pool *multixri_pool; 19913 struct lpfc_sli4_hdw_queue *qp; 19914 19915 qp = &phba->sli4_hba.hdwq[hwqid]; 19916 multixri_pool = qp->p_multixri_pool; 19917 if (!multixri_pool) 19918 return; 19919 xri_limit = multixri_pool->xri_limit; 19920 19921 watermark_max = xri_limit; 19922 watermark_min = xri_limit / 2; 19923 19924 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt; 19925 abts_io_bufs = qp->abts_scsi_io_bufs; 19926 if (qp->nvme_wq) { 19927 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt; 19928 abts_io_bufs += qp->abts_nvme_io_bufs; 19929 } 19930 19931 new_watermark = txcmplq_cnt + abts_io_bufs; 19932 new_watermark = min(watermark_max, new_watermark); 19933 new_watermark = max(watermark_min, new_watermark); 19934 multixri_pool->pvt_pool.high_watermark = new_watermark; 19935 19936 #ifdef LPFC_MXP_STAT 19937 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm, 19938 new_watermark); 19939 #endif 19940 } 19941 19942 /** 19943 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool 19944 * @phba: pointer to lpfc hba data structure. 19945 * @hwqid: belong to which HWQ. 19946 * 19947 * This routine is called from hearbeat timer when pvt_pool is idle. 19948 * All free XRIs are moved from private to public pool on hwqid with 2 steps. 19949 * The first step moves (all - low_watermark) amount of XRIs. 19950 * The second step moves the rest of XRIs. 
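 *
 * For example (illustrative numbers): with 100 free XRIs in pvt_pool and
 * low_watermark == 10, the first step leaves 10 XRIs private and moves 90
 * to pbl_pool; once the private count is at or below the low watermark, a
 * later call moves whatever remains.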
19951 **/ 19952 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid) 19953 { 19954 struct lpfc_pbl_pool *pbl_pool; 19955 struct lpfc_pvt_pool *pvt_pool; 19956 struct lpfc_sli4_hdw_queue *qp; 19957 struct lpfc_io_buf *lpfc_ncmd; 19958 struct lpfc_io_buf *lpfc_ncmd_next; 19959 unsigned long iflag; 19960 struct list_head tmp_list; 19961 u32 tmp_count; 19962 19963 qp = &phba->sli4_hba.hdwq[hwqid]; 19964 pbl_pool = &qp->p_multixri_pool->pbl_pool; 19965 pvt_pool = &qp->p_multixri_pool->pvt_pool; 19966 tmp_count = 0; 19967 19968 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool); 19969 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool); 19970 19971 if (pvt_pool->count > pvt_pool->low_watermark) { 19972 /* Step 1: move (all - low_watermark) from pvt_pool 19973 * to pbl_pool 19974 */ 19975 19976 /* Move low watermark of bufs from pvt_pool to tmp_list */ 19977 INIT_LIST_HEAD(&tmp_list); 19978 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 19979 &pvt_pool->list, list) { 19980 list_move_tail(&lpfc_ncmd->list, &tmp_list); 19981 tmp_count++; 19982 if (tmp_count >= pvt_pool->low_watermark) 19983 break; 19984 } 19985 19986 /* Move all bufs from pvt_pool to pbl_pool */ 19987 list_splice_init(&pvt_pool->list, &pbl_pool->list); 19988 19989 /* Move all bufs from tmp_list to pvt_pool */ 19990 list_splice(&tmp_list, &pvt_pool->list); 19991 19992 pbl_pool->count += (pvt_pool->count - tmp_count); 19993 pvt_pool->count = tmp_count; 19994 } else { 19995 /* Step 2: move the rest from pvt_pool to pbl_pool */ 19996 list_splice_init(&pvt_pool->list, &pbl_pool->list); 19997 pbl_pool->count += pvt_pool->count; 19998 pvt_pool->count = 0; 19999 } 20000 20001 spin_unlock(&pvt_pool->lock); 20002 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 20003 } 20004 20005 /** 20006 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool 20007 * @phba: pointer to lpfc hba data structure 20008 * @pbl_pool: specified public free XRI pool 20009 * @pvt_pool: specified private free XRI pool 20010 * @count: number of XRIs to move 20011 * 20012 * This routine tries to move some free common bufs from the specified pbl_pool 20013 * to the specified pvt_pool. It might move less than count XRIs if there's not 20014 * enough in public pool. 
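 * The pbl_pool lock is taken with spin_trylock_irqsave() only, so the
 * routine returns false rather than spinning when another CPU is already
 * working on that public pool.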
20015 * 20016 * Return: 20017 * true - if XRIs are successfully moved from the specified pbl_pool to the 20018 * specified pvt_pool 20019 * false - if the specified pbl_pool is empty or locked by someone else 20020 **/ 20021 static bool 20022 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, 20023 struct lpfc_pbl_pool *pbl_pool, 20024 struct lpfc_pvt_pool *pvt_pool, u32 count) 20025 { 20026 struct lpfc_io_buf *lpfc_ncmd; 20027 struct lpfc_io_buf *lpfc_ncmd_next; 20028 unsigned long iflag; 20029 int ret; 20030 20031 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag); 20032 if (ret) { 20033 if (pbl_pool->count) { 20034 /* Move a batch of XRIs from public to private pool */ 20035 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool); 20036 list_for_each_entry_safe(lpfc_ncmd, 20037 lpfc_ncmd_next, 20038 &pbl_pool->list, 20039 list) { 20040 list_move_tail(&lpfc_ncmd->list, 20041 &pvt_pool->list); 20042 pvt_pool->count++; 20043 pbl_pool->count--; 20044 count--; 20045 if (count == 0) 20046 break; 20047 } 20048 20049 spin_unlock(&pvt_pool->lock); 20050 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 20051 return true; 20052 } 20053 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 20054 } 20055 20056 return false; 20057 } 20058 20059 /** 20060 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool 20061 * @phba: pointer to lpfc hba data structure. 20062 * @hwqid: belong to which HWQ. 20063 * @count: number of XRIs to move 20064 * 20065 * This routine tries to find some free common bufs in one of public pools with 20066 * Round Robin method. The search always starts from local hwqid, then the next 20067 * HWQ which was found last time (rrb_next_hwqid). Once a public pool is found, 20068 * a batch of free common bufs are moved to private pool on hwqid. 20069 * It might move less than count XRIs if there's not enough in public pool. 
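 * The last HWQ visited is remembered in rrb_next_hwqid so that successive
 * calls spread the borrowing across hardware queues rather than always
 * draining the same neighbour; pbl_empty_count is incremented when every
 * public pool turns out to be empty.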
20070 **/ 20071 void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count) 20072 { 20073 struct lpfc_multixri_pool *multixri_pool; 20074 struct lpfc_multixri_pool *next_multixri_pool; 20075 struct lpfc_pvt_pool *pvt_pool; 20076 struct lpfc_pbl_pool *pbl_pool; 20077 struct lpfc_sli4_hdw_queue *qp; 20078 u32 next_hwqid; 20079 u32 hwq_count; 20080 int ret; 20081 20082 qp = &phba->sli4_hba.hdwq[hwqid]; 20083 multixri_pool = qp->p_multixri_pool; 20084 pvt_pool = &multixri_pool->pvt_pool; 20085 pbl_pool = &multixri_pool->pbl_pool; 20086 20087 /* Check if local pbl_pool is available */ 20088 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count); 20089 if (ret) { 20090 #ifdef LPFC_MXP_STAT 20091 multixri_pool->local_pbl_hit_count++; 20092 #endif 20093 return; 20094 } 20095 20096 hwq_count = phba->cfg_hdw_queue; 20097 20098 /* Get the next hwqid which was found last time */ 20099 next_hwqid = multixri_pool->rrb_next_hwqid; 20100 20101 do { 20102 /* Go to next hwq */ 20103 next_hwqid = (next_hwqid + 1) % hwq_count; 20104 20105 next_multixri_pool = 20106 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool; 20107 pbl_pool = &next_multixri_pool->pbl_pool; 20108 20109 /* Check if the public free xri pool is available */ 20110 ret = _lpfc_move_xri_pbl_to_pvt( 20111 phba, qp, pbl_pool, pvt_pool, count); 20112 20113 /* Exit while-loop if success or all hwqid are checked */ 20114 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid); 20115 20116 /* Starting point for the next time */ 20117 multixri_pool->rrb_next_hwqid = next_hwqid; 20118 20119 if (!ret) { 20120 /* stats: all public pools are empty*/ 20121 multixri_pool->pbl_empty_count++; 20122 } 20123 20124 #ifdef LPFC_MXP_STAT 20125 if (ret) { 20126 if (next_hwqid == hwqid) 20127 multixri_pool->local_pbl_hit_count++; 20128 else 20129 multixri_pool->other_pbl_hit_count++; 20130 } 20131 #endif 20132 } 20133 20134 /** 20135 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark 20136 * @phba: pointer to lpfc hba data structure. 20137 * @qp: belong to which HWQ. 20138 * 20139 * This routine get a batch of XRIs from pbl_pool if pvt_pool is less than 20140 * low watermark. 20141 **/ 20142 void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid) 20143 { 20144 struct lpfc_multixri_pool *multixri_pool; 20145 struct lpfc_pvt_pool *pvt_pool; 20146 20147 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool; 20148 pvt_pool = &multixri_pool->pvt_pool; 20149 20150 if (pvt_pool->count < pvt_pool->low_watermark) 20151 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH); 20152 } 20153 20154 /** 20155 * lpfc_release_io_buf - Return one IO buf back to free pool 20156 * @phba: pointer to lpfc hba data structure. 20157 * @lpfc_ncmd: IO buf to be returned. 20158 * @qp: belong to which HWQ. 20159 * 20160 * This routine returns one IO buf back to free pool. If this is an urgent IO, 20161 * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1, 20162 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and 20163 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to 20164 * lpfc_io_buf_list_put. 
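 *
 * With rebalancing enabled, the buf goes back to pvt_pool when the private
 * count is below the low watermark, or below the high watermark while the
 * XRIs owned by this HWQ are still under xri_limit; otherwise it goes to
 * pbl_pool.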
20165 **/ 20166 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd, 20167 struct lpfc_sli4_hdw_queue *qp) 20168 { 20169 unsigned long iflag; 20170 struct lpfc_pbl_pool *pbl_pool; 20171 struct lpfc_pvt_pool *pvt_pool; 20172 struct lpfc_epd_pool *epd_pool; 20173 u32 txcmplq_cnt; 20174 u32 xri_owned; 20175 u32 xri_limit; 20176 u32 abts_io_bufs; 20177 20178 /* MUST zero fields if buffer is reused by another protocol */ 20179 lpfc_ncmd->nvmeCmd = NULL; 20180 lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL; 20181 lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL; 20182 20183 if (phba->cfg_xri_rebalancing) { 20184 if (lpfc_ncmd->expedite) { 20185 /* Return to expedite pool */ 20186 epd_pool = &phba->epd_pool; 20187 spin_lock_irqsave(&epd_pool->lock, iflag); 20188 list_add_tail(&lpfc_ncmd->list, &epd_pool->list); 20189 epd_pool->count++; 20190 spin_unlock_irqrestore(&epd_pool->lock, iflag); 20191 return; 20192 } 20193 20194 /* Avoid invalid access if an IO sneaks in and is being rejected 20195 * just _after_ xri pools are destroyed in lpfc_offline. 20196 * Nothing much can be done at this point. 20197 */ 20198 if (!qp->p_multixri_pool) 20199 return; 20200 20201 pbl_pool = &qp->p_multixri_pool->pbl_pool; 20202 pvt_pool = &qp->p_multixri_pool->pvt_pool; 20203 20204 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt; 20205 abts_io_bufs = qp->abts_scsi_io_bufs; 20206 if (qp->nvme_wq) { 20207 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt; 20208 abts_io_bufs += qp->abts_nvme_io_bufs; 20209 } 20210 20211 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs; 20212 xri_limit = qp->p_multixri_pool->xri_limit; 20213 20214 #ifdef LPFC_MXP_STAT 20215 if (xri_owned <= xri_limit) 20216 qp->p_multixri_pool->below_limit_count++; 20217 else 20218 qp->p_multixri_pool->above_limit_count++; 20219 #endif 20220 20221 /* XRI goes to either public or private free xri pool 20222 * based on watermark and xri_limit 20223 */ 20224 if ((pvt_pool->count < pvt_pool->low_watermark) || 20225 (xri_owned < xri_limit && 20226 pvt_pool->count < pvt_pool->high_watermark)) { 20227 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, 20228 qp, free_pvt_pool); 20229 list_add_tail(&lpfc_ncmd->list, 20230 &pvt_pool->list); 20231 pvt_pool->count++; 20232 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 20233 } else { 20234 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, 20235 qp, free_pub_pool); 20236 list_add_tail(&lpfc_ncmd->list, 20237 &pbl_pool->list); 20238 pbl_pool->count++; 20239 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 20240 } 20241 } else { 20242 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag, 20243 qp, free_xri); 20244 list_add_tail(&lpfc_ncmd->list, 20245 &qp->lpfc_io_buf_list_put); 20246 qp->put_io_bufs++; 20247 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, 20248 iflag); 20249 } 20250 } 20251 20252 /** 20253 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool 20254 * @phba: pointer to lpfc hba data structure. 20255 * @pvt_pool: pointer to private pool data structure. 20256 * @ndlp: pointer to lpfc nodelist data structure. 20257 * 20258 * This routine tries to get one free IO buf from private pool. 
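 * Bufs whose XRI is still active on an RRQ for @ndlp, as reported by
 * lpfc_test_rrq_active(), are skipped.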
/**
 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
 * @phba: pointer to lpfc hba data structure.
 * @qp: pointer to the hardware queue that owns the private pool.
 * @pvt_pool: pointer to private pool data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 *
 * This routine tries to get one free IO buf from the private pool.
 *
 * Return:
 *   pointer to one free IO buf - if private pool is not empty
 *   NULL - if private pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
				  struct lpfc_sli4_hdw_queue *qp,
				  struct lpfc_pvt_pool *pvt_pool,
				  struct lpfc_nodelist *ndlp)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;

	lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &pvt_pool->list, list) {
		if (lpfc_test_rrq_active(
			phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
			continue;
		list_del(&lpfc_ncmd->list);
		pvt_pool->count--;
		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
		return lpfc_ncmd;
	}
	spin_unlock_irqrestore(&pvt_pool->lock, iflag);

	return NULL;
}

/**
 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine tries to get one free IO buf from the expedite pool.
 *
 * Return:
 *   pointer to one free IO buf - if expedite pool is not empty
 *   NULL - if expedite pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct lpfc_epd_pool *epd_pool;

	epd_pool = &phba->epd_pool;
	lpfc_ncmd = NULL;

	spin_lock_irqsave(&epd_pool->lock, iflag);
	if (epd_pool->count > 0) {
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &epd_pool->list, list) {
			list_del(&lpfc_ncmd->list);
			epd_pool->count--;
			break;
		}
	}
	spin_unlock_irqrestore(&epd_pool->lock, iflag);

	return lpfc_ncmd;
}
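
/*
 * Illustrative sketch (not part of the driver build): how the two getters
 * above are combined for an urgent request.  This mirrors the fallback that
 * lpfc_get_io_buf_from_multixri_pools() implements below; "urgent" is an
 * example variable name, not a field of any lpfc structure.
 *
 *	struct lpfc_io_buf *buf;
 *
 *	buf = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
 *	if (!buf && urgent)
 *		buf = lpfc_get_io_buf_from_expedite_pool(phba);
 *	if (!buf)
 *		return NULL;
 */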
/**
 * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
 * @phba: pointer to lpfc hba data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: hardware queue the request is issued on.
 * @expedite: 1 means this request is urgent.
 *
 * This routine will do the following actions and then return a pointer to
 * one free IO buf.
 *
 * 1. If the private free xri pool is empty, move some XRIs from the public
 *    to the private pool.
 * 2. Get one XRI from the private free xri pool.
 * 3. If we fail to get one from pvt_pool and this is an expedite request,
 *    get one free xri from the expedite pool.
 *
 * Note: ndlp is only used on the SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on the NVME side.
 *
 * Return:
 *   pointer to one free IO buf - on success
 *   NULL - if no IO buf is available
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    int hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_io_buf *lpfc_ncmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_ncmd = NULL;
	multixri_pool = qp->p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;
	multixri_pool->io_req_count++;

	/* If pvt_pool is empty, move some XRIs from public to private pool */
	if (pvt_pool->count == 0)
		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);

	/* Get one XRI from private free xri pool */
	lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);

	if (lpfc_ncmd) {
		lpfc_ncmd->hdwq = qp;
		lpfc_ncmd->hdwq_no = hwqid;
	} else if (expedite) {
		/* If we fail to get one from pvt_pool and this is an expedite
		 * request, get one free xri from expedite pool.
		 */
		lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
	}

	return lpfc_ncmd;
}

/* Pull the first usable IO buf off @idx's get list, skipping any entry whose
 * XRI is still held by an active RRQ or whose SGL has not been posted.
 */
static inline struct lpfc_io_buf *
lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;

	qp = &phba->sli4_hba.hdwq[idx];
	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
				 &qp->lpfc_io_buf_list_get, list) {
		if (lpfc_test_rrq_active(phba, ndlp,
					 lpfc_cmd->cur_iocbq.sli4_lxritag))
			continue;

		if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
			continue;

		list_del_init(&lpfc_cmd->list);
		qp->get_io_bufs--;
		lpfc_cmd->hdwq = qp;
		lpfc_cmd->hdwq_no = idx;
		return lpfc_cmd;
	}
	return NULL;
}
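
/*
 * Illustrative sketch (not part of the driver build): the two skip conditions
 * used by lpfc_io_buf() above, written as a single predicate.  The name
 * lpfc_io_buf_usable() is hypothetical and used only in this example.
 *
 *	static bool lpfc_io_buf_usable(struct lpfc_hba *phba,
 *				       struct lpfc_nodelist *ndlp,
 *				       struct lpfc_io_buf *lpfc_cmd)
 *	{
 *		if (lpfc_test_rrq_active(phba, ndlp,
 *					 lpfc_cmd->cur_iocbq.sli4_lxritag))
 *			return false;
 *		return !(lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED);
 *	}
 *
 * A buffer is skipped while its XRI is still quarantined by an active RRQ for
 * the target node, or while its SGL has not yet been posted to the HBA.
 */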
/**
 * lpfc_get_io_buf - Get one IO buffer from free pool
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: hardware queue the request is issued on.
 * @expedite: 1 means this request is urgent.
 *
 * This routine gets one IO buffer from a free pool. If cfg_xri_rebalancing==1,
 * it removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing==0,
 * it removes an IO buffer from the head of the hdwq io_buf_list and returns it
 * to the caller.
 *
 * Note: ndlp is only used on the SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on the NVME side.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/
struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    u32 hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag;
	struct lpfc_io_buf *lpfc_cmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_cmd = NULL;

	if (phba->cfg_xri_rebalancing) {
		lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
			phba, ndlp, hwqid, expedite);
	} else {
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
					  qp, alloc_xri_get);
		if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
			lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		if (!lpfc_cmd) {
			/* Get list is empty; refill it from the put list */
			lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
					  qp, alloc_xri_put);
			list_splice(&qp->lpfc_io_buf_list_put,
				    &qp->lpfc_io_buf_list_get);
			qp->get_io_bufs += qp->put_io_bufs;
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
			qp->put_io_bufs = 0;
			spin_unlock(&qp->io_buf_list_put_lock);
			if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
			    expedite)
				lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		}
		spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
	}

	return lpfc_cmd;
}
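
/*
 * Illustrative usage sketch (not part of the driver build): a submission path
 * typically pairs lpfc_get_io_buf() with lpfc_release_io_buf() roughly as
 * shown.  lpfc_issue_one_io() and lpfc_build_and_post_wqe() are hypothetical
 * stand-ins for the real SCSI/NVME submission code.
 *
 *	static int lpfc_issue_one_io(struct lpfc_hba *phba,
 *				     struct lpfc_nodelist *ndlp, u32 hwqid)
 *	{
 *		struct lpfc_io_buf *lpfc_cmd;
 *		int rc;
 *
 *		lpfc_cmd = lpfc_get_io_buf(phba, ndlp, hwqid, 0);
 *		if (!lpfc_cmd)
 *			return -EBUSY;
 *
 *		rc = lpfc_build_and_post_wqe(phba, lpfc_cmd);
 *		if (rc)
 *			lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
 *		return rc;
 *	}
 *
 * On successful submission the buffer is instead released from the command's
 * completion handler, again via lpfc_release_io_buf().
 */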