/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/crash_dump.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;

/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *
lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
				  struct lpfc_iocbq *rspiocbq);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq,
				    struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_queue *eq,
				     struct lpfc_eqe *eqe,
				     enum lpfc_poll_mode poll_mode);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq,
				    struct lpfc_cqe *cqe);
static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba,
				 struct lpfc_iocbq *pwqeq,
				 struct lpfc_sglq *sglq);

union lpfc_wqe128 lpfc_iread_cmd_template;
union lpfc_wqe128 lpfc_iwrite_cmd_template;
union lpfc_wqe128 lpfc_icmnd_cmd_template;

/* Setup WQE templates for IOs */
void lpfc_wqe_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* IREAD template */
	wqe = &lpfc_iread_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* IWRITE template */
	wqe = &lpfc_iwrite_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - initial_xfer_len is variable */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
	bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* ICMND template */
	wqe = &lpfc_icmnd_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

	/* Word 11 */
	bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}
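/*
 * Illustrative usage sketch (not part of the driver source): an I/O
 * prep path would typically start from one of the templates above,
 * copying it into the command's WQE and filling in only the variable
 * words called out in lpfc_wqe_cmd_template(). Here pwqeq and xfer_len
 * are assumed locals:
 *
 *	union lpfc_wqe128 *wqe = &pwqeq->wqe;
 *
 *	memcpy(wqe, &lpfc_iread_cmd_template, sizeof(union lpfc_wqe128));
 *	wqe->fcp_iread.total_xfer_len = xfer_len;              // Word 4
 *	bf_set(wqe_reqtag, &wqe->fcp_iread.wqe_com, pwqeq->iotag); // Word 9
 *
 * The constant words (7, 10, 11) are already populated by the template.
 */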
#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of words required to be copied.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
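/*
 * Note: the 64-bit little-endian fast path above can copy raw 64-bit
 * words because SLI4 queue entries are little-endian, so no swapping
 * is needed. On other configurations the macro falls back to
 * lpfc_sli_pcimem_bcopy(), which copies 32 bits at a time and performs
 * the required per-word endian conversion.
 */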
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;

	temp_wqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->notify_interval))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of Combined Writes */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			       q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			       q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}
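/*
 * Illustrative usage sketch (not part of the driver source): callers
 * post a prepared WQE while holding the expected lock and treat -EBUSY
 * as a ring-full condition. Here wq, pwqeq and iflags are assumed
 * locals:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = lpfc_sli4_wq_put(wq, &pwqeq->wqe);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (rc == -EBUSY) {
 *		// no free entries: park the request on the txq and
 *		// retry after the HBA consumes some WQEs
 *	}
 */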
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers.
 **/
static void
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return;

	q->hba_index = index;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return
 * NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return eqe;
}
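/*
 * Illustrative sketch (not part of the driver source): the canonical
 * consumer loop pairs lpfc_sli4_eq_get() with __lpfc_sli4_consume_eqe()
 * and finishes with a doorbell write, as lpfc_sli4_process_eq() below
 * does. Here consumed is an assumed local:
 *
 *	while ((eqe = lpfc_sli4_eq_get(eq))) {
 *		// dispatch the event to the child CQ handler
 *		__lpfc_sli4_consume_eqe(phba, eq, eqe);
 *		consumed++;
 *	}
 *	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
 *					LPFC_QUEUE_REARM);
 */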
/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}
static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			struct lpfc_eqe *eqe)
{
	if (!phba->sli4_hba.pc_sli4_params.eqav)
		bf_set_le32(lpfc_eqe_valid, eqe, 0);

	eq->host_index = ((eq->host_index + 1) % eq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
		eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}

static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe = NULL;
	u32 eq_count = 0, cq_count = 0;
	struct lpfc_cqe *cqe = NULL;
	struct lpfc_queue *cq = NULL, *childq = NULL;
	int cqid = 0;

	/* walk all the EQ entries and drop on the floor */
	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		/* Get the reference to the corresponding CQ */
		cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
		cq = NULL;

		list_for_each_entry(childq, &eq->child_list, list) {
			if (childq->queue_id == cqid) {
				cq = childq;
				break;
			}
		}
		/* If CQ is valid, iterate through it and drop all the CQEs */
		if (cq) {
			cqe = lpfc_sli4_cq_get(cq);
			while (cqe) {
				__lpfc_sli4_consume_cqe(phba, cq, cqe);
				cq_count++;
				cqe = lpfc_sli4_cq_get(cq);
			}
			/* Clear and re-arm the CQ */
			phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
							LPFC_QUEUE_REARM);
			cq_count = 0;
		}
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		eq_count++;
		eqe = lpfc_sli4_eq_get(eq);
	}

	/* Clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}

static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
		     u8 rearm, enum lpfc_poll_mode poll_mode)
{
	struct lpfc_eqe *eqe;
	int count = 0, consumed = 0;

	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		lpfc_sli4_hba_handle_eqe(phba, eq, eqe, poll_mode);
		__lpfc_sli4_consume_eqe(phba, eq, eqe);

		consumed++;
		if (!(++count % eq->max_proc_limit))
			break;

		if (!(count % eq->notify_interval)) {
			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
		}

		eqe = lpfc_sli4_eq_get(eq);
	}
	eq->EQ_processed += count;

	/* Track the max number of EQEs processed in 1 intr */
	if (count > eq->EQ_max_eqe)
		eq->EQ_max_eqe = count;

	xchg(&eq->queue_claimed, 0);

rearm_and_exit:
	/* Always clear the EQ. */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

	return count;
}
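/*
 * Worked example of the valid-bit handling in __lpfc_sli4_consume_eqe()
 * above: without autovalid (eqav clear) each consumed entry has its
 * valid bit cleared individually. With autovalid set and, say,
 * entry_count = 256 and qe_valid = 1, entries are consumed while their
 * valid bit reads 1; when host_index wraps from 255 back to 0,
 * qe_valid flips to 0, so entries left over from the previous pass
 * (still marked 1) no longer compare equal in lpfc_sli4_eq_get() or
 * lpfc_sli4_cq_get() and are not reprocessed.
 */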
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q,
 * update the queue's internal hba index, and return the CQE. If no valid
 * CQEs are in the Queue (no more work to do), or the Queue is full of CQEs
 * that have been processed, but not popped back to the HBA then this routine
 * will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	cqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_eq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return cqe;
}

static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	if (!phba->sli4_hba.pc_sli4_params.cqav)
		bf_set_le32(lpfc_cqe_valid, cqe, 0);

	cq->host_index = ((cq->host_index + 1) % cq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
		cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}

/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
/*
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on the receive queue pair @hq and @dq. This function
 * will then ring the Receive Queue Doorbell to signal the HBA to start
 * processing the Receive Queue Entries. This function returns the index
 * that the rqe was copied to if successful. If no entries are available
 * on the queues then this function will return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->notify_interval)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}
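/*
 * Illustrative usage sketch (not part of the driver source): receive
 * buffers are always posted in header/data pairs whose put indexes
 * must match. For example, reposting a receive buffer, where dmabuf is
 * an assumed struct hbq_dmabuf pointer:
 *
 *	struct lpfc_rqe hrqe, drqe;
 *
 *	hrqe.address_lo = putPaddrLow(dmabuf->hbuf.phys);
 *	hrqe.address_hi = putPaddrHigh(dmabuf->hbuf.phys);
 *	drqe.address_lo = putPaddrLow(dmabuf->dbuf.phys);
 *	drqe.address_hi = putPaddrHigh(dmabuf->dbuf.phys);
 *	rc = lpfc_sli4_rq_put(hq, dq, &hrqe, &drqe);
 */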
/*
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}
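/*
 * Worked example of the entry arithmetic above (sizes illustrative):
 * with a 32-byte SLI-2 command entry size and cmdidx 3, the next
 * command entry lives at cmdringaddr + 96. SLI-2 and SLI-3 differ only
 * in per-entry size, which is why the byte offset is computed from
 * phba->iocb_cmd_size rather than by indexing a fixed array type.
 */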
/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object, else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	/* Lookup did to verify if did is still active on this vport */
	if (rrq->vport)
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	spin_lock_irqsave(&phba->rrq_list_lock, iflags);
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!test_bit(FC_UNLOADING, &phba->pport->load_flag)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq) {
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		} else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}
/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->rrq_list_lock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
	return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If @ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If @ndlp is not NULL, then only remove RRQs for this vport and this @ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->rrq_list_lock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport != vport)
			continue;

		if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
			list_move(&rrq->list, &rrq_list);

	}
	spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}
/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 if the rrq was activated for this xri,
 *         < 0 if no memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
		clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
		goto outnl;
	}

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (ndlp->vport && test_bit(FC_UNLOADING, &ndlp->vport->load_flag))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
			     msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;

	spin_lock_irqsave(&phba->rrq_list_lock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
	set_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
	if (empty)
		lpfc_worker_wake_up(phba);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
outnl:
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
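/*
 * Taken together, the RRQ helpers above implement a guard on XRI
 * reuse: on an aborted exchange the driver calls lpfc_set_rrq_active();
 * while the bit is set, lpfc_test_rrq_active() (consulted via the
 * active_rrqs_xri_bitmap check in __lpfc_sli_get_els_sglq() below)
 * keeps that XRI from being reissued to the same DID; once R_A_TOV has
 * elapsed the rrq timer runs lpfc_handle_rrq_active(), which sends the
 * RRQ ELS if requested and frees the entry via lpfc_clr_rrq_active().
 */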
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty then it returns a pointer to the newly allocated sglq object,
 * else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;
	u8 cmnd;

	cmnd = get_job_cmnd(phba, piocbq);

	if (piocbq->cmd_flag & LPFC_IO_FCP) {
		lpfc_cmd = piocbq->io_buf;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((cmnd == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->cmd_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->ndlp;
	} else if (piocbq->cmd_flag & LPFC_IO_LIBDFC) {
		if (piocbq->cmd_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->ndlp;
	} else {
		ndlp = piocbq->ndlp;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
			     ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty then it returns a pointer to the newly allocated sglq object,
 * else it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}
/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object
 * to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
 * asserted held in the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if (iocbq->cmd_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		if ((iocbq->cmd_flag & LPFC_EXCHANGE_BUSY) &&
		    (!(unlikely(pci_channel_offline(phba->pcidev)))) &&
		    sglq->state != SGL_XRI_ABORTED) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);

			/* Check if we can get a reference on ndlp */
			if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
				sglq->ndlp = NULL;

			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			pring = lpfc_phba_elsring(phba);
			/* Check if TXQ queue needs to be serviced */
			if (pring && (!list_empty(&pring->txq)))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset_startat(iocbq, 0, wqe);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
			     LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object to the
 * iocb pool. The iotag in the iocb object does not change for each
 * use of the iocb object. This function clears all other fields of
 * the iocb object when it is freed. The hbalock is asserted held in
 * the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset_startat(iocbq, 0, iocb);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
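/*
 * Illustrative usage sketch (not part of the driver source):
 * allocation and release are symmetric wrappers around the same pool:
 *
 *	piocb = lpfc_sli_get_iocbq(phba);
 *	if (!piocb)
 *		return -ENOMEM;
 *	// ... build and issue the command; on an error path:
 *	lpfc_sli_release_iocbq(phba, piocb);
 *
 * Both take hbalock internally, so neither may be called with it
 * already held; the __lpfc_sli_* variants exist for callers that do
 * hold the lock.
 */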
/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (piocb->cmd_cmpl) {
			if (piocb->cmd_flag & LPFC_IO_NVME) {
				lpfc_nvme_cancel_iocb(phba, piocb,
						      ulpstatus, ulpWord4);
			} else {
				if (phba->sli_rev == LPFC_SLI_REV4) {
					bf_set(lpfc_wcqe_c_status,
					       &piocb->wcqe_cmpl, ulpstatus);
					piocb->wcqe_cmpl.parameter = ulpWord4;
				} else {
					piocb->iocb.ulpStatus = ulpstatus;
					piocb->iocb.un.ulpWord[4] = ulpWord4;
				}
				(piocb->cmd_cmpl) (phba, piocb, piocb);
			}
		} else {
			lpfc_sli_release_iocbq(phba, piocb);
		}
	}
	return;
}
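/*
 * Illustrative usage sketch (not part of the driver source): a teardown
 * path typically collects outstanding IOCBs on a local list and then
 * fails them all with a single call, for example with an abort status:
 *
 *	LIST_HEAD(completions);
 *	// ... splice IOCBs from the txq/txcmplq onto completions ...
 *	lpfc_sli_cancel_iocbs(phba, &completions,
 *			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
 */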
/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
	case CMD_SEND_FRAME:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}

/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}
1770 * @cmdiocb: Pointer to driver command iocb object. 1771 * @rspiocb: Pointer to driver response iocb object. 1772 * 1773 * This routine will inform the driver of any BW adjustments we need 1774 * to make. These changes will be picked up during the next CMF 1775 * timer interrupt. In addition, any BW changes will be logged 1776 * with LOG_CGN_MGMT. 1777 **/ 1778 static void 1779 lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1780 struct lpfc_iocbq *rspiocb) 1781 { 1782 union lpfc_wqe128 *wqe; 1783 uint32_t status, info; 1784 struct lpfc_wcqe_complete *wcqe = &rspiocb->wcqe_cmpl; 1785 uint64_t bw, bwdif, slop; 1786 uint64_t pcent, bwpcent; 1787 int asig, afpin, sigcnt, fpincnt; 1788 int wsigmax, wfpinmax, cg, tdp; 1789 char *s; 1790 1791 /* First check for error */ 1792 status = bf_get(lpfc_wcqe_c_status, wcqe); 1793 if (status) { 1794 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 1795 "6211 CMF_SYNC_WQE Error " 1796 "req_tag x%x status x%x hwstatus x%x " 1797 "tdatap x%x parm x%x\n", 1798 bf_get(lpfc_wcqe_c_request_tag, wcqe), 1799 bf_get(lpfc_wcqe_c_status, wcqe), 1800 bf_get(lpfc_wcqe_c_hw_status, wcqe), 1801 wcqe->total_data_placed, 1802 wcqe->parameter); 1803 goto out; 1804 } 1805 1806 /* Gather congestion information on a successful cmpl */ 1807 info = wcqe->parameter; 1808 phba->cmf_active_info = info; 1809 1810 /* See if firmware info count is valid or has changed */ 1811 if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info) 1812 info = 0; 1813 else 1814 phba->cmf_info_per_interval = info; 1815 1816 tdp = bf_get(lpfc_wcqe_c_cmf_bw, wcqe); 1817 cg = bf_get(lpfc_wcqe_c_cmf_cg, wcqe); 1818 1819 /* Get BW requirement from firmware */ 1820 bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE; 1821 if (!bw) { 1822 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 1823 "6212 CMF_SYNC_WQE x%x: NULL bw\n", 1824 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 1825 goto out; 1826 } 1827 1828 /* Gather information needed for logging if a BW change is required */ 1829 wqe = &cmdiocb->wqe; 1830 asig = bf_get(cmf_sync_asig, &wqe->cmf_sync); 1831 afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync); 1832 fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync); 1833 sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync); 1834 if (phba->cmf_max_bytes_per_interval != bw || 1835 (asig || afpin || sigcnt || fpincnt)) { 1836 /* Are we increasing or decreasing BW */ 1837 if (phba->cmf_max_bytes_per_interval < bw) { 1838 bwdif = bw - phba->cmf_max_bytes_per_interval; 1839 s = "Increase"; 1840 } else { 1841 bwdif = phba->cmf_max_bytes_per_interval - bw; 1842 s = "Decrease"; 1843 } 1844 1845 /* What is the change percentage */ 1846 slop = div_u64(phba->cmf_link_byte_count, 200); /*For rounding*/ 1847 pcent = div64_u64(bwdif * 100 + slop, 1848 phba->cmf_link_byte_count); 1849 bwpcent = div64_u64(bw * 100 + slop, 1850 phba->cmf_link_byte_count); 1851 /* Because of bytes adjustment due to shorter timer in 1852 * lpfc_cmf_timer() the cmf_link_byte_count can be shorter and 1853 * may seem like BW is above 100%. 
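		 *
		 * Worked example (hypothetical numbers): with
		 * cmf_link_byte_count = 1,000,000 the slop is 5,000, so a
		 * bwdif of 949,960 gives (94,996,000 + 5,000) / 1,000,000
		 * = 95, where plain integer division would have truncated
		 * 94.996% down to 94%.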
1854 */ 1855 if (bwpcent > 100) 1856 bwpcent = 100; 1857 1858 if (phba->cmf_max_bytes_per_interval < bw && 1859 bwpcent > 95) 1860 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 1861 "6208 Congestion bandwidth " 1862 "limits removed\n"); 1863 else if ((phba->cmf_max_bytes_per_interval > bw) && 1864 ((bwpcent + pcent) <= 100) && ((bwpcent + pcent) > 95)) 1865 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 1866 "6209 Congestion bandwidth " 1867 "limits in effect\n"); 1868 1869 if (asig) { 1870 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 1871 "6237 BW Threshold %lld%% (%lld): " 1872 "%lld%% %s: Signal Alarm: cg:%d " 1873 "Info:%u\n", 1874 bwpcent, bw, pcent, s, cg, 1875 phba->cmf_active_info); 1876 } else if (afpin) { 1877 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 1878 "6238 BW Threshold %lld%% (%lld): " 1879 "%lld%% %s: FPIN Alarm: cg:%d " 1880 "Info:%u\n", 1881 bwpcent, bw, pcent, s, cg, 1882 phba->cmf_active_info); 1883 } else if (sigcnt) { 1884 wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync); 1885 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 1886 "6239 BW Threshold %lld%% (%lld): " 1887 "%lld%% %s: Signal Warning: " 1888 "Cnt %d Max %d: cg:%d Info:%u\n", 1889 bwpcent, bw, pcent, s, sigcnt, 1890 wsigmax, cg, phba->cmf_active_info); 1891 } else if (fpincnt) { 1892 wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync); 1893 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 1894 "6240 BW Threshold %lld%% (%lld): " 1895 "%lld%% %s: FPIN Warning: " 1896 "Cnt %d Max %d: cg:%d Info:%u\n", 1897 bwpcent, bw, pcent, s, fpincnt, 1898 wfpinmax, cg, phba->cmf_active_info); 1899 } else { 1900 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 1901 "6241 BW Threshold %lld%% (%lld): " 1902 "CMF %lld%% %s: cg:%d Info:%u\n", 1903 bwpcent, bw, pcent, s, cg, 1904 phba->cmf_active_info); 1905 } 1906 } else if (info) { 1907 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 1908 "6246 Info Threshold %u\n", info); 1909 } 1910 1911 /* Save BW change to be picked up during next timer interrupt */ 1912 phba->cmf_last_sync_bw = bw; 1913 out: 1914 lpfc_sli_release_iocbq(phba, cmdiocb); 1915 } 1916 1917 /** 1918 * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE 1919 * @phba: Pointer to HBA context object. 1920 * @ms: ms to set in WQE interval, 0 means use init op 1921 * @total: Total rcv bytes for this interval 1922 * 1923 * This routine is called every CMF timer interrupt. Its purpose is 1924 * to issue a CMF_SYNC_WQE to the firmware to inform it of any events 1925 * that may indicate we have congestion (FPINs or Signals). Upon 1926 * completion, the firmware will indicate any BW restrictions the 1927 * driver may need to take. 
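 *
 * Illustrative call (a sketch; the real caller is the CMF timer code and
 * the exact argument plumbing may differ):
 *
 *	rc = lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, rcv_bytes);
 *
 * A zero return means the WQE was issued (or CMF is not in managed mode);
 * a nonzero return means no iocbq was available or the issue failed.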
 **/
int
lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
{
	union lpfc_wqe128 *wqe;
	struct lpfc_iocbq *sync_buf;
	unsigned long iflags;
	u32 ret_val;
	u32 atot, wtot, max;
	u8 warn_sync_period = 0;

	/* First address any alarm / warning activity */
	atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
	wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);

	/* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */
	if (phba->cmf_active_mode != LPFC_CFG_MANAGED ||
	    phba->link_state == LPFC_LINK_DOWN)
		return 0;

	spin_lock_irqsave(&phba->hbalock, iflags);
	sync_buf = __lpfc_sli_get_iocbq(phba);
	if (!sync_buf) {
		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
				"6244 No available WQEs for CMF_SYNC_WQE\n");
		ret_val = ENOMEM;
		goto out_unlock;
	}

	wqe = &sync_buf->wqe;

	/* WQEs are reused. Clear stale data and set key fields to zero */
	memset(wqe, 0, sizeof(*wqe));

	/* If this is the very first CMF_SYNC_WQE, issue an init operation */
	if (!ms) {
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"6441 CMF Init %d - CMF_SYNC_WQE\n",
				phba->fc_eventTag);
		bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */
		bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL);
		goto initpath;
	}

	bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */
	bf_set(cmf_sync_interval, &wqe->cmf_sync, ms);

	/* Check for alarms / warnings */
	if (atot) {
		if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
			/* We hit a Signal alarm condition */
			bf_set(cmf_sync_asig, &wqe->cmf_sync, 1);
		} else {
			/* We hit an FPIN alarm condition */
			bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1);
		}
	} else if (wtot) {
		if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
		    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
			/* We hit a Signal warning condition */
			max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency *
				lpfc_acqe_cgn_frequency;
			bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
			bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot);
			warn_sync_period = lpfc_acqe_cgn_frequency;
		} else {
			/* We hit an FPIN warning condition */
			bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1);
			bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1);
			if (phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ)
				warn_sync_period =
				LPFC_MSECS_TO_SECS(phba->cgn_fpin_frequency);
		}
	}

	/* Update total read blocks during previous timer interval */
	wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE);

initpath:
	bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER);
	wqe->cmf_sync.event_tag = phba->fc_eventTag;
	bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE);

	/* Setup reqtag to match the wqe completion.
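	 * The iotag was assigned when the iocbq was allocated (see
	 * lpfc_sli_next_iotag()); the firmware echoes it in the WCQE so
	 * lpfc_cmf_sync_cmpl() can be matched to this request.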
*/ 2012 bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag); 2013 2014 bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1); 2015 bf_set(cmf_sync_period, &wqe->cmf_sync, warn_sync_period); 2016 2017 bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND); 2018 bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1); 2019 bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT); 2020 2021 sync_buf->vport = phba->pport; 2022 sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl; 2023 sync_buf->cmd_dmabuf = NULL; 2024 sync_buf->rsp_dmabuf = NULL; 2025 sync_buf->bpl_dmabuf = NULL; 2026 sync_buf->sli4_xritag = NO_XRI; 2027 2028 sync_buf->cmd_flag |= LPFC_IO_CMF; 2029 ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf); 2030 if (ret_val) { 2031 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 2032 "6214 Cannot issue CMF_SYNC_WQE: x%x\n", 2033 ret_val); 2034 __lpfc_sli_release_iocbq(phba, sync_buf); 2035 } 2036 out_unlock: 2037 spin_unlock_irqrestore(&phba->hbalock, iflags); 2038 return ret_val; 2039 } 2040 2041 /** 2042 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring 2043 * @phba: Pointer to HBA context object. 2044 * @pring: Pointer to driver SLI ring object. 2045 * 2046 * This function is called with hbalock held and the caller must post the 2047 * iocb without releasing the lock. If the caller releases the lock, 2048 * iocb slot returned by the function is not guaranteed to be available. 2049 * The function returns pointer to the next available iocb slot if there 2050 * is available slot in the ring, else it returns NULL. 2051 * If the get index of the ring is ahead of the put index, the function 2052 * will post an error attention event to the worker thread to take the 2053 * HBA to offline state. 2054 **/ 2055 static IOCB_t * 2056 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 2057 { 2058 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 2059 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb; 2060 2061 lockdep_assert_held(&phba->hbalock); 2062 2063 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) && 2064 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx)) 2065 pring->sli.sli3.next_cmdidx = 0; 2066 2067 if (unlikely(pring->sli.sli3.local_getidx == 2068 pring->sli.sli3.next_cmdidx)) { 2069 2070 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 2071 2072 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) { 2073 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2074 "0315 Ring %d issue: portCmdGet %d " 2075 "is bigger than cmd ring %d\n", 2076 pring->ringno, 2077 pring->sli.sli3.local_getidx, 2078 max_cmd_idx); 2079 2080 phba->link_state = LPFC_HBA_ERROR; 2081 /* 2082 * All error attention handlers are posted to 2083 * worker thread 2084 */ 2085 phba->work_ha |= HA_ERATT; 2086 phba->work_hs = HS_FFER3; 2087 2088 lpfc_worker_wake_up(phba); 2089 2090 return NULL; 2091 } 2092 2093 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx) 2094 return NULL; 2095 } 2096 2097 return lpfc_cmd_iocb(phba, pring); 2098 } 2099 2100 /** 2101 * lpfc_sli_next_iotag - Get an iotag for the iocb 2102 * @phba: Pointer to HBA context object. 2103 * @iocbq: Pointer to driver iocb object. 2104 * 2105 * This function gets an iotag for the iocb. If there is no unused iotag and 2106 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup 2107 * array and assigns a new iotag. 2108 * The function returns the allocated iotag if successful, else returns zero. 2109 * Zero is not a valid iotag. 
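 * The returned iotag indexes psli->iocbq_lookup, which
 * lpfc_sli_iocbq_lookup() later uses to match a completion back to this
 * iocb.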
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG. Last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}

/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 *            posted to firmware.
 *
 * This function is called to post a new iocb to the firmware. This
 * function copies the new iocb to ring iocb slot and updates the
 * ring pointers. It adds the new iocb to txcmplq if there is
 * a completion callback for this iocb else the function will free the
 * iocb object. The hbalock is asserted held in the code path calling
 * this routine.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->cmd_cmpl) ? nextiocb->iotag : 0;


	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now.
	 * For IOCBs, like QUE_RING_BUF, that have no rsp ring completion,
	 * cmd_cmpl MUST be NULL.
	 */
	if (nextiocb->cmd_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}

/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform
 * firmware that there is pending work to be done for this ring and requests
 * an interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}

/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}

/**
 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when the driver
 * detects space available in the ring.
 **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
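	 *
	 * When all of the above hold, iocbs are moved from the txq into
	 * ring slots until either the txq drains (the chip attention is
	 * then updated via lpfc_sli_update_ring) or the ring fills, in
	 * which case lpfc_sli_update_full_ring asks the HBA to raise an
	 * interrupt once slots free up.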
2311 */ 2312 2313 if (lpfc_is_link_up(phba) && 2314 (!list_empty(&pring->txq)) && 2315 (pring->ringno != LPFC_FCP_RING || 2316 phba->sli.sli_flag & LPFC_PROCESS_LA)) { 2317 2318 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 2319 (nextiocb = lpfc_sli_ringtx_get(phba, pring))) 2320 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 2321 2322 if (iocb) 2323 lpfc_sli_update_ring(phba, pring); 2324 else 2325 lpfc_sli_update_full_ring(phba, pring); 2326 } 2327 2328 return; 2329 } 2330 2331 /** 2332 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ 2333 * @phba: Pointer to HBA context object. 2334 * @hbqno: HBQ number. 2335 * 2336 * This function is called with hbalock held to get the next 2337 * available slot for the given HBQ. If there is free slot 2338 * available for the HBQ it will return pointer to the next available 2339 * HBQ entry else it will return NULL. 2340 **/ 2341 static struct lpfc_hbq_entry * 2342 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) 2343 { 2344 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 2345 2346 lockdep_assert_held(&phba->hbalock); 2347 2348 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && 2349 ++hbqp->next_hbqPutIdx >= hbqp->entry_count) 2350 hbqp->next_hbqPutIdx = 0; 2351 2352 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { 2353 uint32_t raw_index = phba->hbq_get[hbqno]; 2354 uint32_t getidx = le32_to_cpu(raw_index); 2355 2356 hbqp->local_hbqGetIdx = getidx; 2357 2358 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { 2359 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2360 "1802 HBQ %d: local_hbqGetIdx " 2361 "%u is > than hbqp->entry_count %u\n", 2362 hbqno, hbqp->local_hbqGetIdx, 2363 hbqp->entry_count); 2364 2365 phba->link_state = LPFC_HBA_ERROR; 2366 return NULL; 2367 } 2368 2369 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) 2370 return NULL; 2371 } 2372 2373 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt + 2374 hbqp->hbqPutIdx; 2375 } 2376 2377 /** 2378 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers 2379 * @phba: Pointer to HBA context object. 2380 * 2381 * This function is called with no lock held to free all the 2382 * hbq buffers while uninitializing the SLI interface. It also 2383 * frees the HBQ buffers returned by the firmware but not yet 2384 * processed by the upper layers. 2385 **/ 2386 void 2387 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) 2388 { 2389 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 2390 struct hbq_dmabuf *hbq_buf; 2391 unsigned long flags; 2392 int i, hbq_count; 2393 2394 hbq_count = lpfc_sli_hbq_count(); 2395 /* Return all memory used by all HBQs */ 2396 spin_lock_irqsave(&phba->hbalock, flags); 2397 for (i = 0; i < hbq_count; ++i) { 2398 list_for_each_entry_safe(dmabuf, next_dmabuf, 2399 &phba->hbqs[i].hbq_buffer_list, list) { 2400 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 2401 list_del(&hbq_buf->dbuf.list); 2402 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); 2403 } 2404 phba->hbqs[i].buffer_count = 0; 2405 } 2406 2407 /* Mark the HBQs not in use */ 2408 phba->hbq_in_use = 0; 2409 spin_unlock_irqrestore(&phba->hbalock, flags); 2410 } 2411 2412 /** 2413 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware 2414 * @phba: Pointer to HBA context object. 2415 * @hbqno: HBQ number. 2416 * @hbq_buf: Pointer to HBQ buffer. 2417 * 2418 * This function is called with the hbalock held to post a 2419 * hbq buffer to the firmware. If the function finds an empty 2420 * slot in the HBQ, it will post the buffer. 
 * The function returns zero if it successfully posts the buffer, else it
 * returns an error code.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	lockdep_assert_held(&phba->hbalock);
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}

/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a hbq buffer to the
 * firmware. If the function finds an empty slot in the HBQ, it will post the
 * buffer and place it on the hbq_buffer_list. The function will return zero
 * if it successfully posts the buffer, else it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	lockdep_assert_held(&phba->hbalock);
	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
		/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
		/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	}
	return -ENOMEM;
}

/**
 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an RQE to the SLI4
 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
 * the hbq_buffer_list and return zero, otherwise it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;

	if (hbqno != LPFC_ELS_HBQ)
		return 1;
	hrq = phba->sli4_hba.hdr_rq;
	drq = phba->sli4_hba.dat_rq;

	lockdep_assert_held(&phba->hbalock);
	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
	if (rc < 0)
		return rc;
	hbq_buf->tag = (rc | (hbqno << 16));
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}

/* HBQ for ELS and CT traffic.
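 * A single 256-entry HBQ is used: 40 buffers are posted at initialization
 * (init_count) and the pool is replenished 40 at a time (add_count) as
 * buffers are consumed.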
*/ 2512 static struct lpfc_hbq_init lpfc_els_hbq = { 2513 .rn = 1, 2514 .entry_count = 256, 2515 .mask_count = 0, 2516 .profile = 0, 2517 .ring_mask = (1 << LPFC_ELS_RING), 2518 .buffer_count = 0, 2519 .init_count = 40, 2520 .add_count = 40, 2521 }; 2522 2523 /* Array of HBQs */ 2524 struct lpfc_hbq_init *lpfc_hbq_defs[] = { 2525 &lpfc_els_hbq, 2526 }; 2527 2528 /** 2529 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ 2530 * @phba: Pointer to HBA context object. 2531 * @hbqno: HBQ number. 2532 * @count: Number of HBQ buffers to be posted. 2533 * 2534 * This function is called with no lock held to post more hbq buffers to the 2535 * given HBQ. The function returns the number of HBQ buffers successfully 2536 * posted. 2537 **/ 2538 static int 2539 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) 2540 { 2541 uint32_t i, posted = 0; 2542 unsigned long flags; 2543 struct hbq_dmabuf *hbq_buffer; 2544 LIST_HEAD(hbq_buf_list); 2545 if (!phba->hbqs[hbqno].hbq_alloc_buffer) 2546 return 0; 2547 2548 if ((phba->hbqs[hbqno].buffer_count + count) > 2549 lpfc_hbq_defs[hbqno]->entry_count) 2550 count = lpfc_hbq_defs[hbqno]->entry_count - 2551 phba->hbqs[hbqno].buffer_count; 2552 if (!count) 2553 return 0; 2554 /* Allocate HBQ entries */ 2555 for (i = 0; i < count; i++) { 2556 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 2557 if (!hbq_buffer) 2558 break; 2559 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list); 2560 } 2561 /* Check whether HBQ is still in use */ 2562 spin_lock_irqsave(&phba->hbalock, flags); 2563 if (!phba->hbq_in_use) 2564 goto err; 2565 while (!list_empty(&hbq_buf_list)) { 2566 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 2567 dbuf.list); 2568 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 2569 (hbqno << 16)); 2570 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 2571 phba->hbqs[hbqno].buffer_count++; 2572 posted++; 2573 } else 2574 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2575 } 2576 spin_unlock_irqrestore(&phba->hbalock, flags); 2577 return posted; 2578 err: 2579 spin_unlock_irqrestore(&phba->hbalock, flags); 2580 while (!list_empty(&hbq_buf_list)) { 2581 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 2582 dbuf.list); 2583 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2584 } 2585 return 0; 2586 } 2587 2588 /** 2589 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware 2590 * @phba: Pointer to HBA context object. 2591 * @qno: HBQ number. 2592 * 2593 * This function posts more buffers to the HBQ. This function 2594 * is called with no lock held. The function returns the number of HBQ entries 2595 * successfully allocated. 2596 **/ 2597 int 2598 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 2599 { 2600 if (phba->sli_rev == LPFC_SLI_REV4) 2601 return 0; 2602 else 2603 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 2604 lpfc_hbq_defs[qno]->add_count); 2605 } 2606 2607 /** 2608 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ 2609 * @phba: Pointer to HBA context object. 2610 * @qno: HBQ queue number. 2611 * 2612 * This function is called from SLI initialization code path with 2613 * no lock held to post initial HBQ buffers to firmware. The 2614 * function returns the number of HBQ entries successfully allocated. 
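 *
 * Illustrative use (a sketch, not the exact init-path code): a return of
 * zero from
 *
 *	posted = lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
 *
 * means no receive buffers could be posted and is treated as an
 * out-of-resources condition by the caller.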
 **/
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					lpfc_hbq_defs[qno]->entry_count);
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					lpfc_hbq_defs[qno]->init_count);
}

/**
 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
 * @rb_list: Pointer to the hbq buffer list to take a buffer from.
 *
 * This function removes the first hbq buffer on an hbq list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_get(struct list_head *rb_list)
{
	struct lpfc_dmabuf *d_buf;

	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
	if (!d_buf)
		return NULL;
	return container_of(d_buf, struct hbq_dmabuf, dbuf);
}

/**
 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
 * @phba: Pointer to HBA context object.
 * @hrq: Pointer to the header receive queue to take a buffer from.
 *
 * This function removes the first RQ buffer on an RQ buffer list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct rqb_dmabuf *
lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
{
	struct lpfc_dmabuf *h_buf;
	struct lpfc_rqb *rqbp;

	rqbp = hrq->rqbp;
	list_remove_head(&rqbp->rqb_buffer_list, h_buf,
			 struct lpfc_dmabuf, list);
	if (!h_buf)
		return NULL;
	rqbp->buffer_count--;
	return container_of(h_buf, struct rqb_dmabuf, hbuf);
}

/**
 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
 * @phba: Pointer to HBA context object.
 * @tag: Tag of the hbq buffer.
 *
 * This function searches for the hbq buffer associated with the given tag in
 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
 * otherwise it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;
	uint32_t hbqno;

	hbqno = tag >> 16;
	if (hbqno >= LPFC_MAX_HBQS)
		return NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if (hbq_buf->tag == tag) {
			spin_unlock_irq(&phba->hbalock);
			return hbq_buf;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"1803 Bad hbq tag. Data: x%x x%x\n",
			tag, phba->hbqs[tag >> 16].buffer_count);
	return NULL;
}

/**
 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbq_buffer: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held. It gives back
 * the hbq buffer to firmware. If the HBQ does not have space to
 * post the buffer, it will free the buffer.
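 *
 * Illustrative use (hypothetical caller; the hbalock must be held, as
 * lpfc_sli_hbq_to_firmware asserts):
 *
 *	hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
 *	lpfc_sli_free_hbq(phba, hbq_buf);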
 **/
void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
	uint32_t hbqno;

	if (hbq_buffer) {
		hbqno = hbq_buffer->tag >> 16;
		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
}

/**
 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
 * @mbxCommand: mailbox command code.
 *
 * This function is called by the mailbox event handler function to verify
 * that the completed mailbox command is a legitimate mailbox command. If the
 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
 * and the mailbox event handler will take the HBA offline.
 **/
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
	uint8_t ret;

	switch (mbxCommand) {
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_RUN_BIU_DIAG:
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_SPARM:
	case MBX_READ_STATUS:
	case MBX_READ_RPI:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_REG_LOGIN:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_UPDATE_CFG:
	case MBX_DOWN_LOAD:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_SET_MASK:
	case MBX_SET_VARIABLE:
	case MBX_UNREG_D_ID:
	case MBX_KILL_BOARD:
	case MBX_CONFIG_FARP:
	case MBX_BEACON:
	case MBX_LOAD_AREA:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_READ_RPI64:
	case MBX_REG_LOGIN64:
	case MBX_READ_TOPOLOGY:
	case MBX_WRITE_WWN:
	case MBX_SET_DEBUG:
	case MBX_LOAD_EXP_ROM:
	case MBX_ASYNCEVT_ENABLE:
	case MBX_REG_VPI:
	case MBX_UNREG_VPI:
	case MBX_HEARTBEAT:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_SLI4_CONFIG:
	case MBX_SLI4_REQ_FTRS:
	case MBX_REG_FCFI:
	case MBX_UNREG_FCFI:
	case MBX_REG_VFI:
	case MBX_UNREG_VFI:
	case MBX_INIT_VPI:
	case MBX_INIT_VFI:
	case MBX_RESUME_RPI:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_READ_EVENT_LOG:
	case MBX_SECURITY_MGMT:
	case MBX_AUTH_PORT:
	case MBX_ACCESS_VDATA:
		ret = mbxCommand;
		break;
	default:
		ret = MBX_SHUTDOWN;
		break;
	}
	return ret;
}

/**
 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler function for mailbox commands issued from
 * lpfc_sli_issue_mbox_wait. This function is called by the
 * mailbox event handler function with no lock held. This function
 * will wake up the thread waiting on the completion pointed to by
 * ctx_u.mbox_wait of the mailbox.
 **/
void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;
	struct completion *pmbox_done;

	/*
	 * If pmbox_done is empty, the driver thread gave up waiting and
	 * continued running.
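	 * Reading ctx_u.mbox_wait under the hbalock is assumed to pair
	 * with the timed-out waiter clearing the pointer under the same
	 * lock, so complete() is never called on a stale completion.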
	 */
	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	pmbox_done = pmboxq->ctx_u.mbox_wait;
	if (pmbox_done)
		complete(pmbox_done);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return;
}

static void
__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	unsigned long iflags;

	if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
		lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
		spin_lock_irqsave(&ndlp->lock, iflags);
		ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
		ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
		spin_unlock_irqrestore(&ndlp->lock, iflags);
	}
	ndlp->nlp_flag &= ~NLP_UNREG_INP;
}

void
lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	__lpfc_sli_rpi_release(vport, ndlp);
}

/**
 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the default mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. If the completed command is a REG_LOGIN mailbox command,
 * this function will issue an UNREG_LOGIN to reclaim the RPI.
 **/
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	uint16_t rpi, vpi;
	int rc;

	/*
	 * If a REG_LOGIN succeeded after the node was destroyed or the
	 * node is in re-discovery, the driver needs to clean up the RPI.
	 */
	if (!test_bit(FC_UNLOADING, &phba->pport->load_flag) &&
	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
	    !pmb->u.mb.mbxStatus) {
		mp = pmb->ctx_buf;
		if (mp) {
			pmb->ctx_buf = NULL;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		rpi = pmb->u.mb.un.varWords[0];
		vpi = pmb->u.mb.un.varRegLogin.vpi;
		if (phba->sli_rev == LPFC_SLI_REV4)
			vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
		lpfc_unreg_login(phba, vpi, rpi, pmb);
		pmb->vport = vport;
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
	}

	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
	    !test_bit(FC_UNLOADING, &phba->pport->load_flag) &&
	    !pmb->u.mb.mbxStatus) {
		shost = lpfc_shost_from_vport(vport);
		spin_lock_irq(shost->host_lock);
		vport->vpi_state |= LPFC_VPI_REGISTERED;
		spin_unlock_irq(shost->host_lock);
		clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
	}

	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
		ndlp = pmb->ctx_ndlp;
		lpfc_nlp_put(ndlp);
	}

	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
		ndlp = pmb->ctx_ndlp;

		/* Check to see if there are any deferred events to process */
		if (ndlp) {
			lpfc_printf_vlog(
				vport,
				KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
				"1438 UNREG cmpl deferred mbox x%x "
				"on NPort x%x Data: x%x x%x x%px x%lx x%x\n",
				ndlp->nlp_rpi, ndlp->nlp_DID,
				ndlp->nlp_flag, ndlp->nlp_defer_did,
				ndlp, vport->load_flag, kref_read(&ndlp->kref));

			if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
			    (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
				ndlp->nlp_flag &= ~NLP_UNREG_INP;
				ndlp->nlp_defer_did =
					NLP_EVT_NOTHING_PENDING;
				lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
			} else {
				__lpfc_sli_rpi_release(vport, ndlp);
			}

			/* The unreg_login mailbox is complete and had a
			 * reference that has to be released. The PLOGI
			 * got its own ref.
			 */
			lpfc_nlp_put(ndlp);
			pmb->ctx_ndlp = NULL;
		}
	}

	/* This nlp_put pairs with lpfc_sli4_resume_rpi */
	if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
		ndlp = pmb->ctx_ndlp;
		lpfc_nlp_put(ndlp);
	}

	/* Check security permission status on INIT_LINK mailbox command */
	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2860 SLI authentication is required "
				"for INIT_LINK but has not been done yet\n");

	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
		lpfc_sli4_mbox_cmd_free(phba, pmb);
	else
		lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}

/**
 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the unreg rpi mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. An additional reference is put on the ndlp to prevent
 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
 * the unreg mailbox command completes; this routine puts the
 * reference back.
 *
 **/
void
lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_nodelist *ndlp;

	ndlp = pmb->ctx_ndlp;
	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    (bf_get(lpfc_sli_intf_if_type,
			    &phba->sli4_hba.sli_intf) >=
		     LPFC_SLI_INTF_IF_TYPE_2)) {
			if (ndlp) {
				lpfc_printf_vlog(
					vport, KERN_INFO,
					LOG_MBOX | LOG_SLI | LOG_NODE,
					"0010 UNREG_LOGIN vpi:x%x "
					"rpi:%x DID:%x defer x%x flg x%x "
					"x%px\n",
					vport->vpi, ndlp->nlp_rpi,
					ndlp->nlp_DID, ndlp->nlp_defer_did,
					ndlp->nlp_flag,
					ndlp);
				ndlp->nlp_flag &= ~NLP_LOGO_ACC;

				/* Check to see if there are any deferred
				 * events to process
				 */
				if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
				    (ndlp->nlp_defer_did !=
				     NLP_EVT_NOTHING_PENDING)) {
					lpfc_printf_vlog(
						vport, KERN_INFO,
						LOG_MBOX | LOG_SLI | LOG_NODE,
						"4111 UNREG cmpl deferred "
						"clr x%x on "
						"NPort x%x Data: x%x x%px\n",
						ndlp->nlp_rpi, ndlp->nlp_DID,
						ndlp->nlp_defer_did, ndlp);
					ndlp->nlp_flag &= ~NLP_UNREG_INP;
					ndlp->nlp_defer_did =
						NLP_EVT_NOTHING_PENDING;
					lpfc_issue_els_plogi(
						vport, ndlp->nlp_DID, 0);
				} else {
					__lpfc_sli_rpi_release(vport, ndlp);
				}
				lpfc_nlp_put(ndlp);
			}
		}
	}

	mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function processes all
 * the completed mailbox commands and gives them to the upper layers. The
 * interrupt service routine processes mailbox completion interrupts and
 * adds completed mailbox commands to the mboxq_cmpl queue and signals the
 * worker thread.
 * The worker thread calls lpfc_sli_handle_mb_event, which returns the
 * completed mailbox commands in the mboxq_cmpl queue to the upper layers.
 * This function returns the mailbox commands to the upper layer by calling
 * the completion handler function of each mailbox.
 **/
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	int rc;
	LIST_HEAD(cmplq);

	phba->sli.slistat.mbox_event++;

	/* Get all completed mailbox buffers into the cmplq */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
	spin_unlock_irq(&phba->hbalock);

	/* Get a Mailbox buffer to set up mailbox commands for callback */
	do {
		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
		if (pmb == NULL)
			break;

		pmbox = &pmb->u.mb;

		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
			if (pmb->vport) {
				lpfc_debugfs_disc_trc(pmb->vport,
					LPFC_DISC_TRC_MBOX_VPORT,
					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			} else {
				lpfc_debugfs_disc_trc(phba->pport,
					LPFC_DISC_TRC_MBOX,
					"MBOX cmpl: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
		}

		/*
		 * It is a fatal error if an unknown mbox command completes.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {
			/* Unknown mailbox command compl */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"(%d):0323 Unknown Mailbox command "
					"x%x (x%x/x%x) Cmpl\n",
					pmb->vport ? pmb->vport->vpi :
					LPFC_VPORT_UNKNOWN,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb));
			phba->link_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			continue;
		}

		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing */
				lpfc_printf_log(phba, KERN_INFO,
					LOG_MBOX | LOG_SLI,
					"(%d):0305 Mbox cmd cmpl "
					"error - RETRYing Data: x%x "
					"(x%x/x%x) x%x x%x x%x\n",
					pmb->vport ? pmb->vport->vpi :
					LPFC_VPORT_UNKNOWN,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb),
					pmbox->mbxStatus,
					pmbox->un.varWords[0],
					pmb->vport ? pmb->vport->port_state :
					LPFC_VPORT_UNKNOWN);
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc != MBX_NOT_FINISHED)
					continue;
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
				"x%x x%x x%x\n",
				pmb->vport ?
pmb->vport->vpi : 0, 3152 pmbox->mbxCommand, 3153 lpfc_sli_config_mbox_subsys_get(phba, pmb), 3154 lpfc_sli_config_mbox_opcode_get(phba, pmb), 3155 pmb->mbox_cmpl, 3156 *((uint32_t *) pmbox), 3157 pmbox->un.varWords[0], 3158 pmbox->un.varWords[1], 3159 pmbox->un.varWords[2], 3160 pmbox->un.varWords[3], 3161 pmbox->un.varWords[4], 3162 pmbox->un.varWords[5], 3163 pmbox->un.varWords[6], 3164 pmbox->un.varWords[7], 3165 pmbox->un.varWords[8], 3166 pmbox->un.varWords[9], 3167 pmbox->un.varWords[10]); 3168 3169 if (pmb->mbox_cmpl) 3170 pmb->mbox_cmpl(phba,pmb); 3171 } while (1); 3172 return 0; 3173 } 3174 3175 /** 3176 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag 3177 * @phba: Pointer to HBA context object. 3178 * @pring: Pointer to driver SLI ring object. 3179 * @tag: buffer tag. 3180 * 3181 * This function is called with no lock held. When QUE_BUFTAG_BIT bit 3182 * is set in the tag the buffer is posted for a particular exchange, 3183 * the function will return the buffer without replacing the buffer. 3184 * If the buffer is for unsolicited ELS or CT traffic, this function 3185 * returns the buffer and also posts another buffer to the firmware. 3186 **/ 3187 static struct lpfc_dmabuf * 3188 lpfc_sli_get_buff(struct lpfc_hba *phba, 3189 struct lpfc_sli_ring *pring, 3190 uint32_t tag) 3191 { 3192 struct hbq_dmabuf *hbq_entry; 3193 3194 if (tag & QUE_BUFTAG_BIT) 3195 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); 3196 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); 3197 if (!hbq_entry) 3198 return NULL; 3199 return &hbq_entry->dbuf; 3200 } 3201 3202 /** 3203 * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer 3204 * containing a NVME LS request. 3205 * @phba: pointer to lpfc hba data structure. 3206 * @piocb: pointer to the iocbq struct representing the sequence starting 3207 * frame. 3208 * 3209 * This routine initially validates the NVME LS, validates there is a login 3210 * with the port that sent the LS, and then calls the appropriate nvme host 3211 * or target LS request handler. 
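 *
 * The F_CTL validation below accepts only frames carrying
 * FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT in the upper F_CTL
 * byte, i.e. a complete single-sequence LS that transfers sequence
 * initiative to the receiver.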
3212 **/ 3213 static void 3214 lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) 3215 { 3216 struct lpfc_nodelist *ndlp; 3217 struct lpfc_dmabuf *d_buf; 3218 struct hbq_dmabuf *nvmebuf; 3219 struct fc_frame_header *fc_hdr; 3220 struct lpfc_async_xchg_ctx *axchg = NULL; 3221 char *failwhy = NULL; 3222 uint32_t oxid, sid, did, fctl, size; 3223 int ret = 1; 3224 3225 d_buf = piocb->cmd_dmabuf; 3226 3227 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 3228 fc_hdr = nvmebuf->hbuf.virt; 3229 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 3230 sid = sli4_sid_from_fc_hdr(fc_hdr); 3231 did = sli4_did_from_fc_hdr(fc_hdr); 3232 fctl = (fc_hdr->fh_f_ctl[0] << 16 | 3233 fc_hdr->fh_f_ctl[1] << 8 | 3234 fc_hdr->fh_f_ctl[2]); 3235 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl); 3236 3237 lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n", 3238 oxid, size, sid); 3239 3240 if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) { 3241 failwhy = "Driver Unloading"; 3242 } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { 3243 failwhy = "NVME FC4 Disabled"; 3244 } else if (!phba->nvmet_support && !phba->pport->localport) { 3245 failwhy = "No Localport"; 3246 } else if (phba->nvmet_support && !phba->targetport) { 3247 failwhy = "No Targetport"; 3248 } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) { 3249 failwhy = "Bad NVME LS R_CTL"; 3250 } else if (unlikely((fctl & 0x00FF0000) != 3251 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) { 3252 failwhy = "Bad NVME LS F_CTL"; 3253 } else { 3254 axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC); 3255 if (!axchg) 3256 failwhy = "No CTX memory"; 3257 } 3258 3259 if (unlikely(failwhy)) { 3260 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3261 "6154 Drop NVME LS: SID %06X OXID x%X: %s\n", 3262 sid, oxid, failwhy); 3263 goto out_fail; 3264 } 3265 3266 /* validate the source of the LS is logged in */ 3267 ndlp = lpfc_findnode_did(phba->pport, sid); 3268 if (!ndlp || 3269 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 3270 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { 3271 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, 3272 "6216 NVME Unsol rcv: No ndlp: " 3273 "NPort_ID x%x oxid x%x\n", 3274 sid, oxid); 3275 goto out_fail; 3276 } 3277 3278 axchg->phba = phba; 3279 axchg->ndlp = ndlp; 3280 axchg->size = size; 3281 axchg->oxid = oxid; 3282 axchg->sid = sid; 3283 axchg->wqeq = NULL; 3284 axchg->state = LPFC_NVME_STE_LS_RCV; 3285 axchg->entry_cnt = 1; 3286 axchg->rqb_buffer = (void *)nvmebuf; 3287 axchg->hdwq = &phba->sli4_hba.hdwq[0]; 3288 axchg->payload = nvmebuf->dbuf.virt; 3289 INIT_LIST_HEAD(&axchg->list); 3290 3291 if (phba->nvmet_support) { 3292 ret = lpfc_nvmet_handle_lsreq(phba, axchg); 3293 spin_lock_irq(&ndlp->lock); 3294 if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) { 3295 ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH; 3296 spin_unlock_irq(&ndlp->lock); 3297 3298 /* This reference is a single occurrence to hold the 3299 * node valid until the nvmet transport calls 3300 * host_release. 3301 */ 3302 if (!lpfc_nlp_get(ndlp)) 3303 goto out_fail; 3304 3305 lpfc_printf_log(phba, KERN_ERR, LOG_NODE, 3306 "6206 NVMET unsol ls_req ndlp x%px " 3307 "DID x%x xflags x%x refcnt %d\n", 3308 ndlp, ndlp->nlp_DID, 3309 ndlp->fc4_xpt_flags, 3310 kref_read(&ndlp->kref)); 3311 } else { 3312 spin_unlock_irq(&ndlp->lock); 3313 } 3314 } else { 3315 ret = lpfc_nvme_handle_lsreq(phba, axchg); 3316 } 3317 3318 /* if zero, LS was successfully handled. 
If non-zero, LS not handled */ 3319 if (!ret) 3320 return; 3321 3322 out_fail: 3323 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3324 "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X " 3325 "NVMe%s handler failed %d\n", 3326 did, sid, oxid, 3327 (phba->nvmet_support) ? "T" : "I", ret); 3328 3329 /* recycle receive buffer */ 3330 lpfc_in_buf_free(phba, &nvmebuf->dbuf); 3331 3332 /* If start of new exchange, abort it */ 3333 if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX))) 3334 ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid); 3335 3336 if (ret) 3337 kfree(axchg); 3338 } 3339 3340 /** 3341 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence 3342 * @phba: Pointer to HBA context object. 3343 * @pring: Pointer to driver SLI ring object. 3344 * @saveq: Pointer to the iocbq struct representing the sequence starting frame. 3345 * @fch_r_ctl: the r_ctl for the first frame of the sequence. 3346 * @fch_type: the type for the first frame of the sequence. 3347 * 3348 * This function is called with no lock held. This function uses the r_ctl and 3349 * type of the received sequence to find the correct callback function to call 3350 * to process the sequence. 3351 **/ 3352 static int 3353 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3354 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, 3355 uint32_t fch_type) 3356 { 3357 int i; 3358 3359 switch (fch_type) { 3360 case FC_TYPE_NVME: 3361 lpfc_nvme_unsol_ls_handler(phba, saveq); 3362 return 1; 3363 default: 3364 break; 3365 } 3366 3367 /* unSolicited Responses */ 3368 if (pring->prt[0].profile) { 3369 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 3370 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, 3371 saveq); 3372 return 1; 3373 } 3374 /* We must search, based on rctl / type 3375 for the right routine */ 3376 for (i = 0; i < pring->num_mask; i++) { 3377 if ((pring->prt[i].rctl == fch_r_ctl) && 3378 (pring->prt[i].type == fch_type)) { 3379 if (pring->prt[i].lpfc_sli_rcv_unsol_event) 3380 (pring->prt[i].lpfc_sli_rcv_unsol_event) 3381 (phba, pring, saveq); 3382 return 1; 3383 } 3384 } 3385 return 0; 3386 } 3387 3388 static void 3389 lpfc_sli_prep_unsol_wqe(struct lpfc_hba *phba, 3390 struct lpfc_iocbq *saveq) 3391 { 3392 IOCB_t *irsp; 3393 union lpfc_wqe128 *wqe; 3394 u16 i = 0; 3395 3396 irsp = &saveq->iocb; 3397 wqe = &saveq->wqe; 3398 3399 /* Fill wcqe with the IOCB status fields */ 3400 bf_set(lpfc_wcqe_c_status, &saveq->wcqe_cmpl, irsp->ulpStatus); 3401 saveq->wcqe_cmpl.word3 = irsp->ulpBdeCount; 3402 saveq->wcqe_cmpl.parameter = irsp->un.ulpWord[4]; 3403 saveq->wcqe_cmpl.total_data_placed = irsp->unsli3.rcvsli3.acc_len; 3404 3405 /* Source ID */ 3406 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, irsp->un.rcvels.parmRo); 3407 3408 /* rx-id of the response frame */ 3409 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, irsp->ulpContext); 3410 3411 /* ox-id of the frame */ 3412 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 3413 irsp->unsli3.rcvsli3.ox_id); 3414 3415 /* DID */ 3416 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, 3417 irsp->un.rcvels.remoteID); 3418 3419 /* unsol data len */ 3420 for (i = 0; i < irsp->ulpBdeCount; i++) { 3421 struct lpfc_hbq_entry *hbqe = NULL; 3422 3423 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 3424 if (i == 0) { 3425 hbqe = (struct lpfc_hbq_entry *) 3426 &irsp->un.ulpWord[0]; 3427 saveq->wqe.gen_req.bde.tus.f.bdeSize = 3428 hbqe->bde.tus.f.bdeSize; 3429 } else if (i == 1) { 3430 hbqe = (struct lpfc_hbq_entry *) 3431 
&irsp->unsli3.sli3Words[4]; 3432 saveq->unsol_rcv_len = hbqe->bde.tus.f.bdeSize; 3433 } 3434 } 3435 } 3436 } 3437 3438 /** 3439 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 3440 * @phba: Pointer to HBA context object. 3441 * @pring: Pointer to driver SLI ring object. 3442 * @saveq: Pointer to the unsolicited iocb. 3443 * 3444 * This function is called with no lock held by the ring event handler 3445 * when there is an unsolicited iocb posted to the response ring by the 3446 * firmware. This function gets the buffer associated with the iocbs 3447 * and calls the event handler for the ring. This function handles both 3448 * qring buffers and hbq buffers. 3449 * When the function returns 1 the caller can free the iocb object otherwise 3450 * upper layer functions will free the iocb objects. 3451 **/ 3452 static int 3453 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3454 struct lpfc_iocbq *saveq) 3455 { 3456 IOCB_t * irsp; 3457 WORD5 * w5p; 3458 dma_addr_t paddr; 3459 uint32_t Rctl, Type; 3460 struct lpfc_iocbq *iocbq; 3461 struct lpfc_dmabuf *dmzbuf; 3462 3463 irsp = &saveq->iocb; 3464 saveq->vport = phba->pport; 3465 3466 if (irsp->ulpCommand == CMD_ASYNC_STATUS) { 3467 if (pring->lpfc_sli_rcv_async_status) 3468 pring->lpfc_sli_rcv_async_status(phba, pring, saveq); 3469 else 3470 lpfc_printf_log(phba, 3471 KERN_WARNING, 3472 LOG_SLI, 3473 "0316 Ring %d handler: unexpected " 3474 "ASYNC_STATUS iocb received evt_code " 3475 "0x%x\n", 3476 pring->ringno, 3477 irsp->un.asyncstat.evt_code); 3478 return 1; 3479 } 3480 3481 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && 3482 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { 3483 if (irsp->ulpBdeCount > 0) { 3484 dmzbuf = lpfc_sli_get_buff(phba, pring, 3485 irsp->un.ulpWord[3]); 3486 lpfc_in_buf_free(phba, dmzbuf); 3487 } 3488 3489 if (irsp->ulpBdeCount > 1) { 3490 dmzbuf = lpfc_sli_get_buff(phba, pring, 3491 irsp->unsli3.sli3Words[3]); 3492 lpfc_in_buf_free(phba, dmzbuf); 3493 } 3494 3495 if (irsp->ulpBdeCount > 2) { 3496 dmzbuf = lpfc_sli_get_buff(phba, pring, 3497 irsp->unsli3.sli3Words[7]); 3498 lpfc_in_buf_free(phba, dmzbuf); 3499 } 3500 3501 return 1; 3502 } 3503 3504 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 3505 if (irsp->ulpBdeCount != 0) { 3506 saveq->cmd_dmabuf = lpfc_sli_get_buff(phba, pring, 3507 irsp->un.ulpWord[3]); 3508 if (!saveq->cmd_dmabuf) 3509 lpfc_printf_log(phba, 3510 KERN_ERR, 3511 LOG_SLI, 3512 "0341 Ring %d Cannot find buffer for " 3513 "an unsolicited iocb. tag 0x%x\n", 3514 pring->ringno, 3515 irsp->un.ulpWord[3]); 3516 } 3517 if (irsp->ulpBdeCount == 2) { 3518 saveq->bpl_dmabuf = lpfc_sli_get_buff(phba, pring, 3519 irsp->unsli3.sli3Words[7]); 3520 if (!saveq->bpl_dmabuf) 3521 lpfc_printf_log(phba, 3522 KERN_ERR, 3523 LOG_SLI, 3524 "0342 Ring %d Cannot find buffer for an" 3525 " unsolicited iocb. tag 0x%x\n", 3526 pring->ringno, 3527 irsp->unsli3.sli3Words[7]); 3528 } 3529 list_for_each_entry(iocbq, &saveq->list, list) { 3530 irsp = &iocbq->iocb; 3531 if (irsp->ulpBdeCount != 0) { 3532 iocbq->cmd_dmabuf = lpfc_sli_get_buff(phba, 3533 pring, 3534 irsp->un.ulpWord[3]); 3535 if (!iocbq->cmd_dmabuf) 3536 lpfc_printf_log(phba, 3537 KERN_ERR, 3538 LOG_SLI, 3539 "0343 Ring %d Cannot find " 3540 "buffer for an unsolicited iocb" 3541 ". 
tag 0x%x\n", pring->ringno, 3542 irsp->un.ulpWord[3]); 3543 } 3544 if (irsp->ulpBdeCount == 2) { 3545 iocbq->bpl_dmabuf = lpfc_sli_get_buff(phba, 3546 pring, 3547 irsp->unsli3.sli3Words[7]); 3548 if (!iocbq->bpl_dmabuf) 3549 lpfc_printf_log(phba, 3550 KERN_ERR, 3551 LOG_SLI, 3552 "0344 Ring %d Cannot find " 3553 "buffer for an unsolicited " 3554 "iocb. tag 0x%x\n", 3555 pring->ringno, 3556 irsp->unsli3.sli3Words[7]); 3557 } 3558 } 3559 } else { 3560 paddr = getPaddr(irsp->un.cont64[0].addrHigh, 3561 irsp->un.cont64[0].addrLow); 3562 saveq->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, 3563 paddr); 3564 if (irsp->ulpBdeCount == 2) { 3565 paddr = getPaddr(irsp->un.cont64[1].addrHigh, 3566 irsp->un.cont64[1].addrLow); 3567 saveq->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba, 3568 pring, 3569 paddr); 3570 } 3571 } 3572 3573 if (irsp->ulpBdeCount != 0 && 3574 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || 3575 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { 3576 int found = 0; 3577 3578 /* search continue save q for same XRI */ 3579 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { 3580 if (iocbq->iocb.unsli3.rcvsli3.ox_id == 3581 saveq->iocb.unsli3.rcvsli3.ox_id) { 3582 list_add_tail(&saveq->list, &iocbq->list); 3583 found = 1; 3584 break; 3585 } 3586 } 3587 if (!found) 3588 list_add_tail(&saveq->clist, 3589 &pring->iocb_continue_saveq); 3590 3591 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { 3592 list_del_init(&iocbq->clist); 3593 saveq = iocbq; 3594 irsp = &saveq->iocb; 3595 } else { 3596 return 0; 3597 } 3598 } 3599 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || 3600 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || 3601 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { 3602 Rctl = FC_RCTL_ELS_REQ; 3603 Type = FC_TYPE_ELS; 3604 } else { 3605 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); 3606 Rctl = w5p->hcsw.Rctl; 3607 Type = w5p->hcsw.Type; 3608 3609 /* Firmware Workaround */ 3610 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 3611 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 3612 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 3613 Rctl = FC_RCTL_ELS_REQ; 3614 Type = FC_TYPE_ELS; 3615 w5p->hcsw.Rctl = Rctl; 3616 w5p->hcsw.Type = Type; 3617 } 3618 } 3619 3620 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3621 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX || 3622 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 3623 if (irsp->unsli3.rcvsli3.vpi == 0xffff) 3624 saveq->vport = phba->pport; 3625 else 3626 saveq->vport = lpfc_find_vport_by_vpid(phba, 3627 irsp->unsli3.rcvsli3.vpi); 3628 } 3629 3630 /* Prepare WQE with Unsol frame */ 3631 lpfc_sli_prep_unsol_wqe(phba, saveq); 3632 3633 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) 3634 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3635 "0313 Ring %d handler: unexpected Rctl x%x " 3636 "Type x%x received\n", 3637 pring->ringno, Rctl, Type); 3638 3639 return 1; 3640 } 3641 3642 /** 3643 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb 3644 * @phba: Pointer to HBA context object. 3645 * @pring: Pointer to driver SLI ring object. 3646 * @prspiocb: Pointer to response iocb object. 3647 * 3648 * This function looks up the iocb_lookup table to get the command iocb 3649 * corresponding to the given response iocb using the iotag of the 3650 * response iocb. The driver calls this function with the hbalock held 3651 * for SLI3 ports or the ring lock held for SLI4 ports. 3652 * This function returns the command iocb object if it finds the command 3653 * iocb else returns NULL. 
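 *
 * A minimal sketch of the lookup step itself (illustrative only; it
 * mirrors the function body below, with get_wqe_reqtag() used on SLI4
 * ports and iocb.ulpIoTag on SLI3 ports):
 *
 *	iotag = get_wqe_reqtag(prspiocb);
 *	cmd_iocb = phba->sli.iocbq_lookup[iotag];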
3654 **/ 3655 static struct lpfc_iocbq * 3656 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, 3657 struct lpfc_sli_ring *pring, 3658 struct lpfc_iocbq *prspiocb) 3659 { 3660 struct lpfc_iocbq *cmd_iocb = NULL; 3661 u16 iotag; 3662 3663 if (phba->sli_rev == LPFC_SLI_REV4) 3664 iotag = get_wqe_reqtag(prspiocb); 3665 else 3666 iotag = prspiocb->iocb.ulpIoTag; 3667 3668 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 3669 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 3670 if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) { 3671 /* remove from txcmpl queue list */ 3672 list_del_init(&cmd_iocb->list); 3673 cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; 3674 pring->txcmplq_cnt--; 3675 return cmd_iocb; 3676 } 3677 } 3678 3679 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3680 "0317 iotag x%x is out of " 3681 "range: max iotag x%x\n", 3682 iotag, phba->sli.last_iotag); 3683 return NULL; 3684 } 3685 3686 /** 3687 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag 3688 * @phba: Pointer to HBA context object. 3689 * @pring: Pointer to driver SLI ring object. 3690 * @iotag: IOCB tag. 3691 * 3692 * This function looks up the iocb_lookup table to get the command iocb 3693 * corresponding to the given iotag. The driver calls this function with 3694 * the ring lock held because this function is an SLI4 port only helper. 3695 * This function returns the command iocb object if it finds the command 3696 * iocb else returns NULL. 3697 **/ 3698 static struct lpfc_iocbq * 3699 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, 3700 struct lpfc_sli_ring *pring, uint16_t iotag) 3701 { 3702 struct lpfc_iocbq *cmd_iocb = NULL; 3703 3704 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 3705 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 3706 if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) { 3707 /* remove from txcmpl queue list */ 3708 list_del_init(&cmd_iocb->list); 3709 cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; 3710 pring->txcmplq_cnt--; 3711 return cmd_iocb; 3712 } 3713 } 3714 3715 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3716 "0372 iotag x%x lookup error: max iotag (x%x) " 3717 "cmd_flag x%x\n", 3718 iotag, phba->sli.last_iotag, 3719 cmd_iocb ? cmd_iocb->cmd_flag : 0xffff); 3720 return NULL; 3721 } 3722 3723 /** 3724 * lpfc_sli_process_sol_iocb - process solicited iocb completion 3725 * @phba: Pointer to HBA context object. 3726 * @pring: Pointer to driver SLI ring object. 3727 * @saveq: Pointer to the response iocb to be processed. 3728 * 3729 * This function is called by the ring event handler for non-fcp 3730 * rings when there is a new response iocb in the response ring. 3731 * The caller is not required to hold any locks. This function 3732 * gets the command iocb associated with the response iocb and 3733 * calls the completion handler for the command iocb. If there 3734 * is no completion handler, the function will free the resources 3735 * associated with command iocb. If the response iocb is for 3736 * an already aborted command iocb, the status of the completion 3737 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. 3738 * This function always returns 1. 
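 *
 * Illustrative sketch of that status rewrite (mirrors the SLI3 path in
 * the body below):
 *
 *	saveq->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
 *	saveq->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;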
3739 **/ 3740 static int 3741 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3742 struct lpfc_iocbq *saveq) 3743 { 3744 struct lpfc_iocbq *cmdiocbp; 3745 unsigned long iflag; 3746 u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag; 3747 3748 if (phba->sli_rev == LPFC_SLI_REV4) 3749 spin_lock_irqsave(&pring->ring_lock, iflag); 3750 else 3751 spin_lock_irqsave(&phba->hbalock, iflag); 3752 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 3753 if (phba->sli_rev == LPFC_SLI_REV4) 3754 spin_unlock_irqrestore(&pring->ring_lock, iflag); 3755 else 3756 spin_unlock_irqrestore(&phba->hbalock, iflag); 3757 3758 ulp_command = get_job_cmnd(phba, saveq); 3759 ulp_status = get_job_ulpstatus(phba, saveq); 3760 ulp_word4 = get_job_word4(phba, saveq); 3761 ulp_context = get_job_ulpcontext(phba, saveq); 3762 if (phba->sli_rev == LPFC_SLI_REV4) 3763 iotag = get_wqe_reqtag(saveq); 3764 else 3765 iotag = saveq->iocb.ulpIoTag; 3766 3767 if (cmdiocbp) { 3768 ulp_command = get_job_cmnd(phba, cmdiocbp); 3769 if (cmdiocbp->cmd_cmpl) { 3770 /* 3771 * If an ELS command failed send an event to mgmt 3772 * application. 3773 */ 3774 if (ulp_status && 3775 (pring->ringno == LPFC_ELS_RING) && 3776 (ulp_command == CMD_ELS_REQUEST64_CR)) 3777 lpfc_send_els_failure_event(phba, 3778 cmdiocbp, saveq); 3779 3780 /* 3781 * Post all ELS completions to the worker thread. 3782 * All other are passed to the completion callback. 3783 */ 3784 if (pring->ringno == LPFC_ELS_RING) { 3785 if ((phba->sli_rev < LPFC_SLI_REV4) && 3786 (cmdiocbp->cmd_flag & 3787 LPFC_DRIVER_ABORTED)) { 3788 spin_lock_irqsave(&phba->hbalock, 3789 iflag); 3790 cmdiocbp->cmd_flag &= 3791 ~LPFC_DRIVER_ABORTED; 3792 spin_unlock_irqrestore(&phba->hbalock, 3793 iflag); 3794 saveq->iocb.ulpStatus = 3795 IOSTAT_LOCAL_REJECT; 3796 saveq->iocb.un.ulpWord[4] = 3797 IOERR_SLI_ABORTED; 3798 3799 /* Firmware could still be in progress 3800 * of DMAing payload, so don't free data 3801 * buffer till after a hbeat. 3802 */ 3803 spin_lock_irqsave(&phba->hbalock, 3804 iflag); 3805 saveq->cmd_flag |= LPFC_DELAY_MEM_FREE; 3806 spin_unlock_irqrestore(&phba->hbalock, 3807 iflag); 3808 } 3809 if (phba->sli_rev == LPFC_SLI_REV4) { 3810 if (saveq->cmd_flag & 3811 LPFC_EXCHANGE_BUSY) { 3812 /* Set cmdiocb flag for the 3813 * exchange busy so sgl (xri) 3814 * will not be released until 3815 * the abort xri is received 3816 * from hba. 3817 */ 3818 spin_lock_irqsave( 3819 &phba->hbalock, iflag); 3820 cmdiocbp->cmd_flag |= 3821 LPFC_EXCHANGE_BUSY; 3822 spin_unlock_irqrestore( 3823 &phba->hbalock, iflag); 3824 } 3825 if (cmdiocbp->cmd_flag & 3826 LPFC_DRIVER_ABORTED) { 3827 /* 3828 * Clear LPFC_DRIVER_ABORTED 3829 * bit in case it was driver 3830 * initiated abort. 3831 */ 3832 spin_lock_irqsave( 3833 &phba->hbalock, iflag); 3834 cmdiocbp->cmd_flag &= 3835 ~LPFC_DRIVER_ABORTED; 3836 spin_unlock_irqrestore( 3837 &phba->hbalock, iflag); 3838 set_job_ulpstatus(cmdiocbp, 3839 IOSTAT_LOCAL_REJECT); 3840 set_job_ulpword4(cmdiocbp, 3841 IOERR_ABORT_REQUESTED); 3842 /* 3843 * For SLI4, irspiocb contains 3844 * NO_XRI in sli_xritag, it 3845 * shall not affect releasing 3846 * sgl (xri) process. 
3847 */
3848 set_job_ulpstatus(saveq,
3849 IOSTAT_LOCAL_REJECT);
3850 set_job_ulpword4(saveq,
3851 IOERR_SLI_ABORTED);
3852 spin_lock_irqsave(
3853 &phba->hbalock, iflag);
3854 saveq->cmd_flag |=
3855 LPFC_DELAY_MEM_FREE;
3856 spin_unlock_irqrestore(
3857 &phba->hbalock, iflag);
3858 }
3859 }
3860 }
3861 cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq);
3862 } else
3863 lpfc_sli_release_iocbq(phba, cmdiocbp);
3864 } else {
3865 /*
3866 * Unknown initiating command based on the response iotag.
3867 * This could be the case on the ELS ring because of
3868 * lpfc_els_abort().
3869 */
3870 if (pring->ringno != LPFC_ELS_RING) {
3871 /*
3872 * Ring <ringno> handler: unexpected completion IoTag
3873 * <IoTag>
3874 */
3875 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3876 "0322 Ring %d handler: "
3877 "unexpected completion IoTag x%x "
3878 "Data: x%x x%x x%x x%x\n",
3879 pring->ringno, iotag, ulp_status,
3880 ulp_word4, ulp_command, ulp_context);
3881 }
3882 }
3883
3884 return 1;
3885 }
3886
3887 /**
3888 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3889 * @phba: Pointer to HBA context object.
3890 * @pring: Pointer to driver SLI ring object.
3891 *
3892 * This function is called from the iocb ring event handlers when the
3893 * put pointer is ahead of the get pointer for a ring. This function signals
3894 * an error attention condition to the worker thread, and the worker
3895 * thread will transition the HBA to the offline state.
3896 **/
3897 static void
3898 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3899 {
3900 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3901 /*
3902 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3903 * rsp ring <portRspMax>
3904 */
3905 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3906 "0312 Ring %d handler: portRspPut %d "
3907 "is bigger than rsp ring %d\n",
3908 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3909 pring->sli.sli3.numRiocb);
3910
3911 phba->link_state = LPFC_HBA_ERROR;
3912
3913 /*
3914 * All error attention handlers are posted to the
3915 * worker thread
3916 */
3917 phba->work_ha |= HA_ERATT;
3918 phba->work_hs = HS_FFER3;
3919
3920 lpfc_worker_wake_up(phba);
3921
3922 return;
3923 }
3924
3925 /**
3926 * lpfc_poll_eratt - Error attention polling timer timeout handler
3927 * @t: Timer context used to fetch the pointer to the HBA context object.
3928 *
3929 * This function is invoked by the Error Attention polling timer when the
3930 * timer times out. It will check the SLI Error Attention register for
3931 * possible attention events. If so, it will post an Error Attention event
3932 * and wake up the worker thread to process it. Otherwise, it will set up the
3933 * Error Attention polling timer for the next poll.
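 *
 * A hedged wiring sketch (the timer binding is assumed to happen on the
 * driver's init path, not shown in this section):
 *
 *	timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
 *	mod_timer(&phba->eratt_poll, jiffies +
 *		  msecs_to_jiffies(1000 * phba->eratt_poll_interval));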
3934 **/ 3935 void lpfc_poll_eratt(struct timer_list *t) 3936 { 3937 struct lpfc_hba *phba; 3938 uint32_t eratt = 0; 3939 uint64_t sli_intr, cnt; 3940 3941 phba = from_timer(phba, t, eratt_poll); 3942 if (!test_bit(HBA_SETUP, &phba->hba_flag)) 3943 return; 3944 3945 if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) 3946 return; 3947 3948 /* Here we will also keep track of interrupts per sec of the hba */ 3949 sli_intr = phba->sli.slistat.sli_intr; 3950 3951 if (phba->sli.slistat.sli_prev_intr > sli_intr) 3952 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) + 3953 sli_intr); 3954 else 3955 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr); 3956 3957 /* 64-bit integer division not supported on 32-bit x86 - use do_div */ 3958 do_div(cnt, phba->eratt_poll_interval); 3959 phba->sli.slistat.sli_ips = cnt; 3960 3961 phba->sli.slistat.sli_prev_intr = sli_intr; 3962 3963 /* Check chip HA register for error event */ 3964 eratt = lpfc_sli_check_eratt(phba); 3965 3966 if (eratt) 3967 /* Tell the worker thread there is work to do */ 3968 lpfc_worker_wake_up(phba); 3969 else 3970 /* Restart the timer for next eratt poll */ 3971 mod_timer(&phba->eratt_poll, 3972 jiffies + 3973 msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 3974 return; 3975 } 3976 3977 3978 /** 3979 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring 3980 * @phba: Pointer to HBA context object. 3981 * @pring: Pointer to driver SLI ring object. 3982 * @mask: Host attention register mask for this ring. 3983 * 3984 * This function is called from the interrupt context when there is a ring 3985 * event for the fcp ring. The caller does not hold any lock. 3986 * The function processes each response iocb in the response ring until it 3987 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with 3988 * LE bit set. The function will call the completion handler of the command iocb 3989 * if the response iocb indicates a completion for a command iocb or it is 3990 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 3991 * function if this is an unsolicited iocb. 3992 * This routine presumes LPFC_FCP_RING handling and doesn't bother 3993 * to check it explicitly. 3994 */ 3995 int 3996 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 3997 struct lpfc_sli_ring *pring, uint32_t mask) 3998 { 3999 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 4000 IOCB_t *irsp = NULL; 4001 IOCB_t *entry = NULL; 4002 struct lpfc_iocbq *cmdiocbq = NULL; 4003 struct lpfc_iocbq rspiocbq; 4004 uint32_t status; 4005 uint32_t portRspPut, portRspMax; 4006 int rc = 1; 4007 lpfc_iocb_type type; 4008 unsigned long iflag; 4009 uint32_t rsp_cmpl = 0; 4010 4011 spin_lock_irqsave(&phba->hbalock, iflag); 4012 pring->stats.iocb_event++; 4013 4014 /* 4015 * The next available response entry should never exceed the maximum 4016 * entries. If it does, treat it as an adapter hardware error. 4017 */ 4018 portRspMax = pring->sli.sli3.numRiocb; 4019 portRspPut = le32_to_cpu(pgp->rspPutInx); 4020 if (unlikely(portRspPut >= portRspMax)) { 4021 lpfc_sli_rsp_pointers_error(phba, pring); 4022 spin_unlock_irqrestore(&phba->hbalock, iflag); 4023 return 1; 4024 } 4025 if (phba->fcp_ring_in_use) { 4026 spin_unlock_irqrestore(&phba->hbalock, iflag); 4027 return 1; 4028 } else 4029 phba->fcp_ring_in_use = 1; 4030 4031 rmb(); 4032 while (pring->sli.sli3.rspidx != portRspPut) { 4033 /* 4034 * Fetch an entry off the ring and copy it into a local data 4035 * structure. 
The copy involves a byte-swap since the 4036 * network byte order and pci byte orders are different. 4037 */ 4038 entry = lpfc_resp_iocb(phba, pring); 4039 phba->last_completion_time = jiffies; 4040 4041 if (++pring->sli.sli3.rspidx >= portRspMax) 4042 pring->sli.sli3.rspidx = 0; 4043 4044 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 4045 (uint32_t *) &rspiocbq.iocb, 4046 phba->iocb_rsp_size); 4047 INIT_LIST_HEAD(&(rspiocbq.list)); 4048 irsp = &rspiocbq.iocb; 4049 4050 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 4051 pring->stats.iocb_rsp++; 4052 rsp_cmpl++; 4053 4054 if (unlikely(irsp->ulpStatus)) { 4055 /* 4056 * If resource errors reported from HBA, reduce 4057 * queuedepths of the SCSI device. 4058 */ 4059 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 4060 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 4061 IOERR_NO_RESOURCES)) { 4062 spin_unlock_irqrestore(&phba->hbalock, iflag); 4063 phba->lpfc_rampdown_queue_depth(phba); 4064 spin_lock_irqsave(&phba->hbalock, iflag); 4065 } 4066 4067 /* Rsp ring <ringno> error: IOCB */ 4068 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 4069 "0336 Rsp Ring %d error: IOCB Data: " 4070 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 4071 pring->ringno, 4072 irsp->un.ulpWord[0], 4073 irsp->un.ulpWord[1], 4074 irsp->un.ulpWord[2], 4075 irsp->un.ulpWord[3], 4076 irsp->un.ulpWord[4], 4077 irsp->un.ulpWord[5], 4078 *(uint32_t *)&irsp->un1, 4079 *((uint32_t *)&irsp->un1 + 1)); 4080 } 4081 4082 switch (type) { 4083 case LPFC_ABORT_IOCB: 4084 case LPFC_SOL_IOCB: 4085 /* 4086 * Idle exchange closed via ABTS from port. No iocb 4087 * resources need to be recovered. 4088 */ 4089 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 4090 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4091 "0333 IOCB cmd 0x%x" 4092 " processed. Skipping" 4093 " completion\n", 4094 irsp->ulpCommand); 4095 break; 4096 } 4097 4098 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 4099 &rspiocbq); 4100 if (unlikely(!cmdiocbq)) 4101 break; 4102 if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) 4103 cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED; 4104 if (cmdiocbq->cmd_cmpl) { 4105 spin_unlock_irqrestore(&phba->hbalock, iflag); 4106 cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq); 4107 spin_lock_irqsave(&phba->hbalock, iflag); 4108 } 4109 break; 4110 case LPFC_UNSOL_IOCB: 4111 spin_unlock_irqrestore(&phba->hbalock, iflag); 4112 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 4113 spin_lock_irqsave(&phba->hbalock, iflag); 4114 break; 4115 default: 4116 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 4117 char adaptermsg[LPFC_MAX_ADPTMSG]; 4118 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 4119 memcpy(&adaptermsg[0], (uint8_t *) irsp, 4120 MAX_MSG_DATA); 4121 dev_warn(&((phba->pcidev)->dev), 4122 "lpfc%d: %s\n", 4123 phba->brd_no, adaptermsg); 4124 } else { 4125 /* Unknown IOCB command */ 4126 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4127 "0334 Unknown IOCB command " 4128 "Data: x%x, x%x x%x x%x x%x\n", 4129 type, irsp->ulpCommand, 4130 irsp->ulpStatus, 4131 irsp->ulpIoTag, 4132 irsp->ulpContext); 4133 } 4134 break; 4135 } 4136 4137 /* 4138 * The response IOCB has been processed. Update the ring 4139 * pointer in SLIM. If the port response put pointer has not 4140 * been updated, sync the pgp->rspPutInx and fetch the new port 4141 * response put pointer. 
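 *
 * Illustrative form of the wrap-around index advance used by this
 * loop (equivalent to the increment-and-compare above, shown only
 * for clarity):
 *	next = (rspidx + 1 >= portRspMax) ? 0 : rspidx + 1;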
4142 */
4143 writel(pring->sli.sli3.rspidx,
4144 &phba->host_gp[pring->ringno].rspGetInx);
4145
4146 if (pring->sli.sli3.rspidx == portRspPut)
4147 portRspPut = le32_to_cpu(pgp->rspPutInx);
4148 }
4149
4150 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
4151 pring->stats.iocb_rsp_full++;
4152 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4153 writel(status, phba->CAregaddr);
4154 readl(phba->CAregaddr);
4155 }
4156 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4157 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4158 pring->stats.iocb_cmd_empty++;
4159
4160 /* Force update of the local copy of cmdGetInx */
4161 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4162 lpfc_sli_resume_iocb(phba, pring);
4163
4164 if ((pring->lpfc_sli_cmd_available))
4165 (pring->lpfc_sli_cmd_available) (phba, pring);
4166
4167 }
4168
4169 phba->fcp_ring_in_use = 0;
4170 spin_unlock_irqrestore(&phba->hbalock, iflag);
4171 return rc;
4172 }
4173
4174 /**
4175 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
4176 * @phba: Pointer to HBA context object.
4177 * @pring: Pointer to driver SLI ring object.
4178 * @rspiocbp: Pointer to driver response IOCB object.
4179 *
4180 * This function is called from the worker thread when there is a slow-path
4181 * response IOCB to process. This function chains all the response iocbs until
4182 * seeing the iocb with the LE bit set. The function will call
4183 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
4184 * completion of a command iocb. The function will call the
4185 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
4186 * The function frees the resources or calls the completion handler if this
4187 * iocb is an abort completion. The function returns NULL when the response
4188 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
4189 * this function shall chain the iocb on to the iocb_continueq and return the
4190 * response iocb passed in.
4191 **/
4192 static struct lpfc_iocbq *
4193 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4194 struct lpfc_iocbq *rspiocbp)
4195 {
4196 struct lpfc_iocbq *saveq;
4197 struct lpfc_iocbq *cmdiocb;
4198 struct lpfc_iocbq *next_iocb;
4199 IOCB_t *irsp;
4200 uint32_t free_saveq;
4201 u8 cmd_type;
4202 lpfc_iocb_type type;
4203 unsigned long iflag;
4204 u32 ulp_status = get_job_ulpstatus(phba, rspiocbp);
4205 u32 ulp_word4 = get_job_word4(phba, rspiocbp);
4206 u32 ulp_command = get_job_cmnd(phba, rspiocbp);
4207 int rc;
4208
4209 spin_lock_irqsave(&phba->hbalock, iflag);
4210 /* First add the response iocb to the iocb_continueq list */
4211 list_add_tail(&rspiocbp->list, &pring->iocb_continueq);
4212 pring->iocb_continueq_cnt++;
4213
4214 /*
4215 * By default, the driver expects to free all resources
4216 * associated with this iocb completion.
4217 */
4218 free_saveq = 1;
4219 saveq = list_get_first(&pring->iocb_continueq,
4220 struct lpfc_iocbq, list);
4221 list_del_init(&pring->iocb_continueq);
4222 pring->iocb_continueq_cnt = 0;
4223
4224 pring->stats.iocb_rsp++;
4225
4226 /*
4227 * If resource errors are reported from the HBA, reduce
4228 * the queue depths of the SCSI devices.
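 *
 * Note the ramp-down callback below runs with the hbalock dropped,
 * e.g. (mirrors the body):
 *	spin_unlock_irqrestore(&phba->hbalock, iflag);
 *	phba->lpfc_rampdown_queue_depth(phba);
 *	spin_lock_irqsave(&phba->hbalock, iflag);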
4229 */ 4230 if (ulp_status == IOSTAT_LOCAL_REJECT && 4231 ((ulp_word4 & IOERR_PARAM_MASK) == 4232 IOERR_NO_RESOURCES)) { 4233 spin_unlock_irqrestore(&phba->hbalock, iflag); 4234 phba->lpfc_rampdown_queue_depth(phba); 4235 spin_lock_irqsave(&phba->hbalock, iflag); 4236 } 4237 4238 if (ulp_status) { 4239 /* Rsp ring <ringno> error: IOCB */ 4240 if (phba->sli_rev < LPFC_SLI_REV4) { 4241 irsp = &rspiocbp->iocb; 4242 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 4243 "0328 Rsp Ring %d error: ulp_status x%x " 4244 "IOCB Data: " 4245 "x%08x x%08x x%08x x%08x " 4246 "x%08x x%08x x%08x x%08x " 4247 "x%08x x%08x x%08x x%08x " 4248 "x%08x x%08x x%08x x%08x\n", 4249 pring->ringno, ulp_status, 4250 get_job_ulpword(rspiocbp, 0), 4251 get_job_ulpword(rspiocbp, 1), 4252 get_job_ulpword(rspiocbp, 2), 4253 get_job_ulpword(rspiocbp, 3), 4254 get_job_ulpword(rspiocbp, 4), 4255 get_job_ulpword(rspiocbp, 5), 4256 *(((uint32_t *)irsp) + 6), 4257 *(((uint32_t *)irsp) + 7), 4258 *(((uint32_t *)irsp) + 8), 4259 *(((uint32_t *)irsp) + 9), 4260 *(((uint32_t *)irsp) + 10), 4261 *(((uint32_t *)irsp) + 11), 4262 *(((uint32_t *)irsp) + 12), 4263 *(((uint32_t *)irsp) + 13), 4264 *(((uint32_t *)irsp) + 14), 4265 *(((uint32_t *)irsp) + 15)); 4266 } else { 4267 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 4268 "0321 Rsp Ring %d error: " 4269 "IOCB Data: " 4270 "x%x x%x x%x x%x\n", 4271 pring->ringno, 4272 rspiocbp->wcqe_cmpl.word0, 4273 rspiocbp->wcqe_cmpl.total_data_placed, 4274 rspiocbp->wcqe_cmpl.parameter, 4275 rspiocbp->wcqe_cmpl.word3); 4276 } 4277 } 4278 4279 4280 /* 4281 * Fetch the iocb command type and call the correct completion 4282 * routine. Solicited and Unsolicited IOCBs on the ELS ring 4283 * get freed back to the lpfc_iocb_list by the discovery 4284 * kernel thread. 
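 *
 * Illustrative dispatch sketch (mirrors the switch below):
 *	type = lpfc_sli_iocb_cmd_type(ulp_command & CMD_IOCB_MASK);
 *	LPFC_SOL_IOCB   -> lpfc_sli_process_sol_iocb()
 *	LPFC_UNSOL_IOCB -> lpfc_sli_process_unsol_iocb()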
4285 */ 4286 cmd_type = ulp_command & CMD_IOCB_MASK; 4287 type = lpfc_sli_iocb_cmd_type(cmd_type); 4288 switch (type) { 4289 case LPFC_SOL_IOCB: 4290 spin_unlock_irqrestore(&phba->hbalock, iflag); 4291 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); 4292 spin_lock_irqsave(&phba->hbalock, iflag); 4293 break; 4294 case LPFC_UNSOL_IOCB: 4295 spin_unlock_irqrestore(&phba->hbalock, iflag); 4296 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); 4297 spin_lock_irqsave(&phba->hbalock, iflag); 4298 if (!rc) 4299 free_saveq = 0; 4300 break; 4301 case LPFC_ABORT_IOCB: 4302 cmdiocb = NULL; 4303 if (ulp_command != CMD_XRI_ABORTED_CX) 4304 cmdiocb = lpfc_sli_iocbq_lookup(phba, pring, 4305 saveq); 4306 if (cmdiocb) { 4307 /* Call the specified completion routine */ 4308 if (cmdiocb->cmd_cmpl) { 4309 spin_unlock_irqrestore(&phba->hbalock, iflag); 4310 cmdiocb->cmd_cmpl(phba, cmdiocb, saveq); 4311 spin_lock_irqsave(&phba->hbalock, iflag); 4312 } else { 4313 __lpfc_sli_release_iocbq(phba, cmdiocb); 4314 } 4315 } 4316 break; 4317 case LPFC_UNKNOWN_IOCB: 4318 if (ulp_command == CMD_ADAPTER_MSG) { 4319 char adaptermsg[LPFC_MAX_ADPTMSG]; 4320 4321 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 4322 memcpy(&adaptermsg[0], (uint8_t *)&rspiocbp->wqe, 4323 MAX_MSG_DATA); 4324 dev_warn(&((phba->pcidev)->dev), 4325 "lpfc%d: %s\n", 4326 phba->brd_no, adaptermsg); 4327 } else { 4328 /* Unknown command */ 4329 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4330 "0335 Unknown IOCB " 4331 "command Data: x%x " 4332 "x%x x%x x%x\n", 4333 ulp_command, 4334 ulp_status, 4335 get_wqe_reqtag(rspiocbp), 4336 get_job_ulpcontext(phba, rspiocbp)); 4337 } 4338 break; 4339 } 4340 4341 if (free_saveq) { 4342 list_for_each_entry_safe(rspiocbp, next_iocb, 4343 &saveq->list, list) { 4344 list_del_init(&rspiocbp->list); 4345 __lpfc_sli_release_iocbq(phba, rspiocbp); 4346 } 4347 __lpfc_sli_release_iocbq(phba, saveq); 4348 } 4349 rspiocbp = NULL; 4350 spin_unlock_irqrestore(&phba->hbalock, iflag); 4351 return rspiocbp; 4352 } 4353 4354 /** 4355 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs 4356 * @phba: Pointer to HBA context object. 4357 * @pring: Pointer to driver SLI ring object. 4358 * @mask: Host attention register mask for this ring. 4359 * 4360 * This routine wraps the actual slow_ring event process routine from the 4361 * API jump table function pointer from the lpfc_hba struct. 4362 **/ 4363 void 4364 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 4365 struct lpfc_sli_ring *pring, uint32_t mask) 4366 { 4367 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); 4368 } 4369 4370 /** 4371 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings 4372 * @phba: Pointer to HBA context object. 4373 * @pring: Pointer to driver SLI ring object. 4374 * @mask: Host attention register mask for this ring. 4375 * 4376 * This function is called from the worker thread when there is a ring event 4377 * for non-fcp rings. The caller does not hold any lock. The function will 4378 * remove each response iocb in the response ring and calls the handle 4379 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 
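 *
 * Hedged dispatch note (mirrors the wrapper above): callers reach this
 * routine through the SLI API jump table, e.g.
 *	phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);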
4380 **/
4381 static void
4382 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
4383 struct lpfc_sli_ring *pring, uint32_t mask)
4384 {
4385 struct lpfc_pgp *pgp;
4386 IOCB_t *entry;
4387 IOCB_t *irsp = NULL;
4388 struct lpfc_iocbq *rspiocbp = NULL;
4389 uint32_t portRspPut, portRspMax;
4390 unsigned long iflag;
4391 uint32_t status;
4392
4393 pgp = &phba->port_gp[pring->ringno];
4394 spin_lock_irqsave(&phba->hbalock, iflag);
4395 pring->stats.iocb_event++;
4396
4397 /*
4398 * The next available response entry should never exceed the maximum
4399 * entries. If it does, treat it as an adapter hardware error.
4400 */
4401 portRspMax = pring->sli.sli3.numRiocb;
4402 portRspPut = le32_to_cpu(pgp->rspPutInx);
4403 if (portRspPut >= portRspMax) {
4404 /*
4405 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
4406 * rsp ring <portRspMax>
4407 */
4408 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4409 "0303 Ring %d handler: portRspPut %d "
4410 "is bigger than rsp ring %d\n",
4411 pring->ringno, portRspPut, portRspMax);
4412
4413 phba->link_state = LPFC_HBA_ERROR;
4414 spin_unlock_irqrestore(&phba->hbalock, iflag);
4415
4416 phba->work_hs = HS_FFER3;
4417 lpfc_handle_eratt(phba);
4418
4419 return;
4420 }
4421
4422 rmb();
4423 while (pring->sli.sli3.rspidx != portRspPut) {
4424 /*
4425 * Build a completion list and call the appropriate handler.
4426 * The process is to get the next available response iocb, get
4427 * a free iocb from the list, copy the response data into the
4428 * free iocb, insert to the continuation list, and update the
4429 * next response index to slim. This process makes response
4430 * iocbs in the ring available to DMA as fast as possible but
4431 * pays a penalty for a copy operation. Since the iocb is
4432 * only 32 bytes, this penalty is considered small relative to
4433 * the PCI reads for register values and a slim write. When
4434 * the ulpLe field is set, the entire command has been
4435 * received.
4436 */
4437 entry = lpfc_resp_iocb(phba, pring);
4438
4439 phba->last_completion_time = jiffies;
4440 rspiocbp = __lpfc_sli_get_iocbq(phba);
4441 if (rspiocbp == NULL) {
4442 printk(KERN_ERR "%s: out of buffers! Failing "
4443 "completion.\n", __func__);
4444 break;
4445 }
4446
4447 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
4448 phba->iocb_rsp_size);
4449 irsp = &rspiocbp->iocb;
4450
4451 if (++pring->sli.sli3.rspidx >= portRspMax)
4452 pring->sli.sli3.rspidx = 0;
4453
4454 if (pring->ringno == LPFC_ELS_RING) {
4455 lpfc_debugfs_slow_ring_trc(phba,
4456 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
4457 *(((uint32_t *) irsp) + 4),
4458 *(((uint32_t *) irsp) + 6),
4459 *(((uint32_t *) irsp) + 7));
4460 }
4461
4462 writel(pring->sli.sli3.rspidx,
4463 &phba->host_gp[pring->ringno].rspGetInx);
4464
4465 spin_unlock_irqrestore(&phba->hbalock, iflag);
4466 /* Handle the response IOCB */
4467 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
4468 spin_lock_irqsave(&phba->hbalock, iflag);
4469
4470 /*
4471 * If the port response put pointer has not been updated, sync
4472 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
4473 * response put pointer.
4474 */
4475 if (pring->sli.sli3.rspidx == portRspPut) {
4476 portRspPut = le32_to_cpu(pgp->rspPutInx);
4477 }
4478 } /* while (pring->sli.sli3.rspidx != portRspPut) */
4479
4480 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
4481 /* At least one response entry has been freed */
4482 pring->stats.iocb_rsp_full++;
4483 /* SET RxRE_RSP in Chip Att register */
4484 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4485 writel(status, phba->CAregaddr);
4486 readl(phba->CAregaddr); /* flush */
4487 }
4488 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4489 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4490 pring->stats.iocb_cmd_empty++;
4491
4492 /* Force update of the local copy of cmdGetInx */
4493 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4494 lpfc_sli_resume_iocb(phba, pring);
4495
4496 if ((pring->lpfc_sli_cmd_available))
4497 (pring->lpfc_sli_cmd_available) (phba, pring);
4498
4499 }
4500
4501 spin_unlock_irqrestore(&phba->hbalock, iflag);
4502 return;
4503 }
4504
4505 /**
4506 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path ELS events
4507 * @phba: Pointer to HBA context object.
4508 * @pring: Pointer to driver SLI ring object.
4509 * @mask: Host attention register mask for this ring.
4510 *
4511 * This function is called from the worker thread when there is a pending
4512 * ELS response iocb on the driver internal slow-path response iocb worker
4513 * queue. The caller does not hold any lock. The function removes each
4514 * response iocb from the response worker queue and calls the handle
4515 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4516 **/
4517 static void
4518 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
4519 struct lpfc_sli_ring *pring, uint32_t mask)
4520 {
4521 struct lpfc_iocbq *irspiocbq;
4522 struct hbq_dmabuf *dmabuf;
4523 struct lpfc_cq_event *cq_event;
4524 unsigned long iflag;
4525 int count = 0;
4526
4527 clear_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
4528 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4529 /* Get the response iocb from the head of work queue */
4530 spin_lock_irqsave(&phba->hbalock, iflag);
4531 list_remove_head(&phba->sli4_hba.sp_queue_event,
4532 cq_event, struct lpfc_cq_event, list);
4533 spin_unlock_irqrestore(&phba->hbalock, iflag);
4534
4535 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
4536 case CQE_CODE_COMPL_WQE:
4537 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
4538 cq_event);
4539 /* Translate ELS WCQE to response IOCBQ */
4540 irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba,
4541 irspiocbq);
4542 if (irspiocbq)
4543 lpfc_sli_sp_handle_rspiocb(phba, pring,
4544 irspiocbq);
4545 count++;
4546 break;
4547 case CQE_CODE_RECEIVE:
4548 case CQE_CODE_RECEIVE_V1:
4549 dmabuf = container_of(cq_event, struct hbq_dmabuf,
4550 cq_event);
4551 lpfc_sli4_handle_received_buffer(phba, dmabuf);
4552 count++;
4553 break;
4554 default:
4555 break;
4556 }
4557
4558 /* Limit the number of events to 64 to avoid soft lockups */
4559 if (count == 64)
4560 break;
4561 }
4562 }
4563
4564 /**
4565 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
4566 * @phba: Pointer to HBA context object.
4567 * @pring: Pointer to driver SLI ring object.
4568 *
4569 * This function aborts all iocbs in the given ring and frees all the iocb
4570 * objects in txq. This function issues an abort iocb for all the iocb commands
4571 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4572 * the return of this function. The caller is not required to hold any locks.
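 *
 * Typical per-ring usage sketch (mirrors lpfc_sli_abort_fcp_rings()
 * just below):
 *	pring = phba->sli4_hba.hdwq[i].io_wq->pring;
 *	lpfc_sli_abort_iocb_ring(phba, pring);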
4573 **/
4574 void
4575 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4576 {
4577 LIST_HEAD(tx_completions);
4578 LIST_HEAD(txcmplq_completions);
4579 struct lpfc_iocbq *iocb, *next_iocb;
4580 int offline;
4581
4582 if (pring->ringno == LPFC_ELS_RING) {
4583 lpfc_fabric_abort_hba(phba);
4584 }
4585 offline = pci_channel_offline(phba->pcidev);
4586
4587 /* Error everything on txq and txcmplq.
4588 * First do the txq.
4589 */
4590 if (phba->sli_rev >= LPFC_SLI_REV4) {
4591 spin_lock_irq(&pring->ring_lock);
4592 list_splice_init(&pring->txq, &tx_completions);
4593 pring->txq_cnt = 0;
4594
4595 if (offline) {
4596 list_splice_init(&pring->txcmplq,
4597 &txcmplq_completions);
4598 } else {
4599 /* Next issue ABTS for everything on the txcmplq */
4600 list_for_each_entry_safe(iocb, next_iocb,
4601 &pring->txcmplq, list)
4602 lpfc_sli_issue_abort_iotag(phba, pring,
4603 iocb, NULL);
4604 }
4605 spin_unlock_irq(&pring->ring_lock);
4606 } else {
4607 spin_lock_irq(&phba->hbalock);
4608 list_splice_init(&pring->txq, &tx_completions);
4609 pring->txq_cnt = 0;
4610
4611 if (offline) {
4612 list_splice_init(&pring->txcmplq, &txcmplq_completions);
4613 } else {
4614 /* Next issue ABTS for everything on the txcmplq */
4615 list_for_each_entry_safe(iocb, next_iocb,
4616 &pring->txcmplq, list)
4617 lpfc_sli_issue_abort_iotag(phba, pring,
4618 iocb, NULL);
4619 }
4620 spin_unlock_irq(&phba->hbalock);
4621 }
4622
4623 if (offline) {
4624 /* Cancel all the IOCBs from the completions list */
4625 lpfc_sli_cancel_iocbs(phba, &txcmplq_completions,
4626 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
4627 } else {
4628 /* Make sure HBA is alive */
4629 lpfc_issue_hb_tmo(phba);
4630 }
4631 /* Cancel all the IOCBs from the completions list */
4632 lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT,
4633 IOERR_SLI_ABORTED);
4634 }
4635
4636 /**
4637 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
4638 * @phba: Pointer to HBA context object.
4639 *
4640 * This function aborts all iocbs in FCP rings and frees all the iocb
4641 * objects in txq. This function issues an abort iocb for all the iocb commands
4642 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4643 * the return of this function. The caller is not required to hold any locks.
4644 **/
4645 void
4646 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4647 {
4648 struct lpfc_sli *psli = &phba->sli;
4649 struct lpfc_sli_ring *pring;
4650 uint32_t i;
4651
4652 /* Look on all the FCP Rings for the iotag */
4653 if (phba->sli_rev >= LPFC_SLI_REV4) {
4654 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4655 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4656 lpfc_sli_abort_iocb_ring(phba, pring);
4657 }
4658 } else {
4659 pring = &psli->sli3_ring[LPFC_FCP_RING];
4660 lpfc_sli_abort_iocb_ring(phba, pring);
4661 }
4662 }
4663
4664 /**
4665 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
4666 * @phba: Pointer to HBA context object.
4667 *
4668 * This function flushes all iocbs in the IO ring and frees all the iocb
4669 * objects in txq and txcmplq. This function will not issue abort iocbs
4670 * for the iocb commands in txcmplq; they will just be returned with
4671 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
4672 * slot has been permanently disabled.
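 *
 * A hedged caller sketch (assumed EEH error-path context; the actual
 * call sites live in the driver's PCI error handlers):
 *
 *	if (pci_channel_offline(phba->pcidev))
 *		lpfc_sli_flush_io_rings(phba);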
4673 **/
4674 void
4675 lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4676 {
4677 LIST_HEAD(txq);
4678 LIST_HEAD(txcmplq);
4679 struct lpfc_sli *psli = &phba->sli;
4680 struct lpfc_sli_ring *pring;
4681 uint32_t i;
4682 struct lpfc_iocbq *piocb, *next_iocb;
4683
4684 /* Indicate the I/O queues are flushed */
4685 set_bit(HBA_IOQ_FLUSH, &phba->hba_flag);
4686
4687 /* Look on all the FCP Rings for the iotag */
4688 if (phba->sli_rev >= LPFC_SLI_REV4) {
4689 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4690 if (!phba->sli4_hba.hdwq ||
4691 !phba->sli4_hba.hdwq[i].io_wq) {
4692 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4693 "7777 hdwq's deleted %lx "
4694 "%lx %x %x\n",
4695 phba->pport->load_flag,
4696 phba->hba_flag,
4697 phba->link_state,
4698 phba->sli.sli_flag);
4699 return;
4700 }
4701 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4702
4703 spin_lock_irq(&pring->ring_lock);
4704 /* Retrieve everything on txq */
4705 list_splice_init(&pring->txq, &txq);
4706 list_for_each_entry_safe(piocb, next_iocb,
4707 &pring->txcmplq, list)
4708 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4709 /* Retrieve everything on the txcmplq */
4710 list_splice_init(&pring->txcmplq, &txcmplq);
4711 pring->txq_cnt = 0;
4712 pring->txcmplq_cnt = 0;
4713 spin_unlock_irq(&pring->ring_lock);
4714
4715 /* Flush the txq */
4716 lpfc_sli_cancel_iocbs(phba, &txq,
4717 IOSTAT_LOCAL_REJECT,
4718 IOERR_SLI_DOWN);
4719 /* Flush the txcmplq */
4720 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4721 IOSTAT_LOCAL_REJECT,
4722 IOERR_SLI_DOWN);
4723 if (unlikely(pci_channel_offline(phba->pcidev)))
4724 lpfc_sli4_io_xri_aborted(phba, NULL, 0);
4725 }
4726 } else {
4727 pring = &psli->sli3_ring[LPFC_FCP_RING];
4728
4729 spin_lock_irq(&phba->hbalock);
4730 /* Retrieve everything on txq */
4731 list_splice_init(&pring->txq, &txq);
4732 list_for_each_entry_safe(piocb, next_iocb,
4733 &pring->txcmplq, list)
4734 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4735 /* Retrieve everything on the txcmplq */
4736 list_splice_init(&pring->txcmplq, &txcmplq);
4737 pring->txq_cnt = 0;
4738 pring->txcmplq_cnt = 0;
4739 spin_unlock_irq(&phba->hbalock);
4740
4741 /* Flush the txq */
4742 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4743 IOERR_SLI_DOWN);
4744 /* Flush the txcmplq */
4745 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4746 IOERR_SLI_DOWN);
4747 }
4748 }
4749
4750 /**
4751 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4752 * @phba: Pointer to HBA context object.
4753 * @mask: Bit mask to be checked.
4754 *
4755 * This function reads the host status register and compares
4756 * with the provided bit mask to check if the HBA completed
4757 * the restart. This function will wait in a loop for the
4758 * HBA to complete the restart. If the HBA does not restart within
4759 * 15 iterations, the function will reset the HBA again. The
4760 * function returns 1 when the HBA fails to restart; otherwise it
4761 * returns zero.
4762 **/
4763 static int
4764 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4765 {
4766 uint32_t status;
4767 int i = 0;
4768 int retval = 0;
4769
4770 /* Read the HBA Host Status Register */
4771 if (lpfc_readl(phba->HSregaddr, &status))
4772 return 1;
4773
4774 set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
4775
4776 /*
4777 * Check the status register every 10ms for 5 retries, then every
4778 * 500ms for 5, then every 2.5 sec for 5; the board is reset at the
4779 * 15th check and then polled every 2.5 sec up to 5 more times.
4780 * Break out of the loop if errors occurred during init.
4781 */
4782 while (((status & mask) != mask) &&
4783 !(status & HS_FFERM) &&
4784 i++ < 20) {
4785
4786 if (i <= 5)
4787 msleep(10);
4788 else if (i <= 10)
4789 msleep(500);
4790 else
4791 msleep(2500);
4792
4793 if (i == 15) {
4794 /* Do post */
4795 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4796 lpfc_sli_brdrestart(phba);
4797 }
4798 /* Read the HBA Host Status Register */
4799 if (lpfc_readl(phba->HSregaddr, &status)) {
4800 retval = 1;
4801 break;
4802 }
4803 }
4804
4805 /* Check to see if any errors occurred during init */
4806 if ((status & HS_FFERM) || (i >= 20)) {
4807 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4808 "2751 Adapter failed to restart, "
4809 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4810 status,
4811 readl(phba->MBslimaddr + 0xa8),
4812 readl(phba->MBslimaddr + 0xac));
4813 phba->link_state = LPFC_HBA_ERROR;
4814 retval = 1;
4815 }
4816
4817 return retval;
4818 }
4819
4820 /**
4821 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4822 * @phba: Pointer to HBA context object.
4823 * @mask: Bit mask to be checked.
4824 *
4825 * This function checks the host status register to see if the HBA is
4826 * ready. This function will wait in a loop for the HBA to become ready.
4827 * If the HBA is not ready, the function will reset the HBA PCI
4828 * function again. The function returns 1 when the HBA fails to become
4829 * ready; otherwise it returns zero.
4830 **/
4831 static int
4832 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4833 {
4834 uint32_t status;
4835 int retval = 0;
4836
4837 /* Read the HBA Host Status Register */
4838 status = lpfc_sli4_post_status_check(phba);
4839
4840 if (status) {
4841 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4842 lpfc_sli_brdrestart(phba);
4843 status = lpfc_sli4_post_status_check(phba);
4844 }
4845
4846 /* Check to see if any errors occurred during init */
4847 if (status) {
4848 phba->link_state = LPFC_HBA_ERROR;
4849 retval = 1;
4850 } else
4851 phba->sli4_hba.intr_enable = 0;
4852
4853 clear_bit(HBA_SETUP, &phba->hba_flag);
4854 return retval;
4855 }
4856
4857 /**
4858 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4859 * @phba: Pointer to HBA context object.
4860 * @mask: Bit mask to be checked.
4861 *
4862 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
4863 * from the API jump table function pointer from the lpfc_hba struct.
4864 **/
4865 int
4866 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4867 {
4868 return phba->lpfc_sli_brdready(phba, mask);
4869 }
4870
4871 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4872
4873 /**
4874 * lpfc_reset_barrier - Make HBA ready for HBA reset
4875 * @phba: Pointer to HBA context object.
4876 *
4877 * This function is called before resetting an HBA. This function is called
4878 * with the hbalock held and requests the HBA to quiesce DMAs before a reset.
4879 **/
4880 void lpfc_reset_barrier(struct lpfc_hba *phba)
4881 {
4882 uint32_t __iomem *resp_buf;
4883 uint32_t __iomem *mbox_buf;
4884 volatile struct MAILBOX_word0 mbox;
4885 uint32_t hc_copy, ha_copy, resp_data;
4886 int i;
4887 uint8_t hdrtype;
4888
4889 lockdep_assert_held(&phba->hbalock);
4890
4891 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4892 if (hdrtype != PCI_HEADER_TYPE_MFD ||
4893 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4894 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4895 return;
4896
4897 /*
4898 * Tell the other part of the chip to suspend temporarily all
4899 * its DMA activity.
4900 */ 4901 resp_buf = phba->MBslimaddr; 4902 4903 /* Disable the error attention */ 4904 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 4905 return; 4906 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 4907 readl(phba->HCregaddr); /* flush */ 4908 phba->link_flag |= LS_IGNORE_ERATT; 4909 4910 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4911 return; 4912 if (ha_copy & HA_ERATT) { 4913 /* Clear Chip error bit */ 4914 writel(HA_ERATT, phba->HAregaddr); 4915 phba->pport->stopped = 1; 4916 } 4917 4918 mbox.word0 = 0; 4919 mbox.mbxCommand = MBX_KILL_BOARD; 4920 mbox.mbxOwner = OWN_CHIP; 4921 4922 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 4923 mbox_buf = phba->MBslimaddr; 4924 writel(mbox.word0, mbox_buf); 4925 4926 for (i = 0; i < 50; i++) { 4927 if (lpfc_readl((resp_buf + 1), &resp_data)) 4928 return; 4929 if (resp_data != ~(BARRIER_TEST_PATTERN)) 4930 mdelay(1); 4931 else 4932 break; 4933 } 4934 resp_data = 0; 4935 if (lpfc_readl((resp_buf + 1), &resp_data)) 4936 return; 4937 if (resp_data != ~(BARRIER_TEST_PATTERN)) { 4938 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || 4939 phba->pport->stopped) 4940 goto restore_hc; 4941 else 4942 goto clear_errat; 4943 } 4944 4945 mbox.mbxOwner = OWN_HOST; 4946 resp_data = 0; 4947 for (i = 0; i < 500; i++) { 4948 if (lpfc_readl(resp_buf, &resp_data)) 4949 return; 4950 if (resp_data != mbox.word0) 4951 mdelay(1); 4952 else 4953 break; 4954 } 4955 4956 clear_errat: 4957 4958 while (++i < 500) { 4959 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4960 return; 4961 if (!(ha_copy & HA_ERATT)) 4962 mdelay(1); 4963 else 4964 break; 4965 } 4966 4967 if (readl(phba->HAregaddr) & HA_ERATT) { 4968 writel(HA_ERATT, phba->HAregaddr); 4969 phba->pport->stopped = 1; 4970 } 4971 4972 restore_hc: 4973 phba->link_flag &= ~LS_IGNORE_ERATT; 4974 writel(hc_copy, phba->HCregaddr); 4975 readl(phba->HCregaddr); /* flush */ 4976 } 4977 4978 /** 4979 * lpfc_sli_brdkill - Issue a kill_board mailbox command 4980 * @phba: Pointer to HBA context object. 4981 * 4982 * This function issues a kill_board mailbox command and waits for 4983 * the error attention interrupt. This function is called for stopping 4984 * the firmware processing. The caller is not required to hold any 4985 * locks. This function calls lpfc_hba_down_post function to free 4986 * any pending commands after the kill. The function will return 1 when it 4987 * fails to kill the board else will return 0. 
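 *
 * Minimal usage sketch (illustrative; rc is 1 on failure, 0 on
 * success):
 *	rc = lpfc_sli_brdkill(phba);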
4988 **/ 4989 int 4990 lpfc_sli_brdkill(struct lpfc_hba *phba) 4991 { 4992 struct lpfc_sli *psli; 4993 LPFC_MBOXQ_t *pmb; 4994 uint32_t status; 4995 uint32_t ha_copy; 4996 int retval; 4997 int i = 0; 4998 4999 psli = &phba->sli; 5000 5001 /* Kill HBA */ 5002 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5003 "0329 Kill HBA Data: x%x x%x\n", 5004 phba->pport->port_state, psli->sli_flag); 5005 5006 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5007 if (!pmb) 5008 return 1; 5009 5010 /* Disable the error attention */ 5011 spin_lock_irq(&phba->hbalock); 5012 if (lpfc_readl(phba->HCregaddr, &status)) { 5013 spin_unlock_irq(&phba->hbalock); 5014 mempool_free(pmb, phba->mbox_mem_pool); 5015 return 1; 5016 } 5017 status &= ~HC_ERINT_ENA; 5018 writel(status, phba->HCregaddr); 5019 readl(phba->HCregaddr); /* flush */ 5020 phba->link_flag |= LS_IGNORE_ERATT; 5021 spin_unlock_irq(&phba->hbalock); 5022 5023 lpfc_kill_board(phba, pmb); 5024 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 5025 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 5026 5027 if (retval != MBX_SUCCESS) { 5028 if (retval != MBX_BUSY) 5029 mempool_free(pmb, phba->mbox_mem_pool); 5030 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5031 "2752 KILL_BOARD command failed retval %d\n", 5032 retval); 5033 spin_lock_irq(&phba->hbalock); 5034 phba->link_flag &= ~LS_IGNORE_ERATT; 5035 spin_unlock_irq(&phba->hbalock); 5036 return 1; 5037 } 5038 5039 spin_lock_irq(&phba->hbalock); 5040 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 5041 spin_unlock_irq(&phba->hbalock); 5042 5043 mempool_free(pmb, phba->mbox_mem_pool); 5044 5045 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 5046 * attention every 100ms for 3 seconds. If we don't get ERATT after 5047 * 3 seconds we still set HBA_ERROR state because the status of the 5048 * board is now undefined. 5049 */ 5050 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 5051 return 1; 5052 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 5053 mdelay(100); 5054 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 5055 return 1; 5056 } 5057 5058 del_timer_sync(&psli->mbox_tmo); 5059 if (ha_copy & HA_ERATT) { 5060 writel(HA_ERATT, phba->HAregaddr); 5061 phba->pport->stopped = 1; 5062 } 5063 spin_lock_irq(&phba->hbalock); 5064 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 5065 psli->mbox_active = NULL; 5066 phba->link_flag &= ~LS_IGNORE_ERATT; 5067 spin_unlock_irq(&phba->hbalock); 5068 5069 lpfc_hba_down_post(phba); 5070 phba->link_state = LPFC_HBA_ERROR; 5071 5072 return ha_copy & HA_ERATT ? 0 : 1; 5073 } 5074 5075 /** 5076 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA 5077 * @phba: Pointer to HBA context object. 5078 * 5079 * This function resets the HBA by writing HC_INITFF to the control 5080 * register. After the HBA resets, this function resets all the iocb ring 5081 * indices. This function disables PCI layer parity checking during 5082 * the reset. 5083 * This function returns 0 always. 5084 * The caller is not required to hold any locks. 5085 **/ 5086 int 5087 lpfc_sli_brdreset(struct lpfc_hba *phba) 5088 { 5089 struct lpfc_sli *psli; 5090 struct lpfc_sli_ring *pring; 5091 uint16_t cfg_value; 5092 int i; 5093 5094 psli = &phba->sli; 5095 5096 /* Reset HBA */ 5097 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5098 "0325 Reset HBA Data: x%x x%x\n", 5099 (phba->pport) ? 
phba->pport->port_state : 0,
5100 psli->sli_flag);
5101
5102 /* perform board reset */
5103 phba->fc_eventTag = 0;
5104 phba->link_events = 0;
5105 set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
5106 if (phba->pport) {
5107 phba->pport->fc_myDID = 0;
5108 phba->pport->fc_prevDID = 0;
5109 }
5110
5111 /* Turn off parity checking and serr during the physical reset */
5112 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
5113 return -EIO;
5114
5115 pci_write_config_word(phba->pcidev, PCI_COMMAND,
5116 (cfg_value &
5117 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5118
5119 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
5120
5121 /* Now toggle INITFF bit in the Host Control Register */
5122 writel(HC_INITFF, phba->HCregaddr);
5123 mdelay(1);
5124 readl(phba->HCregaddr); /* flush */
5125 writel(0, phba->HCregaddr);
5126 readl(phba->HCregaddr); /* flush */
5127
5128 /* Restore PCI cmd register */
5129 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5130
5131 /* Initialize relevant SLI info */
5132 for (i = 0; i < psli->num_rings; i++) {
5133 pring = &psli->sli3_ring[i];
5134 pring->flag = 0;
5135 pring->sli.sli3.rspidx = 0;
5136 pring->sli.sli3.next_cmdidx = 0;
5137 pring->sli.sli3.local_getidx = 0;
5138 pring->sli.sli3.cmdidx = 0;
5139 pring->missbufcnt = 0;
5140 }
5141
5142 phba->link_state = LPFC_WARM_START;
5143 return 0;
5144 }
5145
5146 /**
5147 * lpfc_sli4_brdreset - Reset a sli-4 HBA
5148 * @phba: Pointer to HBA context object.
5149 *
5150 * This function resets a SLI4 HBA. This function disables PCI layer parity
5151 * checking while it resets the device. The caller is not required to hold
5152 * any locks.
5153 *
5154 * This function returns 0 on success, else it returns a negative error code.
5155 **/
5156 int
5157 lpfc_sli4_brdreset(struct lpfc_hba *phba)
5158 {
5159 struct lpfc_sli *psli = &phba->sli;
5160 uint16_t cfg_value;
5161 int rc = 0;
5162
5163 /* Reset HBA */
5164 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5165 "0295 Reset HBA Data: x%x x%x x%lx\n",
5166 phba->pport->port_state, psli->sli_flag,
5167 phba->hba_flag);
5168
5169 /* perform board reset */
5170 phba->fc_eventTag = 0;
5171 phba->link_events = 0;
5172 phba->pport->fc_myDID = 0;
5173 phba->pport->fc_prevDID = 0;
5174 clear_bit(HBA_SETUP, &phba->hba_flag);
5175
5176 spin_lock_irq(&phba->hbalock);
5177 psli->sli_flag &= ~(LPFC_PROCESS_LA);
5178 phba->fcf.fcf_flag = 0;
5179 spin_unlock_irq(&phba->hbalock);
5180
5181 /* Now physically reset the device */
5182 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5183 "0389 Performing PCI function reset!\n");
5184
5185 /* Turn off parity checking and serr during the physical reset */
5186 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
5187 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5188 "3205 PCI read Config failed\n");
5189 return -EIO;
5190 }
5191
5192 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
5193 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5194
5195 /* Perform FCoE PCI function reset before freeing queue memory */
5196 rc = lpfc_pci_function_reset(phba);
5197
5198 /* Restore PCI cmd register */
5199 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5200
5201 return rc;
5202 }
5203
5204 /**
5205 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
5206 * @phba: Pointer to HBA context object.
5207 *
5208 * This function is called in the SLI initialization code path to
5209 * restart the HBA. The caller is not required to hold any lock.
5210 * This function writes MBX_RESTART mailbox command to the SLIM and 5211 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post 5212 * function to free any pending commands. The function enables 5213 * POST only during the first initialization. The function returns zero. 5214 * The function does not guarantee completion of MBX_RESTART mailbox 5215 * command before the return of this function. 5216 **/ 5217 static int 5218 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) 5219 { 5220 volatile struct MAILBOX_word0 mb; 5221 struct lpfc_sli *psli; 5222 void __iomem *to_slim; 5223 5224 spin_lock_irq(&phba->hbalock); 5225 5226 psli = &phba->sli; 5227 5228 /* Restart HBA */ 5229 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5230 "0337 Restart HBA Data: x%x x%x\n", 5231 (phba->pport) ? phba->pport->port_state : 0, 5232 psli->sli_flag); 5233 5234 mb.word0 = 0; 5235 mb.mbxCommand = MBX_RESTART; 5236 mb.mbxHc = 1; 5237 5238 lpfc_reset_barrier(phba); 5239 5240 to_slim = phba->MBslimaddr; 5241 writel(mb.word0, to_slim); 5242 readl(to_slim); /* flush */ 5243 5244 /* Only skip post after fc_ffinit is completed */ 5245 if (phba->pport && phba->pport->port_state) 5246 mb.word0 = 1; /* This is really setting up word1 */ 5247 else 5248 mb.word0 = 0; /* This is really setting up word1 */ 5249 to_slim = phba->MBslimaddr + sizeof (uint32_t); 5250 writel(mb.word0, to_slim); 5251 readl(to_slim); /* flush */ 5252 5253 lpfc_sli_brdreset(phba); 5254 if (phba->pport) 5255 phba->pport->stopped = 0; 5256 phba->link_state = LPFC_INIT_START; 5257 phba->hba_flag = 0; 5258 spin_unlock_irq(&phba->hbalock); 5259 5260 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 5261 psli->stats_start = ktime_get_seconds(); 5262 5263 /* Give the INITFF and Post time to settle. */ 5264 mdelay(100); 5265 5266 lpfc_hba_down_post(phba); 5267 5268 return 0; 5269 } 5270 5271 /** 5272 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba 5273 * @phba: Pointer to HBA context object. 5274 * 5275 * This function is called in the SLI initialization code path to restart 5276 * a SLI4 HBA. The caller is not required to hold any lock. 5277 * At the end of the function, it calls lpfc_hba_down_post function to 5278 * free any pending commands. 5279 **/ 5280 static int 5281 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) 5282 { 5283 struct lpfc_sli *psli = &phba->sli; 5284 int rc; 5285 5286 /* Restart HBA */ 5287 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5288 "0296 Restart HBA Data: x%x x%x\n", 5289 phba->pport->port_state, psli->sli_flag); 5290 5291 rc = lpfc_sli4_brdreset(phba); 5292 if (rc) { 5293 phba->link_state = LPFC_HBA_ERROR; 5294 goto hba_down_queue; 5295 } 5296 5297 spin_lock_irq(&phba->hbalock); 5298 phba->pport->stopped = 0; 5299 phba->link_state = LPFC_INIT_START; 5300 phba->hba_flag = 0; 5301 /* Preserve FA-PWWN expectation */ 5302 phba->sli4_hba.fawwpn_flag &= LPFC_FAWWPN_FABRIC; 5303 spin_unlock_irq(&phba->hbalock); 5304 5305 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 5306 psli->stats_start = ktime_get_seconds(); 5307 5308 hba_down_queue: 5309 lpfc_hba_down_post(phba); 5310 lpfc_sli4_queue_destroy(phba); 5311 5312 return rc; 5313 } 5314 5315 /** 5316 * lpfc_sli_brdrestart - Wrapper func for restarting hba 5317 * @phba: Pointer to HBA context object. 5318 * 5319 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the 5320 * API jump table function pointer from the lpfc_hba struct. 
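 *
 * Hedged wiring sketch (the jump-table binding is assumed to happen
 * during API table setup, choosing lpfc_sli_brdrestart_s3 or
 * lpfc_sli_brdrestart_s4 by SLI revision):
 *
 *	phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;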
5321 **/ 5322 int 5323 lpfc_sli_brdrestart(struct lpfc_hba *phba) 5324 { 5325 return phba->lpfc_sli_brdrestart(phba); 5326 } 5327 5328 /** 5329 * lpfc_sli_chipset_init - Wait for the HBA to become ready after a restart 5330 * @phba: Pointer to HBA context object. 5331 * 5332 * This function is called after an HBA restart to wait for successful 5333 * restart of the HBA, as indicated by the HS_FFRDY and HS_MBRDY bits. If 5334 * the HBA is still not ready after 150 polling iterations (roughly 60 5335 * seconds), the function restarts the HBA once more before timing out. 5336 * Returns zero if the HBA restarted successfully, else a negative error code. 5337 **/ 5338 int 5339 lpfc_sli_chipset_init(struct lpfc_hba *phba) 5340 { 5341 uint32_t status, i = 0; 5342 5343 /* Read the HBA Host Status Register */ 5344 if (lpfc_readl(phba->HSregaddr, &status)) 5345 return -EIO; 5346 5347 /* Check status register to see what current state is */ 5348 i = 0; 5349 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) { 5350 5351 /* Poll every 10ms for the first 10 retries, then every 100ms 5352 * for the next 90 retries, then every 1 sec for 50 retries - 5353 * roughly 60 seconds in all (10 x 10ms + 90 x 100ms + 50 x 1s 5354 * ~= 59s) - before resetting the board at iteration 150 and 5355 * polling every 1 sec for another 50 retries. The up-to-60- 5356 * second wait for board ready is needed for Falcon FIPS 5357 * zeroization to complete; any board reset in between would 5358 * restart zeroization and delay board ready even further. */ 5359 if (i++ >= 200) { 5360 /* Adapter failed to init, timeout, status reg 5361 <status> */ 5362 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5363 "0436 Adapter failed to init, " 5364 "timeout, status reg x%x, " 5365 "FW Data: A8 x%x AC x%x\n", status, 5366 readl(phba->MBslimaddr + 0xa8), 5367 readl(phba->MBslimaddr + 0xac)); 5368 phba->link_state = LPFC_HBA_ERROR; 5369 return -ETIMEDOUT; 5370 } 5371 5372 /* Check to see if any errors occurred during init */ 5373 if (status & HS_FFERM) { 5374 /* ERROR: During chipset initialization */ 5375 /* Adapter failed to init, chipset, status reg 5376 <status> */ 5377 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5378 "0437 Adapter failed to init, " 5379 "chipset, status reg x%x, " 5380 "FW Data: A8 x%x AC x%x\n", status, 5381 readl(phba->MBslimaddr + 0xa8), 5382 readl(phba->MBslimaddr + 0xac)); 5383 phba->link_state = LPFC_HBA_ERROR; 5384 return -EIO; 5385 } 5386 5387 if (i <= 10) 5388 msleep(10); 5389 else if (i <= 100) 5390 msleep(100); 5391 else 5392 msleep(1000); 5393 5394 if (i == 150) { 5395 /* Do post */ 5396 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 5397 lpfc_sli_brdrestart(phba); 5398 } 5399 /* Read the HBA Host Status Register */ 5400 if (lpfc_readl(phba->HSregaddr, &status)) 5401 return -EIO; 5402 } 5403 5404 /* Check to see if any errors occurred during init */ 5405 if (status & HS_FFERM) { 5406 /* ERROR: During chipset initialization */ 5407 /* Adapter failed to init, chipset, status reg <status> */ 5408 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5409 "0438 Adapter failed to init, chipset, " 5410 "status reg x%x, " 5411 "FW Data: A8 x%x AC x%x\n", status, 5412 readl(phba->MBslimaddr + 0xa8), 5413 readl(phba->MBslimaddr + 0xac)); 5414 phba->link_state = LPFC_HBA_ERROR; 5415 return -EIO; 5416 } 5417 5418 set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag); 5419 5420 /* Clear all interrupt enable conditions */ 5421 writel(0, phba->HCregaddr); 5422 readl(phba->HCregaddr); /* flush */ 5423 5424 /* setup host attn register */ 5425 writel(0xffffffff, phba->HAregaddr); 5426
readl(phba->HAregaddr); /* flush */ 5427 return 0; 5428 } 5429 5430 /** 5431 * lpfc_sli_hbq_count - Get the number of HBQs to be configured 5432 * 5433 * This function calculates and returns the number of HBQs required to be 5434 * configured. 5435 **/ 5436 int 5437 lpfc_sli_hbq_count(void) 5438 { 5439 return ARRAY_SIZE(lpfc_hbq_defs); 5440 } 5441 5442 /** 5443 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries 5444 * 5445 * This function adds the number of hbq entries in every HBQ to get 5446 * the total number of hbq entries required for the HBA and returns 5447 * the total count. 5448 **/ 5449 static int 5450 lpfc_sli_hbq_entry_count(void) 5451 { 5452 int hbq_count = lpfc_sli_hbq_count(); 5453 int count = 0; 5454 int i; 5455 5456 for (i = 0; i < hbq_count; ++i) 5457 count += lpfc_hbq_defs[i]->entry_count; 5458 return count; 5459 } 5460 5461 /** 5462 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries 5463 * 5464 * This function calculates amount of memory required for all hbq entries 5465 * to be configured and returns the total memory required. 5466 **/ 5467 int 5468 lpfc_sli_hbq_size(void) 5469 { 5470 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 5471 } 5472 5473 /** 5474 * lpfc_sli_hbq_setup - configure and initialize HBQs 5475 * @phba: Pointer to HBA context object. 5476 * 5477 * This function is called during the SLI initialization to configure 5478 * all the HBQs and post buffers to the HBQ. The caller is not 5479 * required to hold any locks. This function will return zero if successful 5480 * else it will return negative error code. 5481 **/ 5482 static int 5483 lpfc_sli_hbq_setup(struct lpfc_hba *phba) 5484 { 5485 int hbq_count = lpfc_sli_hbq_count(); 5486 LPFC_MBOXQ_t *pmb; 5487 MAILBOX_t *pmbox; 5488 uint32_t hbqno; 5489 uint32_t hbq_entry_index; 5490 5491 /* Get a Mailbox buffer to setup mailbox 5492 * commands for HBA initialization 5493 */ 5494 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5495 5496 if (!pmb) 5497 return -ENOMEM; 5498 5499 pmbox = &pmb->u.mb; 5500 5501 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 5502 phba->link_state = LPFC_INIT_MBX_CMDS; 5503 phba->hbq_in_use = 1; 5504 5505 hbq_entry_index = 0; 5506 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 5507 phba->hbqs[hbqno].next_hbqPutIdx = 0; 5508 phba->hbqs[hbqno].hbqPutIdx = 0; 5509 phba->hbqs[hbqno].local_hbqGetIdx = 0; 5510 phba->hbqs[hbqno].entry_count = 5511 lpfc_hbq_defs[hbqno]->entry_count; 5512 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], 5513 hbq_entry_index, pmb); 5514 hbq_entry_index += phba->hbqs[hbqno].entry_count; 5515 5516 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 5517 /* Adapter failed to init, mbxCmd <cmd> CFG_RING, 5518 mbxStatus <status>, ring <num> */ 5519 5520 lpfc_printf_log(phba, KERN_ERR, 5521 LOG_SLI | LOG_VPORT, 5522 "1805 Adapter failed to init. " 5523 "Data: x%x x%x x%x\n", 5524 pmbox->mbxCommand, 5525 pmbox->mbxStatus, hbqno); 5526 5527 phba->link_state = LPFC_HBA_ERROR; 5528 mempool_free(pmb, phba->mbox_mem_pool); 5529 return -ENXIO; 5530 } 5531 } 5532 phba->hbq_count = hbq_count; 5533 5534 mempool_free(pmb, phba->mbox_mem_pool); 5535 5536 /* Initially populate or replenish the HBQs */ 5537 for (hbqno = 0; hbqno < hbq_count; ++hbqno) 5538 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno); 5539 return 0; 5540 } 5541 5542 /** 5543 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA 5544 * @phba: Pointer to HBA context object. 
5545 * 5546 * This function is called during the SLI initialization to configure 5547 * all the HBQs and post buffers to the HBQ. The caller is not 5548 * required to hold any locks. This function will return zero if successful 5549 * else it will return negative error code. 5550 **/ 5551 static int 5552 lpfc_sli4_rb_setup(struct lpfc_hba *phba) 5553 { 5554 phba->hbq_in_use = 1; 5555 /** 5556 * Specific case when the MDS diagnostics is enabled and supported. 5557 * The receive buffer count is truncated to manage the incoming 5558 * traffic. 5559 **/ 5560 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) 5561 phba->hbqs[LPFC_ELS_HBQ].entry_count = 5562 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1; 5563 else 5564 phba->hbqs[LPFC_ELS_HBQ].entry_count = 5565 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count; 5566 phba->hbq_count = 1; 5567 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ); 5568 /* Initially populate or replenish the HBQs */ 5569 return 0; 5570 } 5571 5572 /** 5573 * lpfc_sli_config_port - Issue config port mailbox command 5574 * @phba: Pointer to HBA context object. 5575 * @sli_mode: sli mode - 2/3 5576 * 5577 * This function is called by the sli initialization code path 5578 * to issue config_port mailbox command. This function restarts the 5579 * HBA firmware and issues a config_port mailbox command to configure 5580 * the SLI interface in the sli mode specified by sli_mode 5581 * variable. The caller is not required to hold any locks. 5582 * The function returns 0 if successful, else returns negative error 5583 * code. 5584 **/ 5585 int 5586 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) 5587 { 5588 LPFC_MBOXQ_t *pmb; 5589 uint32_t resetcount = 0, rc = 0, done = 0; 5590 5591 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5592 if (!pmb) { 5593 phba->link_state = LPFC_HBA_ERROR; 5594 return -ENOMEM; 5595 } 5596 5597 phba->sli_rev = sli_mode; 5598 while (resetcount < 2 && !done) { 5599 spin_lock_irq(&phba->hbalock); 5600 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 5601 spin_unlock_irq(&phba->hbalock); 5602 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 5603 lpfc_sli_brdrestart(phba); 5604 rc = lpfc_sli_chipset_init(phba); 5605 if (rc) 5606 break; 5607 5608 spin_lock_irq(&phba->hbalock); 5609 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 5610 spin_unlock_irq(&phba->hbalock); 5611 resetcount++; 5612 5613 /* Call pre CONFIG_PORT mailbox command initialization. A 5614 * value of 0 means the call was successful. Any other 5615 * nonzero value is a failure, but if ERESTART is returned, 5616 * the driver may reset the HBA and try again. 
5617 */ 5618 rc = lpfc_config_port_prep(phba); 5619 if (rc == -ERESTART) { 5620 phba->link_state = LPFC_LINK_UNKNOWN; 5621 continue; 5622 } else if (rc) 5623 break; 5624 5625 phba->link_state = LPFC_INIT_MBX_CMDS; 5626 lpfc_config_port(phba, pmb); 5627 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 5628 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 5629 LPFC_SLI3_HBQ_ENABLED | 5630 LPFC_SLI3_CRP_ENABLED | 5631 LPFC_SLI3_DSS_ENABLED); 5632 if (rc != MBX_SUCCESS) { 5633 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5634 "0442 Adapter failed to init, mbxCmd x%x " 5635 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 5636 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); 5637 spin_lock_irq(&phba->hbalock); 5638 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 5639 spin_unlock_irq(&phba->hbalock); 5640 rc = -ENXIO; 5641 } else { 5642 /* Allow asynchronous mailbox command to go through */ 5643 spin_lock_irq(&phba->hbalock); 5644 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 5645 spin_unlock_irq(&phba->hbalock); 5646 done = 1; 5647 5648 if ((pmb->u.mb.un.varCfgPort.casabt == 1) && 5649 (pmb->u.mb.un.varCfgPort.gasabt == 0)) 5650 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5651 "3110 Port did not grant ASABT\n"); 5652 } 5653 } 5654 if (!done) { 5655 rc = -EINVAL; 5656 goto do_prep_failed; 5657 } 5658 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { 5659 if (!pmb->u.mb.un.varCfgPort.cMA) { 5660 rc = -ENXIO; 5661 goto do_prep_failed; 5662 } 5663 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { 5664 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 5665 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; 5666 phba->max_vports = (phba->max_vpi > phba->max_vports) ? 5667 phba->max_vpi : phba->max_vports; 5668 5669 } else 5670 phba->max_vpi = 0; 5671 if (pmb->u.mb.un.varCfgPort.gerbm) 5672 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 5673 if (pmb->u.mb.un.varCfgPort.gcrp) 5674 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 5675 5676 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; 5677 phba->port_gp = phba->mbox->us.s3_pgp.port; 5678 5679 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 5680 if (pmb->u.mb.un.varCfgPort.gbg == 0) { 5681 phba->cfg_enable_bg = 0; 5682 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 5683 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5684 "0443 Adapter did not grant " 5685 "BlockGuard\n"); 5686 } 5687 } 5688 } else { 5689 phba->hbq_get = NULL; 5690 phba->port_gp = phba->mbox->us.s2.port; 5691 phba->max_vpi = 0; 5692 } 5693 do_prep_failed: 5694 mempool_free(pmb, phba->mbox_mem_pool); 5695 return rc; 5696 } 5697 5698 5699 /** 5700 * lpfc_sli_hba_setup - SLI initialization function 5701 * @phba: Pointer to HBA context object. 5702 * 5703 * This function is the main SLI initialization function. This function 5704 * is called by the HBA initialization code, HBA reset code and HBA 5705 * error attention handler code. Caller is not required to hold any 5706 * locks. This function issues config_port mailbox command to configure 5707 * the SLI, setup iocb rings and HBQ rings. In the end the function 5708 * calls the config_port_post function to issue init_link mailbox 5709 * command and to start the discovery. The function will return zero 5710 * if successful, else it will return negative error code. 
5711 **/ 5712 int 5713 lpfc_sli_hba_setup(struct lpfc_hba *phba) 5714 { 5715 uint32_t rc; 5716 int i; 5717 int longs; 5718 5719 /* Enable ISR already does config_port because of config_msi mbx */ 5720 if (test_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag)) { 5721 rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 5722 if (rc) 5723 return -EIO; 5724 clear_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag); 5725 } 5726 phba->fcp_embed_io = 0; /* SLI4 FC support only */ 5727 5728 if (phba->sli_rev == 3) { 5729 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 5730 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 5731 } else { 5732 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 5733 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 5734 phba->sli3_options = 0; 5735 } 5736 5737 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5738 "0444 Firmware in SLI %x mode. Max_vpi %d\n", 5739 phba->sli_rev, phba->max_vpi); 5740 rc = lpfc_sli_ring_map(phba); 5741 5742 if (rc) 5743 goto lpfc_sli_hba_setup_error; 5744 5745 /* Initialize VPIs. */ 5746 if (phba->sli_rev == LPFC_SLI_REV3) { 5747 /* 5748 * The VPI bitmask and physical ID array are allocated 5749 * and initialized once only - at driver load. A port 5750 * reset doesn't need to reinitialize this memory. 5751 */ 5752 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) { 5753 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG; 5754 phba->vpi_bmask = kcalloc(longs, 5755 sizeof(unsigned long), 5756 GFP_KERNEL); 5757 if (!phba->vpi_bmask) { 5758 rc = -ENOMEM; 5759 goto lpfc_sli_hba_setup_error; 5760 } 5761 5762 phba->vpi_ids = kcalloc(phba->max_vpi + 1, 5763 sizeof(uint16_t), 5764 GFP_KERNEL); 5765 if (!phba->vpi_ids) { 5766 kfree(phba->vpi_bmask); 5767 rc = -ENOMEM; 5768 goto lpfc_sli_hba_setup_error; 5769 } 5770 for (i = 0; i < phba->max_vpi; i++) 5771 phba->vpi_ids[i] = i; 5772 } 5773 } 5774 5775 /* Init HBQs */ 5776 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 5777 rc = lpfc_sli_hbq_setup(phba); 5778 if (rc) 5779 goto lpfc_sli_hba_setup_error; 5780 } 5781 spin_lock_irq(&phba->hbalock); 5782 phba->sli.sli_flag |= LPFC_PROCESS_LA; 5783 spin_unlock_irq(&phba->hbalock); 5784 5785 rc = lpfc_config_port_post(phba); 5786 if (rc) 5787 goto lpfc_sli_hba_setup_error; 5788 5789 return rc; 5790 5791 lpfc_sli_hba_setup_error: 5792 phba->link_state = LPFC_HBA_ERROR; 5793 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5794 "0445 Firmware initialization failed\n"); 5795 return rc; 5796 } 5797 5798 /** 5799 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region 5800 * @phba: Pointer to HBA context object. 5801 * 5802 * This function issue a dump mailbox command to read config region 5803 * 23 and parse the records in the region and populate driver 5804 * data structure. 
5805 **/ 5806 static int 5807 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba) 5808 { 5809 LPFC_MBOXQ_t *mboxq; 5810 struct lpfc_dmabuf *mp; 5811 struct lpfc_mqe *mqe; 5812 uint32_t data_length; 5813 int rc; 5814 5815 /* Program the default value of vlan_id and fc_map */ 5816 phba->valid_vlan = 0; 5817 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 5818 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 5819 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 5820 5821 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5822 if (!mboxq) 5823 return -ENOMEM; 5824 5825 mqe = &mboxq->u.mqe; 5826 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) { 5827 rc = -ENOMEM; 5828 goto out_free_mboxq; 5829 } 5830 5831 mp = mboxq->ctx_buf; 5832 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5833 5834 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5835 "(%d):2571 Mailbox cmd x%x Status x%x " 5836 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 5837 "x%x x%x x%x x%x x%x x%x x%x x%x x%x " 5838 "CQ: x%x x%x x%x x%x\n", 5839 mboxq->vport ? mboxq->vport->vpi : 0, 5840 bf_get(lpfc_mqe_command, mqe), 5841 bf_get(lpfc_mqe_status, mqe), 5842 mqe->un.mb_words[0], mqe->un.mb_words[1], 5843 mqe->un.mb_words[2], mqe->un.mb_words[3], 5844 mqe->un.mb_words[4], mqe->un.mb_words[5], 5845 mqe->un.mb_words[6], mqe->un.mb_words[7], 5846 mqe->un.mb_words[8], mqe->un.mb_words[9], 5847 mqe->un.mb_words[10], mqe->un.mb_words[11], 5848 mqe->un.mb_words[12], mqe->un.mb_words[13], 5849 mqe->un.mb_words[14], mqe->un.mb_words[15], 5850 mqe->un.mb_words[16], mqe->un.mb_words[50], 5851 mboxq->mcqe.word0, 5852 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 5853 mboxq->mcqe.trailer); 5854 5855 if (rc) { 5856 rc = -EIO; 5857 goto out_free_mboxq; 5858 } 5859 data_length = mqe->un.mb_words[5]; 5860 if (data_length > DMP_RGN23_SIZE) { 5861 rc = -EIO; 5862 goto out_free_mboxq; 5863 } 5864 5865 lpfc_parse_fcoe_conf(phba, mp->virt, data_length); 5866 rc = 0; 5867 5868 out_free_mboxq: 5869 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED); 5870 return rc; 5871 } 5872 5873 /** 5874 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data 5875 * @phba: pointer to lpfc hba data structure. 5876 * @mboxq: pointer to the LPFC_MBOXQ_t structure. 5877 * @vpd: pointer to the memory to hold resulting port vpd data. 5878 * @vpd_size: On input, the number of bytes allocated to @vpd. 5879 * On output, the number of data bytes in @vpd. 5880 * 5881 * This routine executes a READ_REV SLI4 mailbox command. In 5882 * addition, this routine gets the port vpd data. 5883 * 5884 * Return codes 5885 * 0 - successful 5886 * -ENOMEM - could not allocated memory. 5887 **/ 5888 static int 5889 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 5890 uint8_t *vpd, uint32_t *vpd_size) 5891 { 5892 int rc = 0; 5893 uint32_t dma_size; 5894 struct lpfc_dmabuf *dmabuf; 5895 struct lpfc_mqe *mqe; 5896 5897 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5898 if (!dmabuf) 5899 return -ENOMEM; 5900 5901 /* 5902 * Get a DMA buffer for the vpd data resulting from the READ_REV 5903 * mailbox command. 5904 */ 5905 dma_size = *vpd_size; 5906 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size, 5907 &dmabuf->phys, GFP_KERNEL); 5908 if (!dmabuf->virt) { 5909 kfree(dmabuf); 5910 return -ENOMEM; 5911 } 5912 5913 /* 5914 * The SLI4 implementation of READ_REV conflicts at word1, 5915 * bits 31:16 and SLI4 adds vpd functionality not present 5916 * in SLI3. This code corrects the conflicts. 
5917 */ 5918 lpfc_read_rev(phba, mboxq); 5919 mqe = &mboxq->u.mqe; 5920 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); 5921 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); 5922 mqe->un.read_rev.word1 &= 0x0000FFFF; 5923 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); 5924 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); 5925 5926 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5927 if (rc) { 5928 dma_free_coherent(&phba->pcidev->dev, dma_size, 5929 dmabuf->virt, dmabuf->phys); 5930 kfree(dmabuf); 5931 return -EIO; 5932 } 5933 5934 /* 5935 * The available vpd length cannot be bigger than the 5936 * DMA buffer passed to the port. Catch the less than 5937 * case and update the caller's size. 5938 */ 5939 if (mqe->un.read_rev.avail_vpd_len < *vpd_size) 5940 *vpd_size = mqe->un.read_rev.avail_vpd_len; 5941 5942 memcpy(vpd, dmabuf->virt, *vpd_size); 5943 5944 dma_free_coherent(&phba->pcidev->dev, dma_size, 5945 dmabuf->virt, dmabuf->phys); 5946 kfree(dmabuf); 5947 return 0; 5948 } 5949 5950 /** 5951 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes 5952 * @phba: pointer to lpfc hba data structure. 5953 * 5954 * This routine retrieves SLI4 device physical port name this PCI function 5955 * is attached to. 5956 * 5957 * Return codes 5958 * 0 - successful 5959 * otherwise - failed to retrieve controller attributes 5960 **/ 5961 static int 5962 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba) 5963 { 5964 LPFC_MBOXQ_t *mboxq; 5965 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr; 5966 struct lpfc_controller_attribute *cntl_attr; 5967 void *virtaddr = NULL; 5968 uint32_t alloclen, reqlen; 5969 uint32_t shdr_status, shdr_add_status; 5970 union lpfc_sli4_cfg_shdr *shdr; 5971 int rc; 5972 5973 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5974 if (!mboxq) 5975 return -ENOMEM; 5976 5977 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */ 5978 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes); 5979 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5980 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen, 5981 LPFC_SLI4_MBX_NEMBED); 5982 5983 if (alloclen < reqlen) { 5984 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5985 "3084 Allocated DMA memory size (%d) is " 5986 "less than the requested DMA memory size " 5987 "(%d)\n", alloclen, reqlen); 5988 rc = -ENOMEM; 5989 goto out_free_mboxq; 5990 } 5991 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5992 virtaddr = mboxq->sge_array->addr[0]; 5993 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr; 5994 shdr = &mbx_cntl_attr->cfg_shdr; 5995 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 5996 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5997 if (shdr_status || shdr_add_status || rc) { 5998 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5999 "3085 Mailbox x%x (x%x/x%x) failed, " 6000 "rc:x%x, status:x%x, add_status:x%x\n", 6001 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 6002 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 6003 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 6004 rc, shdr_status, shdr_add_status); 6005 rc = -ENXIO; 6006 goto out_free_mboxq; 6007 } 6008 6009 cntl_attr = &mbx_cntl_attr->cntl_attr; 6010 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 6011 phba->sli4_hba.lnk_info.lnk_tp = 6012 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr); 6013 phba->sli4_hba.lnk_info.lnk_no = 6014 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr); 6015 phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr); 6016 
phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr); 6017 6018 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion)); 6019 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str, 6020 sizeof(phba->BIOSVersion)); 6021 6022 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6023 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, " 6024 "flash_id: x%02x, asic_rev: x%02x\n", 6025 phba->sli4_hba.lnk_info.lnk_tp, 6026 phba->sli4_hba.lnk_info.lnk_no, 6027 phba->BIOSVersion, phba->sli4_hba.flash_id, 6028 phba->sli4_hba.asic_rev); 6029 out_free_mboxq: 6030 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) 6031 lpfc_sli4_mbox_cmd_free(phba, mboxq); 6032 else 6033 mempool_free(mboxq, phba->mbox_mem_pool); 6034 return rc; 6035 } 6036 6037 /** 6038 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name 6039 * @phba: pointer to lpfc hba data structure. 6040 * 6041 * This routine retrieves SLI4 device physical port name this PCI function 6042 * is attached to. 6043 * 6044 * Return codes 6045 * 0 - successful 6046 * otherwise - failed to retrieve physical port name 6047 **/ 6048 static int 6049 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba) 6050 { 6051 LPFC_MBOXQ_t *mboxq; 6052 struct lpfc_mbx_get_port_name *get_port_name; 6053 uint32_t shdr_status, shdr_add_status; 6054 union lpfc_sli4_cfg_shdr *shdr; 6055 char cport_name = 0; 6056 int rc; 6057 6058 /* We assume nothing at this point */ 6059 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 6060 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON; 6061 6062 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6063 if (!mboxq) 6064 return -ENOMEM; 6065 /* obtain link type and link number via READ_CONFIG */ 6066 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 6067 lpfc_sli4_read_config(phba); 6068 6069 if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) 6070 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC; 6071 6072 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) 6073 goto retrieve_ppname; 6074 6075 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */ 6076 rc = lpfc_sli4_get_ctl_attr(phba); 6077 if (rc) 6078 goto out_free_mboxq; 6079 6080 retrieve_ppname: 6081 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 6082 LPFC_MBOX_OPCODE_GET_PORT_NAME, 6083 sizeof(struct lpfc_mbx_get_port_name) - 6084 sizeof(struct lpfc_sli4_cfg_mhdr), 6085 LPFC_SLI4_MBX_EMBED); 6086 get_port_name = &mboxq->u.mqe.un.get_port_name; 6087 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr; 6088 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1); 6089 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request, 6090 phba->sli4_hba.lnk_info.lnk_tp); 6091 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6092 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6093 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6094 if (shdr_status || shdr_add_status || rc) { 6095 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6096 "3087 Mailbox x%x (x%x/x%x) failed: " 6097 "rc:x%x, status:x%x, add_status:x%x\n", 6098 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 6099 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 6100 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 6101 rc, shdr_status, shdr_add_status); 6102 rc = -ENXIO; 6103 goto out_free_mboxq; 6104 } 6105 switch (phba->sli4_hba.lnk_info.lnk_no) { 6106 case LPFC_LINK_NUMBER_0: 6107 cport_name = bf_get(lpfc_mbx_get_port_name_name0, 6108 &get_port_name->u.response); 6109 
phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 6110 break; 6111 case LPFC_LINK_NUMBER_1: 6112 cport_name = bf_get(lpfc_mbx_get_port_name_name1, 6113 &get_port_name->u.response); 6114 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 6115 break; 6116 case LPFC_LINK_NUMBER_2: 6117 cport_name = bf_get(lpfc_mbx_get_port_name_name2, 6118 &get_port_name->u.response); 6119 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 6120 break; 6121 case LPFC_LINK_NUMBER_3: 6122 cport_name = bf_get(lpfc_mbx_get_port_name_name3, 6123 &get_port_name->u.response); 6124 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 6125 break; 6126 default: 6127 break; 6128 } 6129 6130 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) { 6131 phba->Port[0] = cport_name; 6132 phba->Port[1] = '\0'; 6133 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6134 "3091 SLI get port name: %s\n", phba->Port); 6135 } 6136 6137 out_free_mboxq: 6138 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) 6139 lpfc_sli4_mbox_cmd_free(phba, mboxq); 6140 else 6141 mempool_free(mboxq, phba->mbox_mem_pool); 6142 return rc; 6143 } 6144 6145 /** 6146 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues 6147 * @phba: pointer to lpfc hba data structure. 6148 * 6149 * This routine is called to explicitly arm the SLI4 device's completion and 6150 * event queues 6151 **/ 6152 static void 6153 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) 6154 { 6155 int qidx; 6156 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; 6157 struct lpfc_sli4_hdw_queue *qp; 6158 struct lpfc_queue *eq; 6159 6160 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM); 6161 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM); 6162 if (sli4_hba->nvmels_cq) 6163 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0, 6164 LPFC_QUEUE_REARM); 6165 6166 if (sli4_hba->hdwq) { 6167 /* Loop thru all Hardware Queues */ 6168 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 6169 qp = &sli4_hba->hdwq[qidx]; 6170 /* ARM the corresponding CQ */ 6171 sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0, 6172 LPFC_QUEUE_REARM); 6173 } 6174 6175 /* Loop thru all IRQ vectors */ 6176 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 6177 eq = sli4_hba->hba_eq_hdl[qidx].eq; 6178 /* ARM the corresponding EQ */ 6179 sli4_hba->sli4_write_eq_db(phba, eq, 6180 0, LPFC_QUEUE_REARM); 6181 } 6182 } 6183 6184 if (phba->nvmet_support) { 6185 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) { 6186 sli4_hba->sli4_write_cq_db(phba, 6187 sli4_hba->nvmet_cqset[qidx], 0, 6188 LPFC_QUEUE_REARM); 6189 } 6190 } 6191 } 6192 6193 /** 6194 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count. 6195 * @phba: Pointer to HBA context object. 6196 * @type: The resource extent type. 6197 * @extnt_count: buffer to hold port available extent count. 6198 * @extnt_size: buffer to hold element count per extent. 6199 * 6200 * This function calls the port and retrievs the number of available 6201 * extents and their size for a particular extent type. 6202 * 6203 * Returns: 0 if successful. Nonzero otherwise. 
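 *
 * A minimal caller sketch (local names are illustrative only):
 *
 *	uint16_t ext_cnt, ext_size;
 *
 *	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &ext_cnt, &ext_size);
 *	if (!rc)
 *		total_ids = ext_cnt * ext_size;	(extents x elements each)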
6204 **/ 6205 int 6206 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type, 6207 uint16_t *extnt_count, uint16_t *extnt_size) 6208 { 6209 int rc = 0; 6210 uint32_t length; 6211 uint32_t mbox_tmo; 6212 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info; 6213 LPFC_MBOXQ_t *mbox; 6214 6215 *extnt_count = 0; 6216 *extnt_size = 0; 6217 6218 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6219 if (!mbox) 6220 return -ENOMEM; 6221 6222 /* Find out how many extents are available for this resource type */ 6223 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) - 6224 sizeof(struct lpfc_sli4_cfg_mhdr)); 6225 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6226 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO, 6227 length, LPFC_SLI4_MBX_EMBED); 6228 6229 /* Send an extents count of 0 - the GET doesn't use it. */ 6230 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 6231 LPFC_SLI4_MBX_EMBED); 6232 if (unlikely(rc)) { 6233 rc = -EIO; 6234 goto err_exit; 6235 } 6236 6237 if (!phba->sli4_hba.intr_enable) 6238 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 6239 else { 6240 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 6241 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 6242 } 6243 if (unlikely(rc)) { 6244 rc = -EIO; 6245 goto err_exit; 6246 } 6247 6248 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info; 6249 if (bf_get(lpfc_mbox_hdr_status, 6250 &rsrc_info->header.cfg_shdr.response)) { 6251 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6252 "2930 Failed to get resource extents " 6253 "Status 0x%x Add'l Status 0x%x\n", 6254 bf_get(lpfc_mbox_hdr_status, 6255 &rsrc_info->header.cfg_shdr.response), 6256 bf_get(lpfc_mbox_hdr_add_status, 6257 &rsrc_info->header.cfg_shdr.response)); 6258 rc = -EIO; 6259 goto err_exit; 6260 } 6261 6262 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt, 6263 &rsrc_info->u.rsp); 6264 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size, 6265 &rsrc_info->u.rsp); 6266 6267 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6268 "3162 Retrieved extents type-%d from port: count:%d, " 6269 "size:%d\n", type, *extnt_count, *extnt_size); 6270 6271 err_exit: 6272 mempool_free(mbox, phba->mbox_mem_pool); 6273 return rc; 6274 } 6275 6276 /** 6277 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents. 6278 * @phba: Pointer to HBA context object. 6279 * @type: The extent type to check. 6280 * 6281 * This function reads the current available extents from the port and checks 6282 * if the extent count or extent size has changed since the last access. 6283 * Callers use this routine post port reset to understand if there is a 6284 * extent reprovisioning requirement. 6285 * 6286 * Returns: 6287 * -Error: error indicates problem. 6288 * 1: Extent count or size has changed. 6289 * 0: No changes. 
6290 **/ 6291 static int 6292 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type) 6293 { 6294 uint16_t curr_ext_cnt, rsrc_ext_cnt; 6295 uint16_t size_diff, rsrc_ext_size; 6296 int rc = 0; 6297 struct lpfc_rsrc_blks *rsrc_entry; 6298 struct list_head *rsrc_blk_list = NULL; 6299 6300 size_diff = 0; 6301 curr_ext_cnt = 0; 6302 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 6303 &rsrc_ext_cnt, 6304 &rsrc_ext_size); 6305 if (unlikely(rc)) 6306 return -EIO; 6307 6308 switch (type) { 6309 case LPFC_RSC_TYPE_FCOE_RPI: 6310 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 6311 break; 6312 case LPFC_RSC_TYPE_FCOE_VPI: 6313 rsrc_blk_list = &phba->lpfc_vpi_blk_list; 6314 break; 6315 case LPFC_RSC_TYPE_FCOE_XRI: 6316 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 6317 break; 6318 case LPFC_RSC_TYPE_FCOE_VFI: 6319 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 6320 break; 6321 default: 6322 break; 6323 } 6324 6325 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) { 6326 curr_ext_cnt++; 6327 if (rsrc_entry->rsrc_size != rsrc_ext_size) 6328 size_diff++; 6329 } 6330 6331 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0) 6332 rc = 1; 6333 6334 return rc; 6335 } 6336 6337 /** 6338 * lpfc_sli4_cfg_post_extnts - 6339 * @phba: Pointer to HBA context object. 6340 * @extnt_cnt: number of available extents. 6341 * @type: the extent type (rpi, xri, vfi, vpi). 6342 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation. 6343 * @mbox: pointer to the caller's allocated mailbox structure. 6344 * 6345 * This function executes the extents allocation request. It also 6346 * takes care of the amount of memory needed to allocate or get the 6347 * allocated extents. It is the caller's responsibility to evaluate 6348 * the response. 6349 * 6350 * Returns: 6351 * -Error: Error value describes the condition found. 6352 * 0: if successful 6353 **/ 6354 static int 6355 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt, 6356 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox) 6357 { 6358 int rc = 0; 6359 uint32_t req_len; 6360 uint32_t emb_len; 6361 uint32_t alloc_len, mbox_tmo; 6362 6363 /* Calculate the total requested length of the dma memory */ 6364 req_len = extnt_cnt * sizeof(uint16_t); 6365 6366 /* 6367 * Calculate the size of an embedded mailbox. The uint32_t 6368 * accounts for extents-specific word. 6369 */ 6370 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 6371 sizeof(uint32_t); 6372 6373 /* 6374 * Presume the allocation and response will fit into an embedded 6375 * mailbox. If not true, reconfigure to a non-embedded mailbox. 
6376 */ 6377 *emb = LPFC_SLI4_MBX_EMBED; 6378 if (req_len > emb_len) { 6379 req_len = extnt_cnt * sizeof(uint16_t) + 6380 sizeof(union lpfc_sli4_cfg_shdr) + 6381 sizeof(uint32_t); 6382 *emb = LPFC_SLI4_MBX_NEMBED; 6383 } 6384 6385 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6386 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT, 6387 req_len, *emb); 6388 if (alloc_len < req_len) { 6389 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6390 "2982 Allocated DMA memory size (x%x) is " 6391 "less than the requested DMA memory " 6392 "size (x%x)\n", alloc_len, req_len); 6393 return -ENOMEM; 6394 } 6395 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb); 6396 if (unlikely(rc)) 6397 return -EIO; 6398 6399 if (!phba->sli4_hba.intr_enable) 6400 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 6401 else { 6402 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 6403 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 6404 } 6405 6406 if (unlikely(rc)) 6407 rc = -EIO; 6408 return rc; 6409 } 6410 6411 /** 6412 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent. 6413 * @phba: Pointer to HBA context object. 6414 * @type: The resource extent type to allocate. 6415 * 6416 * This function allocates the number of elements for the specified 6417 * resource type. 6418 **/ 6419 static int 6420 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) 6421 { 6422 bool emb = false; 6423 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size; 6424 uint16_t rsrc_id, rsrc_start, j, k; 6425 uint16_t *ids; 6426 int i, rc; 6427 unsigned long longs; 6428 unsigned long *bmask; 6429 struct lpfc_rsrc_blks *rsrc_blks; 6430 LPFC_MBOXQ_t *mbox; 6431 uint32_t length; 6432 struct lpfc_id_range *id_array = NULL; 6433 void *virtaddr = NULL; 6434 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 6435 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 6436 struct list_head *ext_blk_list; 6437 6438 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 6439 &rsrc_cnt, 6440 &rsrc_size); 6441 if (unlikely(rc)) 6442 return -EIO; 6443 6444 if ((rsrc_cnt == 0) || (rsrc_size == 0)) { 6445 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6446 "3009 No available Resource Extents " 6447 "for resource type 0x%x: Count: 0x%x, " 6448 "Size 0x%x\n", type, rsrc_cnt, 6449 rsrc_size); 6450 return -ENOMEM; 6451 } 6452 6453 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI, 6454 "2903 Post resource extents type-0x%x: " 6455 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size); 6456 6457 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6458 if (!mbox) 6459 return -ENOMEM; 6460 6461 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox); 6462 if (unlikely(rc)) { 6463 rc = -EIO; 6464 goto err_exit; 6465 } 6466 6467 /* 6468 * Figure out where the response is located. Then get local pointers 6469 * to the response data. The port does not guarantee to respond to 6470 * all extents counts request so update the local variable with the 6471 * allocated count from the port. 
6472 */ 6473 if (emb == LPFC_SLI4_MBX_EMBED) { 6474 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 6475 id_array = &rsrc_ext->u.rsp.id[0]; 6476 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 6477 } else { 6478 virtaddr = mbox->sge_array->addr[0]; 6479 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 6480 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 6481 id_array = &n_rsrc->id; 6482 } 6483 6484 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG; 6485 rsrc_id_cnt = rsrc_cnt * rsrc_size; 6486 6487 /* 6488 * Based on the resource size and count, correct the base and max 6489 * resource values. 6490 */ 6491 length = sizeof(struct lpfc_rsrc_blks); 6492 switch (type) { 6493 case LPFC_RSC_TYPE_FCOE_RPI: 6494 phba->sli4_hba.rpi_bmask = kcalloc(longs, 6495 sizeof(unsigned long), 6496 GFP_KERNEL); 6497 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 6498 rc = -ENOMEM; 6499 goto err_exit; 6500 } 6501 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt, 6502 sizeof(uint16_t), 6503 GFP_KERNEL); 6504 if (unlikely(!phba->sli4_hba.rpi_ids)) { 6505 kfree(phba->sli4_hba.rpi_bmask); 6506 rc = -ENOMEM; 6507 goto err_exit; 6508 } 6509 6510 /* 6511 * The next_rpi was initialized with the maximum available 6512 * count but the port may allocate a smaller number. Catch 6513 * that case and update the next_rpi. 6514 */ 6515 phba->sli4_hba.next_rpi = rsrc_id_cnt; 6516 6517 /* Initialize local ptrs for common extent processing later. */ 6518 bmask = phba->sli4_hba.rpi_bmask; 6519 ids = phba->sli4_hba.rpi_ids; 6520 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 6521 break; 6522 case LPFC_RSC_TYPE_FCOE_VPI: 6523 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), 6524 GFP_KERNEL); 6525 if (unlikely(!phba->vpi_bmask)) { 6526 rc = -ENOMEM; 6527 goto err_exit; 6528 } 6529 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t), 6530 GFP_KERNEL); 6531 if (unlikely(!phba->vpi_ids)) { 6532 kfree(phba->vpi_bmask); 6533 rc = -ENOMEM; 6534 goto err_exit; 6535 } 6536 6537 /* Initialize local ptrs for common extent processing later. */ 6538 bmask = phba->vpi_bmask; 6539 ids = phba->vpi_ids; 6540 ext_blk_list = &phba->lpfc_vpi_blk_list; 6541 break; 6542 case LPFC_RSC_TYPE_FCOE_XRI: 6543 phba->sli4_hba.xri_bmask = kcalloc(longs, 6544 sizeof(unsigned long), 6545 GFP_KERNEL); 6546 if (unlikely(!phba->sli4_hba.xri_bmask)) { 6547 rc = -ENOMEM; 6548 goto err_exit; 6549 } 6550 phba->sli4_hba.max_cfg_param.xri_used = 0; 6551 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt, 6552 sizeof(uint16_t), 6553 GFP_KERNEL); 6554 if (unlikely(!phba->sli4_hba.xri_ids)) { 6555 kfree(phba->sli4_hba.xri_bmask); 6556 rc = -ENOMEM; 6557 goto err_exit; 6558 } 6559 6560 /* Initialize local ptrs for common extent processing later. */ 6561 bmask = phba->sli4_hba.xri_bmask; 6562 ids = phba->sli4_hba.xri_ids; 6563 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 6564 break; 6565 case LPFC_RSC_TYPE_FCOE_VFI: 6566 phba->sli4_hba.vfi_bmask = kcalloc(longs, 6567 sizeof(unsigned long), 6568 GFP_KERNEL); 6569 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 6570 rc = -ENOMEM; 6571 goto err_exit; 6572 } 6573 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt, 6574 sizeof(uint16_t), 6575 GFP_KERNEL); 6576 if (unlikely(!phba->sli4_hba.vfi_ids)) { 6577 kfree(phba->sli4_hba.vfi_bmask); 6578 rc = -ENOMEM; 6579 goto err_exit; 6580 } 6581 6582 /* Initialize local ptrs for common extent processing later. 
*/ 6583 bmask = phba->sli4_hba.vfi_bmask; 6584 ids = phba->sli4_hba.vfi_ids; 6585 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 6586 break; 6587 default: 6588 /* Unsupported Opcode. Fail call. */ 6589 id_array = NULL; 6590 bmask = NULL; 6591 ids = NULL; 6592 ext_blk_list = NULL; 6593 goto err_exit; 6594 } 6595 6596 /* 6597 * Complete initializing the extent configuration with the 6598 * allocated ids assigned to this function. The bitmask serves 6599 * as an index into the array and manages the available ids. The 6600 * array just stores the ids communicated to the port via the wqes. 6601 */ 6602 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) { 6603 if ((i % 2) == 0) 6604 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0, 6605 &id_array[k]); 6606 else 6607 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1, 6608 &id_array[k]); 6609 6610 rsrc_blks = kzalloc(length, GFP_KERNEL); 6611 if (unlikely(!rsrc_blks)) { 6612 rc = -ENOMEM; 6613 kfree(bmask); 6614 kfree(ids); 6615 goto err_exit; 6616 } 6617 rsrc_blks->rsrc_start = rsrc_id; 6618 rsrc_blks->rsrc_size = rsrc_size; 6619 list_add_tail(&rsrc_blks->list, ext_blk_list); 6620 rsrc_start = rsrc_id; 6621 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) { 6622 phba->sli4_hba.io_xri_start = rsrc_start + 6623 lpfc_sli4_get_iocb_cnt(phba); 6624 } 6625 6626 while (rsrc_id < (rsrc_start + rsrc_size)) { 6627 ids[j] = rsrc_id; 6628 rsrc_id++; 6629 j++; 6630 } 6631 /* Entire word processed. Get next word.*/ 6632 if ((i % 2) == 1) 6633 k++; 6634 } 6635 err_exit: 6636 lpfc_sli4_mbox_cmd_free(phba, mbox); 6637 return rc; 6638 } 6639 6640 6641 6642 /** 6643 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent. 6644 * @phba: Pointer to HBA context object. 6645 * @type: the extent's type. 6646 * 6647 * This function deallocates all extents of a particular resource type. 6648 * SLI4 does not allow for deallocating a particular extent range. It 6649 * is the caller's responsibility to release all kernel memory resources. 6650 **/ 6651 static int 6652 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) 6653 { 6654 int rc; 6655 uint32_t length, mbox_tmo = 0; 6656 LPFC_MBOXQ_t *mbox; 6657 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc; 6658 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next; 6659 6660 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6661 if (!mbox) 6662 return -ENOMEM; 6663 6664 /* 6665 * This function sends an embedded mailbox because it only sends the 6666 * the resource type. All extents of this type are released by the 6667 * port. 6668 */ 6669 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) - 6670 sizeof(struct lpfc_sli4_cfg_mhdr)); 6671 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6672 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT, 6673 length, LPFC_SLI4_MBX_EMBED); 6674 6675 /* Send an extents count of 0 - the dealloc doesn't use it. 
*/ 6676 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 6677 LPFC_SLI4_MBX_EMBED); 6678 if (unlikely(rc)) { 6679 rc = -EIO; 6680 goto out_free_mbox; 6681 } 6682 if (!phba->sli4_hba.intr_enable) 6683 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 6684 else { 6685 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 6686 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 6687 } 6688 if (unlikely(rc)) { 6689 rc = -EIO; 6690 goto out_free_mbox; 6691 } 6692 6693 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents; 6694 if (bf_get(lpfc_mbox_hdr_status, 6695 &dealloc_rsrc->header.cfg_shdr.response)) { 6696 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6697 "2919 Failed to release resource extents " 6698 "for type %d - Status 0x%x Add'l Status 0x%x. " 6699 "Resource memory not released.\n", 6700 type, 6701 bf_get(lpfc_mbox_hdr_status, 6702 &dealloc_rsrc->header.cfg_shdr.response), 6703 bf_get(lpfc_mbox_hdr_add_status, 6704 &dealloc_rsrc->header.cfg_shdr.response)); 6705 rc = -EIO; 6706 goto out_free_mbox; 6707 } 6708 6709 /* Release kernel memory resources for the specific type. */ 6710 switch (type) { 6711 case LPFC_RSC_TYPE_FCOE_VPI: 6712 kfree(phba->vpi_bmask); 6713 kfree(phba->vpi_ids); 6714 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6715 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6716 &phba->lpfc_vpi_blk_list, list) { 6717 list_del_init(&rsrc_blk->list); 6718 kfree(rsrc_blk); 6719 } 6720 phba->sli4_hba.max_cfg_param.vpi_used = 0; 6721 break; 6722 case LPFC_RSC_TYPE_FCOE_XRI: 6723 kfree(phba->sli4_hba.xri_bmask); 6724 kfree(phba->sli4_hba.xri_ids); 6725 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6726 &phba->sli4_hba.lpfc_xri_blk_list, list) { 6727 list_del_init(&rsrc_blk->list); 6728 kfree(rsrc_blk); 6729 } 6730 break; 6731 case LPFC_RSC_TYPE_FCOE_VFI: 6732 kfree(phba->sli4_hba.vfi_bmask); 6733 kfree(phba->sli4_hba.vfi_ids); 6734 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6735 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6736 &phba->sli4_hba.lpfc_vfi_blk_list, list) { 6737 list_del_init(&rsrc_blk->list); 6738 kfree(rsrc_blk); 6739 } 6740 break; 6741 case LPFC_RSC_TYPE_FCOE_RPI: 6742 /* RPI bitmask and physical id array are cleaned up earlier. 
*/ 6743 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6744 &phba->sli4_hba.lpfc_rpi_blk_list, list) { 6745 list_del_init(&rsrc_blk->list); 6746 kfree(rsrc_blk); 6747 } 6748 break; 6749 default: 6750 break; 6751 } 6752 6753 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6754 6755 out_free_mbox: 6756 mempool_free(mbox, phba->mbox_mem_pool); 6757 return rc; 6758 } 6759 6760 static void 6761 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, 6762 uint32_t feature) 6763 { 6764 uint32_t len; 6765 u32 sig_freq = 0; 6766 6767 len = sizeof(struct lpfc_mbx_set_feature) - 6768 sizeof(struct lpfc_sli4_cfg_mhdr); 6769 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6770 LPFC_MBOX_OPCODE_SET_FEATURES, len, 6771 LPFC_SLI4_MBX_EMBED); 6772 6773 switch (feature) { 6774 case LPFC_SET_UE_RECOVERY: 6775 bf_set(lpfc_mbx_set_feature_UER, 6776 &mbox->u.mqe.un.set_feature, 1); 6777 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY; 6778 mbox->u.mqe.un.set_feature.param_len = 8; 6779 break; 6780 case LPFC_SET_MDS_DIAGS: 6781 bf_set(lpfc_mbx_set_feature_mds, 6782 &mbox->u.mqe.un.set_feature, 1); 6783 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk, 6784 &mbox->u.mqe.un.set_feature, 1); 6785 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS; 6786 mbox->u.mqe.un.set_feature.param_len = 8; 6787 break; 6788 case LPFC_SET_CGN_SIGNAL: 6789 if (phba->cmf_active_mode == LPFC_CFG_OFF) 6790 sig_freq = 0; 6791 else 6792 sig_freq = phba->cgn_sig_freq; 6793 6794 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { 6795 bf_set(lpfc_mbx_set_feature_CGN_alarm_freq, 6796 &mbox->u.mqe.un.set_feature, sig_freq); 6797 bf_set(lpfc_mbx_set_feature_CGN_warn_freq, 6798 &mbox->u.mqe.un.set_feature, sig_freq); 6799 } 6800 6801 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY) 6802 bf_set(lpfc_mbx_set_feature_CGN_warn_freq, 6803 &mbox->u.mqe.un.set_feature, sig_freq); 6804 6805 if (phba->cmf_active_mode == LPFC_CFG_OFF || 6806 phba->cgn_reg_signal == EDC_CG_SIG_NOTSUPPORTED) 6807 sig_freq = 0; 6808 else 6809 sig_freq = lpfc_acqe_cgn_frequency; 6810 6811 bf_set(lpfc_mbx_set_feature_CGN_acqe_freq, 6812 &mbox->u.mqe.un.set_feature, sig_freq); 6813 6814 mbox->u.mqe.un.set_feature.feature = LPFC_SET_CGN_SIGNAL; 6815 mbox->u.mqe.un.set_feature.param_len = 12; 6816 break; 6817 case LPFC_SET_DUAL_DUMP: 6818 bf_set(lpfc_mbx_set_feature_dd, 6819 &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP); 6820 bf_set(lpfc_mbx_set_feature_ddquery, 6821 &mbox->u.mqe.un.set_feature, 0); 6822 mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP; 6823 mbox->u.mqe.un.set_feature.param_len = 4; 6824 break; 6825 case LPFC_SET_ENABLE_MI: 6826 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_MI; 6827 mbox->u.mqe.un.set_feature.param_len = 4; 6828 bf_set(lpfc_mbx_set_feature_milunq, &mbox->u.mqe.un.set_feature, 6829 phba->pport->cfg_lun_queue_depth); 6830 bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature, 6831 phba->sli4_hba.pc_sli4_params.mi_ver); 6832 break; 6833 case LPFC_SET_LD_SIGNAL: 6834 mbox->u.mqe.un.set_feature.feature = LPFC_SET_LD_SIGNAL; 6835 mbox->u.mqe.un.set_feature.param_len = 16; 6836 bf_set(lpfc_mbx_set_feature_lds_qry, 6837 &mbox->u.mqe.un.set_feature, LPFC_QUERY_LDS_OP); 6838 break; 6839 case LPFC_SET_ENABLE_CMF: 6840 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF; 6841 mbox->u.mqe.un.set_feature.param_len = 4; 6842 bf_set(lpfc_mbx_set_feature_cmf, 6843 &mbox->u.mqe.un.set_feature, 1); 6844 break; 6845 } 6846 return; 6847 } 6848 6849 /** 6850 * lpfc_ras_stop_fwlog: Disable FW 
logging by the adapter 6851 * @phba: Pointer to HBA context object. 6852 * 6853 * Disable FW logging into host memory on the adapter. This must be 6854 * done before reading the logs from host memory. 6855 **/ 6856 void 6857 lpfc_ras_stop_fwlog(struct lpfc_hba *phba) 6858 { 6859 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6860 6861 spin_lock_irq(&phba->ras_fwlog_lock); 6862 ras_fwlog->state = INACTIVE; 6863 spin_unlock_irq(&phba->ras_fwlog_lock); 6864 6865 /* Disable FW logging to host memory */ 6866 writel(LPFC_CTL_PDEV_CTL_DDL_RAS, 6867 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); 6868 6869 /* Wait 10ms for firmware to stop using DMA buffer */ 6870 usleep_range(10 * 1000, 20 * 1000); 6871 } 6872 6873 /** 6874 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging. 6875 * @phba: Pointer to HBA context object. 6876 * 6877 * This function is called to free memory allocated for RAS FW logging 6878 * support in the driver. 6879 **/ 6880 void 6881 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba) 6882 { 6883 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6884 struct lpfc_dmabuf *dmabuf, *next; 6885 6886 if (!list_empty(&ras_fwlog->fwlog_buff_list)) { 6887 list_for_each_entry_safe(dmabuf, next, 6888 &ras_fwlog->fwlog_buff_list, 6889 list) { 6890 list_del(&dmabuf->list); 6891 dma_free_coherent(&phba->pcidev->dev, 6892 LPFC_RAS_MAX_ENTRY_SIZE, 6893 dmabuf->virt, dmabuf->phys); 6894 kfree(dmabuf); 6895 } 6896 } 6897 6898 if (ras_fwlog->lwpd.virt) { 6899 dma_free_coherent(&phba->pcidev->dev, 6900 sizeof(uint32_t) * 2, 6901 ras_fwlog->lwpd.virt, 6902 ras_fwlog->lwpd.phys); 6903 ras_fwlog->lwpd.virt = NULL; 6904 } 6905 6906 spin_lock_irq(&phba->ras_fwlog_lock); 6907 ras_fwlog->state = INACTIVE; 6908 spin_unlock_irq(&phba->ras_fwlog_lock); 6909 } 6910 6911 /** 6912 * lpfc_sli4_ras_dma_alloc: Allocate DMA memory for FW logging support 6913 * @phba: Pointer to HBA context object. 6914 * @fwlog_buff_count: Count of buffers to be created. 6915 * 6916 * This routine allocates DMA memory for the Log Write Position Data (LWPD) 6917 * and for the buffers that are posted to the adapter to receive the FW log. 6918 * The buffer count is calculated from the module param ras_fwlog_buffsize; 6919 * the size of each buffer posted to FW is 64K.
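 *
 * Worked example (sizes assumed for illustration): if
 * LPFC_RAS_MIN_BUFF_POST_SIZE is 256KB and ras_fwlog_buffsize is 2,
 * the total log size is 512KB, so 512KB / 64KB = 8 buffers are
 * allocated and posted to the adapter.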
6920 **/ 6921 6922 static int 6923 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba, 6924 uint32_t fwlog_buff_count) 6925 { 6926 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6927 struct lpfc_dmabuf *dmabuf; 6928 int rc = 0, i = 0; 6929 6930 /* Initialize List */ 6931 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list); 6932 6933 /* Allocate memory for the LWPD */ 6934 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev, 6935 sizeof(uint32_t) * 2, 6936 &ras_fwlog->lwpd.phys, 6937 GFP_KERNEL); 6938 if (!ras_fwlog->lwpd.virt) { 6939 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6940 "6185 LWPD Memory Alloc Failed\n"); 6941 6942 return -ENOMEM; 6943 } 6944 6945 ras_fwlog->fw_buffcount = fwlog_buff_count; 6946 for (i = 0; i < ras_fwlog->fw_buffcount; i++) { 6947 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 6948 GFP_KERNEL); 6949 if (!dmabuf) { 6950 rc = -ENOMEM; 6951 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6952 "6186 Memory Alloc failed FW logging"); 6953 goto free_mem; 6954 } 6955 6956 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 6957 LPFC_RAS_MAX_ENTRY_SIZE, 6958 &dmabuf->phys, GFP_KERNEL); 6959 if (!dmabuf->virt) { 6960 kfree(dmabuf); 6961 rc = -ENOMEM; 6962 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6963 "6187 DMA Alloc Failed FW logging"); 6964 goto free_mem; 6965 } 6966 dmabuf->buffer_tag = i; 6967 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list); 6968 } 6969 6970 free_mem: 6971 if (rc) 6972 lpfc_sli4_ras_dma_free(phba); 6973 6974 return rc; 6975 } 6976 6977 /** 6978 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command 6979 * @phba: pointer to lpfc hba data structure. 6980 * @pmb: pointer to the driver internal queue element for mailbox command. 6981 * 6982 * Completion handler for driver's RAS MBX command to the device. 6983 **/ 6984 static void 6985 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 6986 { 6987 MAILBOX_t *mb; 6988 union lpfc_sli4_cfg_shdr *shdr; 6989 uint32_t shdr_status, shdr_add_status; 6990 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6991 6992 mb = &pmb->u.mb; 6993 6994 shdr = (union lpfc_sli4_cfg_shdr *) 6995 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr; 6996 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6997 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6998 6999 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) { 7000 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7001 "6188 FW LOG mailbox " 7002 "completed with status x%x add_status x%x," 7003 " mbx status x%x\n", 7004 shdr_status, shdr_add_status, mb->mbxStatus); 7005 7006 ras_fwlog->ras_hwsupport = false; 7007 goto disable_ras; 7008 } 7009 7010 spin_lock_irq(&phba->ras_fwlog_lock); 7011 ras_fwlog->state = ACTIVE; 7012 spin_unlock_irq(&phba->ras_fwlog_lock); 7013 mempool_free(pmb, phba->mbox_mem_pool); 7014 7015 return; 7016 7017 disable_ras: 7018 /* Free RAS DMA memory */ 7019 lpfc_sli4_ras_dma_free(phba); 7020 mempool_free(pmb, phba->mbox_mem_pool); 7021 } 7022 7023 /** 7024 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command 7025 * @phba: pointer to lpfc hba data structure. 7026 * @fwlog_level: Logging verbosity level. 7027 * @fwlog_enable: Enable/Disable logging. 7028 * 7029 * Initialize memory and post mailbox command to enable FW logging in host 7030 * memory. 
7031 **/ 7032 int 7033 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, 7034 uint32_t fwlog_level, 7035 uint32_t fwlog_enable) 7036 { 7037 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 7038 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL; 7039 struct lpfc_dmabuf *dmabuf; 7040 LPFC_MBOXQ_t *mbox; 7041 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count; 7042 int rc = 0; 7043 7044 spin_lock_irq(&phba->ras_fwlog_lock); 7045 ras_fwlog->state = INACTIVE; 7046 spin_unlock_irq(&phba->ras_fwlog_lock); 7047 7048 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE * 7049 phba->cfg_ras_fwlog_buffsize); 7050 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE); 7051 7052 /* 7053 * If re-enabling FW logging support use earlier allocated 7054 * DMA buffers while posting MBX command. 7055 **/ 7056 if (!ras_fwlog->lwpd.virt) { 7057 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count); 7058 if (rc) { 7059 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7060 "6189 FW Log Memory Allocation Failed"); 7061 return rc; 7062 } 7063 } 7064 7065 /* Setup Mailbox command */ 7066 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7067 if (!mbox) { 7068 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7069 "6190 RAS MBX Alloc Failed"); 7070 rc = -ENOMEM; 7071 goto mem_free; 7072 } 7073 7074 ras_fwlog->fw_loglevel = fwlog_level; 7075 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) - 7076 sizeof(struct lpfc_sli4_cfg_mhdr)); 7077 7078 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL, 7079 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION, 7080 len, LPFC_SLI4_MBX_EMBED); 7081 7082 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog; 7083 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request, 7084 fwlog_enable); 7085 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request, 7086 ras_fwlog->fw_loglevel); 7087 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request, 7088 ras_fwlog->fw_buffcount); 7089 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request, 7090 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE); 7091 7092 /* Update DMA buffer address */ 7093 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) { 7094 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE); 7095 7096 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo = 7097 putPaddrLow(dmabuf->phys); 7098 7099 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi = 7100 putPaddrHigh(dmabuf->phys); 7101 } 7102 7103 /* Update LPWD address */ 7104 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys); 7105 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys); 7106 7107 spin_lock_irq(&phba->ras_fwlog_lock); 7108 ras_fwlog->state = REG_INPROGRESS; 7109 spin_unlock_irq(&phba->ras_fwlog_lock); 7110 mbox->vport = phba->pport; 7111 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl; 7112 7113 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7114 7115 if (rc == MBX_NOT_FINISHED) { 7116 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7117 "6191 FW-Log Mailbox failed. " 7118 "status %d mbxStatus : x%x", rc, 7119 bf_get(lpfc_mqe_status, &mbox->u.mqe)); 7120 mempool_free(mbox, phba->mbox_mem_pool); 7121 rc = -EIO; 7122 goto mem_free; 7123 } else 7124 rc = 0; 7125 mem_free: 7126 if (rc) 7127 lpfc_sli4_ras_dma_free(phba); 7128 7129 return rc; 7130 } 7131 7132 /** 7133 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter 7134 * @phba: Pointer to HBA context object. 7135 * 7136 * Check if RAS is supported on the adapter and initialize it. 
7137 **/ 7138 void 7139 lpfc_sli4_ras_setup(struct lpfc_hba *phba) 7140 { 7141 /* Check RAS FW Log needs to be enabled or not */ 7142 if (lpfc_check_fwlog_support(phba)) 7143 return; 7144 7145 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level, 7146 LPFC_RAS_ENABLE_LOGGING); 7147 } 7148 7149 /** 7150 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents. 7151 * @phba: Pointer to HBA context object. 7152 * 7153 * This function allocates all SLI4 resource identifiers. 7154 **/ 7155 int 7156 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) 7157 { 7158 int i, rc, error = 0; 7159 uint16_t count, base; 7160 unsigned long longs; 7161 7162 if (!phba->sli4_hba.rpi_hdrs_in_use) 7163 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 7164 if (phba->sli4_hba.extents_in_use) { 7165 /* 7166 * The port supports resource extents. The XRI, VPI, VFI, RPI 7167 * resource extent count must be read and allocated before 7168 * provisioning the resource id arrays. 7169 */ 7170 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 7171 LPFC_IDX_RSRC_RDY) { 7172 /* 7173 * Extent-based resources are set - the driver could 7174 * be in a port reset. Figure out if any corrective 7175 * actions need to be taken. 7176 */ 7177 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 7178 LPFC_RSC_TYPE_FCOE_VFI); 7179 if (rc != 0) 7180 error++; 7181 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 7182 LPFC_RSC_TYPE_FCOE_VPI); 7183 if (rc != 0) 7184 error++; 7185 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 7186 LPFC_RSC_TYPE_FCOE_XRI); 7187 if (rc != 0) 7188 error++; 7189 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 7190 LPFC_RSC_TYPE_FCOE_RPI); 7191 if (rc != 0) 7192 error++; 7193 7194 /* 7195 * It's possible that the number of resources 7196 * provided to this port instance changed between 7197 * resets. Detect this condition and reallocate 7198 * resources. Otherwise, there is no action. 7199 */ 7200 if (error) { 7201 lpfc_printf_log(phba, KERN_INFO, 7202 LOG_MBOX | LOG_INIT, 7203 "2931 Detected extent resource " 7204 "change. Reallocating all " 7205 "extents.\n"); 7206 rc = lpfc_sli4_dealloc_extent(phba, 7207 LPFC_RSC_TYPE_FCOE_VFI); 7208 rc = lpfc_sli4_dealloc_extent(phba, 7209 LPFC_RSC_TYPE_FCOE_VPI); 7210 rc = lpfc_sli4_dealloc_extent(phba, 7211 LPFC_RSC_TYPE_FCOE_XRI); 7212 rc = lpfc_sli4_dealloc_extent(phba, 7213 LPFC_RSC_TYPE_FCOE_RPI); 7214 } else 7215 return 0; 7216 } 7217 7218 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 7219 if (unlikely(rc)) 7220 goto err_exit; 7221 7222 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 7223 if (unlikely(rc)) 7224 goto err_exit; 7225 7226 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 7227 if (unlikely(rc)) 7228 goto err_exit; 7229 7230 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 7231 if (unlikely(rc)) 7232 goto err_exit; 7233 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 7234 LPFC_IDX_RSRC_RDY); 7235 return rc; 7236 } else { 7237 /* 7238 * The port does not support resource extents. The XRI, VPI, 7239 * VFI, RPI resource ids were determined from READ_CONFIG. 7240 * Just allocate the bitmasks and provision the resource id 7241 * arrays. If a port reset is active, the resources don't 7242 * need any action - just exit. 7243 */ 7244 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 7245 LPFC_IDX_RSRC_RDY) { 7246 lpfc_sli4_dealloc_resource_identifiers(phba); 7247 lpfc_sli4_remove_rpis(phba); 7248 } 7249 /* RPIs. 
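	 * READ_CONFIG reported this function's RPI count and base; the ids
	 * are provisioned sequentially below as rpi_ids[i] = base + i, with
	 * the bitmask tracking which ids are currently in use.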
*/ 7250 count = phba->sli4_hba.max_cfg_param.max_rpi; 7251 if (count <= 0) { 7252 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7253 "3279 Invalid provisioning of " 7254 "rpi:%d\n", count); 7255 rc = -EINVAL; 7256 goto err_exit; 7257 } 7258 base = phba->sli4_hba.max_cfg_param.rpi_base; 7259 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 7260 phba->sli4_hba.rpi_bmask = kcalloc(longs, 7261 sizeof(unsigned long), 7262 GFP_KERNEL); 7263 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 7264 rc = -ENOMEM; 7265 goto err_exit; 7266 } 7267 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t), 7268 GFP_KERNEL); 7269 if (unlikely(!phba->sli4_hba.rpi_ids)) { 7270 rc = -ENOMEM; 7271 goto free_rpi_bmask; 7272 } 7273 7274 for (i = 0; i < count; i++) 7275 phba->sli4_hba.rpi_ids[i] = base + i; 7276 7277 /* VPIs. */ 7278 count = phba->sli4_hba.max_cfg_param.max_vpi; 7279 if (count <= 0) { 7280 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7281 "3280 Invalid provisioning of " 7282 "vpi:%d\n", count); 7283 rc = -EINVAL; 7284 goto free_rpi_ids; 7285 } 7286 base = phba->sli4_hba.max_cfg_param.vpi_base; 7287 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 7288 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), 7289 GFP_KERNEL); 7290 if (unlikely(!phba->vpi_bmask)) { 7291 rc = -ENOMEM; 7292 goto free_rpi_ids; 7293 } 7294 phba->vpi_ids = kcalloc(count, sizeof(uint16_t), 7295 GFP_KERNEL); 7296 if (unlikely(!phba->vpi_ids)) { 7297 rc = -ENOMEM; 7298 goto free_vpi_bmask; 7299 } 7300 7301 for (i = 0; i < count; i++) 7302 phba->vpi_ids[i] = base + i; 7303 7304 /* XRIs. */ 7305 count = phba->sli4_hba.max_cfg_param.max_xri; 7306 if (count <= 0) { 7307 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7308 "3281 Invalid provisioning of " 7309 "xri:%d\n", count); 7310 rc = -EINVAL; 7311 goto free_vpi_ids; 7312 } 7313 base = phba->sli4_hba.max_cfg_param.xri_base; 7314 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 7315 phba->sli4_hba.xri_bmask = kcalloc(longs, 7316 sizeof(unsigned long), 7317 GFP_KERNEL); 7318 if (unlikely(!phba->sli4_hba.xri_bmask)) { 7319 rc = -ENOMEM; 7320 goto free_vpi_ids; 7321 } 7322 phba->sli4_hba.max_cfg_param.xri_used = 0; 7323 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t), 7324 GFP_KERNEL); 7325 if (unlikely(!phba->sli4_hba.xri_ids)) { 7326 rc = -ENOMEM; 7327 goto free_xri_bmask; 7328 } 7329 7330 for (i = 0; i < count; i++) 7331 phba->sli4_hba.xri_ids[i] = base + i; 7332 7333 /* VFIs. */ 7334 count = phba->sli4_hba.max_cfg_param.max_vfi; 7335 if (count <= 0) { 7336 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7337 "3282 Invalid provisioning of " 7338 "vfi:%d\n", count); 7339 rc = -EINVAL; 7340 goto free_xri_ids; 7341 } 7342 base = phba->sli4_hba.max_cfg_param.vfi_base; 7343 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 7344 phba->sli4_hba.vfi_bmask = kcalloc(longs, 7345 sizeof(unsigned long), 7346 GFP_KERNEL); 7347 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 7348 rc = -ENOMEM; 7349 goto free_xri_ids; 7350 } 7351 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t), 7352 GFP_KERNEL); 7353 if (unlikely(!phba->sli4_hba.vfi_ids)) { 7354 rc = -ENOMEM; 7355 goto free_vfi_bmask; 7356 } 7357 7358 for (i = 0; i < count; i++) 7359 phba->sli4_hba.vfi_ids[i] = base + i; 7360 7361 /* 7362 * Mark all resources ready. An HBA reset doesn't need 7363 * to reset the initialization. 
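		 * A later pass through this routine (e.g. after an HBA
		 * reset) frees these arrays via the dealloc call at the top
		 * of this branch before re-provisioning, so stale id tables
		 * never persist across resets.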
		 */
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
		       LPFC_IDX_RSRC_RDY);
		return 0;
	}

free_vfi_bmask:
	kfree(phba->sli4_hba.vfi_bmask);
	phba->sli4_hba.vfi_bmask = NULL;
free_xri_ids:
	kfree(phba->sli4_hba.xri_ids);
	phba->sli4_hba.xri_ids = NULL;
free_xri_bmask:
	kfree(phba->sli4_hba.xri_bmask);
	phba->sli4_hba.xri_bmask = NULL;
free_vpi_ids:
	kfree(phba->vpi_ids);
	phba->vpi_ids = NULL;
free_vpi_bmask:
	kfree(phba->vpi_bmask);
	phba->vpi_bmask = NULL;
free_rpi_ids:
	kfree(phba->sli4_hba.rpi_ids);
	phba->sli4_hba.rpi_ids = NULL;
free_rpi_bmask:
	kfree(phba->sli4_hba.rpi_bmask);
	phba->sli4_hba.rpi_bmask = NULL;
err_exit:
	return rc;
}

/**
 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 *
 * This function frees all SLI4 resource identifiers: the allocated
 * resource extents if the port uses extents, otherwise the id arrays
 * and bitmasks provisioned from READ_CONFIG.
 **/
int
lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
{
	if (phba->sli4_hba.extents_in_use) {
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
	} else {
		kfree(phba->vpi_bmask);
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		kfree(phba->vpi_ids);
		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		kfree(phba->sli4_hba.xri_bmask);
		kfree(phba->sli4_hba.xri_ids);
		kfree(phba->sli4_hba.vfi_bmask);
		kfree(phba->sli4_hba.vfi_ids);
		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
	}

	return 0;
}

/**
 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_cnt: buffer to hold port extent count response
 * @extnt_size: buffer to hold port extent size response.
 *
 * This function calls the port to read the host allocated extents
 * for a particular type.
 **/
int
lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
			       uint16_t *extnt_cnt, uint16_t *extnt_size)
{
	bool emb;
	int rc = 0;
	uint16_t curr_blks = 0;
	uint32_t req_len, emb_len;
	uint32_t alloc_len, mbox_tmo;
	struct list_head *blk_list_head;
	struct lpfc_rsrc_blks *rsrc_blk;
	LPFC_MBOXQ_t *mbox;
	void *virtaddr = NULL;
	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
	union lpfc_sli4_cfg_shdr *shdr;

	switch (type) {
	case LPFC_RSC_TYPE_FCOE_VPI:
		blk_list_head = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_RPI:
		blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	default:
		return -EIO;
	}

	/* Count the number of extents currently allocated for this type.
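	 * The driver tracks each allocated extent as an lpfc_rsrc_blks node,
	 * so walking this list yields the block count that the mailbox
	 * request below must be sized for.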
	 */
	list_for_each_entry(rsrc_blk, blk_list_head, list) {
		if (curr_blks == 0) {
			/*
			 * The GET_ALLOCATED mailbox does not return the size,
			 * just the count. The size should be just the size
			 * stored in the current allocated block and all sizes
			 * for an extent type are the same, so set the return
			 * value now.
			 */
			*extnt_size = rsrc_blk->rsrc_size;
		}
		curr_blks++;
	}

	/*
	 * Calculate the size of an embedded mailbox. The uint32_t
	 * accounts for the extents-specific word.
	 */
	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
		sizeof(uint32_t);

	/*
	 * Presume the allocation and response will fit into an embedded
	 * mailbox. If not true, reconfigure to a non-embedded mailbox.
	 */
	emb = LPFC_SLI4_MBX_EMBED;
	req_len = curr_blks * sizeof(uint16_t);
	if (req_len > emb_len) {
		req_len = curr_blks * sizeof(uint16_t) +
			sizeof(union lpfc_sli4_cfg_shdr) +
			sizeof(uint32_t);
		emb = LPFC_SLI4_MBX_NEMBED;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	memset(mbox, 0, sizeof(LPFC_MBOXQ_t));

	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
				     LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
				     req_len, emb);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2983 Allocated DMA memory size (x%x) is "
				"less than the requested DMA memory "
				"size (x%x)\n", alloc_len, req_len);
		rc = -ENOMEM;
		goto err_exit;
	}
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	if (!phba->sli4_hba.intr_enable) {
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	} else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}

	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	/*
	 * Figure out where the response is located. Then get local pointers
	 * to the response data. The port does not guarantee to respond to
	 * all extent count requests, so update the local variable with the
	 * allocated count from the port.
	 */
	if (emb == LPFC_SLI4_MBX_EMBED) {
		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
		shdr = &rsrc_ext->header.cfg_shdr;
		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
	} else {
		virtaddr = mbox->sge_array->addr[0];
		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *)virtaddr;
		shdr = &n_rsrc->cfg_shdr;
		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
	}

	if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2984 Failed to read allocated resources "
				"for type %d - Status 0x%x Add'l Status 0x%x.\n",
				type,
				bf_get(lpfc_mbox_hdr_status, &shdr->response),
				bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
		rc = -EIO;
		goto err_exit;
	}
err_exit:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return rc;
}

/**
 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
 * @phba: pointer to lpfc hba data structure.
 * @sgl_list: linked list of sgl buffers to post
 * @cnt: number of linked list buffers
 *
 * This routine walks the list of buffers that have been allocated and
 * reposts them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. It attempts to construct blocks
 * of buffer sgls which contain contiguous xris and uses the non-embedded
 * SGL block post mailbox commands to post them to the port. Any single
 * buffer sgl with a non-contiguous xri is posted with the embedded SGL
 * post mailbox command instead.
 *
 * Returns: 0 = success, non-zero failure.
 **/
static int
lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
			  struct list_head *sgl_list, int cnt)
{
	struct lpfc_sglq *sglq_entry = NULL;
	struct lpfc_sglq *sglq_entry_next = NULL;
	struct lpfc_sglq *sglq_entry_first = NULL;
	int status = 0, total_cnt;
	int post_cnt = 0, num_posted = 0, block_cnt = 0;
	int last_xritag = NO_XRI;
	LIST_HEAD(prep_sgl_list);
	LIST_HEAD(blck_sgl_list);
	LIST_HEAD(allc_sgl_list);
	LIST_HEAD(post_sgl_list);
	LIST_HEAD(free_sgl_list);

	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(sgl_list, &allc_sgl_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	total_cnt = cnt;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &allc_sgl_list, list) {
		list_del_init(&sglq_entry->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (sglq_entry->sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_sgl_list, &blck_sgl_list);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posted++;

		/* keep track of last sgl's xritag */
		last_xritag = sglq_entry->sli4_xritag;

		/* end of repost sgl list condition for buffers */
		if (num_posted == total_cnt) {
			if (post_cnt == 0) {
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				status = lpfc_sli4_post_sgl(phba,
						sglq_entry->phys, 0,
						sglq_entry->sli4_xritag);
				if (!status) {
					/* successful, put sgl to posted list */
					list_add_tail(&sglq_entry->list,
						      &post_sgl_list);
				} else {
					/* Failure, put sgl to free list */
					lpfc_printf_log(phba, KERN_WARNING,
							LOG_SLI,
							"3159 Failed to post "
							"sgl, xritag:x%x\n",
							sglq_entry->sli4_xritag);
					list_add_tail(&sglq_entry->list,
						      &free_sgl_list);
					total_cnt--;
				}
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post the buffer list sgls as a block */
		status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
						 post_cnt);

		if (!status) {
			/* success, put sgl list to posted sgl list */
			list_splice_init(&blck_sgl_list, &post_sgl_list);
		} else {
			/* Failure, put sgl list to free sgl list */
			sglq_entry_first = list_first_entry(&blck_sgl_list,
							    struct lpfc_sglq,
							    list);
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3160 Failed to post sgl-list, "
"xritag:x%x-x%x\n", 7685 sglq_entry_first->sli4_xritag, 7686 (sglq_entry_first->sli4_xritag + 7687 post_cnt - 1)); 7688 list_splice_init(&blck_sgl_list, &free_sgl_list); 7689 total_cnt -= post_cnt; 7690 } 7691 7692 /* don't reset xirtag due to hole in xri block */ 7693 if (block_cnt == 0) 7694 last_xritag = NO_XRI; 7695 7696 /* reset sgl post count for next round of posting */ 7697 post_cnt = 0; 7698 } 7699 7700 /* free the sgls failed to post */ 7701 lpfc_free_sgl_list(phba, &free_sgl_list); 7702 7703 /* push sgls posted to the available list */ 7704 if (!list_empty(&post_sgl_list)) { 7705 spin_lock_irq(&phba->hbalock); 7706 spin_lock(&phba->sli4_hba.sgl_list_lock); 7707 list_splice_init(&post_sgl_list, sgl_list); 7708 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7709 spin_unlock_irq(&phba->hbalock); 7710 } else { 7711 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7712 "3161 Failure to post sgl to port,status %x " 7713 "blkcnt %d totalcnt %d postcnt %d\n", 7714 status, block_cnt, total_cnt, post_cnt); 7715 return -EIO; 7716 } 7717 7718 /* return the number of XRIs actually posted */ 7719 return total_cnt; 7720 } 7721 7722 /** 7723 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls 7724 * @phba: pointer to lpfc hba data structure. 7725 * 7726 * This routine walks the list of nvme buffers that have been allocated and 7727 * repost them to the port by using SGL block post. This is needed after a 7728 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine 7729 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list 7730 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers. 7731 * 7732 * Returns: 0 = success, non-zero failure. 7733 **/ 7734 static int 7735 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba) 7736 { 7737 LIST_HEAD(post_nblist); 7738 int num_posted, rc = 0; 7739 7740 /* get all NVME buffers need to repost to a local list */ 7741 lpfc_io_buf_flush(phba, &post_nblist); 7742 7743 /* post the list of nvme buffer sgls to port if available */ 7744 if (!list_empty(&post_nblist)) { 7745 num_posted = lpfc_sli4_post_io_sgl_list( 7746 phba, &post_nblist, phba->sli4_hba.io_xri_cnt); 7747 /* failed to post any nvme buffer, return error */ 7748 if (num_posted == 0) 7749 rc = -EIO; 7750 } 7751 return rc; 7752 } 7753 7754 static void 7755 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 7756 { 7757 uint32_t len; 7758 7759 len = sizeof(struct lpfc_mbx_set_host_data) - 7760 sizeof(struct lpfc_sli4_cfg_mhdr); 7761 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 7762 LPFC_MBOX_OPCODE_SET_HOST_DATA, len, 7763 LPFC_SLI4_MBX_EMBED); 7764 7765 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION; 7766 mbox->u.mqe.un.set_host_data.param_len = 7767 LPFC_HOST_OS_DRIVER_VERSION_SIZE; 7768 snprintf(mbox->u.mqe.un.set_host_data.un.data, 7769 LPFC_HOST_OS_DRIVER_VERSION_SIZE, 7770 "Linux %s v"LPFC_DRIVER_VERSION, 7771 test_bit(HBA_FCOE_MODE, &phba->hba_flag) ? 
"FCoE" : "FC"); 7772 } 7773 7774 int 7775 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, 7776 struct lpfc_queue *drq, int count, int idx) 7777 { 7778 int rc, i; 7779 struct lpfc_rqe hrqe; 7780 struct lpfc_rqe drqe; 7781 struct lpfc_rqb *rqbp; 7782 unsigned long flags; 7783 struct rqb_dmabuf *rqb_buffer; 7784 LIST_HEAD(rqb_buf_list); 7785 7786 rqbp = hrq->rqbp; 7787 for (i = 0; i < count; i++) { 7788 spin_lock_irqsave(&phba->hbalock, flags); 7789 /* IF RQ is already full, don't bother */ 7790 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) { 7791 spin_unlock_irqrestore(&phba->hbalock, flags); 7792 break; 7793 } 7794 spin_unlock_irqrestore(&phba->hbalock, flags); 7795 7796 rqb_buffer = rqbp->rqb_alloc_buffer(phba); 7797 if (!rqb_buffer) 7798 break; 7799 rqb_buffer->hrq = hrq; 7800 rqb_buffer->drq = drq; 7801 rqb_buffer->idx = idx; 7802 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list); 7803 } 7804 7805 spin_lock_irqsave(&phba->hbalock, flags); 7806 while (!list_empty(&rqb_buf_list)) { 7807 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf, 7808 hbuf.list); 7809 7810 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys); 7811 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys); 7812 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys); 7813 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys); 7814 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); 7815 if (rc < 0) { 7816 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7817 "6421 Cannot post to HRQ %d: %x %x %x " 7818 "DRQ %x %x\n", 7819 hrq->queue_id, 7820 hrq->host_index, 7821 hrq->hba_index, 7822 hrq->entry_count, 7823 drq->host_index, 7824 drq->hba_index); 7825 rqbp->rqb_free_buffer(phba, rqb_buffer); 7826 } else { 7827 list_add_tail(&rqb_buffer->hbuf.list, 7828 &rqbp->rqb_buffer_list); 7829 rqbp->buffer_count++; 7830 } 7831 } 7832 spin_unlock_irqrestore(&phba->hbalock, flags); 7833 return 1; 7834 } 7835 7836 static void 7837 lpfc_mbx_cmpl_read_lds_params(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 7838 { 7839 union lpfc_sli4_cfg_shdr *shdr; 7840 u32 shdr_status, shdr_add_status; 7841 7842 shdr = (union lpfc_sli4_cfg_shdr *) 7843 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 7844 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7845 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7846 if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) { 7847 lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT | LOG_MBOX, 7848 "4622 SET_FEATURE (x%x) mbox failed, " 7849 "status x%x add_status x%x, mbx status x%x\n", 7850 LPFC_SET_LD_SIGNAL, shdr_status, 7851 shdr_add_status, pmb->u.mb.mbxStatus); 7852 phba->degrade_activate_threshold = 0; 7853 phba->degrade_deactivate_threshold = 0; 7854 phba->fec_degrade_interval = 0; 7855 goto out; 7856 } 7857 7858 phba->degrade_activate_threshold = pmb->u.mqe.un.set_feature.word7; 7859 phba->degrade_deactivate_threshold = pmb->u.mqe.un.set_feature.word8; 7860 phba->fec_degrade_interval = pmb->u.mqe.un.set_feature.word10; 7861 7862 lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT, 7863 "4624 Success: da x%x dd x%x interval x%x\n", 7864 phba->degrade_activate_threshold, 7865 phba->degrade_deactivate_threshold, 7866 phba->fec_degrade_interval); 7867 out: 7868 mempool_free(pmb, phba->mbox_mem_pool); 7869 } 7870 7871 int 7872 lpfc_read_lds_params(struct lpfc_hba *phba) 7873 { 7874 LPFC_MBOXQ_t *mboxq; 7875 int rc; 7876 7877 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7878 if (!mboxq) 7879 return -ENOMEM; 7880 7881 
	lpfc_set_features(phba, mboxq, LPFC_SET_LD_SIGNAL);
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_lds_params;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}

static void
lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	union lpfc_sli4_cfg_shdr *shdr;
	u32 shdr_status, shdr_add_status;
	u32 sig, acqe;

	/* Two outcomes. (1) Set features was successful and EDC negotiation
	 * is done. (2) The mailbox failed and we fall back to FPIN support
	 * only.
	 */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
				"2516 CGN SET_FEATURE mbox failed with "
				"status x%x add_status x%x, mbx status x%x "
				"Reset Congestion to FPINs only\n",
				shdr_status, shdr_add_status,
				pmb->u.mb.mbxStatus);
		/* If there is a mbox error, move on to RDF */
		phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
		phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
		goto out;
	}

	/* Zero out Congestion Signal ACQE counter */
	phba->cgn_acqe_cnt = 0;

	acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq,
		      &pmb->u.mqe.un.set_feature);
	sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq,
		     &pmb->u.mqe.un.set_feature);
	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"4620 SET_FEATURES Success: Freq: %ds %dms "
			"Reg: x%x x%x\n", acqe, sig,
			phba->cgn_reg_signal, phba->cgn_reg_fpin);
out:
	mempool_free(pmb, phba->mbox_mem_pool);

	/* Register for FPIN events from the fabric now that the
	 * EDC common_set_features has completed.
	 */
	lpfc_issue_els_rdf(vport, 0);
}

int
lpfc_config_cgn_signal(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	u32 rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		goto out_rdf;

	lpfc_set_features(phba, mboxq, LPFC_SET_CGN_SIGNAL);
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs;

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"4621 SET_FEATURES: FREQ sig x%x acqe x%x: "
			"Reg: x%x x%x\n",
			phba->cgn_sig_freq, lpfc_acqe_cgn_frequency,
			phba->cgn_reg_signal, phba->cgn_reg_fpin);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		goto out;
	return 0;

out:
	mempool_free(mboxq, phba->mbox_mem_pool);
out_rdf:
	/* If there is a mbox error, move on to RDF */
	phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
	phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
	lpfc_issue_els_rdf(phba->pport, 0);
	return -EIO;
}

/**
 * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the per-eq idle_stat to dynamically dictate
 * polling decisions.
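 *
 * Each EQ's primary CPU seeds a prev_idle/prev_wall baseline from
 * get_cpu_idle_time(); the idle_stat_delay worker later compares idle and
 * wall-clock deltas against these samples when choosing between
 * LPFC_QUEUE_WORK and LPFC_THREADED_IRQ completion handling.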
 *
 * Return codes:
 *   None
 **/
static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
{
	int i;
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_queue *eq;
	struct lpfc_idle_stat *idle_stat;
	u64 wall;

	for_each_present_cpu(i) {
		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
		eq = hdwq->hba_eq;

		/* Skip if we've already handled this eq's primary CPU */
		if (eq->chann != i)
			continue;

		idle_stat = &phba->sli4_hba.idle_stat[i];

		idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
		idle_stat->prev_wall = wall;

		if (phba->nvmet_support ||
		    phba->cmf_active_mode != LPFC_CFG_OFF ||
		    phba->intr_type != MSIX)
			eq->poll_mode = LPFC_QUEUE_WORK;
		else
			eq->poll_mode = LPFC_THREADED_IRQ;
	}

	if (!phba->nvmet_support && phba->intr_type == MSIX)
		schedule_delayed_work(&phba->idle_stat_delay_work,
				      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}

static void lpfc_sli4_dip(struct lpfc_hba *phba)
{
	uint32_t if_type;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
	    if_type == LPFC_SLI_INTF_IF_TYPE_6) {
		struct lpfc_register reg_data;

		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			       &reg_data.word0))
			return;

		if (bf_get(lpfc_sliport_status_dip, &reg_data))
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2904 Firmware Dump Image Present"
					" on Adapter\n");
	}
}

/**
 * lpfc_rx_monitor_create_ring - Initialize ring buffer for rx_monitor
 * @rx_monitor: Pointer to lpfc_rx_info_monitor object
 * @entries: Number of rx_info_entry objects to allocate in ring
 *
 * Return:
 *   0 - Success
 *   -ENOMEM - Failure to allocate the ring
 **/
int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
				u32 entries)
{
	rx_monitor->ring = kmalloc_array(entries, sizeof(struct rx_info_entry),
					 GFP_KERNEL);
	if (!rx_monitor->ring)
		return -ENOMEM;

	rx_monitor->head_idx = 0;
	rx_monitor->tail_idx = 0;
	spin_lock_init(&rx_monitor->lock);
	rx_monitor->entries = entries;

	return 0;
}

/**
 * lpfc_rx_monitor_destroy_ring - Free ring buffer for rx_monitor
 * @rx_monitor: Pointer to lpfc_rx_info_monitor object
 *
 * Called after cancellation of cmf_timer.
 **/
void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor)
{
	kfree(rx_monitor->ring);
	rx_monitor->ring = NULL;
	rx_monitor->entries = 0;
	rx_monitor->head_idx = 0;
	rx_monitor->tail_idx = 0;
}

/**
 * lpfc_rx_monitor_record - Insert an entry into rx_monitor's ring
 * @rx_monitor: Pointer to lpfc_rx_info_monitor object
 * @entry: Pointer to rx_info_entry
 *
 * Used to insert an rx_info_entry into rx_monitor's ring. Note that this
 * makes a deep copy of the rx_info_entry, not a shallow copy of the pointer.
 *
 * This is called from lpfc_cmf_timer, which is in timer/softirq context.
 *
 * In cases of old data overflow, we do a best effort of FIFO order.
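 *
 * A minimal usage sketch (illustrative only; callers fill in whichever
 * rx_info_entry fields they track):
 *
 *	struct rx_info_entry entry = {0};
 *
 *	entry.total_bytes = total;
 *	entry.cmf_bytes = cmf;
 *	lpfc_rx_monitor_record(phba->rx_monitor, &entry);
 *
 * The ring keeps its own copy, so the caller's entry may live on the
 * stack and be reused immediately after the call returns.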
 **/
void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor,
			    struct rx_info_entry *entry)
{
	struct rx_info_entry *ring = rx_monitor->ring;
	u32 *head_idx = &rx_monitor->head_idx;
	u32 *tail_idx = &rx_monitor->tail_idx;
	spinlock_t *ring_lock = &rx_monitor->lock;
	u32 ring_size = rx_monitor->entries;

	spin_lock(ring_lock);
	memcpy(&ring[*tail_idx], entry, sizeof(*entry));
	*tail_idx = (*tail_idx + 1) % ring_size;

	/* Best effort of FIFO saved data */
	if (*tail_idx == *head_idx)
		*head_idx = (*head_idx + 1) % ring_size;

	spin_unlock(ring_lock);
}

/**
 * lpfc_rx_monitor_report - Read out rx_monitor's ring
 * @phba: Pointer to lpfc_hba object
 * @rx_monitor: Pointer to lpfc_rx_info_monitor object
 * @buf: Pointer to char buffer that will contain rx monitor info data
 * @buf_len: Length of buf, including the terminating NUL
 * @max_read_entries: Maximum number of entries to read out of ring
 *
 * Used to dump/read what's in rx_monitor's ring buffer.
 *
 * If buf is NULL || buf_len == 0, then it is implied that we want to log the
 * information to kmsg instead of filling out buf.
 *
 * Return:
 *   Number of entries read out of the ring
 **/
u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
			   struct lpfc_rx_info_monitor *rx_monitor, char *buf,
			   u32 buf_len, u32 max_read_entries)
{
	struct rx_info_entry *ring = rx_monitor->ring;
	struct rx_info_entry *entry;
	u32 *head_idx = &rx_monitor->head_idx;
	u32 *tail_idx = &rx_monitor->tail_idx;
	spinlock_t *ring_lock = &rx_monitor->lock;
	u32 ring_size = rx_monitor->entries;
	u32 cnt = 0;
	char tmp[DBG_LOG_STR_SZ] = {0};
	bool log_to_kmsg = !buf || !buf_len;

	if (!log_to_kmsg) {
		/* clear the buffer to be sure */
		memset(buf, 0, buf_len);

		scnprintf(buf, buf_len, "\t%-16s%-16s%-16s%-16s%-8s%-8s%-8s"
					"%-8s%-8s%-8s%-16s\n",
					"MaxBPI", "Tot_Data_CMF",
					"Tot_Data_Cmd", "Tot_Data_Cmpl",
					"Lat(us)", "Avg_IO", "Max_IO", "Bsy",
					"IO_cnt", "Info", "BWutil(ms)");
	}

	/* Needs to be _irq because record is called from timer interrupt
	 * context
	 */
	spin_lock_irq(ring_lock);
	while (*head_idx != *tail_idx) {
		entry = &ring[*head_idx];

		/* Read out this entry's data. */
		if (!log_to_kmsg) {
			/* If !log_to_kmsg, then store to buf. */
			scnprintf(tmp, sizeof(tmp),
				  "%03d:\t%-16llu%-16llu%-16llu%-16llu%-8llu"
				  "%-8llu%-8llu%-8u%-8u%-8u%u(%u)\n",
				  *head_idx, entry->max_bytes_per_interval,
				  entry->cmf_bytes, entry->total_bytes,
				  entry->rcv_bytes, entry->avg_io_latency,
				  entry->avg_io_size, entry->max_read_cnt,
				  entry->cmf_busy, entry->io_cnt,
				  entry->cmf_info, entry->timer_utilization,
				  entry->timer_interval);

			/* Check for buffer overflow */
			if ((strlen(buf) + strlen(tmp)) >= buf_len)
				break;

			/* Append entry's data to buffer */
			strlcat(buf, tmp, buf_len);
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
					"4410 %02u: MBPI %llu Xmit %llu "
					"Cmpl %llu Lat %llu ASz %llu Info %02u "
					"BWUtil %u Int %u slot %u\n",
					cnt, entry->max_bytes_per_interval,
					entry->total_bytes, entry->rcv_bytes,
					entry->avg_io_latency,
					entry->avg_io_size, entry->cmf_info,
					entry->timer_utilization,
					entry->timer_interval, *head_idx);
		}

		*head_idx = (*head_idx + 1) % ring_size;

		/* Don't feed more than max_read_entries */
		cnt++;
		if (cnt >= max_read_entries)
			break;
	}
	spin_unlock_irq(ring_lock);

	return cnt;
}

/**
 * lpfc_cmf_setup - Initialize CMF and MI support
 * @phba: Pointer to HBA context object.
 *
 * This is called from HBA setup during driver load or when the HBA
 * comes online. This does all the initialization to support CMF and MI.
 **/
static int
lpfc_cmf_setup(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_dmabuf *mp;
	struct lpfc_pc_sli4_params *sli4_params;
	int rc, cmf, mi_ver;

	rc = lpfc_sli4_refresh_params(phba);
	if (unlikely(rc))
		return rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	sli4_params = &phba->sli4_hba.pc_sli4_params;

	/* Always try to enable MI feature if we can */
	if (sli4_params->mi_ver) {
		lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		mi_ver = bf_get(lpfc_mbx_set_feature_mi,
				&mboxq->u.mqe.un.set_feature);

		if (rc == MBX_SUCCESS) {
			if (mi_ver) {
				lpfc_printf_log(phba,
						KERN_WARNING, LOG_CGN_MGMT,
						"6215 MI is enabled\n");
				sli4_params->mi_ver = mi_ver;
			} else {
				lpfc_printf_log(phba,
						KERN_WARNING, LOG_CGN_MGMT,
						"6338 MI is disabled\n");
				sli4_params->mi_ver = 0;
			}
		} else {
			/* mi_ver is already set from GET_SLI4_PARAMETERS */
			lpfc_printf_log(phba, KERN_INFO,
					LOG_CGN_MGMT | LOG_INIT,
					"6245 Enable MI Mailbox x%x (x%x/x%x) "
					"failed, rc:x%x mi:x%x\n",
					bf_get(lpfc_mqe_command, &mboxq->u.mqe),
					lpfc_sli_config_mbox_subsys_get
						(phba, mboxq),
					lpfc_sli_config_mbox_opcode_get
						(phba, mboxq),
					rc, sli4_params->mi_ver);
		}
	} else {
		lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
				"6217 MI is disabled\n");
	}

	/* Ensure FDMI is enabled for MI if enable_mi is set */
	if (sli4_params->mi_ver)
		phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;

	/* Always try to enable CMF feature if we can */
	if (sli4_params->cmf) {
		lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		cmf = bf_get(lpfc_mbx_set_feature_cmf,
			     &mboxq->u.mqe.un.set_feature);
		if (rc ==
MBX_SUCCESS && cmf) { 8278 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 8279 "6218 CMF is enabled: mode %d\n", 8280 phba->cmf_active_mode); 8281 } else { 8282 lpfc_printf_log(phba, KERN_WARNING, 8283 LOG_CGN_MGMT | LOG_INIT, 8284 "6219 Enable CMF Mailbox x%x (x%x/x%x) " 8285 "failed, rc:x%x dd:x%x\n", 8286 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 8287 lpfc_sli_config_mbox_subsys_get 8288 (phba, mboxq), 8289 lpfc_sli_config_mbox_opcode_get 8290 (phba, mboxq), 8291 rc, cmf); 8292 sli4_params->cmf = 0; 8293 phba->cmf_active_mode = LPFC_CFG_OFF; 8294 goto no_cmf; 8295 } 8296 8297 /* Allocate Congestion Information Buffer */ 8298 if (!phba->cgn_i) { 8299 mp = kmalloc(sizeof(*mp), GFP_KERNEL); 8300 if (mp) 8301 mp->virt = dma_alloc_coherent 8302 (&phba->pcidev->dev, 8303 sizeof(struct lpfc_cgn_info), 8304 &mp->phys, GFP_KERNEL); 8305 if (!mp || !mp->virt) { 8306 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8307 "2640 Failed to alloc memory " 8308 "for Congestion Info\n"); 8309 kfree(mp); 8310 sli4_params->cmf = 0; 8311 phba->cmf_active_mode = LPFC_CFG_OFF; 8312 goto no_cmf; 8313 } 8314 phba->cgn_i = mp; 8315 8316 /* initialize congestion buffer info */ 8317 lpfc_init_congestion_buf(phba); 8318 lpfc_init_congestion_stat(phba); 8319 8320 /* Zero out Congestion Signal counters */ 8321 atomic64_set(&phba->cgn_acqe_stat.alarm, 0); 8322 atomic64_set(&phba->cgn_acqe_stat.warn, 0); 8323 } 8324 8325 rc = lpfc_sli4_cgn_params_read(phba); 8326 if (rc < 0) { 8327 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 8328 "6242 Error reading Cgn Params (%d)\n", 8329 rc); 8330 /* Ensure CGN Mode is off */ 8331 sli4_params->cmf = 0; 8332 } else if (!rc) { 8333 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 8334 "6243 CGN Event empty object.\n"); 8335 /* Ensure CGN Mode is off */ 8336 sli4_params->cmf = 0; 8337 } 8338 } else { 8339 no_cmf: 8340 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 8341 "6220 CMF is disabled\n"); 8342 } 8343 8344 /* Only register congestion buffer with firmware if BOTH 8345 * CMF and E2E are enabled. 
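	 * (mi_ver doubles as the E2E indicator here: if either feature was
	 * rejected above, sli4_params->cmf or sli4_params->mi_ver is already
	 * zero and the registration below is skipped.)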
8346 */ 8347 if (sli4_params->cmf && sli4_params->mi_ver) { 8348 rc = lpfc_reg_congestion_buf(phba); 8349 if (rc) { 8350 dma_free_coherent(&phba->pcidev->dev, 8351 sizeof(struct lpfc_cgn_info), 8352 phba->cgn_i->virt, phba->cgn_i->phys); 8353 kfree(phba->cgn_i); 8354 phba->cgn_i = NULL; 8355 /* Ensure CGN Mode is off */ 8356 phba->cmf_active_mode = LPFC_CFG_OFF; 8357 sli4_params->cmf = 0; 8358 return 0; 8359 } 8360 } 8361 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8362 "6470 Setup MI version %d CMF %d mode %d\n", 8363 sli4_params->mi_ver, sli4_params->cmf, 8364 phba->cmf_active_mode); 8365 8366 mempool_free(mboxq, phba->mbox_mem_pool); 8367 8368 /* Initialize atomic counters */ 8369 atomic_set(&phba->cgn_fabric_warn_cnt, 0); 8370 atomic_set(&phba->cgn_fabric_alarm_cnt, 0); 8371 atomic_set(&phba->cgn_sync_alarm_cnt, 0); 8372 atomic_set(&phba->cgn_sync_warn_cnt, 0); 8373 atomic_set(&phba->cgn_driver_evt_cnt, 0); 8374 atomic_set(&phba->cgn_latency_evt_cnt, 0); 8375 atomic64_set(&phba->cgn_latency_evt, 0); 8376 8377 phba->cmf_interval_rate = LPFC_CMF_INTERVAL; 8378 8379 /* Allocate RX Monitor Buffer */ 8380 if (!phba->rx_monitor) { 8381 phba->rx_monitor = kzalloc(sizeof(*phba->rx_monitor), 8382 GFP_KERNEL); 8383 8384 if (!phba->rx_monitor) { 8385 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8386 "2644 Failed to alloc memory " 8387 "for RX Monitor Buffer\n"); 8388 return -ENOMEM; 8389 } 8390 8391 /* Instruct the rx_monitor object to instantiate its ring */ 8392 if (lpfc_rx_monitor_create_ring(phba->rx_monitor, 8393 LPFC_MAX_RXMONITOR_ENTRY)) { 8394 kfree(phba->rx_monitor); 8395 phba->rx_monitor = NULL; 8396 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8397 "2645 Failed to alloc memory " 8398 "for RX Monitor's Ring\n"); 8399 return -ENOMEM; 8400 } 8401 } 8402 8403 return 0; 8404 } 8405 8406 static int 8407 lpfc_set_host_tm(struct lpfc_hba *phba) 8408 { 8409 LPFC_MBOXQ_t *mboxq; 8410 uint32_t len, rc; 8411 struct timespec64 cur_time; 8412 struct tm broken; 8413 uint32_t month, day, year; 8414 uint32_t hour, minute, second; 8415 struct lpfc_mbx_set_host_date_time *tm; 8416 8417 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8418 if (!mboxq) 8419 return -ENOMEM; 8420 8421 len = sizeof(struct lpfc_mbx_set_host_data) - 8422 sizeof(struct lpfc_sli4_cfg_mhdr); 8423 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 8424 LPFC_MBOX_OPCODE_SET_HOST_DATA, len, 8425 LPFC_SLI4_MBX_EMBED); 8426 8427 mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME; 8428 mboxq->u.mqe.un.set_host_data.param_len = 8429 sizeof(struct lpfc_mbx_set_host_date_time); 8430 tm = &mboxq->u.mqe.un.set_host_data.un.tm; 8431 ktime_get_real_ts64(&cur_time); 8432 time64_to_tm(cur_time.tv_sec, 0, &broken); 8433 month = broken.tm_mon + 1; 8434 day = broken.tm_mday; 8435 year = broken.tm_year - 100; 8436 hour = broken.tm_hour; 8437 minute = broken.tm_min; 8438 second = broken.tm_sec; 8439 bf_set(lpfc_mbx_set_host_month, tm, month); 8440 bf_set(lpfc_mbx_set_host_day, tm, day); 8441 bf_set(lpfc_mbx_set_host_year, tm, year); 8442 bf_set(lpfc_mbx_set_host_hour, tm, hour); 8443 bf_set(lpfc_mbx_set_host_min, tm, minute); 8444 bf_set(lpfc_mbx_set_host_sec, tm, second); 8445 8446 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8447 mempool_free(mboxq, phba->mbox_mem_pool); 8448 return rc; 8449 } 8450 8451 /** 8452 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function 8453 * @phba: Pointer to HBA context object. 8454 * 8455 * This function is the main SLI4 device initialization PCI function. 
This
 * function is called by the HBA initialization code, HBA reset code and
 * HBA error attention handler code. Caller is not required to hold any
 * locks.
 **/
int
lpfc_sli4_hba_setup(struct lpfc_hba *phba)
{
	int rc, i, cnt, len, dd;
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mqe *mqe;
	uint8_t *vpd;
	uint32_t vpd_size;
	uint32_t ftr_rsp = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_dmabuf *mp;
	struct lpfc_rqb *rqbp;
	u32 flg;

	/* Perform a PCI function reset to start from clean */
	rc = lpfc_pci_function_reset(phba);
	if (unlikely(rc))
		return -ENODEV;

	/* Check the HBA Host Status Register for readiness */
	rc = lpfc_sli4_post_status_check(phba);
	if (unlikely(rc)) {
		return -ENODEV;
	} else {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
		flg = phba->sli.sli_flag;
		spin_unlock_irq(&phba->hbalock);
		/* Allow a little time after setting SLI_ACTIVE for any polled
		 * MBX commands to complete via BSG.
		 */
		for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) {
			msleep(20);
			spin_lock_irq(&phba->hbalock);
			flg = phba->sli.sli_flag;
			spin_unlock_irq(&phba->hbalock);
		}
	}
	clear_bit(HBA_SETUP, &phba->hba_flag);

	lpfc_sli4_dip(phba);

	/*
	 * Allocate a single mailbox container for initializing the
	 * port.
	 */
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Issue READ_REV to collect vpd and FW information. */
	vpd_size = SLI4_PAGE_SIZE;
	vpd = kzalloc(vpd_size, GFP_KERNEL);
	if (!vpd) {
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
	if (unlikely(rc)) {
		kfree(vpd);
		goto out_free_mbox;
	}

	mqe = &mboxq->u.mqe;
	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
		set_bit(HBA_FCOE_MODE, &phba->hba_flag);
		phba->fcp_embed_io = 0;	/* SLI4 FC support only */
	} else {
		clear_bit(HBA_FCOE_MODE, &phba->hba_flag);
	}

	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
	    LPFC_DCBX_CEE_MODE)
		set_bit(HBA_FIP_SUPPORT, &phba->hba_flag);
	else
		clear_bit(HBA_FIP_SUPPORT, &phba->hba_flag);

	clear_bit(HBA_IOQ_FLUSH, &phba->hba_flag);

	if (phba->sli_rev != LPFC_SLI_REV4) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0376 READ_REV Error. SLI Level %d "
				"FCoE enabled %d\n",
				phba->sli_rev,
				test_bit(HBA_FCOE_MODE, &phba->hba_flag) ? 1 : 0);
		rc = -EIO;
		kfree(vpd);
		goto out_free_mbox;
	}

	rc = lpfc_set_host_tm(phba);
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
			"6468 Set host date / time: Status x%x:\n", rc);

	/*
	 * Continue initialization with default values even if the driver
	 * failed to read the FCoE param config regions; only read the
	 * parameters if the board is FCoE.
	 */
	if (test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
	    lpfc_sli4_read_fcoe_params(phba))
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
				"2570 Failed to read FCoE parameters\n");

	/*
	 * Retrieve the sli4 device physical port name; failure to do so
	 * is considered non-fatal.
8570 */ 8571 rc = lpfc_sli4_retrieve_pport_name(phba); 8572 if (!rc) 8573 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8574 "3080 Successful retrieving SLI4 device " 8575 "physical port name: %s.\n", phba->Port); 8576 8577 rc = lpfc_sli4_get_ctl_attr(phba); 8578 if (!rc) 8579 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8580 "8351 Successful retrieving SLI4 device " 8581 "CTL ATTR\n"); 8582 8583 /* 8584 * Evaluate the read rev and vpd data. Populate the driver 8585 * state with the results. If this routine fails, the failure 8586 * is not fatal as the driver will use generic values. 8587 */ 8588 rc = lpfc_parse_vpd(phba, vpd, vpd_size); 8589 if (unlikely(!rc)) 8590 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8591 "0377 Error %d parsing vpd. " 8592 "Using defaults.\n", rc); 8593 kfree(vpd); 8594 8595 /* Save information as VPD data */ 8596 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; 8597 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; 8598 8599 /* 8600 * This is because first G7 ASIC doesn't support the standard 8601 * 0x5a NVME cmd descriptor type/subtype 8602 */ 8603 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 8604 LPFC_SLI_INTF_IF_TYPE_6) && 8605 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) && 8606 (phba->vpd.rev.smRev == 0) && 8607 (phba->cfg_nvme_embed_cmd == 1)) 8608 phba->cfg_nvme_embed_cmd = 0; 8609 8610 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; 8611 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, 8612 &mqe->un.read_rev); 8613 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, 8614 &mqe->un.read_rev); 8615 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, 8616 &mqe->un.read_rev); 8617 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, 8618 &mqe->un.read_rev); 8619 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; 8620 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); 8621 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; 8622 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); 8623 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; 8624 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); 8625 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8626 "(%d):0380 READ_REV Status x%x " 8627 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", 8628 mboxq->vport ? 
			mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_status, mqe),
			phba->vpd.rev.opFwName,
			phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
			phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_0) {
		lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc == MBX_SUCCESS) {
			set_bit(HBA_RECOVERABLE_UE, &phba->hba_flag);
			/* Set 1Sec interval to detect UE */
			phba->eratt_poll_interval = 1;
			phba->sli4_hba.ue_to_sr = bf_get(
					lpfc_mbx_set_feature_UESR,
					&mboxq->u.mqe.un.set_feature);
			phba->sli4_hba.ue_to_rp = bf_get(
					lpfc_mbx_set_feature_UERP,
					&mboxq->u.mqe.un.set_feature);
		}
	}

	if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
		/* Enable MDS Diagnostics only if the SLI Port supports it */
		lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS)
			phba->mds_diags_support = 0;
	}

	/*
	 * Discover the port's supported feature set and match it against the
	 * host's requests.
	 */
	lpfc_request_features(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	/* Disable VMID if app header is not supported */
	if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr,
						  &mqe->un.req_ftrs))) {
		bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0);
		phba->cfg_vmid_app_header = 0;
		lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI,
				"1242 vmid feature not supported\n");
	}

	/*
	 * The port must support FCP initiator mode as this is the
	 * only mode running in the host.
	 */
	if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0378 No support for fcpi mode.\n");
		ftr_rsp++;
	}

	/* Performance Hints are ONLY for FCoE */
	if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
		if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
			phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
		else
			phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
	}

	/*
	 * If the port cannot support the host's requested features
	 * then turn off the global config parameters to disable the
	 * feature in the driver. This is not a fatal error.
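	 * For example, if BlockGuard was requested but the port did not
	 * grant the DIF feature, cfg_enable_bg is simply cleared below and
	 * initialization continues without T10-PI support.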
8701 */ 8702 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 8703 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) { 8704 phba->cfg_enable_bg = 0; 8705 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 8706 ftr_rsp++; 8707 } 8708 } 8709 8710 if (phba->max_vpi && phba->cfg_enable_npiv && 8711 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 8712 ftr_rsp++; 8713 8714 if (ftr_rsp) { 8715 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 8716 "0379 Feature Mismatch Data: x%08x %08x " 8717 "x%x x%x x%x\n", mqe->un.req_ftrs.word2, 8718 mqe->un.req_ftrs.word3, phba->cfg_enable_bg, 8719 phba->cfg_enable_npiv, phba->max_vpi); 8720 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 8721 phba->cfg_enable_bg = 0; 8722 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 8723 phba->cfg_enable_npiv = 0; 8724 } 8725 8726 /* These SLI3 features are assumed in SLI4 */ 8727 spin_lock_irq(&phba->hbalock); 8728 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 8729 spin_unlock_irq(&phba->hbalock); 8730 8731 /* Always try to enable dual dump feature if we can */ 8732 lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP); 8733 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8734 dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature); 8735 if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP)) 8736 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8737 "6448 Dual Dump is enabled\n"); 8738 else 8739 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT, 8740 "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, " 8741 "rc:x%x dd:x%x\n", 8742 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 8743 lpfc_sli_config_mbox_subsys_get( 8744 phba, mboxq), 8745 lpfc_sli_config_mbox_opcode_get( 8746 phba, mboxq), 8747 rc, dd); 8748 /* 8749 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent 8750 * calls depends on these resources to complete port setup. 8751 */ 8752 rc = lpfc_sli4_alloc_resource_identifiers(phba); 8753 if (rc) { 8754 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8755 "2920 Failed to alloc Resource IDs " 8756 "rc = x%x\n", rc); 8757 goto out_free_mbox; 8758 } 8759 8760 lpfc_set_host_data(phba, mboxq); 8761 8762 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8763 if (rc) { 8764 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 8765 "2134 Failed to set host os driver version %x", 8766 rc); 8767 } 8768 8769 /* Read the port's service parameters. */ 8770 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 8771 if (rc) { 8772 phba->link_state = LPFC_HBA_ERROR; 8773 rc = -ENOMEM; 8774 goto out_free_mbox; 8775 } 8776 8777 mboxq->vport = vport; 8778 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8779 mp = mboxq->ctx_buf; 8780 if (rc == MBX_SUCCESS) { 8781 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); 8782 rc = 0; 8783 } 8784 8785 /* 8786 * This memory was allocated by the lpfc_read_sparam routine but is 8787 * no longer needed. It is released and ctx_buf NULLed to prevent 8788 * unintended pointer access as the mbox is reused. 8789 */ 8790 lpfc_mbuf_free(phba, mp->virt, mp->phys); 8791 kfree(mp); 8792 mboxq->ctx_buf = NULL; 8793 if (unlikely(rc)) { 8794 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8795 "0382 READ_SPARAM command failed " 8796 "status %d, mbxStatus x%x\n", 8797 rc, bf_get(lpfc_mqe_status, mqe)); 8798 phba->link_state = LPFC_HBA_ERROR; 8799 rc = -EIO; 8800 goto out_free_mbox; 8801 } 8802 8803 lpfc_update_vport_wwn(vport); 8804 8805 /* Update the fc_host data structures with new wwn. 
*/ 8806 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 8807 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 8808 8809 /* Create all the SLI4 queues */ 8810 rc = lpfc_sli4_queue_create(phba); 8811 if (rc) { 8812 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8813 "3089 Failed to allocate queues\n"); 8814 rc = -ENODEV; 8815 goto out_free_mbox; 8816 } 8817 /* Set up all the queues to the device */ 8818 rc = lpfc_sli4_queue_setup(phba); 8819 if (unlikely(rc)) { 8820 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8821 "0381 Error %d during queue setup.\n ", rc); 8822 goto out_stop_timers; 8823 } 8824 /* Initialize the driver internal SLI layer lists. */ 8825 lpfc_sli4_setup(phba); 8826 lpfc_sli4_queue_init(phba); 8827 8828 /* update host els xri-sgl sizes and mappings */ 8829 rc = lpfc_sli4_els_sgl_update(phba); 8830 if (unlikely(rc)) { 8831 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8832 "1400 Failed to update xri-sgl size and " 8833 "mapping: %d\n", rc); 8834 goto out_destroy_queue; 8835 } 8836 8837 /* register the els sgl pool to the port */ 8838 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list, 8839 phba->sli4_hba.els_xri_cnt); 8840 if (unlikely(rc < 0)) { 8841 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8842 "0582 Error %d during els sgl post " 8843 "operation\n", rc); 8844 rc = -ENODEV; 8845 goto out_destroy_queue; 8846 } 8847 phba->sli4_hba.els_xri_cnt = rc; 8848 8849 if (phba->nvmet_support) { 8850 /* update host nvmet xri-sgl sizes and mappings */ 8851 rc = lpfc_sli4_nvmet_sgl_update(phba); 8852 if (unlikely(rc)) { 8853 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8854 "6308 Failed to update nvmet-sgl size " 8855 "and mapping: %d\n", rc); 8856 goto out_destroy_queue; 8857 } 8858 8859 /* register the nvmet sgl pool to the port */ 8860 rc = lpfc_sli4_repost_sgl_list( 8861 phba, 8862 &phba->sli4_hba.lpfc_nvmet_sgl_list, 8863 phba->sli4_hba.nvmet_xri_cnt); 8864 if (unlikely(rc < 0)) { 8865 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8866 "3117 Error %d during nvmet " 8867 "sgl post\n", rc); 8868 rc = -ENODEV; 8869 goto out_destroy_queue; 8870 } 8871 phba->sli4_hba.nvmet_xri_cnt = rc; 8872 8873 /* We allocate an iocbq for every receive context SGL. 8874 * The additional allocation is for abort and ls handling. 8875 */ 8876 cnt = phba->sli4_hba.nvmet_xri_cnt + 8877 phba->sli4_hba.max_cfg_param.max_xri; 8878 } else { 8879 /* update host common xri-sgl sizes and mappings */ 8880 rc = lpfc_sli4_io_sgl_update(phba); 8881 if (unlikely(rc)) { 8882 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8883 "6082 Failed to update nvme-sgl size " 8884 "and mapping: %d\n", rc); 8885 goto out_destroy_queue; 8886 } 8887 8888 /* register the allocated common sgl pool to the port */ 8889 rc = lpfc_sli4_repost_io_sgl_list(phba); 8890 if (unlikely(rc)) { 8891 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8892 "6116 Error %d during nvme sgl post " 8893 "operation\n", rc); 8894 /* Some NVME buffers were moved to abort nvme list */ 8895 /* A pci function reset will repost them */ 8896 rc = -ENODEV; 8897 goto out_destroy_queue; 8898 } 8899 /* Each lpfc_io_buf job structure has an iocbq element. 8900 * This cnt provides for abort, els, ct and ls requests. 
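		 * Sizing the pool at max_xri keeps an iocbq element
		 * available for every exchange the port can have in flight
		 * at once.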
		cnt = phba->sli4_hba.max_cfg_param.max_xri;
	}

	if (!phba->sli.iocbq_lookup) {
		/* Initialize and populate the iocb list per host */
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2821 initialize iocb list with %d entries\n",
				cnt);
		rc = lpfc_init_iocb_list(phba, cnt);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"1413 Failed to init iocb list.\n");
			goto out_destroy_queue;
		}
	}

	if (phba->nvmet_support)
		lpfc_nvmet_create_targetport(phba);

	if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
		/* Post initial buffers to all RQs created */
		for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
			rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
			INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
			rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
			rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
			rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
			rqbp->buffer_count = 0;

			lpfc_post_rq_buffer(
				phba, phba->sli4_hba.nvmet_mrq_hdr[i],
				phba->sli4_hba.nvmet_mrq_data[i],
				phba->cfg_nvmet_mrq_post, i);
		}
	}

	/* Post the rpi header region to the device. */
	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0393 Error %d during rpi post operation\n",
				rc);
		rc = -ENODEV;
		goto out_free_iocblist;
	}
	lpfc_sli4_node_prep(phba);

	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
		if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
			/*
			 * The FC Port needs to register FCFI (index 0)
			 */
			lpfc_reg_fcfi(phba, mboxq);
			mboxq->vport = phba->pport;
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
			if (rc != MBX_SUCCESS)
				goto out_unset_queue;
			rc = 0;
			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
						&mboxq->u.mqe.un.reg_fcfi);
		} else {
			/* We are in NVME Target mode with MRQ > 1 */

			/* First register the FCFI */
			lpfc_reg_fcfi_mrq(phba, mboxq, 0);
			mboxq->vport = phba->pport;
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
			if (rc != MBX_SUCCESS)
				goto out_unset_queue;
			rc = 0;
			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
						&mboxq->u.mqe.un.reg_fcfi_mrq);

			/* Next register the MRQs */
			lpfc_reg_fcfi_mrq(phba, mboxq, 1);
			mboxq->vport = phba->pport;
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
			if (rc != MBX_SUCCESS)
				goto out_unset_queue;
			rc = 0;
		}
		/* Check if the port is configured to be disabled */
		lpfc_sli_read_link_ste(phba);
	}

	/* Don't post more new bufs if repost already recovered
	 * the nvme sgls.
	 */
	if (phba->nvmet_support == 0) {
		if (phba->sli4_hba.io_xri_cnt == 0) {
			len = lpfc_new_io_buf(
				phba, phba->sli4_hba.io_xri_max);
			if (len == 0) {
				rc = -ENOMEM;
				goto out_unset_queue;
			}

			if (phba->cfg_xri_rebalancing)
				lpfc_create_multixri_pools(phba);
		}
	} else {
		phba->cfg_xri_rebalancing = 0;
	}

	/* Allow asynchronous mailbox command to go through */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* Post receive buffers to the device */
	lpfc_sli4_rb_setup(phba);

	/* Reset HBA FCF states after HBA reset */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/* Start the ELS watchdog timer */
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));

	/* Start heart beat timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
	clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
	phba->last_completion_time = jiffies;

	/* start eq_delay heartbeat */
	if (phba->cfg_auto_imax)
		queue_delayed_work(phba->wq, &phba->eq_delay_work,
				   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));

	/* start per phba idle_stat_delay heartbeat */
	lpfc_init_idle_stat_hb(phba);

	/* Start error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	/*
	 * The port is ready, set the host's link state to LINK_DOWN
	 * in preparation for link interrupts.
	 */
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_DOWN;

	/* Check if physical ports are trunked */
	if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
		phba->trunk_link.link0.state = LPFC_LINK_DOWN;
	if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
		phba->trunk_link.link1.state = LPFC_LINK_DOWN;
	if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
		phba->trunk_link.link2.state = LPFC_LINK_DOWN;
	if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
		phba->trunk_link.link3.state = LPFC_LINK_DOWN;
	spin_unlock_irq(&phba->hbalock);

	/* Arm the CQs and then EQs on device */
	lpfc_sli4_arm_cqeq_intr(phba);

	/* Indicate device interrupt mode */
	phba->sli4_hba.intr_enable = 1;

	/* Setup CMF after HBA is initialized */
	lpfc_cmf_setup(phba);

	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
	    test_bit(LINK_DISABLED, &phba->hba_flag)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3103 Adapter Link is disabled.\n");
		lpfc_down_link(phba, mboxq);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3104 Adapter failed to issue "
					"DOWN_LINK mbox cmd, rc:x%x\n", rc);
			goto out_io_buff_free;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		/* don't perform init_link on SLI4 FC port loopback test */
		if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
			rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
			if (rc)
				goto out_io_buff_free;
		}
	}
	mempool_free(mboxq, phba->mbox_mem_pool);

	/* Enable RAS FW log support */
	lpfc_sli4_ras_setup(phba);

	set_bit(HBA_SETUP, &phba->hba_flag);
	return rc;

out_io_buff_free:
	/* Free allocated IO Buffers */
	lpfc_io_free(phba);
out_unset_queue:
	/* Unset all the queues set up in this routine when error out */
	lpfc_sli4_queue_unset(phba);
out_free_iocblist:
	lpfc_free_iocb_list(phba);
out_destroy_queue:
	lpfc_sli4_queue_destroy(phba);
out_stop_timers:
	lpfc_stop_hba_timers(phba);
out_free_mbox:
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_mbox_timeout - Timeout call back function for mbox timer
 * @t: Context to fetch pointer to hba structure from.
 *
 * This is the callback function for the mailbox timer.  The mailbox
 * timer is armed when a new mailbox command is issued and the timer
 * is deleted when the mailbox completes.  The function is called by
 * the kernel timer code when a mailbox does not complete within the
 * expected time.  This function wakes up the worker thread to
 * process the mailbox timeout and returns.  All the processing is
 * done by the worker thread function lpfc_mbox_timeout_handler.
 **/
void
lpfc_mbox_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
	unsigned long iflag;
	uint32_t tmo_posted;

	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
 *                                      are pending
 * @phba: Pointer to HBA context object.
 *
 * This function checks if any mailbox completions are present on the mailbox
 * completion queue.
 **/
static bool
lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
{
	uint32_t idx;
	struct lpfc_queue *mcq;
	struct lpfc_mcqe *mcqe;
	bool pending_completions = false;
	uint8_t qe_valid;

	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
		return false;

	/* Check for completions on mailbox completion queue */

	mcq = phba->sli4_hba.mbx_cq;
	idx = mcq->hba_index;
	qe_valid = mcq->qe_valid;
	while (bf_get_le32(lpfc_cqe_valid,
	       (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
		mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
		if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
		    (!bf_get_le32(lpfc_trailer_async, mcqe))) {
			pending_completions = true;
			break;
		}
		idx = (idx + 1) % mcq->entry_count;
		if (mcq->hba_index == idx)
			break;

		/* if the index wrapped around, toggle the valid bit */
		if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
			qe_valid = (qe_valid) ? 0 : 1;
	}
	return pending_completions;
}
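
/*
 * Illustrative sketch, kept under #if 0 and not built into the driver: the
 * distilled shape of the valid-bit scan above.  cq_entry_is_valid() is a
 * hypothetical stand-in for the bf_get_le32(lpfc_cqe_valid, ...) test; only
 * the index/phase arithmetic is the point here.
 */
#if 0
static bool cq_scan_sketch(struct lpfc_queue *cq, bool cqav)
{
	uint32_t idx = cq->hba_index;
	uint8_t phase = cq->qe_valid;

	while (cq_entry_is_valid(lpfc_sli4_qe(cq, idx), phase)) {
		/* inspect the entry at lpfc_sli4_qe(cq, idx) here */
		idx = (idx + 1) % cq->entry_count;
		if (idx == cq->hba_index)
			break;			/* full ring scanned */
		/* parts with CQAV flip the phase bit on every wrap */
		if (cqav && !idx)
			phase = !phase;
	}
	return false;
}
#endif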

/**
 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
 *                                             that were missed.
 * @phba: Pointer to HBA context object.
 *
 * For SLI4, it is possible to miss an interrupt.  As such, mbox completions
 * may be missed, causing erroneous mailbox timeouts to occur.  This function
 * checks to see if mbox completions are on the mailbox completion queue
 * and will process all the completions associated with the eq for the
 * mailbox completion queue.
 **/
static bool
lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
	uint32_t eqidx;
	struct lpfc_queue *fpeq = NULL;
	struct lpfc_queue *eq;
	bool mbox_pending;

	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
		return false;

	/* Find the EQ associated with the mbox CQ */
	if (sli4_hba->hdwq) {
		for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
			eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
			if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
				fpeq = eq;
				break;
			}
		}
	}
	if (!fpeq)
		return false;

	/* Turn off interrupts from this EQ */

	sli4_hba->sli4_eq_clr_intr(fpeq);

	/* Check to see if a mbox completion is pending */

	mbox_pending = lpfc_sli4_mbox_completions_pending(phba);

	/*
	 * If a mbox completion is pending, process all the events on the EQ
	 * associated with the mbox completion queue (this could include
	 * mailbox commands, async events, els commands, receive queue data
	 * and fcp commands)
	 */

	if (mbox_pending)
		/* process and rearm the EQ */
		lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
				     LPFC_QUEUE_WORK);
	else
		/* Always clear and re-arm the EQ */
		sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);

	return mbox_pending;
}
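
/*
 * Illustrative sketch (#if 0, not built): one plausible way a timeout path
 * can use the routine above to filter out false alarms before escalating.
 * It mirrors the first step of lpfc_mbox_timeout_handler() below; the early
 * return shown here is illustrative shorthand, not the handler's exact flow.
 */
#if 0
	/* a missed interrupt is far cheaper to rule out than an HBA reset */
	if (lpfc_sli4_process_missed_mbox_completions(phba))
		return;		/* a completion was found and processed */
	/* ...genuine timeout handling (board reset etc.) continues here... */
#endif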

/**
 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
 * @phba: Pointer to HBA context object.
 *
 * This function is called from the worker thread when a mailbox command times
 * out.  The caller is not required to hold any locks.  This function will
 * reset the HBA and recover all the pending commands.
 **/
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
	MAILBOX_t *mb = NULL;

	struct lpfc_sli *psli = &phba->sli;

	/* If the mailbox completed, process the completion */
	lpfc_sli4_process_missed_mbox_completions(phba);

	if (!(psli->sli_flag & LPFC_SLI_ACTIVE))
		return;

	if (pmbox != NULL)
		mb = &pmbox->u.mb;
	/* Check the pmbox pointer first.  There is a race condition
	 * between the mbox timeout handler getting executed in the
	 * worklist and the mailbox actually completing.  When this
	 * race condition occurs, the mbox_active will be NULL.
	 */
	spin_lock_irq(&phba->hbalock);
	if (pmbox == NULL) {
		lpfc_printf_log(phba, KERN_WARNING,
				LOG_MBOX | LOG_SLI,
				"0353 Active Mailbox cleared - mailbox timeout "
				"exiting\n");
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* Mbox cmd <mbxCommand> timeout */
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
			mb->mbxCommand,
			phba->pport->port_state,
			phba->sli.sli_flag,
			phba->sli.mbox_active);
	spin_unlock_irq(&phba->hbalock);

	/* Setting state unknown so lpfc_sli_abort_iocb_ring
	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
	 * it to fail all outstanding SCSI IO.
	 */
	set_bit(MBX_TMO_ERR, &phba->bit_flags);
	spin_lock_irq(&phba->pport->work_port_lock);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irq(&phba->pport->work_port_lock);
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_UNKNOWN;
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0345 Resetting board due to mailbox timeout\n");

	/* Reset the HBA device */
	lpfc_reset_hba(phba);
}

/**
 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This function is called by discovery code and HBA management code
 * to submit a mailbox command to firmware with the SLI-3 interface spec.
 * This function gets the hbalock to protect the data structures.
 * The mailbox command can be submitted in polling mode, in which case
 * this function will wait in a polling loop for the completion of the
 * mailbox.
 * If the mailbox is submitted in no_wait mode (not polling) the
 * function will submit the command and return immediately without waiting
 * for the mailbox completion.  The no_wait is supported only when the HBA
 * is in SLI2/SLI3 mode - interrupts are enabled.
 * The SLI interface allows only one mailbox pending at a time.  If the
 * mailbox is issued in polling mode and there is already a mailbox
 * pending, then the function will return an error.  If the mailbox is issued
 * in NO_WAIT mode and there is a mailbox pending already, the function
 * will return MBX_BUSY after queuing the mailbox into the mailbox queue.
 * The SLI layer owns the mailbox object until the completion of the mailbox
 * command if this function returns MBX_BUSY or MBX_SUCCESS.  For all other
 * return codes the caller owns the mailbox command after the return of
 * the function.
 **/
static int
lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
		       uint32_t flag)
{
	MAILBOX_t *mbx;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, evtctr;
	uint32_t ha_copy, hc_copy;
	int i;
	unsigned long timeout;
	unsigned long drvr_flag = 0;
	uint32_t word0, ldata;
	void __iomem *to_slim;
	int processing_queue = 0;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	if (!pmbox) {
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		/* processing mbox queue from intr_handler */
		if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
		processing_queue = 1;
		pmbox = lpfc_mbox_get(phba);
		if (!pmbox) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
	}

	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
	    pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
		if (!pmbox->vport) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_VPORT,
					"1806 Mbox x%x failed. No vport\n",
					pmbox->u.mb.mbxCommand);
			dump_stack();
			goto out_not_finished;
		}
	}

	/* If the PCI channel is in offline state, do not post mbox. */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	psli = &phba->sli;

	mbx = &pmbox->u.mb;
	status = MBX_SUCCESS;

	if (phba->link_state == LPFC_HBA_ERROR) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		/* Mbox command <mbxCommand> cannot issue */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"(%d):0311 Mailbox command x%x cannot "
				"issue Data: x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0,
				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
		goto out_not_finished;
	}

	if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
		if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
		    !(hc_copy & HC_MBINT_ENA)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"(%d):2528 Mailbox command x%x cannot "
					"issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
			goto out_not_finished;
		}
	}

	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		/* Polling for a mbox command when another one is already
		 * active is not allowed in SLI.  Also, the driver must have
		 * established SLI2 mode to queue and process multiple mbox
		 * commands.
		 */

		if (flag & MBX_POLL) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"(%d):2529 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"(%d):2530 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		/* Another mailbox command is still being processed, queue this
		 * command to be processed later.
		 */
		lpfc_mbox_put(phba, pmbox);

		/* Mbox cmd issue - BUSY */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0308 Mbox cmd issue - BUSY Data: "
				"x%x x%x x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0xffffff,
				mbx->mbxCommand,
				phba->pport ? phba->pport->port_state : 0xff,
				psli->sli_flag, flag);

		psli->slistat.mbox_busy++;
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
					      LPFC_DISC_TRC_MBOX_VPORT,
					      "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
					      (uint32_t)mbx->mbxCommand,
					      mbx->un.varWords[0], mbx->un.varWords[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
					      LPFC_DISC_TRC_MBOX,
					      "MBOX Bsy: cmd:x%x mb:x%x x%x",
					      (uint32_t)mbx->mbxCommand,
					      mbx->un.varWords[0], mbx->un.varWords[1]);
		}

		return MBX_BUSY;
	}

	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* If we are not polling, we MUST be in SLI2 mode */
	if (flag != MBX_POLL) {
		if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
		    (mbx->mbxCommand != MBX_KILL_BOARD)) {
			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"(%d):2531 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}
		/* timeout active mbox command */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
					   1000);
		mod_timer(&psli->mbox_tmo, jiffies + timeout);
	}

	/* Mailbox cmd <cmd> issue */
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
			"x%x\n",
			pmbox->vport ? pmbox->vport->vpi : 0,
			mbx->mbxCommand,
			phba->pport ? phba->pport->port_state : 0xff,
			psli->sli_flag, flag);

	if (mbx->mbxCommand != MBX_HEARTBEAT) {
		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
					      LPFC_DISC_TRC_MBOX_VPORT,
					      "MBOX Send vport: cmd:x%x mb:x%x x%x",
					      (uint32_t)mbx->mbxCommand,
					      mbx->un.varWords[0], mbx->un.varWords[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
					      LPFC_DISC_TRC_MBOX,
					      "MBOX Send: cmd:x%x mb:x%x x%x",
					      (uint32_t)mbx->mbxCommand,
					      mbx->un.varWords[0], mbx->un.varWords[1]);
		}
	}

	psli->slistat.mbox_cmd++;
	evtctr = psli->slistat.mbox_event;

	/* next set own bit for the adapter and copy over command word */
	mbx->mbxOwner = OWN_CHIP;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Populate mbox extension offset word. */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
				= (uint8_t *)phba->mbox_ext
				  - (uint8_t *)phba->mbox;
		}

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->ext_buf) {
			lpfc_sli_pcimem_bcopy(pmbox->ext_buf,
					      (uint8_t *)phba->mbox_ext,
					      pmbox->in_ext_byte_len);
		}
		/* Copy command data to host SLIM area */
		lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
	} else {
		/* Populate mbox extension offset word. */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
				= MAILBOX_HBA_EXT_OFFSET;

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->ext_buf)
			lpfc_memcpy_to_slim(phba->MBslimaddr +
					    MAILBOX_HBA_EXT_OFFSET,
					    pmbox->ext_buf, pmbox->in_ext_byte_len);

		if (mbx->mbxCommand == MBX_CONFIG_PORT)
			/* copy command data into host mbox for cmpl */
			lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
					      MAILBOX_CMD_SIZE);

		/* First copy mbox command data to HBA SLIM, skip past first
		 * word
		 */
		to_slim = phba->MBslimaddr + sizeof(uint32_t);
		lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
				    MAILBOX_CMD_SIZE - sizeof(uint32_t));

		/* Next copy over first word, with mbxOwner set */
		ldata = *((uint32_t *)mbx);
		to_slim = phba->MBslimaddr;
		writel(ldata, to_slim);
		readl(to_slim); /* flush */

		if (mbx->mbxCommand == MBX_CONFIG_PORT)
			/* switch over to host mailbox */
			psli->sli_flag |= LPFC_SLI_ACTIVE;
	}

	wmb();

	switch (flag) {
	case MBX_NOWAIT:
		/* Set up reference to mailbox command */
		psli->mbox_active = pmbox;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
		/* Don't wait for it to finish, just return */
		break;

	case MBX_POLL:
		/* Set up null reference to mailbox command */
		psli->mbox_active = NULL;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */

		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* First read mbox status word */
			word0 = *((uint32_t *)phba->mbox);
			word0 = le32_to_cpu(word0);
		} else {
			/* First read mbox status word */
			if (lpfc_readl(phba->MBslimaddr, &word0)) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}
		}

		/* Read the HBA Host Attention Register */
		if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
			spin_unlock_irqrestore(&phba->hbalock,
					       drvr_flag);
			goto out_not_finished;
		}
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
					   1000) + jiffies;
		i = 0;
		/* Wait for command to complete */
		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
		       (!(ha_copy & HA_MBATT) &&
			(phba->link_state > LPFC_WARM_START))) {
			if (time_after(jiffies, timeout)) {
				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}

			/* Check if we took a mbox interrupt while we were
			 * polling
			 */
			if (((word0 & OWN_CHIP) != OWN_CHIP)
			    && (evtctr != psli->slistat.mbox_event))
				break;

			if (i++ > 10) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				msleep(1);
				spin_lock_irqsave(&phba->hbalock, drvr_flag);
			}

			if (psli->sli_flag & LPFC_SLI_ACTIVE) {
				/* First copy command data */
				word0 = *((uint32_t *)phba->mbox);
				word0 = le32_to_cpu(word0);
				if (mbx->mbxCommand == MBX_CONFIG_PORT) {
					MAILBOX_t *slimmb;
					uint32_t slimword0;
					/* Check real SLIM for any errors */
					slimword0 = readl(phba->MBslimaddr);
					slimmb = (MAILBOX_t *)&slimword0;
					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
					    && slimmb->mbxStatus) {
						psli->sli_flag &=
							~LPFC_SLI_ACTIVE;
						word0 = slimword0;
					}
				}
			} else {
				/* First copy command data */
				word0 = readl(phba->MBslimaddr);
			}
			/* Read the HBA Host Attention Register */
			if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}
		}

		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* copy results back to user */
			lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
					      MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->ext_buf) {
				lpfc_sli_pcimem_bcopy(phba->mbox_ext,
						      pmbox->ext_buf,
						      pmbox->out_ext_byte_len);
			}
		} else {
			/* First copy command data */
			lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
					      MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->ext_buf) {
				lpfc_memcpy_from_slim(
					pmbox->ext_buf,
					phba->MBslimaddr +
					MAILBOX_HBA_EXT_OFFSET,
					pmbox->out_ext_byte_len);
			}
		}

		writel(HA_MBATT, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		status = mbx->mbxStatus;
	}

	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return status;

out_not_finished:
	if (processing_queue) {
		pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
		lpfc_mbox_cmpl_put(phba, pmbox);
	}
	return MBX_NOT_FINISHED;
}
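
/*
 * Illustrative caller sketch, kept under #if 0: the ownership rule from the
 * kernel-doc above in practice.  lpfc_heart_beat() is assumed here only as a
 * convenient single-word mailbox builder; any simple command would do.
 */
#if 0
static void mbox_ownership_sketch(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return;
	lpfc_heart_beat(phba, pmb);

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	/*
	 * A polled issue returns with the command completed (or refused),
	 * so the caller still owns pmb and must free it.  With MBX_NOWAIT,
	 * MBX_BUSY/MBX_SUCCESS would instead transfer ownership to the SLI
	 * layer until the completion handler runs.
	 */
	mempool_free(pmb, phba->mbox_mem_pool);
}
#endif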

/**
 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox commands
 * @phba: Pointer to HBA context object.
 *
 * The function blocks the posting of SLI4 asynchronous mailbox commands from
 * the driver internal pending mailbox queue.  It will then try to wait out any
 * outstanding mailbox command before returning.
 *
 * Returns:
 * 0 - the outstanding mailbox command completed; 1 - the wait for the
 * outstanding mailbox command timed out.
 **/
static int
lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mboxq;
	int rc = 0;
	unsigned long timeout = 0;
	u32 sli_flag;
	u8 cmd, subsys, opcode;

	/* Mark the asynchronous mailbox command posting as blocked */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	/* Determine how long we might wait for the active mailbox
	 * command to be gracefully completed by firmware.
	 */
	if (phba->sli.mbox_active)
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
					   1000) + jiffies;
	spin_unlock_irq(&phba->hbalock);

	/* Make sure the mailbox is really active */
	if (timeout)
		lpfc_sli4_process_missed_mbox_completions(phba);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			/* Timeout, mark the outstanding cmd not complete */

			/* Sanity check sli.mbox_active has not completed or
			 * been cancelled from another context during the last
			 * 2ms sleep, so take hbalock to be sure before logging.
			 */
			spin_lock_irq(&phba->hbalock);
			if (phba->sli.mbox_active) {
				mboxq = phba->sli.mbox_active;
				cmd = mboxq->u.mb.mbxCommand;
				subsys = lpfc_sli_config_mbox_subsys_get(phba,
									 mboxq);
				opcode = lpfc_sli_config_mbox_opcode_get(phba,
									 mboxq);
				sli_flag = psli->sli_flag;
				spin_unlock_irq(&phba->hbalock);
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"2352 Mailbox command x%x "
						"(x%x/x%x) sli_flag x%x could "
						"not complete\n",
						cmd, subsys, opcode,
						sli_flag);
			} else {
				spin_unlock_irq(&phba->hbalock);
			}

			rc = 1;
			break;
		}
	}

	/* Can not cleanly block async mailbox command, fails it */
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
		spin_unlock_irq(&phba->hbalock);
	}
	return rc;
}

/**
 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox commands
 * @phba: Pointer to HBA context object.
 *
 * The function unblocks and resumes the posting of SLI4 asynchronous mailbox
 * commands from the driver internal pending mailbox queue.  It makes sure
 * that there is no outstanding mailbox command before resuming posting
 * asynchronous mailbox commands.  If, for any reason, there is an outstanding
 * mailbox command, it will try to wait it out before resuming asynchronous
 * mailbox command posting.
 **/
static void
lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		/* Asynchronous mailbox posting is not blocked, do nothing */
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* The outstanding synchronous mailbox command is guaranteed to be
	 * done, successful or timed out; after a timeout the outstanding
	 * mailbox command is always removed, so just unblock posting of
	 * async mailbox commands and resume.
	 */
	psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* wake up worker thread to post asynchronous mailbox command */
	lpfc_worker_wake_up(phba);
}
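
/*
 * The pair above is used as a bracket: block, do the synchronous work,
 * unblock.  A condensed sketch (#if 0, mirroring what
 * lpfc_sli_issue_mbox_s4() does further below):
 */
#if 0
	if (!lpfc_sli4_async_mbox_block(phba)) {
		/* async posting is quiesced; safe to use the bootstrap mbox */
		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
		lpfc_sli4_async_mbox_unblock(phba);
	}
#endif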

/**
 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * The function waits for the bootstrap mailbox register ready bit from
 * the port for twice the regular mailbox command timeout value.
 *
 * Returns:
 * 0 - no timeout on waiting for bootstrap mailbox register ready.
 * MBXERR_ERROR - wait for bootstrap mailbox register timed out or port
 * is in an unrecoverable state.
 **/
static int
lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	uint32_t db_ready;
	unsigned long timeout;
	struct lpfc_register bmbx_reg;
	struct lpfc_register portstat_reg = {-1};

	/* Sanity check - there is no point to wait if the port is in an
	 * unrecoverable state.
	 */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			       &portstat_reg.word0) ||
		    lpfc_sli4_unrecoverable_port(&portstat_reg)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3858 Skipping bmbx ready because "
					"Port Status x%x\n",
					portstat_reg.word0);
			return MBXERR_ERROR;
		}
	}

	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
				   * 1000) + jiffies;

	do {
		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
		if (!db_ready)
			mdelay(2);

		if (time_after(jiffies, timeout))
			return MBXERR_ERROR;
	} while (!db_ready);

	return 0;
}
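
/*
 * Distilled shape of the bounded poll above (#if 0, illustrative only):
 * compute an absolute jiffies deadline, spin with a small delay, and give
 * up once the deadline passes.  tmo_ms and ready() are hypothetical
 * placeholders, not driver API.
 */
#if 0
	unsigned long deadline = msecs_to_jiffies(tmo_ms) + jiffies;
	bool ok;

	do {
		ok = ready();		/* hypothetical readiness predicate */
		if (!ok)
			mdelay(2);	/* mdelay is safe in atomic context */
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
	} while (!ok);
#endif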

/**
 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * The function posts a mailbox to the port.  The mailbox is expected
 * to be completely filled in and ready for the port to operate on it.
 * This routine executes a synchronous completion operation on the
 * mailbox by polling for its completion.
 *
 * The caller must not be holding any locks when calling this routine.
 *
 * Returns:
 * MBX_SUCCESS - mailbox posted successfully
 * Any of the MBX error values.
 **/
static int
lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc = MBX_SUCCESS;
	unsigned long iflag;
	uint32_t mcqe_status;
	uint32_t mbx_cmnd;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_mqe *mb = &mboxq->u.mqe;
	struct lpfc_bmbx_create *mbox_rgn;
	struct dma_address *dma_address;

	/*
	 * Only one mailbox can be active to the bootstrap mailbox region
	 * at a time and there is no queueing provided.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"(%d):2532 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_POLL);
		return MBXERR_ERROR;
	}
	/* The server grabs the token and owns it until release */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* wait for the bootstrap mbox register to report readiness */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;
	/*
	 * Initialize the bootstrap memory region to avoid stale data areas
	 * in the mailbox post.  Then copy the caller's mailbox contents to
	 * the bmbx mailbox region.
	 */
	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
	lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
			       sizeof(struct lpfc_mqe));

	/* Post the high mailbox dma address to the port and wait for ready. */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);

	/* wait for bootstrap mbox register for hi-address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/* Post the low mailbox dma address to the port. */
	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);

	/* wait for bootstrap mbox register for low address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/*
	 * Read the CQ to ensure the mailbox has completed.
	 * If so, update the mailbox status so that the upper layers
	 * can complete the request normally.
	 */
	lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
			       sizeof(struct lpfc_mqe));
	mbox_rgn = (struct lpfc_bmbx_create *)phba->sli4_hba.bmbx.avirt;
	lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
			       sizeof(struct lpfc_mcqe));
	mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
	/*
	 * When the CQE status indicates a failure and the mailbox status
	 * indicates success then copy the CQE status into the mailbox status
	 * (and prefix it with x4000).
	 */
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mb,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
		rc = MBXERR_ERROR;
	} else
		lpfc_sli4_swap_str(phba, mboxq);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
			" x%x x%x CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			bf_get(lpfc_mqe_status, mb),
			mb->un.mb_words[0], mb->un.mb_words[1],
			mb->un.mb_words[2], mb->un.mb_words[3],
			mb->un.mb_words[4], mb->un.mb_words[5],
			mb->un.mb_words[6], mb->un.mb_words[7],
			mb->un.mb_words[8], mb->un.mb_words[9],
			mb->un.mb_words[10], mb->un.mb_words[11],
			mb->un.mb_words[12], mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);
exit:
	/* We hold the token; clear it and the active pointer under hbalock */
	spin_lock_irqsave(&phba->hbalock, iflag);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
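
/*
 * Worked example of the status folding above (#if 0, illustrative): an MCQE
 * failure code of 0x12 with an MQE that still reads success yields a
 * combined mailbox status of 0x4012, so upper layers see one code that
 * encodes both "CQE-range error" (the x4000 prefix) and the original reason.
 */
#if 0
	u32 mcqe_status = 0x12;					/* example CQE code */
	u32 mbx_status = LPFC_MBX_ERROR_RANGE | mcqe_status;	/* 0x4012 */
#endif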

/**
 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This function is called by discovery code and HBA management code to submit
 * a mailbox command to firmware with the SLI-4 interface spec.
 *
 * Regardless of the return code, the caller owns the mailbox command after
 * the function returns.
 **/
static int
lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		       uint32_t flag)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long iflags;
	int rc;

	/* dump from issue mailbox command if setup */
	lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);

	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"(%d):2544 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Detect polling mode and jump to a handler */
	if (!phba->sli4_hba.intr_enable) {
		if (flag == MBX_POLL)
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
		else
			rc = -EIO;
		if (rc != MBX_SUCCESS)
			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
					"(%d):2541 Mailbox command x%x "
					"(x%x/x%x) failure: "
					"mqe_sta: x%x mcqe_sta: x%x/x%x "
					"Data: x%x x%x\n",
					mboxq->vport ? mboxq->vport->vpi : 0,
					mboxq->u.mb.mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									mboxq),
					lpfc_sli_config_mbox_opcode_get(phba,
									mboxq),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
					bf_get(lpfc_mcqe_ext_status,
					       &mboxq->mcqe),
					psli->sli_flag, flag);
		return rc;
	} else if (flag == MBX_POLL) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"(%d):2542 Try to issue mailbox command "
				"x%x (x%x/x%x) synchronously ahead of async "
				"mailbox command queue: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		/* Try to block the asynchronous mailbox posting */
		rc = lpfc_sli4_async_mbox_block(phba);
		if (!rc) {
			/* Successfully blocked, now issue sync mbox cmd */
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_WARNING,
						LOG_MBOX | LOG_SLI,
						"(%d):2597 Sync Mailbox command "
						"x%x (x%x/x%x) failure: "
						"mqe_sta: x%x mcqe_sta: x%x/x%x "
						"Data: x%x x%x\n",
						mboxq->vport ? mboxq->vport->vpi : 0,
						mboxq->u.mb.mbxCommand,
						lpfc_sli_config_mbox_subsys_get(phba,
										mboxq),
						lpfc_sli_config_mbox_opcode_get(phba,
										mboxq),
						bf_get(lpfc_mqe_status, &mboxq->u.mqe),
						bf_get(lpfc_mcqe_status, &mboxq->mcqe),
						bf_get(lpfc_mcqe_ext_status,
						       &mboxq->mcqe),
						psli->sli_flag, flag);
			/* Unblock the async mailbox posting afterward */
			lpfc_sli4_async_mbox_unblock(phba);
		}
		return rc;
	}

	/* Now, interrupt mode asynchronous mailbox command */
	rc = lpfc_mbox_cmd_check(phba, mboxq);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"(%d):2543 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Put the mailbox command to the driver internal FIFO */
	psli->slistat.mbox_busy++;
	spin_lock_irqsave(&phba->hbalock, iflags);
	lpfc_mbox_put(phba, mboxq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0354 Mbox cmd issue - Enqueue Data: "
			"x%x (x%x/x%x) x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0xffffff,
			bf_get(lpfc_mqe_command, &mboxq->u.mqe),
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			mboxq->u.mb.un.varUnregLogin.rpi,
			phba->pport->port_state,
			psli->sli_flag, MBX_NOWAIT);
	/* Wake up worker thread to transport mailbox command from head */
	lpfc_worker_wake_up(phba);

	return MBX_BUSY;

out_not_finished:
	return MBX_NOT_FINISHED;
}

/**
 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to the device
 * @phba: Pointer to HBA context object.
 *
 * This function is called by the worker thread to send a mailbox command to
 * the SLI4 HBA firmware.
 **/
int
lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mboxq;
	int rc = MBX_SUCCESS;
	unsigned long iflags;
	struct lpfc_mqe *mqe;
	uint32_t mbx_cmnd;

	/* Check interrupt mode before posting async mailbox command */
	if (unlikely(!phba->sli4_hba.intr_enable))
		return MBX_NOT_FINISHED;

	/* Check for mailbox command service token */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (unlikely(phba->sli.mbox_active)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0384 There is pending active mailbox cmd\n");
		return MBX_NOT_FINISHED;
	}
	/* Take the mailbox command service token */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* Get the next mailbox command from head of queue */
	mboxq = lpfc_mbox_get(phba);

	/* If no more mailbox commands are waiting for post, we're done */
	if (!mboxq) {
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_SUCCESS;
	}
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Check device readiness for posting mailbox command */
	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc))
		/* Driver clean routine will clean up pending mailbox */
		goto out_not_finished;

	/* Prepare the mbox command to be posted */
	mqe = &mboxq->u.mqe;
	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);

	/* Start timer for the mbox_tmo and log some mailbox post messages */
	mod_timer(&psli->mbox_tmo, (jiffies +
		  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
			"x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state, psli->sli_flag);

	if (mbx_cmnd != MBX_HEARTBEAT) {
		if (mboxq->vport) {
			lpfc_debugfs_disc_trc(mboxq->vport,
					      LPFC_DISC_TRC_MBOX_VPORT,
					      "MBOX Send vport: cmd:x%x mb:x%x x%x",
					      mbx_cmnd, mqe->un.mb_words[0],
					      mqe->un.mb_words[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
					      LPFC_DISC_TRC_MBOX,
					      "MBOX Send: cmd:x%x mb:x%x x%x",
					      mbx_cmnd, mqe->un.mb_words[0],
					      mqe->un.mb_words[1]);
		}
	}
	psli->slistat.mbox_cmd++;

	/* Post the mailbox command to the port */
	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"(%d):2533 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_NOWAIT);
		goto out_not_finished;
	}

	return rc;

out_not_finished:
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->sli.mbox_active) {
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		/* Release the token */
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return MBX_NOT_FINISHED;
}

/**
 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
 * the API jump table function pointer in the lpfc_hba struct.
 *
 * Return codes: the caller owns the mailbox command after the return of the
 * function.
 **/
int
lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
{
	return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
}
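
/*
 * Callers go through the wrapper above and stay SLI-rev agnostic; the jump
 * table set up below routes the call.  Illustrative sketch (#if 0):
 */
#if 0
	/* resolves to lpfc_sli_issue_mbox_s3() on SLI-3 parts and to
	 * lpfc_sli_issue_mbox_s4() on SLI-4 parts, per
	 * lpfc_mbox_api_table_setup()
	 */
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
#endif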

/**
 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the mbox interface API function jump table in the
 * @phba struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s3;
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s4;
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1420 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
}

/**
 * __lpfc_sli_ringtx_put - Add an iocb to the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the newly added command iocb.
 *
 * This function is called with the hbalock held for SLI3 ports or
 * the ring lock held for SLI4 ports to add a command
 * iocb to the txq when the SLI layer cannot submit the command iocb
 * to the ring.
 **/
void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *piocb)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		lockdep_assert_held(&pring->ring_lock);
	else
		lockdep_assert_held(&phba->hbalock);
	/* Insert the caller's iocb in the txq tail for later processing. */
	list_add_tail(&piocb->list, &pring->txq);
}

/**
 * lpfc_sli_next_iocb - Get the next iocb in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly added command iocb.
 *
 * This function is called with the hbalock held before a new
 * iocb is submitted to the firmware.  This function checks the
 * txq in order to flush the iocbs in the txq to the firmware before
 * submitting new iocbs to the firmware.
 * If there are iocbs in the txq which need to be submitted
 * to the firmware, lpfc_sli_next_iocb returns the first element
 * of the txq after dequeuing it from the txq.
 * If there is no iocb in the txq then the function will return
 * *piocb and *piocb is set to NULL.  The caller needs to check
 * *piocb to find out if there are more commands in the txq.
 **/
static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		   struct lpfc_iocbq **piocb)
{
	struct lpfc_iocbq *nextiocb;

	lockdep_assert_held(&phba->hbalock);

	nextiocb = lpfc_sli_ringtx_get(phba, pring);
	if (!nextiocb) {
		nextiocb = *piocb;
		*piocb = NULL;
	}

	return nextiocb;
}
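
/*
 * Typical drain loop built on lpfc_sli_next_iocb() (#if 0 sketch, taken from
 * __lpfc_sli_issue_iocb_s3() below): iocbs already queued on the txq flush
 * ahead of the new one, and when *piocb comes back NULL the caller's own
 * iocb was consumed as well.
 */
#if 0
	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	if (!piocb)
		return IOCB_SUCCESS;	/* everything, ours included, went out */
#endif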

/**
 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
 * an iocb command to an HBA with the SLI-3 interface spec.  If the PCI slot
 * is recovering from an error state, if the HBA is resetting, or if the
 * LPFC_STOP_IOCB_EVENT flag is turned on, the function returns IOCB_ERROR.
 * When the link is down, this function allows only iocbs for posting buffers.
 * This function finds the next available slot in the command ring, posts the
 * command to that slot, and writes the port attention register to request
 * that the HBA start processing the new iocb.  If there is no slot available
 * in the ring and flag & SLI_IOCB_RET_IOCB is not set, the new iocb is added
 * to the txq; otherwise the function returns IOCB_BUSY.
 *
 * This function is called with the hbalock held.  The function will return
 * success after it successfully submits the iocb to firmware or after adding
 * it to the txq.
 **/
static int
__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_iocbq *nextiocb;
	IOCB_t *iocb;
	struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];

	lockdep_assert_held(&phba->hbalock);

	if (piocb->cmd_cmpl && (!piocb->vport) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1807 IOCB x%x failed. No vport\n",
				piocb->iocb.ulpCommand);
		dump_stack();
		return IOCB_ERROR;
	}

	/* If the PCI channel is in offline state, do not post iocbs. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return IOCB_ERROR;

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag)))
		return IOCB_ERROR;

	/*
	 * We should never get an IOCB if we are in a < LINK_DOWN state
	 */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return IOCB_ERROR;

	/*
	 * Check to see if we are blocking IOCB processing because of an
	 * outstanding event.
	 */
	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
		goto iocb_busy;

	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
		/*
		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
		 * can be issued if the link is not up.
		 */
		switch (piocb->iocb.ulpCommand) {
		case CMD_QUE_RING_BUF_CN:
		case CMD_QUE_RING_BUF64_CN:
			/*
			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
			 * completion, cmd_cmpl MUST be 0.
			 */
			if (piocb->cmd_cmpl)
				piocb->cmd_cmpl = NULL;
			fallthrough;
		case CMD_CREATE_XRI_CR:
		case CMD_CLOSE_XRI_CN:
		case CMD_CLOSE_XRI_CX:
			break;
		default:
			goto iocb_busy;
		}

	/*
	 * For FCP commands, we must be in a state where we can process link
	 * attention events.
	 */
	} else if (unlikely(pring->ringno == LPFC_FCP_RING &&
			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
		goto iocb_busy;
	}

	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

iocb_busy:
	pring->stats.iocb_cmd_delay++;

out_busy:

	if (!(flag & SLI_IOCB_RET_IOCB)) {
		__lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}

/**
 * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_fcp_io_s3 is a wrapper function that invokes the lockless
 * version to send an iocb command to an HBA with the SLI-3 interface spec.
 *
 * This function takes the hbalock before invoking the lockless version.
 * The function will return success after it successfully submits the iocb to
 * firmware or after adding it to the txq.
 **/
static int
__lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
			   struct lpfc_iocbq *piocb, uint32_t flag)
{
	unsigned long iflags;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflags);
	rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return rc;
}

/**
 * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue wqe on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue
 * a WQE command to an HBA with the SLI-4 interface spec.
 *
 * This function is a lockless version.  The function will return success
 * after it successfully submits the wqe to firmware or after adding it to
 * the txq.
 **/
 **/
static int
__lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
			   struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_io_buf *lpfc_cmd = piocb->io_buf;

	lpfc_prep_embed_io(phba, lpfc_cmd);
	return lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
}

void
lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq;
	union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe;
	struct sli4_sge_le *sgl;
	u32 type_size;

	/* 128 byte wqe support here */
	sgl = (struct sli4_sge_le *)lpfc_cmd->dma_sgl;

	if (phba->fcp_embed_io) {
		struct fcp_cmnd *fcp_cmnd;
		u32 *ptr;

		fcp_cmnd = lpfc_cmd->fcp_cmnd;

		/* Word 0-2 - FCP_CMND */
		type_size = le32_to_cpu(sgl->sge_len);
		type_size |= ULP_BDE64_TYPE_BDE_IMMED;
		wqe->generic.bde.tus.w = type_size;
		wqe->generic.bde.addrHigh = 0;
		wqe->generic.bde.addrLow = 72;	/* Word 18: 18 words x 4 bytes */

		bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);

		/* Word 18-29 FCP CMND Payload */
		ptr = &wqe->words[18];
		lpfc_sli_pcimem_bcopy(fcp_cmnd, ptr, le32_to_cpu(sgl->sge_len));
	} else {
		/* Word 0-2 - Inline BDE */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->generic.bde.tus.f.bdeSize = le32_to_cpu(sgl->sge_len);
		wqe->generic.bde.addrHigh = le32_to_cpu(sgl->addr_hi);
		wqe->generic.bde.addrLow = le32_to_cpu(sgl->addr_lo);

		/* Word 10 */
		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
		bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
	}

	/* add the VMID tags as per switch response */
	if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) {
		if (phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
			bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
			       (piocb->vmid_tag.cs_ctl_vmid));
		} else if (phba->cfg_vmid_app_header) {
			bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
			wqe->words[31] = piocb->vmid_tag.app_id;
		}
	}
}

/**
 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
 * an iocb command to an HBA with SLI-4 interface spec.
 *
 * This function is called with ringlock held. The function will return success
 * after it successfully submits the iocb to firmware or after adding it to
 * the txq.
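 *
 * Roadmap of the sglq handling below (a summary of the existing logic,
 * not new behavior): a command arriving with NO_XRI is assigned an ELS
 * sglq, except for aborts, which need none; FCP commands already carry
 * an XRI and a mapped sgl; and a CX continuation looks up the sglq it
 * already holds on the active list.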
 **/
static int
__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_sglq *sglq;
	union lpfc_wqe128 *wqe;
	struct lpfc_queue *wq;
	struct lpfc_sli_ring *pring;
	u32 ulp_command = get_job_cmnd(phba, piocb);

	/* Get the WQ */
	if ((piocb->cmd_flag & LPFC_IO_FCP) ||
	    (piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
		wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
	} else {
		wq = phba->sli4_hba.els_wq;
	}

	/* Get corresponding ring */
	pring = wq->pring;

	/*
	 * The WQE can be either 64 or 128 bytes.
	 */

	lockdep_assert_held(&pring->ring_lock);
	wqe = &piocb->wqe;
	if (piocb->sli4_xritag == NO_XRI) {
		if (ulp_command == CMD_ABORT_XRI_CX)
			sglq = NULL;
		else {
			sglq = __lpfc_sli_get_els_sglq(phba, piocb);
			if (!sglq) {
				if (!(flag & SLI_IOCB_RET_IOCB)) {
					__lpfc_sli_ringtx_put(phba,
							      pring,
							      piocb);
					return IOCB_SUCCESS;
				} else {
					return IOCB_BUSY;
				}
			}
		}
	} else if (piocb->cmd_flag & LPFC_IO_FCP) {
		/* These IO's already have an XRI and a mapped sgl. */
		sglq = NULL;
	} else {
		/*
		 * This is a continuation of a command (CX), so this
		 * sglq is on the active list
		 */
		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
		if (!sglq)
			return IOCB_ERROR;
	}

	if (sglq) {
		piocb->sli4_lxritag = sglq->sli4_lxritag;
		piocb->sli4_xritag = sglq->sli4_xritag;

		/* ABTS sent by initiator to CT exchange, the
		 * RX_ID field will be filled with the newly
		 * allocated responder XRI.
		 */
		if (ulp_command == CMD_XMIT_BLS_RSP64_CX &&
		    piocb->abort_bls == LPFC_ABTS_UNSOL_INT)
			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
			       piocb->sli4_xritag);

		bf_set(wqe_xri_tag, &wqe->generic.wqe_com,
		       piocb->sli4_xritag);

		if (lpfc_wqe_bpl2sgl(phba, piocb, sglq) == NO_XRI)
			return IOCB_ERROR;
	}

	if (lpfc_sli4_wq_put(wq, wqe))
		return IOCB_ERROR;

	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);

	return 0;
}

/*
 * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
 *
 * This routine wraps the actual fcp i/o function for issuing a WQE (SLI-4)
 * or an IOCB (SLI-3) through the corresponding function pointer in the
 * lpfc_hba struct.
 *
 * Return codes:
 *	IOCB_ERROR - Error
 *	IOCB_SUCCESS - Success
 *	IOCB_BUSY - Busy
 **/
int
lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
		      struct lpfc_iocbq *piocb, uint32_t flag)
{
	return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
}

/*
 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
 *
 * This routine wraps the actual lockless version for issuing an IOCB through
 * the corresponding function pointer in the lpfc_hba struct.
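 * Callers must already hold the lock required by the jump-table target:
 * the ring_lock for the SLI-4 variant, the hbalock for SLI-3; the locked
 * wrapper lpfc_sli_issue_iocb() below takes care of this for callers
 * that hold neither.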
10767 * 10768 * Return codes: 10769 * IOCB_ERROR - Error 10770 * IOCB_SUCCESS - Success 10771 * IOCB_BUSY - Busy 10772 **/ 10773 int 10774 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 10775 struct lpfc_iocbq *piocb, uint32_t flag) 10776 { 10777 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 10778 } 10779 10780 static void 10781 __lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq, 10782 struct lpfc_vport *vport, 10783 struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did, 10784 u32 elscmd, u8 tmo, u8 expect_rsp) 10785 { 10786 struct lpfc_hba *phba = vport->phba; 10787 IOCB_t *cmd; 10788 10789 cmd = &cmdiocbq->iocb; 10790 memset(cmd, 0, sizeof(*cmd)); 10791 10792 cmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); 10793 cmd->un.elsreq64.bdl.addrLow = putPaddrLow(bmp->phys); 10794 cmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 10795 10796 if (expect_rsp) { 10797 cmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); 10798 cmd->un.elsreq64.remoteID = did; /* DID */ 10799 cmd->ulpCommand = CMD_ELS_REQUEST64_CR; 10800 cmd->ulpTimeout = tmo; 10801 } else { 10802 cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64); 10803 cmd->un.genreq64.xmit_els_remoteID = did; /* DID */ 10804 cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX; 10805 cmd->ulpPU = PARM_NPIV_DID; 10806 } 10807 cmd->ulpBdeCount = 1; 10808 cmd->ulpLe = 1; 10809 cmd->ulpClass = CLASS3; 10810 10811 /* If we have NPIV enabled, we want to send ELS traffic by VPI. */ 10812 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 10813 if (expect_rsp) { 10814 cmd->un.elsreq64.myID = vport->fc_myDID; 10815 10816 /* For ELS_REQUEST64_CR, use the VPI by default */ 10817 cmd->ulpContext = phba->vpi_ids[vport->vpi]; 10818 } 10819 10820 cmd->ulpCt_h = 0; 10821 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ 10822 if (elscmd == ELS_CMD_ECHO) 10823 cmd->ulpCt_l = 0; /* context = invalid RPI */ 10824 else 10825 cmd->ulpCt_l = 1; /* context = VPI */ 10826 } 10827 } 10828 10829 static void 10830 __lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq, 10831 struct lpfc_vport *vport, 10832 struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did, 10833 u32 elscmd, u8 tmo, u8 expect_rsp) 10834 { 10835 struct lpfc_hba *phba = vport->phba; 10836 union lpfc_wqe128 *wqe; 10837 struct ulp_bde64_le *bde; 10838 u8 els_id; 10839 10840 wqe = &cmdiocbq->wqe; 10841 memset(wqe, 0, sizeof(*wqe)); 10842 10843 /* Word 0 - 2 BDE */ 10844 bde = (struct ulp_bde64_le *)&wqe->generic.bde; 10845 bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys)); 10846 bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys)); 10847 bde->type_size = cpu_to_le32(cmd_size); 10848 bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64); 10849 10850 if (expect_rsp) { 10851 bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_WQE); 10852 10853 /* Transfer length */ 10854 wqe->els_req.payload_len = cmd_size; 10855 wqe->els_req.max_response_payload_len = FCELSSIZE; 10856 10857 /* DID */ 10858 bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did); 10859 10860 /* Word 11 - ELS_ID */ 10861 switch (elscmd) { 10862 case ELS_CMD_PLOGI: 10863 els_id = LPFC_ELS_ID_PLOGI; 10864 break; 10865 case ELS_CMD_FLOGI: 10866 els_id = LPFC_ELS_ID_FLOGI; 10867 break; 10868 case ELS_CMD_LOGO: 10869 els_id = LPFC_ELS_ID_LOGO; 10870 break; 10871 case ELS_CMD_FDISC: 10872 if (!vport->fc_myDID) { 10873 els_id = LPFC_ELS_ID_FDISC; 10874 break; 10875 } 10876 fallthrough; 10877 default: 10878 els_id = LPFC_ELS_ID_DEFAULT; 10879 break; 10880 } 10881 10882 bf_set(wqe_els_id, 
&wqe->els_req.wqe_com, els_id); 10883 } else { 10884 /* DID */ 10885 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, did); 10886 10887 /* Transfer length */ 10888 wqe->xmit_els_rsp.response_payload_len = cmd_size; 10889 10890 bf_set(wqe_cmnd, &wqe->xmit_els_rsp.wqe_com, 10891 CMD_XMIT_ELS_RSP64_WQE); 10892 } 10893 10894 bf_set(wqe_tmo, &wqe->generic.wqe_com, tmo); 10895 bf_set(wqe_reqtag, &wqe->generic.wqe_com, cmdiocbq->iotag); 10896 bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3); 10897 10898 /* If we have NPIV enabled, we want to send ELS traffic by VPI. 10899 * For SLI4, since the driver controls VPIs we also want to include 10900 * all ELS pt2pt protocol traffic as well. 10901 */ 10902 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) || 10903 test_bit(FC_PT2PT, &vport->fc_flag)) { 10904 if (expect_rsp) { 10905 bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID); 10906 10907 /* For ELS_REQUEST64_WQE, use the VPI by default */ 10908 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 10909 phba->vpi_ids[vport->vpi]); 10910 } 10911 10912 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ 10913 if (elscmd == ELS_CMD_ECHO) 10914 bf_set(wqe_ct, &wqe->generic.wqe_com, 0); 10915 else 10916 bf_set(wqe_ct, &wqe->generic.wqe_com, 1); 10917 } 10918 } 10919 10920 void 10921 lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, 10922 struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, 10923 u16 cmd_size, u32 did, u32 elscmd, u8 tmo, 10924 u8 expect_rsp) 10925 { 10926 phba->__lpfc_sli_prep_els_req_rsp(cmdiocbq, vport, bmp, cmd_size, did, 10927 elscmd, tmo, expect_rsp); 10928 } 10929 10930 static void 10931 __lpfc_sli_prep_gen_req_s3(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp, 10932 u16 rpi, u32 num_entry, u8 tmo) 10933 { 10934 IOCB_t *cmd; 10935 10936 cmd = &cmdiocbq->iocb; 10937 memset(cmd, 0, sizeof(*cmd)); 10938 10939 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); 10940 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys); 10941 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 10942 cmd->un.genreq64.bdl.bdeSize = num_entry * sizeof(struct ulp_bde64); 10943 10944 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; 10945 cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT; 10946 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); 10947 10948 cmd->ulpContext = rpi; 10949 cmd->ulpClass = CLASS3; 10950 cmd->ulpCommand = CMD_GEN_REQUEST64_CR; 10951 cmd->ulpBdeCount = 1; 10952 cmd->ulpLe = 1; 10953 cmd->ulpOwner = OWN_CHIP; 10954 cmd->ulpTimeout = tmo; 10955 } 10956 10957 static void 10958 __lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp, 10959 u16 rpi, u32 num_entry, u8 tmo) 10960 { 10961 union lpfc_wqe128 *cmdwqe; 10962 struct ulp_bde64_le *bde, *bpl; 10963 u32 xmit_len = 0, total_len = 0, size, type, i; 10964 10965 cmdwqe = &cmdiocbq->wqe; 10966 memset(cmdwqe, 0, sizeof(*cmdwqe)); 10967 10968 /* Calculate total_len and xmit_len */ 10969 bpl = (struct ulp_bde64_le *)bmp->virt; 10970 for (i = 0; i < num_entry; i++) { 10971 size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK; 10972 total_len += size; 10973 } 10974 for (i = 0; i < num_entry; i++) { 10975 size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK; 10976 type = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_TYPE_MASK; 10977 if (type != ULP_BDE64_TYPE_BDE_64) 10978 break; 10979 xmit_len += size; 10980 } 10981 10982 /* Words 0 - 2 */ 10983 bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde; 10984 bde->addr_low = bpl->addr_low; 10985 bde->addr_high = bpl->addr_high; 10986 
bde->type_size = cpu_to_le32(xmit_len); 10987 bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64); 10988 10989 /* Word 3 */ 10990 cmdwqe->gen_req.request_payload_len = xmit_len; 10991 10992 /* Word 5 */ 10993 bf_set(wqe_type, &cmdwqe->gen_req.wge_ctl, FC_TYPE_CT); 10994 bf_set(wqe_rctl, &cmdwqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL); 10995 bf_set(wqe_si, &cmdwqe->gen_req.wge_ctl, 1); 10996 bf_set(wqe_la, &cmdwqe->gen_req.wge_ctl, 1); 10997 10998 /* Word 6 */ 10999 bf_set(wqe_ctxt_tag, &cmdwqe->gen_req.wqe_com, rpi); 11000 11001 /* Word 7 */ 11002 bf_set(wqe_tmo, &cmdwqe->gen_req.wqe_com, tmo); 11003 bf_set(wqe_class, &cmdwqe->gen_req.wqe_com, CLASS3); 11004 bf_set(wqe_cmnd, &cmdwqe->gen_req.wqe_com, CMD_GEN_REQUEST64_CR); 11005 bf_set(wqe_ct, &cmdwqe->gen_req.wqe_com, SLI4_CT_RPI); 11006 11007 /* Word 12 */ 11008 cmdwqe->gen_req.max_response_payload_len = total_len - xmit_len; 11009 } 11010 11011 void 11012 lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, 11013 struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry, u8 tmo) 11014 { 11015 phba->__lpfc_sli_prep_gen_req(cmdiocbq, bmp, rpi, num_entry, tmo); 11016 } 11017 11018 static void 11019 __lpfc_sli_prep_xmit_seq64_s3(struct lpfc_iocbq *cmdiocbq, 11020 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id, 11021 u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd) 11022 { 11023 IOCB_t *icmd; 11024 11025 icmd = &cmdiocbq->iocb; 11026 memset(icmd, 0, sizeof(*icmd)); 11027 11028 icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys); 11029 icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys); 11030 icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 11031 icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64)); 11032 icmd->un.xseq64.w5.hcsw.Fctl = LA; 11033 if (last_seq) 11034 icmd->un.xseq64.w5.hcsw.Fctl |= LS; 11035 icmd->un.xseq64.w5.hcsw.Dfctl = 0; 11036 icmd->un.xseq64.w5.hcsw.Rctl = rctl; 11037 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT; 11038 11039 icmd->ulpBdeCount = 1; 11040 icmd->ulpLe = 1; 11041 icmd->ulpClass = CLASS3; 11042 11043 switch (cr_cx_cmd) { 11044 case CMD_XMIT_SEQUENCE64_CR: 11045 icmd->ulpContext = rpi; 11046 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR; 11047 break; 11048 case CMD_XMIT_SEQUENCE64_CX: 11049 icmd->ulpContext = ox_id; 11050 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX; 11051 break; 11052 default: 11053 break; 11054 } 11055 } 11056 11057 static void 11058 __lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq, 11059 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id, 11060 u32 full_size, u8 rctl, u8 last_seq, u8 cr_cx_cmd) 11061 { 11062 union lpfc_wqe128 *wqe; 11063 struct ulp_bde64 *bpl; 11064 11065 wqe = &cmdiocbq->wqe; 11066 memset(wqe, 0, sizeof(*wqe)); 11067 11068 /* Words 0 - 2 */ 11069 bpl = (struct ulp_bde64 *)bmp->virt; 11070 wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh; 11071 wqe->xmit_sequence.bde.addrLow = bpl->addrLow; 11072 wqe->xmit_sequence.bde.tus.w = bpl->tus.w; 11073 11074 /* Word 5 */ 11075 bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, last_seq); 11076 bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 1); 11077 bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0); 11078 bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, rctl); 11079 bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_CT); 11080 11081 /* Word 6 */ 11082 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, rpi); 11083 11084 bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com, 11085 CMD_XMIT_SEQUENCE64_WQE); 11086 11087 /* Word 7 */ 11088 bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3); 11089 11090 /* Word 9 */ 11091 
bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id); 11092 11093 /* Word 12 */ 11094 if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK)) 11095 wqe->xmit_sequence.xmit_len = full_size; 11096 else 11097 wqe->xmit_sequence.xmit_len = 11098 wqe->xmit_sequence.bde.tus.f.bdeSize; 11099 } 11100 11101 void 11102 lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, 11103 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id, 11104 u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd) 11105 { 11106 phba->__lpfc_sli_prep_xmit_seq64(cmdiocbq, bmp, rpi, ox_id, num_entry, 11107 rctl, last_seq, cr_cx_cmd); 11108 } 11109 11110 static void 11111 __lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context, 11112 u16 iotag, u8 ulp_class, u16 cqid, bool ia, 11113 bool wqec) 11114 { 11115 IOCB_t *icmd = NULL; 11116 11117 icmd = &cmdiocbq->iocb; 11118 memset(icmd, 0, sizeof(*icmd)); 11119 11120 /* Word 5 */ 11121 icmd->un.acxri.abortContextTag = ulp_context; 11122 icmd->un.acxri.abortIoTag = iotag; 11123 11124 if (ia) { 11125 /* Word 7 */ 11126 icmd->ulpCommand = CMD_CLOSE_XRI_CN; 11127 } else { 11128 /* Word 3 */ 11129 icmd->un.acxri.abortType = ABORT_TYPE_ABTS; 11130 11131 /* Word 7 */ 11132 icmd->ulpClass = ulp_class; 11133 icmd->ulpCommand = CMD_ABORT_XRI_CN; 11134 } 11135 11136 /* Word 7 */ 11137 icmd->ulpLe = 1; 11138 } 11139 11140 static void 11141 __lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context, 11142 u16 iotag, u8 ulp_class, u16 cqid, bool ia, 11143 bool wqec) 11144 { 11145 union lpfc_wqe128 *wqe; 11146 11147 wqe = &cmdiocbq->wqe; 11148 memset(wqe, 0, sizeof(*wqe)); 11149 11150 /* Word 3 */ 11151 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 11152 if (ia) 11153 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); 11154 else 11155 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 11156 11157 /* Word 7 */ 11158 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_WQE); 11159 11160 /* Word 8 */ 11161 wqe->abort_cmd.wqe_com.abort_tag = ulp_context; 11162 11163 /* Word 9 */ 11164 bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, iotag); 11165 11166 /* Word 10 */ 11167 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); 11168 11169 /* Word 11 */ 11170 if (wqec) 11171 bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1); 11172 bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid); 11173 bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND); 11174 } 11175 11176 void 11177 lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, 11178 u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid, 11179 bool ia, bool wqec) 11180 { 11181 phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class, 11182 cqid, ia, wqec); 11183 } 11184 11185 /** 11186 * lpfc_sli_api_table_setup - Set up sli api function jump table 11187 * @phba: The hba struct for which this call is being executed. 11188 * @dev_grp: The HBA PCI-Device group number. 11189 * 11190 * This routine sets up the SLI interface API function jump table in @phba 11191 * struct. 11192 * Returns: 0 - success, -ENODEV - failure. 
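 *
 * Once the table is populated, the generic entry points simply indirect
 * through it; e.g. lpfc_sli_issue_fcp_io() above reduces to
 *
 *	return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
 *
 * so an LPFC_PCI_DEV_OC (SLI-4) adapter lands in __lpfc_sli_issue_fcp_io_s4()
 * and an LPFC_PCI_DEV_LP (SLI-3) adapter in __lpfc_sli_issue_fcp_io_s3().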
 **/
int
lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
		phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
		phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s3;
		phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s3;
		phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s3;
		phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
		phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
		phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s4;
		phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s4;
		phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s4;
		phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1419 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
}

/**
 * lpfc_sli4_calc_ring - Calculates which ring to use
 * @phba: Pointer to HBA context object.
 * @piocb: Pointer to command iocb.
 *
 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
 * hba_wqidx, thus we need to calculate the corresponding ring.
 * Since ABORTS must go on the same WQ as the command they are
 * aborting, we use the command's hba_wqidx.
 */
struct lpfc_sli_ring *
lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
	struct lpfc_io_buf *lpfc_cmd;

	if (piocb->cmd_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
		if (unlikely(!phba->sli4_hba.hdwq))
			return NULL;
		/*
		 * for abort iocb hba_wqidx should already
		 * be setup based on what work queue we used.
		 */
		if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
			lpfc_cmd = piocb->io_buf;
			piocb->hba_wqidx = lpfc_cmd->hdwq_no;
		}
		return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
	} else {
		if (unlikely(!phba->sli4_hba.els_wq))
			return NULL;
		piocb->hba_wqidx = 0;
		return phba->sli4_hba.els_wq->pring;
	}
}

inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq)
{
	struct lpfc_hba *phba = eq->phba;

	/*
	 * Unlocking an irq is one of the entry points to check
	 * for re-schedule, but we are good for the io submission
	 * path as the midlayer does a get_cpu to glue us in. Flush
	 * out the invalidate queue so we can see the updated
	 * value for flag.
	 */
	smp_rmb();

	if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
		/* We will not likely get the completion for the caller
		 * during this iteration, but that is fine.
		 * Future io's coming on this eq should be able to
		 * pick it up. As for the case of single io's, they
		 * will be handled through a sched from polling timer
		 * function which is currently triggered every 1msec.
		 */
		lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM,
				     LPFC_QUEUE_WORK);
}

/**
 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: Ring number
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * lpfc_sli_issue_iocb is a wrapper around the __lpfc_sli_issue_iocb
 * function. This function takes the appropriate lock, calls the
 * __lpfc_sli_issue_iocb function, and returns the error returned
 * by the __lpfc_sli_issue_iocb function. This wrapper is used by
 * functions which do not hold hbalock.
 **/
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		    struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_queue *eq;
	unsigned long iflags;
	int rc;

	/* If the PCI channel is in offline state, do not post iocbs. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return IOCB_ERROR;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_sli_prep_wqe(phba, piocb);

		eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;

		pring = lpfc_sli4_calc_ring(phba, piocb);
		if (unlikely(pring == NULL))
			return IOCB_ERROR;

		spin_lock_irqsave(&pring->ring_lock, iflags);
		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);

		lpfc_sli4_poll_eq(eq);
	} else {
		/* For now, SLI2/3 will still use hbalock */
		spin_lock_irqsave(&phba->hbalock, iflags);
		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
	return rc;
}

/**
 * lpfc_extra_ring_setup - Extra ring setup function
 * @phba: Pointer to HBA context object.
 *
 * This function is called while the driver attaches to the
 * HBA to set up the extra ring. The extra ring is used
 * only when the driver needs to support target mode or
 * IP over FC functionality.
 *
 * This function is called with no lock held. SLI3 only.
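 *
 * Net effect of the arithmetic below: the FCP ring gives up its R1XTRA
 * and R3XTRA command/response entries and the extra ring gains exactly
 * the same counts, so the total SLIM iocb budget configured by
 * lpfc_sli_setup() is unchanged.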
 **/
static int
lpfc_extra_ring_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;

	/* Adjust cmd/rsp ring iocb entries more evenly */

	/* Take some away from the FCP ring */
	pring = &psli->sli3_ring[LPFC_FCP_RING];
	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* and give them to the extra ring */
	pring = &psli->sli3_ring[LPFC_EXTRA_RING];

	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* Setup default profile for this ring */
	pring->iotag_max = 4096;
	pring->num_mask = 1;
	pring->prt[0].profile = 0;	/* Mask 0 */
	pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
	pring->prt[0].type = phba->cfg_multi_ring_type;
	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
	return 0;
}

static void
lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
			     struct lpfc_nodelist *ndlp)
{
	unsigned long iflags;
	struct lpfc_work_evt *evtp = &ndlp->recovery_evt;

	/* Hold a node reference for outstanding queued work */
	if (!lpfc_nlp_get(ndlp))
		return;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (!list_empty(&evtp->evt_listp)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_nlp_put(ndlp);
		return;
	}

	evtp->evt_arg1 = ndlp;
	evtp->evt = LPFC_EVT_RECOVER_PORT;
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	lpfc_worker_wake_up(phba);
}

/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to iocb object.
 *
 * The async_event handler calls this routine when it receives
 * an ASYNC_STATUS_CN event from the port. The port generates
 * this event when an Abort Sequence request to an rport fails
 * twice in succession. The abort could be originated by the
 * driver or by the port. The ABTS could have been for an ELS
 * or FCP IO. The port only generates this event when an ABTS
 * fails to complete after one retry.
 */
static void
lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
			  struct lpfc_iocbq *iocbq)
{
	struct lpfc_nodelist *ndlp = NULL;
	uint16_t rpi = 0, vpi = 0;
	struct lpfc_vport *vport = NULL;

	/* The rpi in the ulpContext is vport-sensitive.
*/ 11428 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag; 11429 rpi = iocbq->iocb.ulpContext; 11430 11431 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11432 "3092 Port generated ABTS async event " 11433 "on vpi %d rpi %d status 0x%x\n", 11434 vpi, rpi, iocbq->iocb.ulpStatus); 11435 11436 vport = lpfc_find_vport_by_vpid(phba, vpi); 11437 if (!vport) 11438 goto err_exit; 11439 ndlp = lpfc_findnode_rpi(vport, rpi); 11440 if (!ndlp) 11441 goto err_exit; 11442 11443 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) 11444 lpfc_sli_abts_recover_port(vport, ndlp); 11445 return; 11446 11447 err_exit: 11448 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11449 "3095 Event Context not found, no " 11450 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n", 11451 vpi, rpi, iocbq->iocb.ulpStatus, 11452 iocbq->iocb.ulpContext); 11453 } 11454 11455 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port. 11456 * @phba: pointer to HBA context object. 11457 * @ndlp: nodelist pointer for the impacted rport. 11458 * @axri: pointer to the wcqe containing the failed exchange. 11459 * 11460 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the 11461 * port. The port generates this event when an abort exchange request to an 11462 * rport fails twice in succession with no reply. The abort could be originated 11463 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO. 11464 */ 11465 void 11466 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, 11467 struct lpfc_nodelist *ndlp, 11468 struct sli4_wcqe_xri_aborted *axri) 11469 { 11470 uint32_t ext_status = 0; 11471 11472 if (!ndlp) { 11473 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11474 "3115 Node Context not found, driver " 11475 "ignoring abts err event\n"); 11476 return; 11477 } 11478 11479 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11480 "3116 Port generated FCP XRI ABORT event on " 11481 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n", 11482 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], 11483 bf_get(lpfc_wcqe_xa_xri, axri), 11484 bf_get(lpfc_wcqe_xa_status, axri), 11485 axri->parameter); 11486 11487 /* 11488 * Catch the ABTS protocol failure case. Older OCe FW releases returned 11489 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and 11490 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT. 11491 */ 11492 ext_status = axri->parameter & IOERR_PARAM_MASK; 11493 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && 11494 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) 11495 lpfc_sli_post_recovery_event(phba, ndlp); 11496 } 11497 11498 /** 11499 * lpfc_sli_async_event_handler - ASYNC iocb handler function 11500 * @phba: Pointer to HBA context object. 11501 * @pring: Pointer to driver SLI ring object. 11502 * @iocbq: Pointer to iocb object. 11503 * 11504 * This function is called by the slow ring event handler 11505 * function when there is an ASYNC event iocb in the ring. 11506 * This function is called with no lock held. 11507 * Currently this function handles only temperature related 11508 * ASYNC events. The function decodes the temperature sensor 11509 * event message and posts events for the management applications. 
 **/
static void
lpfc_sli_async_event_handler(struct lpfc_hba *phba,
			     struct lpfc_sli_ring *pring,
			     struct lpfc_iocbq *iocbq)
{
	IOCB_t *icmd;
	uint16_t evt_code;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;
	uint32_t *iocb_w;

	icmd = &iocbq->iocb;
	evt_code = icmd->un.asyncstat.evt_code;

	switch (evt_code) {
	case ASYNC_TEMP_WARN:
	case ASYNC_TEMP_SAFE:
		temp_event_data.data = (uint32_t) icmd->ulpContext;
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		if (evt_code == ASYNC_TEMP_WARN) {
			temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0347 Adapter is very hot, please take "
					"corrective action. temperature : %d Celsius\n",
					(uint32_t) icmd->ulpContext);
		} else {
			temp_event_data.event_code = LPFC_NORMAL_TEMP;
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0340 Adapter temperature is OK now. "
					"temperature : %d Celsius\n",
					(uint32_t) icmd->ulpContext);
		}

		/* Send temperature change event to applications */
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
			sizeof(temp_event_data), (char *) &temp_event_data,
			LPFC_NL_VENDOR_ID);
		break;
	case ASYNC_STATUS_CN:
		lpfc_sli_abts_err_handler(phba, iocbq);
		break;
	default:
		iocb_w = (uint32_t *) icmd;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0346 Ring %d handler: unexpected ASYNC_STATUS"
			" evt_code 0x%x\n"
			"W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
			"W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
			"W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
			"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
			pring->ringno, icmd->un.asyncstat.evt_code,
			iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
			iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
			iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
			iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);

		break;
	}
}


/**
 * lpfc_sli4_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli4_setup sets up the ELS ring of the SLI-4 interface with
 * its unsolicited receive masks. This function is called while the
 * driver attaches to the HBA and before the interrupts are enabled.
 * So there is no need for locking.
 *
 * This function always returns 0.
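 *
 * For example, an unsolicited frame with R_CTL FC_RCTL_ELS_REQ and TYPE
 * FC_TYPE_ELS matches prt[0] below and is handed to lpfc_els_unsol_event(),
 * while a nameserver CT response (FC_RCTL_DD_SOL_CTL / FC_TYPE_CT) matches
 * prt[3] and is handed to lpfc_ct_unsol_event().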
 **/
int
lpfc_sli4_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;

	pring = phba->sli4_hba.els_wq->pring;
	pring->num_mask = LPFC_MAX_RING_MASK;
	pring->prt[0].profile = 0;	/* Mask 0 */
	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
	pring->prt[0].type = FC_TYPE_ELS;
	pring->prt[0].lpfc_sli_rcv_unsol_event =
	    lpfc_els_unsol_event;
	pring->prt[1].profile = 0;	/* Mask 1 */
	pring->prt[1].rctl = FC_RCTL_ELS_REP;
	pring->prt[1].type = FC_TYPE_ELS;
	pring->prt[1].lpfc_sli_rcv_unsol_event =
	    lpfc_els_unsol_event;
	pring->prt[2].profile = 0;	/* Mask 2 */
	/* NameServer Inquiry */
	pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
	/* NameServer */
	pring->prt[2].type = FC_TYPE_CT;
	pring->prt[2].lpfc_sli_rcv_unsol_event =
	    lpfc_ct_unsol_event;
	pring->prt[3].profile = 0;	/* Mask 3 */
	/* NameServer response */
	pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
	/* NameServer */
	pring->prt[3].type = FC_TYPE_CT;
	pring->prt[3].lpfc_sli_rcv_unsol_event =
	    lpfc_ct_unsol_event;
	return 0;
}

/**
 * lpfc_sli_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_setup sets up rings of the SLI interface with
 * the number of iocbs per ring and iotags. This function is
 * called while the driver attaches to the HBA and before the
 * interrupts are enabled. So there is no need for locking.
 *
 * This function always returns 0. SLI3 only.
 **/
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
	int i, totiocbsize = 0;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
	psli->sli_flag = 0;

	psli->iocbq_lookup = NULL;
	psli->iocbq_lookup_len = 0;
	psli->last_iotag = 0;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->sli3_ring[i];
		switch (i) {
		case LPFC_FCP_RING:	/* ring 0 - FCP */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
			pring->sli.sli3.numCiocb +=
				SLI2_IOCB_CMD_R1XTRA_ENTRIES;
			pring->sli.sli3.numRiocb +=
				SLI2_IOCB_RSP_R1XTRA_ENTRIES;
			pring->sli.sli3.numCiocb +=
				SLI2_IOCB_CMD_R3XTRA_ENTRIES;
			pring->sli.sli3.numRiocb +=
				SLI2_IOCB_RSP_R3XTRA_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_ctr = 0;
			pring->iotag_max =
			    (phba->cfg_hba_queue_depth * 2);
			pring->fast_iotag = pring->iotag_max;
			pring->num_mask = 0;
			break;
		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_max = phba->cfg_hba_queue_depth;
			pring->num_mask = 0;
			break;
		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			pring->lpfc_sli_rcv_async_status =
				lpfc_sli_async_event_handler;
			pring->num_mask = LPFC_MAX_RING_MASK;
			pring->prt[0].profile = 0;	/* Mask 0 */
			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
			pring->prt[0].type = FC_TYPE_ELS;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[1].profile = 0;	/* Mask 1 */
			pring->prt[1].rctl = FC_RCTL_ELS_REP;
			pring->prt[1].type = FC_TYPE_ELS;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[2].profile = 0;	/* Mask 2 */
			/* NameServer Inquiry */
			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
			/* NameServer */
			pring->prt[2].type = FC_TYPE_CT;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;	/* Mask 3 */
			/* NameServer response */
			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
			/* NameServer */
			pring->prt[3].type = FC_TYPE_CT;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			break;
		}
		totiocbsize += (pring->sli.sli3.numCiocb *
				pring->sli.sli3.sizeCiocb) +
			(pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
	}
	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
		       "SLI2 SLIM Data: x%x x%lx\n",
		       phba->brd_no, totiocbsize,
		       (unsigned long) MAX_SLIM_IOCB_SIZE);
	}
	if (phba->cfg_multi_ring_support == 2)
		lpfc_extra_ring_setup(phba);

	return 0;
}

/**
 * lpfc_sli4_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
 * ring. This function also initializes ring indices of each ring.
 * This function is called during the initialization of the SLI
 * interface of an HBA.
 * This function is called with no lock held.
 **/
void
lpfc_sli4_queue_init(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i;

	psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	INIT_LIST_HEAD(&psli->mboxq);
	INIT_LIST_HEAD(&psli->mboxq_cmpl);
	/* Initialize list headers for txq and txcmplq as double linked lists */
	for (i = 0; i < phba->cfg_hdw_queue; i++) {
		pring = phba->sli4_hba.hdwq[i].io_wq->pring;
		pring->flag = 0;
		pring->ringno = LPFC_FCP_RING;
		pring->txcmplq_cnt = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		spin_lock_init(&pring->ring_lock);
	}
	pring = phba->sli4_hba.els_wq->pring;
	pring->flag = 0;
	pring->ringno = LPFC_ELS_RING;
	pring->txcmplq_cnt = 0;
	INIT_LIST_HEAD(&pring->txq);
	INIT_LIST_HEAD(&pring->txcmplq);
	INIT_LIST_HEAD(&pring->iocb_continueq);
	spin_lock_init(&pring->ring_lock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		pring = phba->sli4_hba.nvmels_wq->pring;
		pring->flag = 0;
		pring->ringno = LPFC_ELS_RING;
		pring->txcmplq_cnt = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		spin_lock_init(&pring->ring_lock);
	}

	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
 * ring. This function also initializes ring indices of each ring.
 * This function is called during the initialization of the SLI
 * interface of an HBA.
 * This function is called with no lock held.
 **/
void
lpfc_sli_queue_init(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i;

	psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	INIT_LIST_HEAD(&psli->mboxq);
	INIT_LIST_HEAD(&psli->mboxq_cmpl);
	/* Initialize list headers for txq and txcmplq as double linked lists */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->sli3_ring[i];
		pring->ringno = i;
		pring->sli.sli3.next_cmdidx = 0;
		pring->sli.sli3.local_getidx = 0;
		pring->sli.sli3.cmdidx = 0;
		INIT_LIST_HEAD(&pring->iocb_continueq);
		INIT_LIST_HEAD(&pring->iocb_continue_saveq);
		INIT_LIST_HEAD(&pring->postbufq);
		pring->flag = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		spin_lock_init(&pring->ring_lock);
	}
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
 * @phba: Pointer to HBA context object.
 *
 * This routine flushes the mailbox command sub-system. It will unconditionally
 * flush all the mailbox commands in the three possible stages in the mailbox
 * command sub-system: the pending mailbox command queue, the outstanding
 * mailbox command, and the completed mailbox command queue. It is the
 * caller's responsibility to make sure that the driver is in the proper
 * state to flush the mailbox command sub-system.
 * Namely, the posting of mailbox commands into the
 * pending mailbox command queue from the various clients must be stopped;
 * either the HBA is in a state that it will never work on the outstanding
 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
 * mailbox command has been completed.
 **/
static void
lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	unsigned long iflag;

	/* Disable softirqs, including timers from obtaining phba->hbalock */
	local_bh_disable();

	/* Flush all the mailbox commands in the mbox system */
	spin_lock_irqsave(&phba->hbalock, iflag);

	/* The pending mailbox command queue */
	list_splice_init(&phba->sli.mboxq, &completions);
	/* The outstanding active mailbox command */
	if (psli->mbox_active) {
		list_add_tail(&psli->mbox_active->list, &completions);
		psli->mbox_active = NULL;
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	}
	/* The completed mailbox command queue */
	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Enable softirqs again, done with phba->hbalock */
	local_bh_enable();

	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
	while (!list_empty(&completions)) {
		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	}
}

/**
 * lpfc_sli_host_down - Vport cleanup function
 * @vport: Pointer to virtual port object.
 *
 * lpfc_sli_host_down is called to clean up the resources
 * associated with a vport before destroying virtual
 * port data structures.
 * This function does the following operations:
 * - Free discovery resources associated with this virtual
 *   port.
 * - Free iocbs associated with this virtual port in
 *   the txq.
 * - Send abort for all iocb commands associated with this
 *   vport in txcmplq.
 *
 * This function is called with no lock held and always returns 1.
 **/
int
lpfc_sli_host_down(struct lpfc_vport *vport)
{
	LIST_HEAD(completions);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	int i;
	unsigned long flags = 0;
	uint16_t prev_pring_flag;

	lpfc_cleanup_discovery_resources(vport);

	spin_lock_irqsave(&phba->hbalock, flags);

	/*
	 * Error everything on the txq since these iocbs
	 * have not been given to the FW yet.
11927 * Also issue ABTS for everything on the txcmplq 11928 */ 11929 if (phba->sli_rev != LPFC_SLI_REV4) { 11930 for (i = 0; i < psli->num_rings; i++) { 11931 pring = &psli->sli3_ring[i]; 11932 prev_pring_flag = pring->flag; 11933 /* Only slow rings */ 11934 if (pring->ringno == LPFC_ELS_RING) { 11935 pring->flag |= LPFC_DEFERRED_RING_EVENT; 11936 /* Set the lpfc data pending flag */ 11937 set_bit(LPFC_DATA_READY, &phba->data_flags); 11938 } 11939 list_for_each_entry_safe(iocb, next_iocb, 11940 &pring->txq, list) { 11941 if (iocb->vport != vport) 11942 continue; 11943 list_move_tail(&iocb->list, &completions); 11944 } 11945 list_for_each_entry_safe(iocb, next_iocb, 11946 &pring->txcmplq, list) { 11947 if (iocb->vport != vport) 11948 continue; 11949 lpfc_sli_issue_abort_iotag(phba, pring, iocb, 11950 NULL); 11951 } 11952 pring->flag = prev_pring_flag; 11953 } 11954 } else { 11955 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 11956 pring = qp->pring; 11957 if (!pring) 11958 continue; 11959 if (pring == phba->sli4_hba.els_wq->pring) { 11960 pring->flag |= LPFC_DEFERRED_RING_EVENT; 11961 /* Set the lpfc data pending flag */ 11962 set_bit(LPFC_DATA_READY, &phba->data_flags); 11963 } 11964 prev_pring_flag = pring->flag; 11965 spin_lock(&pring->ring_lock); 11966 list_for_each_entry_safe(iocb, next_iocb, 11967 &pring->txq, list) { 11968 if (iocb->vport != vport) 11969 continue; 11970 list_move_tail(&iocb->list, &completions); 11971 } 11972 spin_unlock(&pring->ring_lock); 11973 list_for_each_entry_safe(iocb, next_iocb, 11974 &pring->txcmplq, list) { 11975 if (iocb->vport != vport) 11976 continue; 11977 lpfc_sli_issue_abort_iotag(phba, pring, iocb, 11978 NULL); 11979 } 11980 pring->flag = prev_pring_flag; 11981 } 11982 } 11983 spin_unlock_irqrestore(&phba->hbalock, flags); 11984 11985 /* Make sure HBA is alive */ 11986 lpfc_issue_hb_tmo(phba); 11987 11988 /* Cancel all the IOCBs from the completions list */ 11989 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11990 IOERR_SLI_DOWN); 11991 return 1; 11992 } 11993 11994 /** 11995 * lpfc_sli_hba_down - Resource cleanup function for the HBA 11996 * @phba: Pointer to HBA context object. 11997 * 11998 * This function cleans up all iocb, buffers, mailbox commands 11999 * while shutting down the HBA. This function is called with no 12000 * lock held and always returns 1. 12001 * This function does the following to cleanup driver resources: 12002 * - Free discovery resources for each virtual port 12003 * - Cleanup any pending fabric iocbs 12004 * - Iterate through the iocb txq and free each entry 12005 * in the list. 12006 * - Free up any buffer posted to the HBA 12007 * - Free mailbox commands in the mailbox queue. 12008 **/ 12009 int 12010 lpfc_sli_hba_down(struct lpfc_hba *phba) 12011 { 12012 LIST_HEAD(completions); 12013 struct lpfc_sli *psli = &phba->sli; 12014 struct lpfc_queue *qp = NULL; 12015 struct lpfc_sli_ring *pring; 12016 struct lpfc_dmabuf *buf_ptr; 12017 unsigned long flags = 0; 12018 int i; 12019 12020 /* Shutdown the mailbox command sub-system */ 12021 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT); 12022 12023 lpfc_hba_down_prep(phba); 12024 12025 /* Disable softirqs, including timers from obtaining phba->hbalock */ 12026 local_bh_disable(); 12027 12028 lpfc_fabric_abort_hba(phba); 12029 12030 spin_lock_irqsave(&phba->hbalock, flags); 12031 12032 /* 12033 * Error everything on the txq since these iocbs 12034 * have not been given to the FW yet. 
	 */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			/* Only slow rings */
			if (pring->ringno == LPFC_ELS_RING) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
			list_splice_init(&pring->txq, &completions);
		}
	} else {
		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
			pring = qp->pring;
			if (!pring)
				continue;
			spin_lock(&pring->ring_lock);
			list_splice_init(&pring->txq, &completions);
			spin_unlock(&pring->ring_lock);
			if (pring == phba->sli4_hba.els_wq->pring) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);

	spin_lock_irqsave(&phba->hbalock, flags);
	list_splice_init(&phba->elsbuf, &completions);
	phba->elsbuf_cnt = 0;
	phba->elsbuf_prev_cnt = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	while (!list_empty(&completions)) {
		list_remove_head(&completions, buf_ptr,
				 struct lpfc_dmabuf, list);
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
	}

	/* Enable softirqs again, done with phba->hbalock */
	local_bh_enable();

	/* Return any active mbox cmds */
	del_timer_sync(&psli->mbox_tmo);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	return 1;
}

/**
 * lpfc_sli_pcimem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy, consumed one 32-bit word at a time.
 *
 * This function is used for copying data between driver memory
 * and the SLI memory. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
void
lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = le32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}


/**
 * lpfc_sli_bemem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy, consumed one 32-bit word at a time.
 *
 * This function is used for copying data from a data structure
 * with big-endian representation to local endianness.
 * This function can be called with or without lock.
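 *
 * Minimal usage sketch (hypothetical names; @cnt is a byte count and
 * should be a multiple of sizeof(uint32_t)):
 *
 *	lpfc_sli_bemem_bcopy(&be_blob, &cpu_blob, sizeof(cpu_blob));
 *
 * where be_blob holds big-endian 32-bit words and cpu_blob receives the
 * CPU-endian copy.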
 **/
void
lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = be32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}

/**
 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mp: Pointer to driver buffer object.
 *
 * This function is called with no lock held.
 * It always returns zero after adding the buffer to the postbufq
 * buffer list.
 **/
int
lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_dmabuf *mp)
{
	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it
	   up later */
	spin_lock_irq(&phba->hbalock);
	list_add_tail(&mp->list, &pring->postbufq);
	pring->postbufq_cnt++;
	spin_unlock_irq(&phba->hbalock);
	return 0;
}

/**
 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
 * @phba: Pointer to HBA context object.
 *
 * When HBQ is enabled, buffers are searched based on tags. This function
 * allocates a tag for a buffer posted using the CMD_QUE_XRI64_CX iocb. The
 * tag is bit-wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
 * does not conflict with tags of buffers posted for unsolicited events.
 * The function returns the allocated tag. The function is called with
 * no locks held.
 **/
uint32_t
lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->buffer_tag_count++;
	/*
	 * Always set the QUE_BUFTAG_BIT to distinguish this tag from one
	 * assigned by HBQ.
	 */
	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
	spin_unlock_irq(&phba->hbalock);
	return phba->buffer_tag_count;
}

/**
 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: Buffer tag.
 *
 * Buffers posted using the CMD_QUE_XRI64_CX iocb are in the pring->postbufq
 * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
 * iocb is posted to the response ring with the tag of the buffer.
 * This function searches the pring->postbufq list using the tag
 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
 * iocb. If the buffer is found, then the lpfc_dmabuf object of the
 * buffer is returned to the caller, else NULL is returned.
 * This function is called with no lock held.
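 *
 * End-to-end sketch of the tag flow: the poster first takes a tag from
 * lpfc_sli_get_buffer_tag() above, stores it in mp->buffer_tag, and posts
 * the buffer with CMD_QUE_XRI64_CX; this routine later recovers the buffer
 * when a CMD_IOCB_RET_XRI64_CX iocb carries the same tag back.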
 **/
struct lpfc_dmabuf *
lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    uint32_t tag)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on tag */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->buffer_tag == tag) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0402 Cannot find virtual addr for buffer tag on "
			"ring %d Data x%lx x%px x%px x%x\n",
			pring->ringno, (unsigned long) tag,
			slp->next, slp->prev, pring->postbufq_cnt);

	return NULL;
}

/**
 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @phys: DMA address of the buffer.
 *
 * This function searches the buffer list using the dma_address
 * of an unsolicited event to find the driver's lpfc_dmabuf object
 * corresponding to the dma_address. The function returns the
 * lpfc_dmabuf object if a buffer is found, else it returns NULL.
 * This function is called by the ct and els unsolicited event
 * handlers to get the buffer associated with the unsolicited
 * event.
 *
 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 dma_addr_t phys)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on phys */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->phys == phys) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0410 Cannot find virtual addr for mapped buf on "
			"ring %d Data x%llx x%px x%px x%x\n",
			pring->ringno, (unsigned long long)phys,
			slp->next, slp->prev, pring->postbufq_cnt);
	return NULL;
}

/**
 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * This function is the completion handler for the abort iocbs for
 * ELS commands. This function is called from the ELS ring event
 * handler with no lock held. This function frees memory resources
 * associated with the abort iocb.
 **/
static void
lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);
	u8 cmnd = get_job_cmnd(phba, cmdiocb);

	if (ulp_status) {
		/*
		 * Assume that the port already completed and returned, or
		 * will return the iocb. Just log the message.
12308      */
12309     if (phba->sli_rev < LPFC_SLI_REV4) {
12310         if (cmnd == CMD_ABORT_XRI_CX &&
12311             ulp_status == IOSTAT_LOCAL_REJECT &&
12312             ulp_word4 == IOERR_ABORT_REQUESTED) {
12313             goto release_iocb;
12314         }
12315     }
12316     }
12317 
12318     lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
12319             "0327 Abort els iocb complete x%px with io cmd xri %x "
12320             "abort tag x%x abort status %x abort code %x\n",
12321             cmdiocb, get_job_abtsiotag(phba, cmdiocb),
12322             (phba->sli_rev == LPFC_SLI_REV4) ?
12323             get_wqe_reqtag(cmdiocb) :
12324             cmdiocb->iocb.ulpIoTag,
12325             ulp_status, ulp_word4);
12326 release_iocb:
12327     lpfc_sli_release_iocbq(phba, cmdiocb);
12328     return;
12329 }
12330 
12331 /**
12332  * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
12333  * @phba: Pointer to HBA context object.
12334  * @cmdiocb: Pointer to driver command iocb object.
12335  * @rspiocb: Pointer to driver response iocb object.
12336  *
12337  * The function is called from the SLI ring event handler with no
12338  * lock held. This function is the completion handler for ELS commands
12339  * which are aborted. The function frees memory resources used for
12340  * the aborted ELS commands.
12341  **/
12342 void
12343 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12344                      struct lpfc_iocbq *rspiocb)
12345 {
12346     struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
12347     IOCB_t *irsp;
12348     LPFC_MBOXQ_t *mbox;
12349     u32 ulp_command, ulp_status, ulp_word4, iotag;
12350 
12351     ulp_command = get_job_cmnd(phba, cmdiocb);
12352     ulp_status = get_job_ulpstatus(phba, rspiocb);
12353     ulp_word4 = get_job_word4(phba, rspiocb);
12354 
12355     if (phba->sli_rev == LPFC_SLI_REV4) {
12356         iotag = get_wqe_reqtag(cmdiocb);
12357     } else {
12358         irsp = &rspiocb->iocb;
12359         iotag = irsp->ulpIoTag;
12360 
12361         /* It is possible for a PLOGI_RJT on NPIV ports to get aborted.
12362          * The MBX_REG_LOGIN64 mbox command is freed back to the
12363          * mbox_mem_pool here.
12364          */
12365         if (cmdiocb->context_un.mbox) {
12366             mbox = cmdiocb->context_un.mbox;
12367             lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
12368             cmdiocb->context_un.mbox = NULL;
12369         }
12370     }
12371 
12372     /* ELS cmd tag <ulpIoTag> completes */
12373     lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
12374             "0139 Ignoring ELS cmd code x%x ref cnt x%x Data: "
12375             "x%x x%x x%x x%px\n",
12376             ulp_command, kref_read(&cmdiocb->ndlp->kref),
12377             ulp_status, ulp_word4, iotag, cmdiocb->ndlp);
12378     /*
12379      * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
12380      * if the exchange is busy.
12381      */
12382     if (ulp_command == CMD_GEN_REQUEST64_CR)
12383         lpfc_ct_free_iocb(phba, cmdiocb);
12384     else
12385         lpfc_els_free_iocb(phba, cmdiocb);
12386 
12387     lpfc_nlp_put(ndlp);
12388 }
12389 
12390 /**
12391  * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
12392  * @phba: Pointer to HBA context object.
12393  * @pring: Pointer to driver SLI ring object.
12394  * @cmdiocb: Pointer to driver command iocb object.
12395  * @cmpl: completion function.
12396  *
12397  * This function issues an abort iocb for the provided command iocb. In case
12398  * of unloading, the abort iocb will not be issued to commands on the ELS
12399  * ring. Instead, the completion callback of those commands is changed so
12400  * that nothing happens when they finish. This function is called with the
12401  * hbalock held and no ring_lock held (SLI4). The function returns IOCB_SUCCESS
12402  * when the command iocb is an abort request.
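 *
 * A minimal, hypothetical caller sketch (the iocb being aborted and the
 * completion routine are illustrative choices):
 *
 *   spin_lock_irqsave(&phba->hbalock, iflags);
 *   rc = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
 *                                   lpfc_sli_abort_fcp_cmpl);
 *   spin_unlock_irqrestore(&phba->hbalock, iflags);
 *   // rc is IOCB_SUCCESS, IOCB_ABORTING, IOCB_NORESOURCE or IOCB_ERROR;
 *   // the caller must handle IOCB_ERROR itself.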
12403 * 12404 **/ 12405 int 12406 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 12407 struct lpfc_iocbq *cmdiocb, void *cmpl) 12408 { 12409 struct lpfc_vport *vport = cmdiocb->vport; 12410 struct lpfc_iocbq *abtsiocbp; 12411 int retval = IOCB_ERROR; 12412 unsigned long iflags; 12413 struct lpfc_nodelist *ndlp = NULL; 12414 u32 ulp_command = get_job_cmnd(phba, cmdiocb); 12415 u16 ulp_context, iotag; 12416 bool ia; 12417 12418 /* 12419 * There are certain command types we don't want to abort. And we 12420 * don't want to abort commands that are already in the process of 12421 * being aborted. 12422 */ 12423 if (ulp_command == CMD_ABORT_XRI_WQE || 12424 ulp_command == CMD_ABORT_XRI_CN || 12425 ulp_command == CMD_CLOSE_XRI_CN || 12426 cmdiocb->cmd_flag & LPFC_DRIVER_ABORTED) 12427 return IOCB_ABORTING; 12428 12429 if (!pring) { 12430 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) 12431 cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl; 12432 else 12433 cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl; 12434 return retval; 12435 } 12436 12437 /* 12438 * If we're unloading, don't abort iocb on the ELS ring, but change 12439 * the callback so that nothing happens when it finishes. 12440 */ 12441 if (test_bit(FC_UNLOADING, &vport->load_flag) && 12442 pring->ringno == LPFC_ELS_RING) { 12443 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) 12444 cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl; 12445 else 12446 cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl; 12447 return retval; 12448 } 12449 12450 /* issue ABTS for this IOCB based on iotag */ 12451 abtsiocbp = __lpfc_sli_get_iocbq(phba); 12452 if (abtsiocbp == NULL) 12453 return IOCB_NORESOURCE; 12454 12455 /* This signals the response to set the correct status 12456 * before calling the completion handler 12457 */ 12458 cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED; 12459 12460 if (phba->sli_rev == LPFC_SLI_REV4) { 12461 ulp_context = cmdiocb->sli4_xritag; 12462 iotag = abtsiocbp->iotag; 12463 } else { 12464 iotag = cmdiocb->iocb.ulpIoTag; 12465 if (pring->ringno == LPFC_ELS_RING) { 12466 ndlp = cmdiocb->ndlp; 12467 ulp_context = ndlp->nlp_rpi; 12468 } else { 12469 ulp_context = cmdiocb->iocb.ulpContext; 12470 } 12471 } 12472 12473 /* Just close the exchange under certain conditions. 
*/ 12474 if (test_bit(FC_UNLOADING, &vport->load_flag) || 12475 phba->link_state < LPFC_LINK_UP || 12476 (phba->sli_rev == LPFC_SLI_REV4 && 12477 phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN) || 12478 (phba->link_flag & LS_EXTERNAL_LOOPBACK)) 12479 ia = true; 12480 else 12481 ia = false; 12482 12483 lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag, 12484 cmdiocb->iocb.ulpClass, 12485 LPFC_WQE_CQ_ID_DEFAULT, ia, false); 12486 12487 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 12488 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx; 12489 if (cmdiocb->cmd_flag & LPFC_IO_FCP) 12490 abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX); 12491 12492 if (cmdiocb->cmd_flag & LPFC_IO_FOF) 12493 abtsiocbp->cmd_flag |= LPFC_IO_FOF; 12494 12495 if (cmpl) 12496 abtsiocbp->cmd_cmpl = cmpl; 12497 else 12498 abtsiocbp->cmd_cmpl = lpfc_sli_abort_els_cmpl; 12499 abtsiocbp->vport = vport; 12500 12501 if (phba->sli_rev == LPFC_SLI_REV4) { 12502 pring = lpfc_sli4_calc_ring(phba, abtsiocbp); 12503 if (unlikely(pring == NULL)) 12504 goto abort_iotag_exit; 12505 /* Note: both hbalock and ring_lock need to be set here */ 12506 spin_lock_irqsave(&pring->ring_lock, iflags); 12507 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, 12508 abtsiocbp, 0); 12509 spin_unlock_irqrestore(&pring->ring_lock, iflags); 12510 } else { 12511 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, 12512 abtsiocbp, 0); 12513 } 12514 12515 abort_iotag_exit: 12516 12517 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 12518 "0339 Abort IO XRI x%x, Original iotag x%x, " 12519 "abort tag x%x Cmdjob : x%px Abortjob : x%px " 12520 "retval x%x : IA %d cmd_cmpl %ps\n", 12521 ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ? 12522 cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp, 12523 retval, ia, abtsiocbp->cmd_cmpl); 12524 if (retval) { 12525 cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED; 12526 __lpfc_sli_release_iocbq(phba, abtsiocbp); 12527 } 12528 12529 /* 12530 * Caller to this routine should check for IOCB_ERROR 12531 * and handle it properly. This routine no longer removes 12532 * iocb off txcmplq and call compl in case of IOCB_ERROR. 12533 */ 12534 return retval; 12535 } 12536 12537 /** 12538 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 12539 * @phba: pointer to lpfc HBA data structure. 12540 * 12541 * This routine will abort all pending and outstanding iocbs to an HBA. 12542 **/ 12543 void 12544 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) 12545 { 12546 struct lpfc_sli *psli = &phba->sli; 12547 struct lpfc_sli_ring *pring; 12548 struct lpfc_queue *qp = NULL; 12549 int i; 12550 12551 if (phba->sli_rev != LPFC_SLI_REV4) { 12552 for (i = 0; i < psli->num_rings; i++) { 12553 pring = &psli->sli3_ring[i]; 12554 lpfc_sli_abort_iocb_ring(phba, pring); 12555 } 12556 return; 12557 } 12558 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 12559 pring = qp->pring; 12560 if (!pring) 12561 continue; 12562 lpfc_sli_abort_iocb_ring(phba, pring); 12563 } 12564 } 12565 12566 /** 12567 * lpfc_sli_validate_fcp_iocb_for_abort - filter iocbs appropriate for FCP aborts 12568 * @iocbq: Pointer to iocb object. 12569 * @vport: Pointer to driver virtual port object. 12570 * 12571 * This function acts as an iocb filter for functions which abort FCP iocbs. 
12572  *
12573  * Return values
12574  * -ENODEV, if a null iocb or vport ptr is encountered
12575  * -EINVAL, if the iocb is not an FCP I/O, is not on the TX cmpl queue, is
12576  *          already marked as driver-aborted, or is an abort iocb itself
12577  * 0, passes criteria for aborting the FCP I/O iocb
12578  **/
12579 static int
12580 lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq,
12581                                      struct lpfc_vport *vport)
12582 {
12583     u8 ulp_command;
12584 
12585     /* No null ptr vports */
12586     if (!iocbq || iocbq->vport != vport)
12587         return -ENODEV;
12588 
12589     /* The iocb must be for an FCP IO, must already be on the TX cmpl queue,
12590      * and can't be premarked as driver-aborted nor be an ABORT iocb itself
12591      */
12592     ulp_command = get_job_cmnd(vport->phba, iocbq);
12593     if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
12594         !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ) ||
12595         (iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
12596         (ulp_command == CMD_ABORT_XRI_CN ||
12597          ulp_command == CMD_CLOSE_XRI_CN ||
12598          ulp_command == CMD_ABORT_XRI_WQE))
12599         return -EINVAL;
12600 
12601     return 0;
12602 }
12603 
12604 /**
12605  * lpfc_sli_validate_fcp_iocb - validate commands associated with a SCSI target
12606  * @iocbq: Pointer to driver iocb object.
12607  * @vport: Pointer to driver virtual port object.
12608  * @tgt_id: SCSI ID of the target.
12609  * @lun_id: LUN ID of the scsi device.
12610  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
12611  *
12612  * This function acts as an iocb filter for validating a lun/SCSI target/SCSI
12613  * host.
12614  *
12615  * It will return
12616  * 0 if the filtering criteria are met for the given iocb and will return
12617  * 1 if the filtering criteria are not met.
12618  * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
12619  * given iocb is for the SCSI device specified by the vport, tgt_id and
12620  * lun_id parameters.
12621  * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
12622  * given iocb is for the SCSI target specified by the vport and tgt_id
12623  * parameters.
12624  * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
12625  * given iocb is for the SCSI host associated with the given vport.
12626  * This function is called with no locks held.
12627  **/
12628 static int
12629 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
12630                            uint16_t tgt_id, uint64_t lun_id,
12631                            lpfc_ctx_cmd ctx_cmd)
12632 {
12633     struct lpfc_io_buf *lpfc_cmd;
12634     int rc = 1;
12635 
12636     lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12637 
12638     if (lpfc_cmd->pCmd == NULL)
12639         return rc;
12640 
12641     switch (ctx_cmd) {
12642     case LPFC_CTX_LUN:
12643         if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12644             (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
12645             (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
12646             rc = 0;
12647         break;
12648     case LPFC_CTX_TGT:
12649         if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12650             (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
12651             rc = 0;
12652         break;
12653     case LPFC_CTX_HOST:
12654         rc = 0;
12655         break;
12656     default:
12657         printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
12658                __func__, ctx_cmd);
12659         break;
12660     }
12661 
12662     return rc;
12663 }
12664 
12665 /**
12666  * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
12667  * @vport: Pointer to virtual port.
12668  * @tgt_id: SCSI ID of the target.
12669  * @lun_id: LUN ID of the scsi device.
12670  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12671 * 12672 * This function returns number of FCP commands pending for the vport. 12673 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP 12674 * commands pending on the vport associated with SCSI device specified 12675 * by tgt_id and lun_id parameters. 12676 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP 12677 * commands pending on the vport associated with SCSI target specified 12678 * by tgt_id parameter. 12679 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP 12680 * commands pending on the vport. 12681 * This function returns the number of iocbs which satisfy the filter. 12682 * This function is called without any lock held. 12683 **/ 12684 int 12685 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 12686 lpfc_ctx_cmd ctx_cmd) 12687 { 12688 struct lpfc_hba *phba = vport->phba; 12689 struct lpfc_iocbq *iocbq; 12690 int sum, i; 12691 unsigned long iflags; 12692 u8 ulp_command; 12693 12694 spin_lock_irqsave(&phba->hbalock, iflags); 12695 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 12696 iocbq = phba->sli.iocbq_lookup[i]; 12697 12698 if (!iocbq || iocbq->vport != vport) 12699 continue; 12700 if (!(iocbq->cmd_flag & LPFC_IO_FCP) || 12701 !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) 12702 continue; 12703 12704 /* Include counting outstanding aborts */ 12705 ulp_command = get_job_cmnd(phba, iocbq); 12706 if (ulp_command == CMD_ABORT_XRI_CN || 12707 ulp_command == CMD_CLOSE_XRI_CN || 12708 ulp_command == CMD_ABORT_XRI_WQE) { 12709 sum++; 12710 continue; 12711 } 12712 12713 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 12714 ctx_cmd) == 0) 12715 sum++; 12716 } 12717 spin_unlock_irqrestore(&phba->hbalock, iflags); 12718 12719 return sum; 12720 } 12721 12722 /** 12723 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs 12724 * @phba: Pointer to HBA context object 12725 * @cmdiocb: Pointer to command iocb object. 12726 * @rspiocb: Pointer to response iocb object. 12727 * 12728 * This function is called when an aborted FCP iocb completes. This 12729 * function is called by the ring event handler with no lock held. 12730 * This function frees the iocb. 12731 **/ 12732 void 12733 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 12734 struct lpfc_iocbq *rspiocb) 12735 { 12736 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 12737 "3096 ABORT_XRI_CX completing on rpi x%x " 12738 "original iotag x%x, abort cmd iotag x%x " 12739 "status 0x%x, reason 0x%x\n", 12740 (phba->sli_rev == LPFC_SLI_REV4) ? 12741 cmdiocb->sli4_xritag : 12742 cmdiocb->iocb.un.acxri.abortContextTag, 12743 get_job_abtsiotag(phba, cmdiocb), 12744 cmdiocb->iotag, get_job_ulpstatus(phba, rspiocb), 12745 get_job_word4(phba, rspiocb)); 12746 lpfc_sli_release_iocbq(phba, cmdiocb); 12747 return; 12748 } 12749 12750 /** 12751 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN 12752 * @vport: Pointer to virtual port. 12753 * @tgt_id: SCSI ID of the target. 12754 * @lun_id: LUN ID of the scsi device. 12755 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 12756 * 12757 * This function sends an abort command for every SCSI command 12758 * associated with the given virtual port pending on the ring 12759 * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then 12760 * lpfc_sli_validate_fcp_iocb function. The ordering for validation before 12761 * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort 12762 * followed by lpfc_sli_validate_fcp_iocb. 
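 *
 * A minimal sketch of that required filter order (it mirrors the scan loop
 * below and is shown for illustration only):
 *
 *   if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
 *           continue;   // not an abortable FCP iocb for this vport
 *   if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, abort_cmd))
 *           continue;   // fails the LUN/target/host match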
12763  *
12764  * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
12765  * FCP iocbs associated with the lun specified by the tgt_id and lun_id
12766  * parameters.
12767  * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
12768  * FCP iocbs associated with the SCSI target specified by the tgt_id parameter.
12769  * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
12770  * FCP iocbs associated with the virtual port.
12771  * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING]; for SLI4,
12772  * lpfc_sli4_calc_ring is used.
12773  * This function returns the number of iocbs it failed to abort.
12774  * This function is called with no locks held.
12775  **/
12776 int
12777 lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
12778                     lpfc_ctx_cmd abort_cmd)
12779 {
12780     struct lpfc_hba *phba = vport->phba;
12781     struct lpfc_sli_ring *pring = NULL;
12782     struct lpfc_iocbq *iocbq;
12783     int errcnt = 0, ret_val = 0;
12784     unsigned long iflags;
12785     int i;
12786 
12787     /* all I/Os are in process of being flushed */
12788     if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag))
12789         return errcnt;
12790 
12791     for (i = 1; i <= phba->sli.last_iotag; i++) {
12792         iocbq = phba->sli.iocbq_lookup[i];
12793 
12794         if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
12795             continue;
12796 
12797         if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12798                                        abort_cmd) != 0)
12799             continue;
12800 
12801         spin_lock_irqsave(&phba->hbalock, iflags);
12802         if (phba->sli_rev == LPFC_SLI_REV3) {
12803             pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12804         } else if (phba->sli_rev == LPFC_SLI_REV4) {
12805             pring = lpfc_sli4_calc_ring(phba, iocbq);
12806         }
12807         ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
12808                                              lpfc_sli_abort_fcp_cmpl);
12809         spin_unlock_irqrestore(&phba->hbalock, iflags);
12810         if (ret_val != IOCB_SUCCESS)
12811             errcnt++;
12812     }
12813 
12814     return errcnt;
12815 }
12816 
12817 /**
12818  * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
12819  * @vport: Pointer to virtual port.
12820  * @pring: Pointer to driver SLI ring object.
12821  * @tgt_id: SCSI ID of the target.
12822  * @lun_id: LUN ID of the scsi device.
12823  * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12824  *
12825  * This function sends an abort command for every SCSI command
12826  * associated with the given virtual port pending on the ring
12827  * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then the
12828  * lpfc_sli_validate_fcp_iocb function. The ordering for validation before
12829  * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort
12830  * followed by lpfc_sli_validate_fcp_iocb.
12831  *
12832  * When cmd == LPFC_CTX_LUN, the function sends abort only to the
12833  * FCP iocbs associated with the lun specified by the tgt_id and lun_id
12834  * parameters.
12835  * When cmd == LPFC_CTX_TGT, the function sends abort only to the
12836  * FCP iocbs associated with the SCSI target specified by the tgt_id parameter.
12837  * When cmd == LPFC_CTX_HOST, the function sends abort to all
12838  * FCP iocbs associated with the virtual port.
12839  * This function returns the number of iocbs it aborted.
12840  * This function is called with no locks held right after a taskmgmt
12841  * command is sent.
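 *
 * A hypothetical usage sketch (a LUN reset handler aborting everything
 * still outstanding on one LUN; the arguments are illustrative):
 *
 *   cnt = lpfc_sli_abort_taskmgmt(vport,
 *                                 &phba->sli.sli3_ring[LPFC_FCP_RING],
 *                                 tgt_id, lun_id, LPFC_CTX_LUN);
 *   // cnt is the number of outstanding iocbs the routine aborted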
12842 **/ 12843 int 12844 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 12845 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd) 12846 { 12847 struct lpfc_hba *phba = vport->phba; 12848 struct lpfc_io_buf *lpfc_cmd; 12849 struct lpfc_iocbq *abtsiocbq; 12850 struct lpfc_nodelist *ndlp = NULL; 12851 struct lpfc_iocbq *iocbq; 12852 int sum, i, ret_val; 12853 unsigned long iflags; 12854 struct lpfc_sli_ring *pring_s4 = NULL; 12855 u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT; 12856 bool ia; 12857 12858 /* all I/Os are in process of being flushed */ 12859 if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) 12860 return 0; 12861 12862 sum = 0; 12863 12864 spin_lock_irqsave(&phba->hbalock, iflags); 12865 for (i = 1; i <= phba->sli.last_iotag; i++) { 12866 iocbq = phba->sli.iocbq_lookup[i]; 12867 12868 if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport)) 12869 continue; 12870 12871 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 12872 cmd) != 0) 12873 continue; 12874 12875 /* Guard against IO completion being called at same time */ 12876 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); 12877 spin_lock(&lpfc_cmd->buf_lock); 12878 12879 if (!lpfc_cmd->pCmd) { 12880 spin_unlock(&lpfc_cmd->buf_lock); 12881 continue; 12882 } 12883 12884 if (phba->sli_rev == LPFC_SLI_REV4) { 12885 pring_s4 = 12886 phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring; 12887 if (!pring_s4) { 12888 spin_unlock(&lpfc_cmd->buf_lock); 12889 continue; 12890 } 12891 /* Note: both hbalock and ring_lock must be set here */ 12892 spin_lock(&pring_s4->ring_lock); 12893 } 12894 12895 /* 12896 * If the iocbq is already being aborted, don't take a second 12897 * action, but do count it. 12898 */ 12899 if ((iocbq->cmd_flag & LPFC_DRIVER_ABORTED) || 12900 !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) { 12901 if (phba->sli_rev == LPFC_SLI_REV4) 12902 spin_unlock(&pring_s4->ring_lock); 12903 spin_unlock(&lpfc_cmd->buf_lock); 12904 continue; 12905 } 12906 12907 /* issue ABTS for this IOCB based on iotag */ 12908 abtsiocbq = __lpfc_sli_get_iocbq(phba); 12909 if (!abtsiocbq) { 12910 if (phba->sli_rev == LPFC_SLI_REV4) 12911 spin_unlock(&pring_s4->ring_lock); 12912 spin_unlock(&lpfc_cmd->buf_lock); 12913 continue; 12914 } 12915 12916 if (phba->sli_rev == LPFC_SLI_REV4) { 12917 iotag = abtsiocbq->iotag; 12918 ulp_context = iocbq->sli4_xritag; 12919 cqid = lpfc_cmd->hdwq->io_cq_map; 12920 } else { 12921 iotag = iocbq->iocb.ulpIoTag; 12922 if (pring->ringno == LPFC_ELS_RING) { 12923 ndlp = iocbq->ndlp; 12924 ulp_context = ndlp->nlp_rpi; 12925 } else { 12926 ulp_context = iocbq->iocb.ulpContext; 12927 } 12928 } 12929 12930 ndlp = lpfc_cmd->rdata->pnode; 12931 12932 if (lpfc_is_link_up(phba) && 12933 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE) && 12934 !(phba->link_flag & LS_EXTERNAL_LOOPBACK)) 12935 ia = false; 12936 else 12937 ia = true; 12938 12939 lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag, 12940 iocbq->iocb.ulpClass, cqid, 12941 ia, false); 12942 12943 abtsiocbq->vport = vport; 12944 12945 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 12946 abtsiocbq->hba_wqidx = iocbq->hba_wqidx; 12947 if (iocbq->cmd_flag & LPFC_IO_FCP) 12948 abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX; 12949 if (iocbq->cmd_flag & LPFC_IO_FOF) 12950 abtsiocbq->cmd_flag |= LPFC_IO_FOF; 12951 12952 /* Setup callback routine and issue the command. 
         */
12953         abtsiocbq->cmd_cmpl = lpfc_sli_abort_fcp_cmpl;
12954 
12955         /*
12956          * Indicate the IO is being aborted by the driver and set
12957          * the caller's flag into the aborted IO.
12958          */
12959         iocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
12960 
12961         if (phba->sli_rev == LPFC_SLI_REV4) {
12962             ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
12963                                             abtsiocbq, 0);
12964             spin_unlock(&pring_s4->ring_lock);
12965         } else {
12966             ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
12967                                             abtsiocbq, 0);
12968         }
12969 
12970         spin_unlock(&lpfc_cmd->buf_lock);
12971 
12972         if (ret_val == IOCB_ERROR)
12973             __lpfc_sli_release_iocbq(phba, abtsiocbq);
12974         else
12975             sum++;
12976     }
12977     spin_unlock_irqrestore(&phba->hbalock, iflags);
12978     return sum;
12979 }
12980 
12981 /**
12982  * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
12983  * @phba: Pointer to HBA context object.
12984  * @cmdiocbq: Pointer to command iocb.
12985  * @rspiocbq: Pointer to response iocb.
12986  *
12987  * This function is the completion handler for iocbs issued using
12988  * the lpfc_sli_issue_iocb_wait function. It is called by the
12989  * ring event handler function without any lock held. It
12990  * can be called from both worker thread context and interrupt
12991  * context. It can also be called from another thread which
12992  * cleans up the SLI layer objects.
12993  * This function copies the contents of the response iocb to the
12994  * response iocb memory object provided by the caller of
12995  * lpfc_sli_issue_iocb_wait and then wakes up the thread which
12996  * sleeps for the iocb completion.
12997  **/
12998 static void
12999 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
13000                         struct lpfc_iocbq *cmdiocbq,
13001                         struct lpfc_iocbq *rspiocbq)
13002 {
13003     wait_queue_head_t *pdone_q;
13004     unsigned long iflags;
13005     struct lpfc_io_buf *lpfc_cmd;
13006     size_t offset = offsetof(struct lpfc_iocbq, wqe);
13007 
13008     spin_lock_irqsave(&phba->hbalock, iflags);
13009     if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) {
13010 
13011         /*
13012          * A time out has occurred for the iocb. If a time out
13013          * completion handler has been supplied, call it. Otherwise,
13014          * just free the iocbq.
13015          */
13016 
13017         spin_unlock_irqrestore(&phba->hbalock, iflags);
13018         cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl;
13019         cmdiocbq->wait_cmd_cmpl = NULL;
13020         if (cmdiocbq->cmd_cmpl)
13021             cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL);
13022         else
13023             lpfc_sli_release_iocbq(phba, cmdiocbq);
13024         return;
13025     }
13026 
13027     /* Copy the local rspiocb into the caller's buffer, starting at the wqe field so driver-private fields before it are preserved. */
13028     cmdiocbq->cmd_flag |= LPFC_IO_WAKE;
13029     if (cmdiocbq->rsp_iocb && rspiocbq)
13030         memcpy((char *)cmdiocbq->rsp_iocb + offset,
13031                (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset);
13032 
13033     /* Set the exchange busy flag for task management commands */
13034     if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
13035         !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
13036         lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
13037                                 cur_iocbq);
13038         if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY))
13039             lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
13040         else
13041             lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
13042     }
13043 
13044     pdone_q = cmdiocbq->context_un.wait_queue;
13045     if (pdone_q)
13046         wake_up(pdone_q);
13047     spin_unlock_irqrestore(&phba->hbalock, iflags);
13048     return;
13049 }
13050 
13051 /**
13052  * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
13053  * @phba: Pointer to HBA context object.
13054  * @piocbq: Pointer to command iocb.
13055  * @flag: Flag to test.
13056  *
13057  * This routine grabs the hbalock and then tests the cmd_flag to
13058  * see if the passed in flag is set.
13059  * Returns:
13060  * 1 if flag is set.
13061  * 0 if flag is not set.
13062  **/
13063 static int
13064 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
13065                   struct lpfc_iocbq *piocbq, uint32_t flag)
13066 {
13067     unsigned long iflags;
13068     int ret;
13069 
13070     spin_lock_irqsave(&phba->hbalock, iflags);
13071     ret = piocbq->cmd_flag & flag;
13072     spin_unlock_irqrestore(&phba->hbalock, iflags);
13073     return ret;
13074 
13075 }
13076 
13077 /**
13078  * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
13079  * @phba: Pointer to HBA context object.
13080  * @ring_number: Ring number
13081  * @piocb: Pointer to command iocb.
13082  * @prspiocbq: Pointer to response iocb.
13083  * @timeout: Timeout in number of seconds.
13084  *
13085  * This function issues the iocb to firmware and waits for the
13086  * iocb to complete. The cmd_cmpl field of the iocb shall be used
13087  * to handle iocbs which time out. If the field is NULL, the
13088  * function shall free the iocbq structure. If more clean up is
13089  * needed, the caller is expected to provide a completion function
13090  * that will provide the needed clean up. If the iocb command is
13091  * not completed within timeout seconds, the function will either
13092  * free the iocbq structure (if cmd_cmpl == NULL) or execute the
13093  * completion function set in the cmd_cmpl field and then return
13094  * a status of IOCB_TIMEDOUT. The caller should not free the iocb
13095  * resources if this function returns IOCB_TIMEDOUT.
13096  * The function waits for the iocb completion using a
13097  * non-interruptible wait.
13098  * This function will sleep while waiting for iocb completion.
13099  * So, this function should not be called from any context which
13100  * does not allow sleeping. For the same reason, this function
13101  * cannot be called with interrupts disabled.
13102  * This function assumes that the iocb completions occur while
13103  * this function sleeps. So, this function cannot be called from
13104  * the thread which processes iocb completions for this ring.
13105  * This function clears the cmd_flag of the iocb object before
13106  * issuing the iocb and the iocb completion handler sets this
13107  * flag and wakes this thread when the iocb completes.
13108  * The contents of the response iocb will be copied to prspiocbq
13109  * by the completion handler when the command completes.
13110  * This function returns IOCB_SUCCESS when successful.
13111  * This function is called with no lock held.
13112  **/
13113 int
13114 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
13115                          uint32_t ring_number,
13116                          struct lpfc_iocbq *piocb,
13117                          struct lpfc_iocbq *prspiocbq,
13118                          uint32_t timeout)
13119 {
13120     DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
13121     long timeleft, timeout_req = 0;
13122     int retval = IOCB_SUCCESS;
13123     uint32_t creg_val;
13124     struct lpfc_iocbq *iocb;
13125     int txq_cnt = 0;
13126     int txcmplq_cnt = 0;
13127     struct lpfc_sli_ring *pring;
13128     unsigned long iflags;
13129     bool iocb_completed = true;
13130 
13131     if (phba->sli_rev >= LPFC_SLI_REV4) {
13132         lpfc_sli_prep_wqe(phba, piocb);
13133 
13134         pring = lpfc_sli4_calc_ring(phba, piocb);
13135     } else
13136         pring = &phba->sli.sli3_ring[ring_number];
13137     /*
13138      * If the caller has provided a response iocbq buffer, then rsp_iocb
13139      * must be NULL, or it's an error.
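     * lpfc_sli_wake_iocb_wait() is the matching completion side of this
     * contract: it sets LPFC_IO_WAKE and wakes the on-stack done_q that is
     * installed in context_un.wait_queue below.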
13140 */ 13141 if (prspiocbq) { 13142 if (piocb->rsp_iocb) 13143 return IOCB_ERROR; 13144 piocb->rsp_iocb = prspiocbq; 13145 } 13146 13147 piocb->wait_cmd_cmpl = piocb->cmd_cmpl; 13148 piocb->cmd_cmpl = lpfc_sli_wake_iocb_wait; 13149 piocb->context_un.wait_queue = &done_q; 13150 piocb->cmd_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO); 13151 13152 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 13153 if (lpfc_readl(phba->HCregaddr, &creg_val)) 13154 return IOCB_ERROR; 13155 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 13156 writel(creg_val, phba->HCregaddr); 13157 readl(phba->HCregaddr); /* flush */ 13158 } 13159 13160 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 13161 SLI_IOCB_RET_IOCB); 13162 if (retval == IOCB_SUCCESS) { 13163 timeout_req = msecs_to_jiffies(timeout * 1000); 13164 timeleft = wait_event_timeout(done_q, 13165 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), 13166 timeout_req); 13167 spin_lock_irqsave(&phba->hbalock, iflags); 13168 if (!(piocb->cmd_flag & LPFC_IO_WAKE)) { 13169 13170 /* 13171 * IOCB timed out. Inform the wake iocb wait 13172 * completion function and set local status 13173 */ 13174 13175 iocb_completed = false; 13176 piocb->cmd_flag |= LPFC_IO_WAKE_TMO; 13177 } 13178 spin_unlock_irqrestore(&phba->hbalock, iflags); 13179 if (iocb_completed) { 13180 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13181 "0331 IOCB wake signaled\n"); 13182 /* Note: we are not indicating if the IOCB has a success 13183 * status or not - that's for the caller to check. 13184 * IOCB_SUCCESS means just that the command was sent and 13185 * completed. Not that it completed successfully. 13186 * */ 13187 } else if (timeleft == 0) { 13188 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13189 "0338 IOCB wait timeout error - no " 13190 "wake response Data x%x\n", timeout); 13191 retval = IOCB_TIMEDOUT; 13192 } else { 13193 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13194 "0330 IOCB wake NOT set, " 13195 "Data x%x x%lx\n", 13196 timeout, (timeleft / jiffies)); 13197 retval = IOCB_TIMEDOUT; 13198 } 13199 } else if (retval == IOCB_BUSY) { 13200 if (phba->cfg_log_verbose & LOG_SLI) { 13201 list_for_each_entry(iocb, &pring->txq, list) { 13202 txq_cnt++; 13203 } 13204 list_for_each_entry(iocb, &pring->txcmplq, list) { 13205 txcmplq_cnt++; 13206 } 13207 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13208 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", 13209 phba->iocb_cnt, txq_cnt, txcmplq_cnt); 13210 } 13211 return retval; 13212 } else { 13213 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13214 "0332 IOCB wait issue failed, Data x%x\n", 13215 retval); 13216 retval = IOCB_ERROR; 13217 } 13218 13219 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 13220 if (lpfc_readl(phba->HCregaddr, &creg_val)) 13221 return IOCB_ERROR; 13222 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 13223 writel(creg_val, phba->HCregaddr); 13224 readl(phba->HCregaddr); /* flush */ 13225 } 13226 13227 if (prspiocbq) 13228 piocb->rsp_iocb = NULL; 13229 13230 piocb->context_un.wait_queue = NULL; 13231 piocb->cmd_cmpl = NULL; 13232 return retval; 13233 } 13234 13235 /** 13236 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox 13237 * @phba: Pointer to HBA context object. 13238 * @pmboxq: Pointer to driver mailbox object. 13239 * @timeout: Timeout in number of seconds. 13240 * 13241 * This function issues the mailbox to firmware and waits for the 13242 * mailbox command to complete. If the mailbox command is not 13243 * completed within timeout seconds, it returns MBX_TIMEOUT. 
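 *
 * A minimal, hypothetical usage sketch (the heartbeat command and the
 * 30 second timeout are illustrative choices only; lpfc_heart_beat() is
 * built elsewhere in this driver):
 *
 *   pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *   if (!pmboxq)
 *           return -ENOMEM;
 *   lpfc_heart_beat(phba, pmboxq);                 // build the command
 *   rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, 30);
 *   if (rc != MBX_TIMEOUT)                         // on timeout, do not free
 *           mempool_free(pmboxq, phba->mbox_mem_pool);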
13244  * The function waits for the mailbox completion using an
13245  * interruptible wait. If the thread is woken up due to a
13246  * signal, an MBX_TIMEOUT error is returned to the caller. The caller
13247  * should not free the mailbox resources if this function returns
13248  * MBX_TIMEOUT.
13249  * This function will sleep while waiting for mailbox completion.
13250  * So, this function should not be called from any context which
13251  * does not allow sleeping. For the same reason, this function
13252  * cannot be called with interrupts disabled.
13253  * This function assumes that the mailbox completion occurs while
13254  * this function sleeps. So, this function cannot be called from
13255  * the worker thread which processes mailbox completion.
13256  * This function is called in the context of HBA management
13257  * applications.
13258  * This function returns MBX_SUCCESS when successful.
13259  * This function is called with no lock held.
13260  **/
13261 int
13262 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
13263                          uint32_t timeout)
13264 {
13265     struct completion mbox_done;
13266     int retval;
13267     unsigned long flag;
13268 
13269     pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
13270     /* setup wake call as the mailbox completion callback */
13271     pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
13272 
13273     /* setup ctx_u field to pass the completion pointer to the wake function */
13274     init_completion(&mbox_done);
13275     pmboxq->ctx_u.mbox_wait = &mbox_done;
13276     /* now issue the command */
13277     retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
13278     if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
13279         wait_for_completion_timeout(&mbox_done,
13280                                     msecs_to_jiffies(timeout * 1000));
13281 
13282         spin_lock_irqsave(&phba->hbalock, flag);
13283         pmboxq->ctx_u.mbox_wait = NULL;
13284         /*
13285          * If the LPFC_MBX_WAKE flag is set, the mailbox completed;
13286          * otherwise do not free the resources.
13287          */
13288         if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
13289             retval = MBX_SUCCESS;
13290         } else {
13291             retval = MBX_TIMEOUT;
13292             pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13293         }
13294         spin_unlock_irqrestore(&phba->hbalock, flag);
13295     }
13296     return retval;
13297 }
13298 
13299 /**
13300  * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
13301  * @phba: Pointer to HBA context.
13302  * @mbx_action: Mailbox shutdown options.
13303  *
13304  * This function is called to shutdown the driver's mailbox sub-system.
13305  * It first marks the mailbox sub-system as blocked to prevent
13306  * asynchronous mailbox commands from being issued off the pending mailbox
13307  * command queue. If the mailbox command sub-system shutdown is due to
13308  * HBA error conditions such as EEH or ERATT, this routine shall invoke
13309  * the mailbox sub-system flush routine to forcefully bring down the
13310  * mailbox sub-system. Otherwise, if it is due to a normal condition (such
13311  * as with offline or HBA function reset), this routine will wait for the
13312  * outstanding mailbox command to complete before invoking the mailbox
13313  * sub-system flush routine to gracefully bring down the mailbox sub-system.
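 *
 * A hypothetical call-site sketch (the surrounding recovery logic is
 * assumed, not taken from this file):
 *
 *   if (pci_channel_offline(phba->pcidev))   // error path, e.g. EEH
 *           lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_NO_WAIT);
 *   else                                     // normal offline path
 *           lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);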
13314 **/
13315 void
13316 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
13317 {
13318     struct lpfc_sli *psli = &phba->sli;
13319     unsigned long timeout;
13320 
13321     if (mbx_action == LPFC_MBX_NO_WAIT) {
13322         /* delay 100ms for port state */
13323         msleep(100);
13324         lpfc_sli_mbox_sys_flush(phba);
13325         return;
13326     }
13327     timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
13328 
13329     /* Disable softirqs, including timers, from obtaining phba->hbalock */
13330     local_bh_disable();
13331 
13332     spin_lock_irq(&phba->hbalock);
13333     psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13334 
13335     if (psli->sli_flag & LPFC_SLI_ACTIVE) {
13336         /* Determine how long we might wait for the active mailbox
13337          * command to be gracefully completed by firmware.
13338          */
13339         if (phba->sli.mbox_active)
13340             timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
13341                                        phba->sli.mbox_active) *
13342                                        1000) + jiffies;
13343         spin_unlock_irq(&phba->hbalock);
13344 
13345         /* Enable softirqs again, done with phba->hbalock */
13346         local_bh_enable();
13347 
13348         while (phba->sli.mbox_active) {
13349             /* Check active mailbox complete status every 2ms */
13350             msleep(2);
13351             if (time_after(jiffies, timeout))
13352                 /* Timeout, let the mailbox flush routine
13353                  * forcefully release the active mailbox command
13354                  */
13355                 break;
13356         }
13357     } else {
13358         spin_unlock_irq(&phba->hbalock);
13359 
13360         /* Enable softirqs again, done with phba->hbalock */
13361         local_bh_enable();
13362     }
13363 
13364     lpfc_sli_mbox_sys_flush(phba);
13365 }
13366 
13367 /**
13368  * lpfc_sli_eratt_read - read sli-3 error attention events
13369  * @phba: Pointer to HBA context.
13370  *
13371  * This function is called to read the SLI3 device error attention registers
13372  * for possible error attention events. The caller must hold the hostlock
13373  * with spin_lock_irq().
13374  *
13375  * This function returns 1 when there is Error Attention in the Host Attention
13376  * Register and returns 0 otherwise.
13377  **/
13378 static int
13379 lpfc_sli_eratt_read(struct lpfc_hba *phba)
13380 {
13381     uint32_t ha_copy;
13382 
13383     /* Read chip Host Attention (HA) register */
13384     if (lpfc_readl(phba->HAregaddr, &ha_copy))
13385         goto unplug_err;
13386 
13387     if (ha_copy & HA_ERATT) {
13388         /* Read host status register to retrieve error event */
13389         if (lpfc_sli_read_hs(phba))
13390             goto unplug_err;
13391 
13392         /* Check if a deferred error condition is active */
13393         if ((HS_FFER1 & phba->work_hs) &&
13394             ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13395               HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
13396             set_bit(DEFER_ERATT, &phba->hba_flag);
13397             /* Clear all interrupt enable conditions */
13398             writel(0, phba->HCregaddr);
13399             readl(phba->HCregaddr);
13400         }
13401 
13402         /* Set the driver HA work bitmap */
13403         phba->work_ha |= HA_ERATT;
13404         /* Indicate polling handles this ERATT */
13405         set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
13406         return 1;
13407     }
13408     return 0;
13409 
13410 unplug_err:
13411     /* Set the driver HS work bitmap */
13412     phba->work_hs |= UNPLUG_ERR;
13413     /* Set the driver HA work bitmap */
13414     phba->work_ha |= HA_ERATT;
13415     /* Indicate polling handles this ERATT */
13416     set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
13417     return 1;
13418 }
13419 
13420 /**
13421  * lpfc_sli4_eratt_read - read sli-4 error attention events
13422  * @phba: Pointer to HBA context.
13423 * 13424 * This function is called to read the SLI4 device error attention registers 13425 * for possible error attention events. The caller must hold the hostlock 13426 * with spin_lock_irq(). 13427 * 13428 * This function returns 1 when there is Error Attention in the Host Attention 13429 * Register and returns 0 otherwise. 13430 **/ 13431 static int 13432 lpfc_sli4_eratt_read(struct lpfc_hba *phba) 13433 { 13434 uint32_t uerr_sta_hi, uerr_sta_lo; 13435 uint32_t if_type, portsmphr; 13436 struct lpfc_register portstat_reg; 13437 u32 logmask; 13438 13439 /* 13440 * For now, use the SLI4 device internal unrecoverable error 13441 * registers for error attention. This can be changed later. 13442 */ 13443 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 13444 switch (if_type) { 13445 case LPFC_SLI_INTF_IF_TYPE_0: 13446 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, 13447 &uerr_sta_lo) || 13448 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, 13449 &uerr_sta_hi)) { 13450 phba->work_hs |= UNPLUG_ERR; 13451 phba->work_ha |= HA_ERATT; 13452 set_bit(HBA_ERATT_HANDLED, &phba->hba_flag); 13453 return 1; 13454 } 13455 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 13456 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 13457 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13458 "1423 HBA Unrecoverable error: " 13459 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 13460 "ue_mask_lo_reg=0x%x, " 13461 "ue_mask_hi_reg=0x%x\n", 13462 uerr_sta_lo, uerr_sta_hi, 13463 phba->sli4_hba.ue_mask_lo, 13464 phba->sli4_hba.ue_mask_hi); 13465 phba->work_status[0] = uerr_sta_lo; 13466 phba->work_status[1] = uerr_sta_hi; 13467 phba->work_ha |= HA_ERATT; 13468 set_bit(HBA_ERATT_HANDLED, &phba->hba_flag); 13469 return 1; 13470 } 13471 break; 13472 case LPFC_SLI_INTF_IF_TYPE_2: 13473 case LPFC_SLI_INTF_IF_TYPE_6: 13474 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 13475 &portstat_reg.word0) || 13476 lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 13477 &portsmphr)){ 13478 phba->work_hs |= UNPLUG_ERR; 13479 phba->work_ha |= HA_ERATT; 13480 set_bit(HBA_ERATT_HANDLED, &phba->hba_flag); 13481 return 1; 13482 } 13483 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { 13484 phba->work_status[0] = 13485 readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 13486 phba->work_status[1] = 13487 readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 13488 logmask = LOG_TRACE_EVENT; 13489 if (phba->work_status[0] == 13490 SLIPORT_ERR1_REG_ERR_CODE_2 && 13491 phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART) 13492 logmask = LOG_SLI; 13493 lpfc_printf_log(phba, KERN_ERR, logmask, 13494 "2885 Port Status Event: " 13495 "port status reg 0x%x, " 13496 "port smphr reg 0x%x, " 13497 "error 1=0x%x, error 2=0x%x\n", 13498 portstat_reg.word0, 13499 portsmphr, 13500 phba->work_status[0], 13501 phba->work_status[1]); 13502 phba->work_ha |= HA_ERATT; 13503 set_bit(HBA_ERATT_HANDLED, &phba->hba_flag); 13504 return 1; 13505 } 13506 break; 13507 case LPFC_SLI_INTF_IF_TYPE_1: 13508 default: 13509 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13510 "2886 HBA Error Attention on unsupported " 13511 "if type %d.", if_type); 13512 return 1; 13513 } 13514 13515 return 0; 13516 } 13517 13518 /** 13519 * lpfc_sli_check_eratt - check error attention events 13520 * @phba: Pointer to HBA context. 13521 * 13522 * This function is called from timer soft interrupt context to check HBA's 13523 * error attention register bit for error attention events. 
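 *
 * A hypothetical polling sketch (the worker wake-up is illustrative; the
 * driver's error-attention timer path does something similar):
 *
 *   if (lpfc_sli_check_eratt(phba))
 *           lpfc_worker_wake_up(phba);   // worker thread processes HA_ERATT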
13524  *
13525  * This function returns 1 when there is Error Attention in the Host Attention
13526  * Register and returns 0 otherwise.
13527  **/
13528 int
13529 lpfc_sli_check_eratt(struct lpfc_hba *phba)
13530 {
13531     uint32_t ha_copy;
13532 
13533     /* If somebody is waiting to handle an eratt, don't process it
13534      * here. The brdkill function will do this.
13535      */
13536     if (phba->link_flag & LS_IGNORE_ERATT)
13537         return 0;
13538 
13539     /* Check if interrupt handler handles this ERATT */
13540     if (test_bit(HBA_ERATT_HANDLED, &phba->hba_flag))
13541         /* Interrupt handler has handled ERATT */
13542         return 0;
13543 
13544     /*
13545      * If there is deferred error attention, do not check for error
13546      * attention
13547      */
13548     if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag)))
13549         return 0;
13550 
13551     spin_lock_irq(&phba->hbalock);
13552     /* If PCI channel is offline, don't process it */
13553     if (unlikely(pci_channel_offline(phba->pcidev))) {
13554         spin_unlock_irq(&phba->hbalock);
13555         return 0;
13556     }
13557 
13558     switch (phba->sli_rev) {
13559     case LPFC_SLI_REV2:
13560     case LPFC_SLI_REV3:
13561         /* Read chip Host Attention (HA) register */
13562         ha_copy = lpfc_sli_eratt_read(phba);
13563         break;
13564     case LPFC_SLI_REV4:
13565         /* Read device Unrecoverable Error (UERR) registers */
13566         ha_copy = lpfc_sli4_eratt_read(phba);
13567         break;
13568     default:
13569         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13570                         "0299 Invalid SLI revision (%d)\n",
13571                         phba->sli_rev);
13572         ha_copy = 0;
13573         break;
13574     }
13575     spin_unlock_irq(&phba->hbalock);
13576 
13577     return ha_copy;
13578 }
13579 
13580 /**
13581  * lpfc_intr_state_check - Check device state for interrupt handling
13582  * @phba: Pointer to HBA context.
13583  *
13584  * This inline routine checks whether a device or its PCI slot is in a state
13585  * in which the interrupt should be handled.
13586  *
13587  * This function returns 0 if the device or the PCI slot is in a state in
13588  * which the interrupt should be handled; otherwise it returns -EIO.
13589  */
13590 static inline int
13591 lpfc_intr_state_check(struct lpfc_hba *phba)
13592 {
13593     /* If the pci channel is offline, ignore all the interrupts */
13594     if (unlikely(pci_channel_offline(phba->pcidev)))
13595         return -EIO;
13596 
13597     /* Update device level interrupt statistics */
13598     phba->sli.slistat.sli_intr++;
13599 
13600     /* Ignore all interrupts during initialization. */
13601     if (unlikely(phba->link_state < LPFC_LINK_DOWN))
13602         return -EIO;
13603 
13604     return 0;
13605 }
13606 
13607 /**
13608  * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
13609  * @irq: Interrupt number.
13610  * @dev_id: The device context pointer.
13611  *
13612  * This function is directly called from the PCI layer as an interrupt
13613  * service routine when a device with the SLI-3 interface spec is enabled with
13614  * MSI-X multi-message interrupt mode and there are slow-path events in
13615  * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
13616  * interrupt mode, this function is called as part of the device-level
13617  * interrupt handler. When the PCI slot is in error recovery or the HBA
13618  * is undergoing initialization, the interrupt handler will not process
13619  * the interrupt. The link attention and ELS ring attention events are
13620  * handled by the worker thread. The interrupt handler signals the worker
13621  * thread and returns for these events. This function is called without
13622  * any lock held. It gets the hbalock to access and update SLI data
13623  * structures.
13624  *
13625  * This function returns IRQ_HANDLED when the interrupt is handled, else it
13626  * returns IRQ_NONE.
13627  **/
13628 irqreturn_t
13629 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
13630 {
13631     struct lpfc_hba *phba;
13632     uint32_t ha_copy, hc_copy;
13633     uint32_t work_ha_copy;
13634     unsigned long status;
13635     unsigned long iflag;
13636     uint32_t control;
13637 
13638     MAILBOX_t *mbox, *pmbox;
13639     struct lpfc_vport *vport;
13640     struct lpfc_nodelist *ndlp;
13641     struct lpfc_dmabuf *mp;
13642     LPFC_MBOXQ_t *pmb;
13643     int rc;
13644 
13645     /*
13646      * Get the driver's phba structure from the dev_id and
13647      * assume the HBA is not interrupting.
13648      */
13649     phba = (struct lpfc_hba *)dev_id;
13650 
13651     if (unlikely(!phba))
13652         return IRQ_NONE;
13653 
13654     /*
13655      * Some things need to be attended to when this function is invoked as an
13656      * individual interrupt handler in MSI-X multi-message interrupt mode
13657      */
13658     if (phba->intr_type == MSIX) {
13659         /* Check device state for handling interrupt */
13660         if (lpfc_intr_state_check(phba))
13661             return IRQ_NONE;
13662         /* Need to read HA REG for slow-path events */
13663         spin_lock_irqsave(&phba->hbalock, iflag);
13664         if (lpfc_readl(phba->HAregaddr, &ha_copy))
13665             goto unplug_error;
13666         /* If somebody is waiting to handle an eratt don't process it
13667          * here. The brdkill function will do this.
13668          */
13669         if (phba->link_flag & LS_IGNORE_ERATT)
13670             ha_copy &= ~HA_ERATT;
13671         /* Check the need for handling ERATT in interrupt handler */
13672         if (ha_copy & HA_ERATT) {
13673             if (test_and_set_bit(HBA_ERATT_HANDLED,
13674                                  &phba->hba_flag))
13675                 /* ERATT polling has handled ERATT */
13676                 ha_copy &= ~HA_ERATT;
13677         }
13678 
13679         /*
13680          * If there is deferred error attention, do not check for any
13681          * interrupt.
13682          */
13683         if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) {
13684             spin_unlock_irqrestore(&phba->hbalock, iflag);
13685             return IRQ_NONE;
13686         }
13687 
13688         /* Clear up only attention source related to slow-path */
13689         if (lpfc_readl(phba->HCregaddr, &hc_copy))
13690             goto unplug_error;
13691 
13692         writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
13693                            HC_LAINT_ENA | HC_ERINT_ENA),
13694                phba->HCregaddr);
13695         writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
13696                phba->HAregaddr);
13697         writel(hc_copy, phba->HCregaddr);
13698         readl(phba->HAregaddr); /* flush */
13699         spin_unlock_irqrestore(&phba->hbalock, iflag);
13700     } else
13701         ha_copy = phba->ha_copy;
13702 
13703     work_ha_copy = ha_copy & phba->work_ha_mask;
13704 
13705     if (work_ha_copy) {
13706         if (work_ha_copy & HA_LATT) {
13707             if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
13708                 /*
13709                  * Turn off Link Attention interrupts
13710                  * until CLEAR_LA done
13711                  */
13712                 spin_lock_irqsave(&phba->hbalock, iflag);
13713                 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
13714                 if (lpfc_readl(phba->HCregaddr, &control))
13715                     goto unplug_error;
13716                 control &= ~HC_LAINT_ENA;
13717                 writel(control, phba->HCregaddr);
13718                 readl(phba->HCregaddr); /* flush */
13719                 spin_unlock_irqrestore(&phba->hbalock, iflag);
13720             }
13721             else
13722                 work_ha_copy &= ~HA_LATT;
13723         }
13724 
13725         if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
13726             /*
13727              * Turn off slow-ring interrupts; LPFC_ELS_RING is
13728              * the only slow ring.
13729 */ 13730 status = (work_ha_copy & 13731 (HA_RXMASK << (4*LPFC_ELS_RING))); 13732 status >>= (4*LPFC_ELS_RING); 13733 if (status & HA_RXMASK) { 13734 spin_lock_irqsave(&phba->hbalock, iflag); 13735 if (lpfc_readl(phba->HCregaddr, &control)) 13736 goto unplug_error; 13737 13738 lpfc_debugfs_slow_ring_trc(phba, 13739 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", 13740 control, status, 13741 (uint32_t)phba->sli.slistat.sli_intr); 13742 13743 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { 13744 lpfc_debugfs_slow_ring_trc(phba, 13745 "ISR Disable ring:" 13746 "pwork:x%x hawork:x%x wait:x%x", 13747 phba->work_ha, work_ha_copy, 13748 (uint32_t)((unsigned long) 13749 &phba->work_waitq)); 13750 13751 control &= 13752 ~(HC_R0INT_ENA << LPFC_ELS_RING); 13753 writel(control, phba->HCregaddr); 13754 readl(phba->HCregaddr); /* flush */ 13755 } 13756 else { 13757 lpfc_debugfs_slow_ring_trc(phba, 13758 "ISR slow ring: pwork:" 13759 "x%x hawork:x%x wait:x%x", 13760 phba->work_ha, work_ha_copy, 13761 (uint32_t)((unsigned long) 13762 &phba->work_waitq)); 13763 } 13764 spin_unlock_irqrestore(&phba->hbalock, iflag); 13765 } 13766 } 13767 spin_lock_irqsave(&phba->hbalock, iflag); 13768 if (work_ha_copy & HA_ERATT) { 13769 if (lpfc_sli_read_hs(phba)) 13770 goto unplug_error; 13771 /* 13772 * Check if there is a deferred error condition 13773 * is active 13774 */ 13775 if ((HS_FFER1 & phba->work_hs) && 13776 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 13777 HS_FFER6 | HS_FFER7 | HS_FFER8) & 13778 phba->work_hs)) { 13779 set_bit(DEFER_ERATT, &phba->hba_flag); 13780 /* Clear all interrupt enable conditions */ 13781 writel(0, phba->HCregaddr); 13782 readl(phba->HCregaddr); 13783 } 13784 } 13785 13786 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 13787 pmb = phba->sli.mbox_active; 13788 pmbox = &pmb->u.mb; 13789 mbox = phba->mbox; 13790 vport = pmb->vport; 13791 13792 /* First check out the status word */ 13793 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); 13794 if (pmbox->mbxOwner != OWN_HOST) { 13795 spin_unlock_irqrestore(&phba->hbalock, iflag); 13796 /* 13797 * Stray Mailbox Interrupt, mbxCommand <cmd> 13798 * mbxStatus <status> 13799 */ 13800 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13801 "(%d):0304 Stray Mailbox " 13802 "Interrupt mbxCommand x%x " 13803 "mbxStatus x%x\n", 13804 (vport ? vport->vpi : 0), 13805 pmbox->mbxCommand, 13806 pmbox->mbxStatus); 13807 /* clear mailbox attention bit */ 13808 work_ha_copy &= ~HA_MBATT; 13809 } else { 13810 phba->sli.mbox_active = NULL; 13811 spin_unlock_irqrestore(&phba->hbalock, iflag); 13812 phba->last_completion_time = jiffies; 13813 del_timer(&phba->sli.mbox_tmo); 13814 if (pmb->mbox_cmpl) { 13815 lpfc_sli_pcimem_bcopy(mbox, pmbox, 13816 MAILBOX_CMD_SIZE); 13817 if (pmb->out_ext_byte_len && 13818 pmb->ext_buf) 13819 lpfc_sli_pcimem_bcopy( 13820 phba->mbox_ext, 13821 pmb->ext_buf, 13822 pmb->out_ext_byte_len); 13823 } 13824 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 13825 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 13826 13827 lpfc_debugfs_disc_trc(vport, 13828 LPFC_DISC_TRC_MBOX_VPORT, 13829 "MBOX dflt rpi: : " 13830 "status:x%x rpi:x%x", 13831 (uint32_t)pmbox->mbxStatus, 13832 pmbox->un.varWords[0], 0); 13833 13834 if (!pmbox->mbxStatus) { 13835 mp = pmb->ctx_buf; 13836 ndlp = pmb->ctx_ndlp; 13837 13838 /* Reg_LOGIN of dflt RPI was 13839 * successful. new lets get 13840 * rid of the RPI using the 13841 * same mbox buffer. 
13842 */ 13843 lpfc_unreg_login(phba, 13844 vport->vpi, 13845 pmbox->un.varWords[0], 13846 pmb); 13847 pmb->mbox_cmpl = 13848 lpfc_mbx_cmpl_dflt_rpi; 13849 pmb->ctx_buf = mp; 13850 pmb->ctx_ndlp = ndlp; 13851 pmb->vport = vport; 13852 rc = lpfc_sli_issue_mbox(phba, 13853 pmb, 13854 MBX_NOWAIT); 13855 if (rc != MBX_BUSY) 13856 lpfc_printf_log(phba, 13857 KERN_ERR, 13858 LOG_TRACE_EVENT, 13859 "0350 rc should have" 13860 "been MBX_BUSY\n"); 13861 if (rc != MBX_NOT_FINISHED) 13862 goto send_current_mbox; 13863 } 13864 } 13865 spin_lock_irqsave( 13866 &phba->pport->work_port_lock, 13867 iflag); 13868 phba->pport->work_port_events &= 13869 ~WORKER_MBOX_TMO; 13870 spin_unlock_irqrestore( 13871 &phba->pport->work_port_lock, 13872 iflag); 13873 13874 /* Do NOT queue MBX_HEARTBEAT to the worker 13875 * thread for processing. 13876 */ 13877 if (pmbox->mbxCommand == MBX_HEARTBEAT) { 13878 /* Process mbox now */ 13879 phba->sli.mbox_active = NULL; 13880 phba->sli.sli_flag &= 13881 ~LPFC_SLI_MBOX_ACTIVE; 13882 if (pmb->mbox_cmpl) 13883 pmb->mbox_cmpl(phba, pmb); 13884 } else { 13885 /* Queue to worker thread to process */ 13886 lpfc_mbox_cmpl_put(phba, pmb); 13887 } 13888 } 13889 } else 13890 spin_unlock_irqrestore(&phba->hbalock, iflag); 13891 13892 if ((work_ha_copy & HA_MBATT) && 13893 (phba->sli.mbox_active == NULL)) { 13894 send_current_mbox: 13895 /* Process next mailbox command if there is one */ 13896 do { 13897 rc = lpfc_sli_issue_mbox(phba, NULL, 13898 MBX_NOWAIT); 13899 } while (rc == MBX_NOT_FINISHED); 13900 if (rc != MBX_SUCCESS) 13901 lpfc_printf_log(phba, KERN_ERR, 13902 LOG_TRACE_EVENT, 13903 "0349 rc should be " 13904 "MBX_SUCCESS\n"); 13905 } 13906 13907 spin_lock_irqsave(&phba->hbalock, iflag); 13908 phba->work_ha |= work_ha_copy; 13909 spin_unlock_irqrestore(&phba->hbalock, iflag); 13910 lpfc_worker_wake_up(phba); 13911 } 13912 return IRQ_HANDLED; 13913 unplug_error: 13914 spin_unlock_irqrestore(&phba->hbalock, iflag); 13915 return IRQ_HANDLED; 13916 13917 } /* lpfc_sli_sp_intr_handler */ 13918 13919 /** 13920 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device. 13921 * @irq: Interrupt number. 13922 * @dev_id: The device context pointer. 13923 * 13924 * This function is directly called from the PCI layer as an interrupt 13925 * service routine when device with SLI-3 interface spec is enabled with 13926 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 13927 * ring event in the HBA. However, when the device is enabled with either 13928 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 13929 * device-level interrupt handler. When the PCI slot is in error recovery 13930 * or the HBA is undergoing initialization, the interrupt handler will not 13931 * process the interrupt. The SCSI FCP fast-path ring event are handled in 13932 * the intrrupt context. This function is called without any lock held. 13933 * It gets the hbalock to access and update SLI data structures. 13934 * 13935 * This function returns IRQ_HANDLED when interrupt is handled else it 13936 * returns IRQ_NONE. 13937 **/ 13938 irqreturn_t 13939 lpfc_sli_fp_intr_handler(int irq, void *dev_id) 13940 { 13941 struct lpfc_hba *phba; 13942 uint32_t ha_copy; 13943 unsigned long status; 13944 unsigned long iflag; 13945 struct lpfc_sli_ring *pring; 13946 13947 /* Get the driver's phba structure from the dev_id and 13948 * assume the HBA is not interrupting. 
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for FCP ring and other ring events */
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return IRQ_HANDLED;

		/*
		 * If there is deferred error attention, do not check for
		 * any interrupt.
		 */
		if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag)))
			return IRQ_NONE;

		/* Clear up only attention source related to fast-path */
		spin_lock_irqsave(&phba->hbalock, iflag);
		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
			phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	/*
	 * Process all events on FCP ring. Take the optimized path for FCP IO.
	 */
	ha_copy &= ~(phba->work_ha_mask);

	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status >>= (4*LPFC_FCP_RING);
	pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
	if (status & HA_RXMASK)
		lpfc_sli_handle_fast_ring_event(phba, pring, status);

	if (phba->cfg_multi_ring_support == 2) {
		/*
		 * Process all events on extra ring. Take the optimized path
		 * for extra ring IO.
		 */
		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status >>= (4*LPFC_EXTRA_RING);
		if (status & HA_RXMASK) {
			lpfc_sli_handle_fast_ring_event(phba,
					&phba->sli.sli3_ring[LPFC_EXTRA_RING],
					status);
		}
	}
	return IRQ_HANDLED;
}  /* lpfc_sli_fp_intr_handler */

/**
 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the HBA device-level interrupt handler to device with
 * SLI-3 interface spec, called from the PCI layer when either MSI or
 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
 * requires driver attention. This function invokes the slow-path interrupt
 * attention handling function and fast-path interrupt attention handling
 * function in turn to process the relevant HBA attention events. This
 * function is called without any lock held. It gets the hbalock to access
 * and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba  *phba;
	irqreturn_t sp_irq_rc, fp_irq_rc;
	unsigned long status1, status2;
	uint32_t hc_copy;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
14038 */ 14039 phba = (struct lpfc_hba *) dev_id; 14040 14041 if (unlikely(!phba)) 14042 return IRQ_NONE; 14043 14044 /* Check device state for handling interrupt */ 14045 if (lpfc_intr_state_check(phba)) 14046 return IRQ_NONE; 14047 14048 spin_lock(&phba->hbalock); 14049 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { 14050 spin_unlock(&phba->hbalock); 14051 return IRQ_HANDLED; 14052 } 14053 14054 if (unlikely(!phba->ha_copy)) { 14055 spin_unlock(&phba->hbalock); 14056 return IRQ_NONE; 14057 } else if (phba->ha_copy & HA_ERATT) { 14058 if (test_and_set_bit(HBA_ERATT_HANDLED, &phba->hba_flag)) 14059 /* ERATT polling has handled ERATT */ 14060 phba->ha_copy &= ~HA_ERATT; 14061 } 14062 14063 /* 14064 * If there is deferred error attention, do not check for any interrupt. 14065 */ 14066 if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) { 14067 spin_unlock(&phba->hbalock); 14068 return IRQ_NONE; 14069 } 14070 14071 /* Clear attention sources except link and error attentions */ 14072 if (lpfc_readl(phba->HCregaddr, &hc_copy)) { 14073 spin_unlock(&phba->hbalock); 14074 return IRQ_HANDLED; 14075 } 14076 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA 14077 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), 14078 phba->HCregaddr); 14079 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 14080 writel(hc_copy, phba->HCregaddr); 14081 readl(phba->HAregaddr); /* flush */ 14082 spin_unlock(&phba->hbalock); 14083 14084 /* 14085 * Invokes slow-path host attention interrupt handling as appropriate. 14086 */ 14087 14088 /* status of events with mailbox and link attention */ 14089 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); 14090 14091 /* status of events with ELS ring */ 14092 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 14093 status2 >>= (4*LPFC_ELS_RING); 14094 14095 if (status1 || (status2 & HA_RXMASK)) 14096 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); 14097 else 14098 sp_irq_rc = IRQ_NONE; 14099 14100 /* 14101 * Invoke fast-path host attention interrupt handling as appropriate. 14102 */ 14103 14104 /* status of events with FCP ring */ 14105 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 14106 status1 >>= (4*LPFC_FCP_RING); 14107 14108 /* status of events with extra ring */ 14109 if (phba->cfg_multi_ring_support == 2) { 14110 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 14111 status2 >>= (4*LPFC_EXTRA_RING); 14112 } else 14113 status2 = 0; 14114 14115 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 14116 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); 14117 else 14118 fp_irq_rc = IRQ_NONE; 14119 14120 /* Return device-level interrupt handling status */ 14121 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 14122 } /* lpfc_sli_intr_handler */ 14123 14124 /** 14125 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event 14126 * @phba: pointer to lpfc hba data structure. 14127 * 14128 * This routine is invoked by the worker thread to process all the pending 14129 * SLI4 els abort xri events. 
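 *
 * The body uses a common drain idiom: pop one event while holding the
 * list lock, drop the lock to process it, then re-take the lock. A
 * minimal sketch of the same pattern, with hypothetical lock/list names:
 *
 *   spin_lock_irqsave(&lock, iflags);
 *   while (!list_empty(&work_queue)) {
 *           list_remove_head(&work_queue, cq_event,
 *                            struct lpfc_cq_event, list);
 *           spin_unlock_irqrestore(&lock, iflags);
 *           handle(cq_event);       /* runs without the list lock held */
 *           spin_lock_irqsave(&lock, iflags);
 *   }
 *   spin_unlock_irqrestore(&lock, iflags);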
 **/
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	/* First, declare the els xri abort event has been handled */
	clear_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag);

	/* Now, handle all the els xri abort events */
	spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
				       iflags);
		/* Notify aborted XRI for ELS work queue */
		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);

		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
		spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
				  iflags);
	}
	spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
}

/**
 * lpfc_sli4_els_preprocess_rspiocbq - Get response iocbq from els wcqe
 * @phba: Pointer to HBA context object.
 * @irspiocbq: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event and constructs
 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
 * discovery engine to handle.
 *
 * Return: Pointer to the receive IOCBQ, NULL otherwise.
 **/
static struct lpfc_iocbq *
lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
				  struct lpfc_iocbq *irspiocbq)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_wcqe_complete *wcqe;
	unsigned long iflags;

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return NULL;

	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
	spin_lock_irqsave(&pring->ring_lock, iflags);
	pring->stats.iocb_event++;
	/* Look up the ELS command IOCB and create pseudo response IOCB */
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	if (unlikely(!cmdiocbq)) {
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0386 ELS complete with no corresponding "
				"cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
		lpfc_sli_release_iocbq(phba, irspiocbq);
		return NULL;
	}

	memcpy(&irspiocbq->wqe, &cmdiocbq->wqe, sizeof(union lpfc_wqe128));
	memcpy(&irspiocbq->wcqe_cmpl, wcqe, sizeof(*wcqe));

	/* Put the iocb back on the txcmplq */
	lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	return irspiocbq;
}

inline struct lpfc_cq_event *
lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
{
	struct lpfc_cq_event *cq_event;

	/* Allocate a new internal CQ_EVENT entry */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0602 Failed to alloc CQ_EVENT entry\n");
		return NULL;
	}

	/* Move the CQE into the event */
	memcpy(&cq_event->cqe, entry, size);
	return cq_event;
}

/**
 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry with an
 * asynchronous event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0392 Async Event: word0:x%x, word1:x%x, "
			"word2:x%x, word3:x%x\n", mcqe->word0,
			mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);

	cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
	if (!cq_event)
		return false;

	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);

	/* Set the async event flag */
	set_bit(ASYNC_EVENT, &phba->hba_flag);

	return true;
}

/**
 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry with a mailbox
 * completion event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	uint32_t mcqe_status;
	MAILBOX_t *mbox, *pmbox;
	struct lpfc_mqe *mqe;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	unsigned long iflags;
	LPFC_MBOXQ_t *pmb;
	bool workposted = false;
	int rc;

	/* If not a mailbox complete MCQE, bail out by checking mailbox
	 * consume.
	 */
	if (!bf_get(lpfc_trailer_completed, mcqe))
		goto out_no_mqe_complete;

	/* Get the reference to the active mbox command */
	spin_lock_irqsave(&phba->hbalock, iflags);
	pmb = phba->sli.mbox_active;
	if (unlikely(!pmb)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1832 No pending MBOX command to handle\n");
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		goto out_no_mqe_complete;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	mqe = &pmb->u.mqe;
	pmbox = (MAILBOX_t *)&pmb->u.mqe;
	mbox = phba->mbox;
	vport = pmb->vport;

	/* Reset heartbeat timer */
	phba->last_completion_time = jiffies;
	del_timer(&phba->sli.mbox_tmo);

	/* Move mbox data to caller's mailbox region, do endian swapping */
	if (pmb->mbox_cmpl && mbox)
		lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));

	/*
	 * For mcqe errors, conditionally move a modified error code to
	 * the mbox so that the error will not be missed.
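	 *
	 * As an illustration (values hypothetical): an MCQE status of x2
	 * arriving while the MQE still reads MBX_SUCCESS is rewritten to
	 * (LPFC_MBX_ERROR_RANGE | x2), so callers that inspect only
	 * lpfc_mqe_status still observe the failure.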
14322 */ 14323 mcqe_status = bf_get(lpfc_mcqe_status, mcqe); 14324 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 14325 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS) 14326 bf_set(lpfc_mqe_status, mqe, 14327 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 14328 } 14329 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 14330 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 14331 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT, 14332 "MBOX dflt rpi: status:x%x rpi:x%x", 14333 mcqe_status, 14334 pmbox->un.varWords[0], 0); 14335 if (mcqe_status == MB_CQE_STATUS_SUCCESS) { 14336 mp = pmb->ctx_buf; 14337 ndlp = pmb->ctx_ndlp; 14338 14339 /* Reg_LOGIN of dflt RPI was successful. Mark the 14340 * node as having an UNREG_LOGIN in progress to stop 14341 * an unsolicited PLOGI from the same NPortId from 14342 * starting another mailbox transaction. 14343 */ 14344 spin_lock_irqsave(&ndlp->lock, iflags); 14345 ndlp->nlp_flag |= NLP_UNREG_INP; 14346 spin_unlock_irqrestore(&ndlp->lock, iflags); 14347 lpfc_unreg_login(phba, vport->vpi, 14348 pmbox->un.varWords[0], pmb); 14349 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 14350 pmb->ctx_buf = mp; 14351 14352 /* No reference taken here. This is a default 14353 * RPI reg/immediate unreg cycle. The reference was 14354 * taken in the reg rpi path and is released when 14355 * this mailbox completes. 14356 */ 14357 pmb->ctx_ndlp = ndlp; 14358 pmb->vport = vport; 14359 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 14360 if (rc != MBX_BUSY) 14361 lpfc_printf_log(phba, KERN_ERR, 14362 LOG_TRACE_EVENT, 14363 "0385 rc should " 14364 "have been MBX_BUSY\n"); 14365 if (rc != MBX_NOT_FINISHED) 14366 goto send_current_mbox; 14367 } 14368 } 14369 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 14370 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 14371 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 14372 14373 /* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. 
	 */
	if (pmbox->mbxCommand == MBX_HEARTBEAT) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		/* Release the mailbox command posting token */
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		if (bf_get(lpfc_trailer_consumed, mcqe))
			lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
		spin_unlock_irqrestore(&phba->hbalock, iflags);

		/* Post the next mbox command, if there is one */
		lpfc_sli4_post_async_mbox(phba);

		/* Process cmpl now */
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
		return false;
	}

	/* There is mailbox completion work to queue to the worker thread */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbox_cmpl_put(phba, pmb);
	phba->work_ha |= HA_MBATT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	workposted = true;

send_current_mbox:
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Release the mailbox command posting token */
	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	/* Setting the active mailbox pointer needs to be in sync with the
	 * flag clear.
	 */
	phba->sli.mbox_active = NULL;
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	/* Wake up worker thread to post the next pending mailbox command */
	lpfc_worker_wake_up(phba);
	return workposted;

out_no_mqe_complete:
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return false;
}

/**
 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @cqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry; it invokes the
 * proper mailbox complete handling or asynchronous event handling routine
 * according to the MCQE's async bit.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			 struct lpfc_cqe *cqe)
{
	struct lpfc_mcqe mcqe;
	bool workposted;

	cq->CQ_mbox++;

	/* Copy the mailbox MCQE and convert endian order as needed */
	lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));

	/* Invoke the proper event handling routine */
	if (!bf_get(lpfc_trailer_async, &mcqe))
		workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
	else
		workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
	return workposted;
}

/**
 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event.
 *
 * Return: true if work posted to worker thread, otherwise false.
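 *
 * The heavy lifting is deferred to the worker thread: the WCQE is stashed
 * in an iocbq and a flag bit is set. A minimal sketch of the hand-off done
 * below (irspiocbq comes from lpfc_sli_get_iocbq()):
 *
 *   memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
 *   list_add_tail(&irspiocbq->cq_event.list,
 *                 &phba->sli4_hba.sp_queue_event);  /* under hbalock */
 *   set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);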
 **/
static bool
lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_iocbq *irspiocbq;
	unsigned long iflags;
	struct lpfc_sli_ring *pring = cq->pring;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* Log the error status */
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0357 ELS CQE error: status=x%x: "
				"CQE: %08x %08x %08x %08x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
	}

	/* Get an irspiocbq for later ELS response processing use */
	irspiocbq = lpfc_sli_get_iocbq(phba);
	if (!irspiocbq) {
		if (!list_empty(&pring->txq))
			txq_cnt++;
		if (!list_empty(&pring->txcmplq))
			txcmplq_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
				"els_txcmplq_cnt=%d\n",
				txq_cnt, phba->iocb_cnt,
				txcmplq_cnt);
		return false;
	}

	/* Save off the slow-path queue event for work thread to process */
	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&irspiocbq->cq_event.list,
		      &phba->sli4_hba.sp_queue_event);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);

	return true;
}

/**
 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles a slow-path WQ entry consumed event by invoking the
 * proper WQ release routine on the slow-path WQ.
 **/
static void
lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
			     struct lpfc_wcqe_release *wcqe)
{
	/* sanity check on queue memory */
	if (unlikely(!phba->sli4_hba.els_wq))
		return;
	/* Check for the slow-path ELS work queue */
	if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
		lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
				     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
	else
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2579 Slow-path wqe consume event carries "
				"miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
				bf_get(lpfc_wcqe_r_wqe_index, wcqe),
				phba->sli4_hba.els_wq->queue_id);
}

/**
 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an xri abort event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to a WQ completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an XRI abort event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
				   struct lpfc_queue *cq,
				   struct sli4_wcqe_xri_aborted *wcqe)
{
	bool workposted = false;
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	switch (cq->subtype) {
	case LPFC_IO:
		lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			/* Notify aborted XRI for NVME work queue */
			if (phba->nvmet_support)
				lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
		}
		workposted = false;
		break;
	case LPFC_NVME_LS: /* NVME LS uses ELS resources */
	case LPFC_ELS:
		cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
		if (!cq_event) {
			workposted = false;
			break;
		}
		cq_event->hdwq = cq->hdwq;
		spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
				  iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
		/* Set the els xri abort event flag */
		set_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag);
		spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
				       iflags);
		workposted = true;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0603 Invalid CQ subtype %d: "
				"%08x %08x %08x %08x\n",
				cq->subtype, wcqe->word0, wcqe->parameter,
				wcqe->word2, wcqe->word3);
		workposted = false;
		break;
	}
	return workposted;
}

#define FC_RCTL_MDS_DIAGS	0xF4

/**
 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes a receive-queue completion queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
14605 **/ 14606 static bool 14607 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) 14608 { 14609 bool workposted = false; 14610 struct fc_frame_header *fc_hdr; 14611 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 14612 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 14613 struct lpfc_nvmet_tgtport *tgtp; 14614 struct hbq_dmabuf *dma_buf; 14615 uint32_t status, rq_id; 14616 unsigned long iflags; 14617 14618 /* sanity check on queue memory */ 14619 if (unlikely(!hrq) || unlikely(!drq)) 14620 return workposted; 14621 14622 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 14623 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 14624 else 14625 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); 14626 if (rq_id != hrq->queue_id) 14627 goto out; 14628 14629 status = bf_get(lpfc_rcqe_status, rcqe); 14630 switch (status) { 14631 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 14632 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14633 "2537 Receive Frame Truncated!!\n"); 14634 fallthrough; 14635 case FC_STATUS_RQ_SUCCESS: 14636 spin_lock_irqsave(&phba->hbalock, iflags); 14637 lpfc_sli4_rq_release(hrq, drq); 14638 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 14639 if (!dma_buf) { 14640 hrq->RQ_no_buf_found++; 14641 spin_unlock_irqrestore(&phba->hbalock, iflags); 14642 goto out; 14643 } 14644 hrq->RQ_rcv_buf++; 14645 hrq->RQ_buf_posted--; 14646 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); 14647 14648 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; 14649 14650 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS || 14651 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { 14652 spin_unlock_irqrestore(&phba->hbalock, iflags); 14653 /* Handle MDS Loopback frames */ 14654 if (!test_bit(FC_UNLOADING, &phba->pport->load_flag)) 14655 lpfc_sli4_handle_mds_loopback(phba->pport, 14656 dma_buf); 14657 else 14658 lpfc_in_buf_free(phba, &dma_buf->dbuf); 14659 break; 14660 } 14661 14662 /* save off the frame for the work thread to process */ 14663 list_add_tail(&dma_buf->cq_event.list, 14664 &phba->sli4_hba.sp_queue_event); 14665 spin_unlock_irqrestore(&phba->hbalock, iflags); 14666 /* Frame received */ 14667 set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag); 14668 workposted = true; 14669 break; 14670 case FC_STATUS_INSUFF_BUF_FRM_DISC: 14671 if (phba->nvmet_support) { 14672 tgtp = phba->targetport->private; 14673 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14674 "6402 RQE Error x%x, posted %d err_cnt " 14675 "%d: %x %x %x\n", 14676 status, hrq->RQ_buf_posted, 14677 hrq->RQ_no_posted_buf, 14678 atomic_read(&tgtp->rcv_fcp_cmd_in), 14679 atomic_read(&tgtp->rcv_fcp_cmd_out), 14680 atomic_read(&tgtp->xmt_fcp_release)); 14681 } 14682 fallthrough; 14683 14684 case FC_STATUS_INSUFF_BUF_NEED_BUF: 14685 hrq->RQ_no_posted_buf++; 14686 /* Post more buffers if possible */ 14687 set_bit(HBA_POST_RECEIVE_BUFFER, &phba->hba_flag); 14688 workposted = true; 14689 break; 14690 case FC_STATUS_RQ_DMA_FAILURE: 14691 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14692 "2564 RQE DMA Error x%x, x%08x x%08x x%08x " 14693 "x%08x\n", 14694 status, rcqe->word0, rcqe->word1, 14695 rcqe->word2, rcqe->word3); 14696 14697 /* If IV set, no further recovery */ 14698 if (bf_get(lpfc_rcqe_iv, rcqe)) 14699 break; 14700 14701 /* recycle consumed resource */ 14702 spin_lock_irqsave(&phba->hbalock, iflags); 14703 lpfc_sli4_rq_release(hrq, drq); 14704 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 14705 if (!dma_buf) { 14706 hrq->RQ_no_buf_found++; 14707 spin_unlock_irqrestore(&phba->hbalock, iflags); 14708 
			break;
		}
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_in_buf_free(phba, &dma_buf->dbuf);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2565 Unexpected RQE Status x%x, w0-3 x%08x "
				"x%08x x%08x x%08x\n",
				status, rcqe->word0, rcqe->word1,
				rcqe->word2, rcqe->word3);
		break;
	}
out:
	return workposted;
}

/**
 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to a completion queue entry.
 *
 * This routine processes a slow-path work-queue or receive queue completion
 * queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	struct lpfc_cqe cqevt;
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_cqe_code, &cqevt)) {
	case CQE_CODE_COMPL_WQE:
		/* Process the WQ/RQ complete event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&cqevt);
		break;
	case CQE_CODE_RELEASE_WQE:
		/* Process the WQ release event */
		lpfc_sli4_sp_handle_rel_wcqe(phba,
				(struct lpfc_wcqe_release *)&cqevt);
		break;
	case CQE_CODE_XRI_ABORTED:
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&cqevt);
		break;
	case CQE_CODE_RECEIVE:
	case CQE_CODE_RECEIVE_V1:
		/* Process the RQ event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_rcqe(phba,
				(struct lpfc_rcqe *)&cqevt);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0388 Not a valid WCQE code: x%x\n",
				bf_get(lpfc_cqe_code, &cqevt));
		break;
	}
	return workposted;
}

/**
 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to fast-path event queue entry.
 * @speq: Pointer to slow-path event queue.
 *
 * This routine processes an event queue entry from the slow-path event
 * queue. It will check the MajorCode and MinorCode to determine whether
 * this is for a completion event on a completion queue. If not, an error
 * is logged and the routine returns. Otherwise, it will get to the
 * corresponding completion queue and process all the entries on that
 * completion queue, rearm the completion queue, and then return.
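 *
 * Note that the CQEs are not consumed here; the CQ's work item is queued
 * on the CQ's designated CPU so completions stay cache-local (kdump
 * kernels fall back to an unbound queue_work()). A minimal sketch of the
 * dispatch in the body below:
 *
 *   cq->assoc_qp = speq;
 *   ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);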
14795 * 14796 **/ 14797 static void 14798 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 14799 struct lpfc_queue *speq) 14800 { 14801 struct lpfc_queue *cq = NULL, *childq; 14802 uint16_t cqid; 14803 int ret = 0; 14804 14805 /* Get the reference to the corresponding CQ */ 14806 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 14807 14808 list_for_each_entry(childq, &speq->child_list, list) { 14809 if (childq->queue_id == cqid) { 14810 cq = childq; 14811 break; 14812 } 14813 } 14814 if (unlikely(!cq)) { 14815 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 14816 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14817 "0365 Slow-path CQ identifier " 14818 "(%d) does not exist\n", cqid); 14819 return; 14820 } 14821 14822 /* Save EQ associated with this CQ */ 14823 cq->assoc_qp = speq; 14824 14825 if (is_kdump_kernel()) 14826 ret = queue_work(phba->wq, &cq->spwork); 14827 else 14828 ret = queue_work_on(cq->chann, phba->wq, &cq->spwork); 14829 14830 if (!ret) 14831 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14832 "0390 Cannot schedule queue work " 14833 "for CQ eqcqid=%d, cqid=%d on CPU %d\n", 14834 cqid, cq->queue_id, raw_smp_processor_id()); 14835 } 14836 14837 /** 14838 * __lpfc_sli4_process_cq - Process elements of a CQ 14839 * @phba: Pointer to HBA context object. 14840 * @cq: Pointer to CQ to be processed 14841 * @handler: Routine to process each cqe 14842 * @delay: Pointer to usdelay to set in case of rescheduling of the handler 14843 * 14844 * This routine processes completion queue entries in a CQ. While a valid 14845 * queue element is found, the handler is called. During processing checks 14846 * are made for periodic doorbell writes to let the hardware know of 14847 * element consumption. 14848 * 14849 * If the max limit on cqes to process is hit, or there are no more valid 14850 * entries, the loop stops. If we processed a sufficient number of elements, 14851 * meaning there is sufficient load, rather than rearming and generating 14852 * another interrupt, a cq rescheduling delay will be set. A delay of 0 14853 * indicates no rescheduling. 14854 * 14855 * Returns True if work scheduled, False otherwise. 
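 *
 * The consume/notify cadence is the key detail: every cq->notify_interval
 * entries the consumed count is reported to the hardware without
 * rearming, and the final doorbell write rearms only when the poll
 * threshold was not reached. A minimal sketch of the loop below (the
 * cq->max_proc_limit cap is omitted here):
 *
 *   while ((cqe = lpfc_sli4_cq_get(cq))) {
 *           workposted |= handler(phba, cq, cqe);
 *           __lpfc_sli4_consume_cqe(phba, cq, cqe);
 *           consumed++;
 *           if (!(++count % cq->notify_interval)) {
 *                   phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
 *                                                   LPFC_QUEUE_NOARM);
 *                   consumed = 0;
 *           }
 *   }
 *   phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
 *                                   arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);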
14856 **/ 14857 static bool 14858 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq, 14859 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *, 14860 struct lpfc_cqe *), unsigned long *delay) 14861 { 14862 struct lpfc_cqe *cqe; 14863 bool workposted = false; 14864 int count = 0, consumed = 0; 14865 bool arm = true; 14866 14867 /* default - no reschedule */ 14868 *delay = 0; 14869 14870 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0) 14871 goto rearm_and_exit; 14872 14873 /* Process all the entries to the CQ */ 14874 cq->q_flag = 0; 14875 cqe = lpfc_sli4_cq_get(cq); 14876 while (cqe) { 14877 workposted |= handler(phba, cq, cqe); 14878 __lpfc_sli4_consume_cqe(phba, cq, cqe); 14879 14880 consumed++; 14881 if (!(++count % cq->max_proc_limit)) 14882 break; 14883 14884 if (!(count % cq->notify_interval)) { 14885 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, 14886 LPFC_QUEUE_NOARM); 14887 consumed = 0; 14888 cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK; 14889 } 14890 14891 if (count == LPFC_NVMET_CQ_NOTIFY) 14892 cq->q_flag |= HBA_NVMET_CQ_NOTIFY; 14893 14894 cqe = lpfc_sli4_cq_get(cq); 14895 } 14896 if (count >= phba->cfg_cq_poll_threshold) { 14897 *delay = 1; 14898 arm = false; 14899 } 14900 14901 /* Track the max number of CQEs processed in 1 EQ */ 14902 if (count > cq->CQ_max_cqe) 14903 cq->CQ_max_cqe = count; 14904 14905 cq->assoc_qp->EQ_cqe_cnt += count; 14906 14907 /* Catch the no cq entry condition */ 14908 if (unlikely(count == 0)) 14909 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 14910 "0369 No entry from completion queue " 14911 "qid=%d\n", cq->queue_id); 14912 14913 xchg(&cq->queue_claimed, 0); 14914 14915 rearm_and_exit: 14916 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, 14917 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM); 14918 14919 return workposted; 14920 } 14921 14922 /** 14923 * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry 14924 * @cq: pointer to CQ to process 14925 * 14926 * This routine calls the cq processing routine with a handler specific 14927 * to the type of queue bound to it. 14928 * 14929 * The CQ routine returns two values: the first is the calling status, 14930 * which indicates whether work was queued to the background discovery 14931 * thread. If true, the routine should wakeup the discovery thread; 14932 * the second is the delay parameter. If non-zero, rather than rearming 14933 * the CQ and yet another interrupt, the CQ handler should be queued so 14934 * that it is processed in a subsequent polling action. The value of 14935 * the delay indicates when to reschedule it. 
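 *
 * A minimal sketch of how a caller acts on the two results (names as in
 * the body below; error handling omitted):
 *
 *   workposted = __lpfc_sli4_process_cq(phba, cq, handler, &delay);
 *   if (delay)
 *           queue_delayed_work_on(cq->chann, phba->wq,
 *                                 &cq->sched_spwork, delay);
 *   if (workposted)
 *           lpfc_worker_wake_up(phba);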
 **/
static void
__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
{
	struct lpfc_hba *phba = cq->phba;
	unsigned long delay;
	bool workposted = false;
	int ret = 0;

	/* Process and rearm the CQ */
	switch (cq->type) {
	case LPFC_MCQ:
		workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_sp_handle_mcqe,
						&delay);
		break;
	case LPFC_WCQ:
		if (cq->subtype == LPFC_IO)
			workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_fp_handle_cqe,
						&delay);
		else
			workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_sp_handle_cqe,
						&delay);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0370 Invalid completion queue type (%d)\n",
				cq->type);
		return;
	}

	if (delay) {
		if (is_kdump_kernel())
			ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
						 delay);
		else
			ret = queue_delayed_work_on(cq->chann, phba->wq,
						    &cq->sched_spwork, delay);
		if (!ret)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0394 Cannot schedule queue work "
					"for cqid=%d on CPU %d\n",
					cq->queue_id, cq->chann);
	}

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_sli4_sp_process_cq - slow-path work handler when started by
 * interrupt
 * @work: pointer to work element
 *
 * Translates from the work element and calls the slow-path handler.
 **/
static void
lpfc_sli4_sp_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);

	__lpfc_sli4_sp_process_cq(cq);
}

/**
 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
 * @work: pointer to work element
 *
 * Translates from the work element and calls the slow-path handler.
 **/
static void
lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(to_delayed_work(work),
					struct lpfc_queue, sched_spwork);

	__lpfc_sli4_sp_process_cq(cq);
}

/**
 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from a
 * fast-path event queue for FCP command response completion.
 **/
static void
lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_sli_ring *pring = cq->pring;
	struct lpfc_iocbq *cmdiocbq;
	unsigned long iflags;

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* If resource errors reported from HBA, reduce queue
		 * depth of the SCSI device.
		 */
		if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
		     IOSTAT_LOCAL_REJECT)) &&
		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
		     IOERR_NO_RESOURCES))
			phba->lpfc_rampdown_queue_depth(phba);

		/* Log the cmpl status */
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0373 FCP CQE cmpl: status=x%x: "
				"CQE: %08x %08x %08x %08x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
	}

	/* Look up the FCP command IOCB and create pseudo response IOCB */
	spin_lock_irqsave(&pring->ring_lock, iflags);
	pring->stats.iocb_event++;
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0374 FCP complete with no corresponding "
				"cmdiocb: iotag (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	cmdiocbq->isr_timestamp = cq->isr_timestamp;
#endif
	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	if (cmdiocbq->cmd_cmpl) {
		/* For FCP the flag is cleared in cmd_cmpl */
		if (!(cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
		    cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) {
			spin_lock_irqsave(&phba->hbalock, iflags);
			cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
		}

		/* Pass the cmd_iocb and the wcqe to the upper layer */
		memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
		       sizeof(struct lpfc_wcqe_complete));
		cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq);
	} else {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0375 FCP cmdiocb not callback function "
				"iotag: (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	}
}

/**
 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles a fast-path WQ entry consumed event by invoking the
 * proper WQ release routine on the matching fast-path WQ.
 **/
static void
lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_release *wcqe)
{
	struct lpfc_queue *childwq;
	bool wqid_matched = false;
	uint16_t hba_wqid;

	/* Check for fast-path FCP work queue release */
	hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
	list_for_each_entry(childwq, &cq->child_list, list) {
		if (childwq->queue_id == hba_wqid) {
			lpfc_sli4_wq_release(childwq,
					bf_get(lpfc_wcqe_r_wqe_index, wcqe));
			if (childwq->q_flag & HBA_NVMET_WQFULL)
				lpfc_nvmet_wqfull_process(phba, childwq);
			wqid_matched = true;
			break;
		}
	}
	/* Report warning log message if no match found */
	if (wqid_matched != true)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2580 Fast-path wqe consume event carries "
				"miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
}

/**
 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes a receive-queue completion queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			    struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;
	struct rqb_dmabuf *dma_buf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, rq_id;
	unsigned long iflags;
	uint32_t fctl, idx;

	if ((phba->nvmet_support == 0) ||
	    (phba->sli4_hba.nvmet_cqset == NULL))
		return workposted;

	idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
	hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
	drq = phba->sli4_hba.nvmet_mrq_data[idx];

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);

	if ((phba->nvmet_support == 0) ||
	    (rq_id != hrq->queue_id))
		return workposted;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6126 Receive Frame Truncated!!\n");
		fallthrough;
	case FC_STATUS_RQ_SUCCESS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hrq, drq);
		dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;

		/* Just some basic sanity checks on FCP Command frame */
		fctl = (fc_hdr->fh_f_ctl[0] << 16 |
			fc_hdr->fh_f_ctl[1] << 8 |
			fc_hdr->fh_f_ctl[2]);
		if (((fctl &
		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
		    (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
			goto drop;
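
		/* Frames that passed the F_CTL/SEQ_CNT sanity checks above
		 * are dispatched by FC type: FCP-type frames go to the NVMET
		 * unsolicited receive path, anything else is dropped.
		 */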
		if (fc_hdr->fh_type == FC_TYPE_FCP) {
			dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
			lpfc_nvmet_unsol_fcp_event(
				phba, idx, dma_buf, cq->isr_timestamp,
				cq->q_flag & HBA_NVMET_CQ_NOTIFY);
			return false;
		}
drop:
		lpfc_rq_buf_free(phba, &dma_buf->hbuf);
		break;
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		if (phba->nvmet_support) {
			tgtp = phba->targetport->private;
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6401 RQE Error x%x, posted %d err_cnt "
					"%d: %x %x %x\n",
					status, hrq->RQ_buf_posted,
					hrq->RQ_no_posted_buf,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));
		}
		fallthrough;

	case FC_STATUS_INSUFF_BUF_NEED_BUF:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		break;
	case FC_STATUS_RQ_DMA_FAILURE:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2575 RQE DMA Error x%x, x%08x x%08x x%08x "
				"x%08x\n",
				status, rcqe->word0, rcqe->word1,
				rcqe->word2, rcqe->word3);

		/* If IV set, no further recovery */
		if (bf_get(lpfc_rcqe_iv, rcqe))
			break;

		/* recycle consumed resource */
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hrq, drq);
		dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			break;
		}
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_rq_buf_free(phba, &dma_buf->hbuf);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2576 Unexpected RQE Status x%x, w0-3 x%08x "
				"x%08x x%08x x%08x\n",
				status, rcqe->word0, rcqe->word1,
				rcqe->word2, rcqe->word3);
		break;
	}
out:
	return workposted;
}

/**
 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
 * @phba: adapter with cq
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to fast-path completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from a
 * fast-path event queue for FCP command response completion.
 *
 * Return: true if work posted to worker thread, otherwise false.
15284 **/ 15285 static bool 15286 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 15287 struct lpfc_cqe *cqe) 15288 { 15289 struct lpfc_wcqe_release wcqe; 15290 bool workposted = false; 15291 15292 /* Copy the work queue CQE and convert endian order if needed */ 15293 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); 15294 15295 /* Check and process for different type of WCQE and dispatch */ 15296 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { 15297 case CQE_CODE_COMPL_WQE: 15298 case CQE_CODE_NVME_ERSP: 15299 cq->CQ_wq++; 15300 /* Process the WQ complete event */ 15301 phba->last_completion_time = jiffies; 15302 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS) 15303 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, 15304 (struct lpfc_wcqe_complete *)&wcqe); 15305 break; 15306 case CQE_CODE_RELEASE_WQE: 15307 cq->CQ_release_wqe++; 15308 /* Process the WQ release event */ 15309 lpfc_sli4_fp_handle_rel_wcqe(phba, cq, 15310 (struct lpfc_wcqe_release *)&wcqe); 15311 break; 15312 case CQE_CODE_XRI_ABORTED: 15313 cq->CQ_xri_aborted++; 15314 /* Process the WQ XRI abort event */ 15315 phba->last_completion_time = jiffies; 15316 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 15317 (struct sli4_wcqe_xri_aborted *)&wcqe); 15318 break; 15319 case CQE_CODE_RECEIVE_V1: 15320 case CQE_CODE_RECEIVE: 15321 phba->last_completion_time = jiffies; 15322 if (cq->subtype == LPFC_NVMET) { 15323 workposted = lpfc_sli4_nvmet_handle_rcqe( 15324 phba, cq, (struct lpfc_rcqe *)&wcqe); 15325 } 15326 break; 15327 default: 15328 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15329 "0144 Not a valid CQE code: x%x\n", 15330 bf_get(lpfc_wcqe_c_code, &wcqe)); 15331 break; 15332 } 15333 return workposted; 15334 } 15335 15336 /** 15337 * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry 15338 * @cq: Pointer to CQ to be processed 15339 * 15340 * This routine calls the cq processing routine with the handler for 15341 * fast path CQEs. 15342 * 15343 * The CQ routine returns two values: the first is the calling status, 15344 * which indicates whether work was queued to the background discovery 15345 * thread. If true, the routine should wakeup the discovery thread; 15346 * the second is the delay parameter. If non-zero, rather than rearming 15347 * the CQ and yet another interrupt, the CQ handler should be queued so 15348 * that it is processed in a subsequent polling action. The value of 15349 * the delay indicates when to reschedule it. 
 **/
static void
__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
{
	struct lpfc_hba *phba = cq->phba;
	unsigned long delay;
	bool workposted = false;
	int ret;

	/* process and rearm the CQ */
	workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
					     &delay);

	if (delay) {
		if (is_kdump_kernel())
			ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
						 delay);
		else
			ret = queue_delayed_work_on(cq->chann, phba->wq,
						    &cq->sched_irqwork, delay);
		if (!ret)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0367 Cannot schedule queue work "
					"for cqid=%d on CPU %d\n",
					cq->queue_id, cq->chann);
	}

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
 * interrupt
 * @work: pointer to work element
 *
 * Translates from the work element and calls the fast-path handler.
 **/
static void
lpfc_sli4_hba_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);

	__lpfc_sli4_hba_process_cq(cq);
}

/**
 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eq: Pointer to the queue structure.
 * @eqe: Pointer to fast-path event queue entry.
 * @poll_mode: poll mode with which to process the cq.
 *
 * This routine processes an event queue entry from the fast-path event
 * queue. It will check the MajorCode and MinorCode to determine whether
 * this is for a completion event on a completion queue. If not, an error
 * is logged and the routine returns. Otherwise, it will get to the
 * corresponding completion queue and process all the entries on the
 * completion queue, rearm the completion queue, and then return.
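 *
 * CQ resolution prefers the O(1) lookup table over any list walking, with
 * the NVMET CQ set and the NVME LS CQ checked explicitly before falling
 * back to slow-path handling. A minimal sketch of the fast path below:
 *
 *   cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
 *   if (cqid <= phba->sli4_hba.cq_max)
 *           cq = phba->sli4_hba.cq_lookup[cqid];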
15410 **/ 15411 static void 15412 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, 15413 struct lpfc_eqe *eqe, enum lpfc_poll_mode poll_mode) 15414 { 15415 struct lpfc_queue *cq = NULL; 15416 uint32_t qidx = eq->hdwq; 15417 uint16_t cqid, id; 15418 int ret; 15419 15420 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 15421 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15422 "0366 Not a valid completion " 15423 "event: majorcode=x%x, minorcode=x%x\n", 15424 bf_get_le32(lpfc_eqe_major_code, eqe), 15425 bf_get_le32(lpfc_eqe_minor_code, eqe)); 15426 return; 15427 } 15428 15429 /* Get the reference to the corresponding CQ */ 15430 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 15431 15432 /* Use the fast lookup method first */ 15433 if (cqid <= phba->sli4_hba.cq_max) { 15434 cq = phba->sli4_hba.cq_lookup[cqid]; 15435 if (cq) 15436 goto work_cq; 15437 } 15438 15439 /* Next check for NVMET completion */ 15440 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) { 15441 id = phba->sli4_hba.nvmet_cqset[0]->queue_id; 15442 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) { 15443 /* Process NVMET unsol rcv */ 15444 cq = phba->sli4_hba.nvmet_cqset[cqid - id]; 15445 goto process_cq; 15446 } 15447 } 15448 15449 if (phba->sli4_hba.nvmels_cq && 15450 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) { 15451 /* Process NVME unsol rcv */ 15452 cq = phba->sli4_hba.nvmels_cq; 15453 } 15454 15455 /* Otherwise this is a Slow path event */ 15456 if (cq == NULL) { 15457 lpfc_sli4_sp_handle_eqe(phba, eqe, 15458 phba->sli4_hba.hdwq[qidx].hba_eq); 15459 return; 15460 } 15461 15462 process_cq: 15463 if (unlikely(cqid != cq->queue_id)) { 15464 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15465 "0368 Miss-matched fast-path completion " 15466 "queue identifier: eqcqid=%d, fcpcqid=%d\n", 15467 cqid, cq->queue_id); 15468 return; 15469 } 15470 15471 work_cq: 15472 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS) 15473 if (phba->ktime_on) 15474 cq->isr_timestamp = ktime_get_ns(); 15475 else 15476 cq->isr_timestamp = 0; 15477 #endif 15478 15479 switch (poll_mode) { 15480 case LPFC_THREADED_IRQ: 15481 __lpfc_sli4_hba_process_cq(cq); 15482 break; 15483 case LPFC_QUEUE_WORK: 15484 default: 15485 if (is_kdump_kernel()) 15486 ret = queue_work(phba->wq, &cq->irqwork); 15487 else 15488 ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork); 15489 if (!ret) 15490 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15491 "0383 Cannot schedule queue work " 15492 "for CQ eqcqid=%d, cqid=%d on CPU %d\n", 15493 cqid, cq->queue_id, 15494 raw_smp_processor_id()); 15495 break; 15496 } 15497 } 15498 15499 /** 15500 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer 15501 * @work: pointer to work element 15502 * 15503 * translates from the work handler and calls the fast-path handler. 15504 **/ 15505 static void 15506 lpfc_sli4_dly_hba_process_cq(struct work_struct *work) 15507 { 15508 struct lpfc_queue *cq = container_of(to_delayed_work(work), 15509 struct lpfc_queue, sched_irqwork); 15510 15511 __lpfc_sli4_hba_process_cq(cq); 15512 } 15513 15514 /** 15515 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device 15516 * @irq: Interrupt number. 15517 * @dev_id: The device context pointer. 15518 * 15519 * This function is directly called from the PCI layer as an interrupt 15520 * service routine when device with SLI-4 interface spec is enabled with 15521 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 15522 * ring event in the HBA. 
 * However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 * the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures. Note that
 * the FCP EQs and FCP CQs are mapped one-to-one, so the FCP EQ index is
 * equal to the FCP CQ index.
 *
 * The link attention and ELS ring attention events are handled
 * by the worker thread. The interrupt handler signals the worker thread
 * and returns for these events. This function is called without any lock
 * held. It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, IRQ_WAKE_THREAD
 * when interrupt is scheduled to be handled from a threaded irq context, or
 * else returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_hba_eq_hdl *hba_eq_hdl;
	struct lpfc_queue *fpeq;
	unsigned long iflag;
	int hba_eqidx;
	int ecount = 0;
	struct lpfc_eq_intr_info *eqi;

	/* Get the driver's phba structure from the dev_id */
	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
	phba = hba_eq_hdl->phba;
	hba_eqidx = hba_eq_hdl->idx;

	if (unlikely(!phba))
		return IRQ_NONE;
	if (unlikely(!phba->sli4_hba.hdwq))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
	if (unlikely(!fpeq))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (unlikely(lpfc_intr_state_check(phba))) {
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eqcq_flush(phba, fpeq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return IRQ_NONE;
	}

	switch (fpeq->poll_mode) {
	case LPFC_THREADED_IRQ:
		/* CGN mgmt is mutually exclusive from irq processing */
		if (phba->cmf_active_mode == LPFC_CFG_OFF)
			return IRQ_WAKE_THREAD;
		fallthrough;
	case LPFC_QUEUE_WORK:
	default:
		eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
		eqi->icnt++;

		fpeq->last_cpu = raw_smp_processor_id();

		if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
		    fpeq->q_flag & HBA_EQ_DELAY_CHK &&
		    phba->cfg_auto_imax &&
		    fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
		    phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
			lpfc_sli4_mod_hba_eq_delay(phba, fpeq,
						   LPFC_MAX_AUTO_EQ_DELAY);

		/* process and rearm the EQ */
		ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
					      LPFC_QUEUE_WORK);

		if (unlikely(ecount == 0)) {
			fpeq->EQ_no_entry++;
			if (phba->intr_type == MSIX)
				/* MSI-X: interrupt with no EQE is unexpected */
				lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
						"0358 MSI-X interrupt with no EQE\n");
			else
				/* MSI/INTx: treat as a shared interrupt */
				return IRQ_NONE;
		}
	}

	return IRQ_HANDLED;
} /* lpfc_sli4_hba_intr_handler */

/**
 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the device-level interrupt handler for a device with the
 * SLI-4 interface spec, called from the PCI layer when either MSI or Pin-IRQ
 * interrupt mode is enabled and there is an event in the HBA which requires
 * driver attention. This function invokes the slow-path interrupt attention
 * handling function and fast-path interrupt attention handling function in
 * turn to process the relevant HBA attention events. This function is called
 * without any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when the interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t hba_irq_rc;
	bool hba_handled = false;
	int qidx;

	/* Get the driver's phba structure from the dev_id */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */
	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
		hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
					&phba->sli4_hba.hba_eq_hdl[qidx]);
		if (hba_irq_rc == IRQ_HANDLED)
			hba_handled = true;
	}

	return hba_handled ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */

void lpfc_sli4_poll_hbtimer(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
	struct lpfc_queue *eq;

	rcu_read_lock();

	list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
		lpfc_sli4_poll_eq(eq);
	if (!list_empty(&phba->poll_list))
		mod_timer(&phba->cpuhp_poll_timer,
			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));

	rcu_read_unlock();
}

static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
{
	struct lpfc_hba *phba = eq->phba;

	/* kickstart slowpath processing if needed */
	if (list_empty(&phba->poll_list))
		mod_timer(&phba->cpuhp_poll_timer,
			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));

	list_add_rcu(&eq->_poll_list, &phba->poll_list);
	synchronize_rcu();
}

static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
{
	struct lpfc_hba *phba = eq->phba;

	/* Disable slowpath (polled) processing for this eq; the caller is
	 * expected to kick-start the eq again by re-arming it as soon as
	 * possible.
	 */
	list_del_rcu(&eq->_poll_list);
	synchronize_rcu();

	if (list_empty(&phba->poll_list))
		del_timer_sync(&phba->cpuhp_poll_timer);
}

void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
{
	struct lpfc_queue *eq, *next;

	list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
		list_del(&eq->_poll_list);

	INIT_LIST_HEAD(&phba->poll_list);
	synchronize_rcu();
}

static inline void
__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
{
	if (mode == eq->mode)
		return;
	/*
	 * Currently this function is only called during a hotplug event,
	 * and the cpu on which it is executing is going offline. By now
	 * the hotplug code has instructed the scheduler to remove this cpu
	 * from the cpu active mask, so we don't need to worry about being
	 * set aside by the scheduler for a high-priority process.
	 * Interrupts can still arrive, but they are known to retire ASAP.
	 */

	/* Disable polling in the fastpath */
	WRITE_ONCE(eq->mode, mode);
	/* flush out the store buffer */
	smp_wmb();

	/*
	 * Add this eq to the polling list and start polling. For a grace
	 * period both the interrupt handler and the poller will try to
	 * process the eq _but_ that's fine. We have a synchronization
	 * mechanism in place (queue_claimed) to deal with it. This is just
	 * a draining phase for the interrupt handler (not the eq's), as we
	 * have guaranteed through the barrier that all the CPUs have seen
	 * the new CQ_POLLED state, which effectively disables re-arming of
	 * the EQ. The whole idea is that the eq's die off eventually, as we
	 * are no longer re-arming them.
	 */
	mode ? lpfc_sli4_add_to_poll_list(eq) :
	       lpfc_sli4_remove_from_poll_list(eq);
}

void lpfc_sli4_start_polling(struct lpfc_queue *eq)
{
	__lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
}

void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
{
	struct lpfc_hba *phba = eq->phba;

	__lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);

	/* Kick-start the pending I/Os in hardware. Once we switch an eq
	 * back to interrupt processing, the I/O completion path only arms
	 * the eq when it receives a completion; but since the eq is still
	 * in the disarmed state, it never receives one. That would create
	 * a deadlock scenario, so re-arm the eq here.
	 */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
}

/**
 * lpfc_sli4_queue_free - free a queue structure and associated memory
 * @queue: The queue structure to free.
 *
 * This function frees a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called after destroying
 * the queue on the HBA.
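 * All DMA pages on the queue's page_list are released with
 * dma_free_coherent(), and any receive-queue buffer list is freed first.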
 **/
void
lpfc_sli4_queue_free(struct lpfc_queue *queue)
{
	struct lpfc_dmabuf *dmabuf;

	if (!queue)
		return;

	if (!list_empty(&queue->wq_list))
		list_del(&queue->wq_list);

	while (!list_empty(&queue->page_list)) {
		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
				 list);
		dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	if (queue->rqbp) {
		lpfc_free_rq_buffer(queue->phba, queue);
		kfree(queue->rqbp);
	}

	if (!list_empty(&queue->cpu_list))
		list_del(&queue->cpu_list);

	kfree(queue);
}

/**
 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
 * @phba: The HBA that this queue is being created on.
 * @page_size: The size of a queue page
 * @entry_size: The size of each queue entry for this queue.
 * @entry_count: The number of entries that this queue will handle.
 * @cpu: The cpu that will primarily utilize this queue.
 *
 * This function allocates a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called before creating the
 * queue on the HBA.
 **/
struct lpfc_queue *
lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
		      uint32_t entry_size, uint32_t entry_count, int cpu)
{
	struct lpfc_queue *queue;
	struct lpfc_dmabuf *dmabuf;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	uint16_t x, pgcnt;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = page_size;

	pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;

	/* If needed, adjust page count to match the max the adapter supports */
	if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
		pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;

	queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!queue)
		return NULL;

	INIT_LIST_HEAD(&queue->list);
	INIT_LIST_HEAD(&queue->_poll_list);
	INIT_LIST_HEAD(&queue->wq_list);
	INIT_LIST_HEAD(&queue->wqfull_list);
	INIT_LIST_HEAD(&queue->page_list);
	INIT_LIST_HEAD(&queue->child_list);
	INIT_LIST_HEAD(&queue->cpu_list);

	/* Set queue parameters now. If the system cannot provide memory
	 * resources, the free routine needs to know what was allocated.
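	 * The q_pgs page-pointer array is carved out of this same
	 * allocation, immediately after the structure itself.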
	 */
	queue->page_count = pgcnt;
	queue->q_pgs = (void **)&queue[1];
	queue->entry_cnt_per_pg = hw_page_size / entry_size;
	queue->entry_size = entry_size;
	queue->entry_count = entry_count;
	queue->page_size = hw_page_size;
	queue->phba = phba;

	for (x = 0; x < queue->page_count; x++) {
		dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
				      dev_to_node(&phba->pcidev->dev));
		if (!dmabuf)
			goto out_fail;
		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
						  hw_page_size, &dmabuf->phys,
						  GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			goto out_fail;
		}
		dmabuf->buffer_tag = x;
		list_add_tail(&dmabuf->list, &queue->page_list);
		/* use lpfc_sli4_qe to index a particular entry in this page */
		queue->q_pgs[x] = dmabuf->virt;
	}
	INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
	INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
	INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
	INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);

	/* notify_interval will be set during q creation */

	return queue;
out_fail:
	lpfc_sli4_queue_free(queue);
	return NULL;
}

/**
 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
 * @phba: HBA structure that indicates port to create a queue on.
 * @pci_barset: PCI BAR set flag.
 *
 * This function shall perform iomap of the specified PCI BAR address to host
 * memory address if not already done so and return it. The returned host
 * memory address can be NULL.
 */
static void __iomem *
lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
{
	if (!phba->pcidev)
		return NULL;

	switch (pci_barset) {
	case WQ_PCI_BAR_0_AND_1:
		return phba->pci_bar0_memmap_p;
	case WQ_PCI_BAR_2_AND_3:
		return phba->pci_bar2_memmap_p;
	case WQ_PCI_BAR_4_AND_5:
		return phba->pci_bar4_memmap_p;
	default:
		break;
	}
	return NULL;
}

/**
 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
 * @phba: HBA structure that EQs are on.
 * @startq: The starting EQ index to modify
 * @numq: The number of EQs (consecutive indexes) to modify
 * @usdelay: amount of delay, in microseconds
 *
 * This function revises the EQ delay on 1 or more EQs. The EQ delay
 * is set either by writing to a register (if supported by the SLI Port)
 * or by mailbox command. The mailbox command allows several EQs to be
 * updated at once.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The
 * @startq value is the starting EQ index to change, and @numq specifies
 * how many consecutive EQ indexes, starting at @startq, are to be changed.
 * The mailbox command is issued in polled mode, so the routine waits for
 * it to finish before returning.
 *
 * This function returns nothing; if the mailbox cannot be allocated or
 * the command fails, an error is logged, and some EQs may have had
 * their delay multiplier changed.
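 *
 * When the EQ delay register is supported (LPFC_SLI_USE_EQDR), @usdelay
 * is applied per EQ via lpfc_sli4_mod_hba_eq_delay(). Otherwise the
 * multiplier sent in the MODIFY_EQ_DELAY mailbox is computed as
 * (@usdelay * LPFC_DMULT_CONST / LPFC_SEC_TO_USEC) - 1, capped at
 * LPFC_DMULT_MAX.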
 **/
void
lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
			 uint32_t numq, uint32_t usdelay)
{
	struct lpfc_mbx_modify_eq_delay *eq_delay;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_queue *eq;
	int cnt = 0, rc, length;
	uint32_t shdr_status, shdr_add_status;
	uint32_t dmult;
	int qidx;
	union lpfc_sli4_cfg_shdr *shdr;

	if (startq >= phba->cfg_irq_chann)
		return;

	if (usdelay > 0xFFFF) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
				"6429 usdelay %d too large. Scaled down to "
				"0xFFFF.\n", usdelay);
		usdelay = 0xFFFF;
	}

	/* set values by EQ_DELAY register if supported */
	if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
		for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
			eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
			if (!eq)
				continue;

			lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);

			if (++cnt >= numq)
				break;
		}
		return;
	}

	/* Otherwise, set values by mailbox cmd */

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6428 Failed allocating mailbox cmd buffer."
				" EQ delay was not set.\n");
		return;
	}
	length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_delay = &mbox->u.mqe.un.eq_delay;

	/* Calculate delay multiplier from maximum interrupt per second */
	dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
	if (dmult)
		dmult--;
	if (dmult > LPFC_DMULT_MAX)
		dmult = LPFC_DMULT_MAX;

	for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
		eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
		if (!eq)
			continue;
		eq->q_mode = usdelay;
		eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
		eq_delay->u.request.eq[cnt].phase = 0;
		eq_delay->u.request.eq[cnt].delay_multi = dmult;

		if (++cnt >= numq)
			break;
	}
	eq_delay->u.request.num_eq = cnt;

	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->ctx_ndlp = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2512 MODIFY_EQ_DELAY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
	}
	mempool_free(mbox, phba->mbox_mem_pool);
}

/**
 * lpfc_eq_create - Create an Event Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @eq: The queue structure to use to create the event queue.
 * @imax: The maximum interrupt per second limit.
 *
 * This function creates an event queue, as detailed in @eq, on a port,
 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send a mailbox command to the HBA.
 * The @eq struct is used to get the entry count and entry size that are
 * necessary to determine the number of pages to allocate and use for this
 * queue. This function sends the EQ_CREATE mailbox command to the HBA to
 * set up the event queue; the command is issued in polled mode, so the
 * routine waits for it to finish before continuing.
 *
 * On success this function returns zero. If unable to allocate enough
 * memory it returns -ENOMEM; if the queue-create mailbox command fails it
 * returns -ENXIO.
 **/
int
lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
{
	struct lpfc_mbx_eq_create *eq_create;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	struct lpfc_dmabuf *dmabuf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint16_t dmult;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!eq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_create = &mbox->u.mqe.un.eq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
	       eq->page_count);
	bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
	       LPFC_EQE_SIZE);
	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);

	/* Use version 2 of CREATE_EQ if eqav is set */
	if (phba->sli4_hba.pc_sli4_params.eqav) {
		bf_set(lpfc_mbox_hdr_version, &shdr->request,
		       LPFC_Q_CREATE_VERSION_2);
		bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
		       phba->sli4_hba.pc_sli4_params.eqav);
	}

	/* don't setup delay multiplier using EQ_CREATE */
	dmult = 0;
	bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
	       dmult);
	switch (eq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0360 Unsupported EQ count. (%d)\n",
				eq->entry_count);
		if (eq->entry_count < 256) {
			status = -EINVAL;
			goto out;
		}
		fallthrough;	/* otherwise default to smallest count */
	case 256:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_1024);
		break;
	case 2048:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_2048);
		break;
	case 4096:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_4096);
		break;
	}
	list_for_each_entry(dmabuf, &eq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->ctx_buf = NULL;
	mbox->ctx_ndlp = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2500 EQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	eq->type = LPFC_EQ;
	eq->subtype = LPFC_NONE;
	eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
	if (eq->queue_id == 0xFFFF)
		status = -ENXIO;
	eq->host_index = 0;
	eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
	eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_sli4_hba_intr_handler_th - SLI4 HBA threaded interrupt handler
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This routine is a mirror of lpfc_sli4_hba_intr_handler, but executed within
 * threaded irq context.
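 * The EQ is processed and rearmed here with LPFC_THREADED_IRQ, so
 * completion-queue work is handled inline rather than being queued to the
 * driver workqueue.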
 *
 * Returns
 *	IRQ_HANDLED - interrupt is handled
 *	IRQ_NONE - otherwise
 **/
irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_hba_eq_hdl *hba_eq_hdl;
	struct lpfc_queue *fpeq;
	int ecount = 0;
	int hba_eqidx;
	struct lpfc_eq_intr_info *eqi;

	/* Get the driver's phba structure from the dev_id */
	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
	phba = hba_eq_hdl->phba;
	hba_eqidx = hba_eq_hdl->idx;

	if (unlikely(!phba))
		return IRQ_NONE;
	if (unlikely(!phba->sli4_hba.hdwq))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
	if (unlikely(!fpeq))
		return IRQ_NONE;

	eqi = per_cpu_ptr(phba->sli4_hba.eq_info, raw_smp_processor_id());
	eqi->icnt++;

	fpeq->last_cpu = raw_smp_processor_id();

	if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
	    fpeq->q_flag & HBA_EQ_DELAY_CHK &&
	    phba->cfg_auto_imax &&
	    fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
	    phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
		lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);

	/* process and rearm the EQ */
	ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
				      LPFC_THREADED_IRQ);

	if (unlikely(ecount == 0)) {
		fpeq->EQ_no_entry++;
		if (phba->intr_type == MSIX)
			/* MSI-X vector is not shared, so an empty EQ is
			 * unexpected; log it.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3358 MSI-X interrupt with no EQE\n");
		else
			/* INTx/MSI may be shared; the event was not ours */
			return IRQ_NONE;
	}
	return IRQ_HANDLED;
}

/**
 * lpfc_cq_create - Create a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @cq: The queue structure to use to create the completion queue.
 * @eq: The event queue to bind this completion queue to.
 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
 *
 * This function creates a completion queue, as detailed in @cq, on a port,
 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The @cq
 * struct is used to get the entry count and entry size that are necessary
 * to determine the number of pages to allocate and use for this queue.
 * The @eq is used to indicate which event queue to bind this completion
 * queue to. This function sends the CQ_CREATE mailbox command to the HBA
 * to set up the completion queue; the command is issued in polled mode, so
 * the routine waits for it to finish before continuing.
 *
 * On success this function returns zero. If unable to allocate enough
 * memory it returns -ENOMEM; if the queue-create mailbox command fails it
 * returns -ENXIO.
16256 **/ 16257 int 16258 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, 16259 struct lpfc_queue *eq, uint32_t type, uint32_t subtype) 16260 { 16261 struct lpfc_mbx_cq_create *cq_create; 16262 struct lpfc_dmabuf *dmabuf; 16263 LPFC_MBOXQ_t *mbox; 16264 int rc, length, status = 0; 16265 uint32_t shdr_status, shdr_add_status; 16266 union lpfc_sli4_cfg_shdr *shdr; 16267 16268 /* sanity check on queue memory */ 16269 if (!cq || !eq) 16270 return -ENODEV; 16271 16272 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16273 if (!mbox) 16274 return -ENOMEM; 16275 length = (sizeof(struct lpfc_mbx_cq_create) - 16276 sizeof(struct lpfc_sli4_cfg_mhdr)); 16277 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 16278 LPFC_MBOX_OPCODE_CQ_CREATE, 16279 length, LPFC_SLI4_MBX_EMBED); 16280 cq_create = &mbox->u.mqe.un.cq_create; 16281 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; 16282 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, 16283 cq->page_count); 16284 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); 16285 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); 16286 bf_set(lpfc_mbox_hdr_version, &shdr->request, 16287 phba->sli4_hba.pc_sli4_params.cqv); 16288 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { 16289 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 16290 (cq->page_size / SLI4_PAGE_SIZE)); 16291 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, 16292 eq->queue_id); 16293 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context, 16294 phba->sli4_hba.pc_sli4_params.cqav); 16295 } else { 16296 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, 16297 eq->queue_id); 16298 } 16299 switch (cq->entry_count) { 16300 case 2048: 16301 case 4096: 16302 if (phba->sli4_hba.pc_sli4_params.cqv == 16303 LPFC_Q_CREATE_VERSION_2) { 16304 cq_create->u.request.context.lpfc_cq_context_count = 16305 cq->entry_count; 16306 bf_set(lpfc_cq_context_count, 16307 &cq_create->u.request.context, 16308 LPFC_CQ_CNT_WORD7); 16309 break; 16310 } 16311 fallthrough; 16312 default: 16313 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16314 "0361 Unsupported CQ count: " 16315 "entry cnt %d sz %d pg cnt %d\n", 16316 cq->entry_count, cq->entry_size, 16317 cq->page_count); 16318 if (cq->entry_count < 256) { 16319 status = -EINVAL; 16320 goto out; 16321 } 16322 fallthrough; /* otherwise default to smallest count */ 16323 case 256: 16324 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 16325 LPFC_CQ_CNT_256); 16326 break; 16327 case 512: 16328 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 16329 LPFC_CQ_CNT_512); 16330 break; 16331 case 1024: 16332 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 16333 LPFC_CQ_CNT_1024); 16334 break; 16335 } 16336 list_for_each_entry(dmabuf, &cq->page_list, list) { 16337 memset(dmabuf->virt, 0, cq->page_size); 16338 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 16339 putPaddrLow(dmabuf->phys); 16340 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 16341 putPaddrHigh(dmabuf->phys); 16342 } 16343 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16344 16345 /* The IOCTL status is embedded in the mailbox subheader. 
 */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2501 CQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
	if (cq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	/* link the cq onto the parent eq child list */
	list_add_tail(&cq->list, &eq->child_list);
	/* Set up completion queue's type and subtype */
	cq->type = type;
	cq->subtype = subtype;
	cq->assoc_qid = eq->queue_id;
	cq->assoc_qp = eq;
	cq->host_index = 0;
	cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
	cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);

	if (cq->queue_id > phba->sli4_hba.cq_max)
		phba->sli4_hba.cq_max = cq->queue_id;
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
 * @phba: HBA structure that indicates port to create a queue on.
 * @cqp: The queue structure array to use to create the completion queues.
 * @hdwq: The hardware queue array with the EQs to bind completion queues to.
 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
 *
 * This function creates a set of completion queues to support MRQ, as
 * detailed in @cqp, on a port described by @phba, by sending a
 * CREATE_CQ_SET mailbox command to the HBA.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The @cqp
 * array is used to get the entry count and entry size that are necessary
 * to determine the number of pages to allocate and use for these queues.
 * The EQs in @hdwq indicate which event queues to bind the completion
 * queues to. This function sends the CREATE_CQ_SET mailbox command to the
 * HBA to set up the completion queues; the command is issued in polled
 * mode, so the routine waits for it to finish before continuing.
 *
 * On success this function returns zero. If unable to allocate enough
 * memory it returns -ENOMEM; if the queue-create mailbox command fails it
 * returns -ENXIO.
16403 **/ 16404 int 16405 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, 16406 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type, 16407 uint32_t subtype) 16408 { 16409 struct lpfc_queue *cq; 16410 struct lpfc_queue *eq; 16411 struct lpfc_mbx_cq_create_set *cq_set; 16412 struct lpfc_dmabuf *dmabuf; 16413 LPFC_MBOXQ_t *mbox; 16414 int rc, length, alloclen, status = 0; 16415 int cnt, idx, numcq, page_idx = 0; 16416 uint32_t shdr_status, shdr_add_status; 16417 union lpfc_sli4_cfg_shdr *shdr; 16418 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 16419 16420 /* sanity check on queue memory */ 16421 numcq = phba->cfg_nvmet_mrq; 16422 if (!cqp || !hdwq || !numcq) 16423 return -ENODEV; 16424 16425 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16426 if (!mbox) 16427 return -ENOMEM; 16428 16429 length = sizeof(struct lpfc_mbx_cq_create_set); 16430 length += ((numcq * cqp[0]->page_count) * 16431 sizeof(struct dma_address)); 16432 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16433 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length, 16434 LPFC_SLI4_MBX_NEMBED); 16435 if (alloclen < length) { 16436 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16437 "3098 Allocated DMA memory size (%d) is " 16438 "less than the requested DMA memory size " 16439 "(%d)\n", alloclen, length); 16440 status = -ENOMEM; 16441 goto out; 16442 } 16443 cq_set = mbox->sge_array->addr[0]; 16444 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr; 16445 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0); 16446 16447 for (idx = 0; idx < numcq; idx++) { 16448 cq = cqp[idx]; 16449 eq = hdwq[idx].hba_eq; 16450 if (!cq || !eq) { 16451 status = -ENOMEM; 16452 goto out; 16453 } 16454 if (!phba->sli4_hba.pc_sli4_params.supported) 16455 hw_page_size = cq->page_size; 16456 16457 switch (idx) { 16458 case 0: 16459 bf_set(lpfc_mbx_cq_create_set_page_size, 16460 &cq_set->u.request, 16461 (hw_page_size / SLI4_PAGE_SIZE)); 16462 bf_set(lpfc_mbx_cq_create_set_num_pages, 16463 &cq_set->u.request, cq->page_count); 16464 bf_set(lpfc_mbx_cq_create_set_evt, 16465 &cq_set->u.request, 1); 16466 bf_set(lpfc_mbx_cq_create_set_valid, 16467 &cq_set->u.request, 1); 16468 bf_set(lpfc_mbx_cq_create_set_cqe_size, 16469 &cq_set->u.request, 0); 16470 bf_set(lpfc_mbx_cq_create_set_num_cq, 16471 &cq_set->u.request, numcq); 16472 bf_set(lpfc_mbx_cq_create_set_autovalid, 16473 &cq_set->u.request, 16474 phba->sli4_hba.pc_sli4_params.cqav); 16475 switch (cq->entry_count) { 16476 case 2048: 16477 case 4096: 16478 if (phba->sli4_hba.pc_sli4_params.cqv == 16479 LPFC_Q_CREATE_VERSION_2) { 16480 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 16481 &cq_set->u.request, 16482 cq->entry_count); 16483 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 16484 &cq_set->u.request, 16485 LPFC_CQ_CNT_WORD7); 16486 break; 16487 } 16488 fallthrough; 16489 default: 16490 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16491 "3118 Bad CQ count. 
(%d)\n", 16492 cq->entry_count); 16493 if (cq->entry_count < 256) { 16494 status = -EINVAL; 16495 goto out; 16496 } 16497 fallthrough; /* otherwise default to smallest */ 16498 case 256: 16499 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 16500 &cq_set->u.request, LPFC_CQ_CNT_256); 16501 break; 16502 case 512: 16503 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 16504 &cq_set->u.request, LPFC_CQ_CNT_512); 16505 break; 16506 case 1024: 16507 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 16508 &cq_set->u.request, LPFC_CQ_CNT_1024); 16509 break; 16510 } 16511 bf_set(lpfc_mbx_cq_create_set_eq_id0, 16512 &cq_set->u.request, eq->queue_id); 16513 break; 16514 case 1: 16515 bf_set(lpfc_mbx_cq_create_set_eq_id1, 16516 &cq_set->u.request, eq->queue_id); 16517 break; 16518 case 2: 16519 bf_set(lpfc_mbx_cq_create_set_eq_id2, 16520 &cq_set->u.request, eq->queue_id); 16521 break; 16522 case 3: 16523 bf_set(lpfc_mbx_cq_create_set_eq_id3, 16524 &cq_set->u.request, eq->queue_id); 16525 break; 16526 case 4: 16527 bf_set(lpfc_mbx_cq_create_set_eq_id4, 16528 &cq_set->u.request, eq->queue_id); 16529 break; 16530 case 5: 16531 bf_set(lpfc_mbx_cq_create_set_eq_id5, 16532 &cq_set->u.request, eq->queue_id); 16533 break; 16534 case 6: 16535 bf_set(lpfc_mbx_cq_create_set_eq_id6, 16536 &cq_set->u.request, eq->queue_id); 16537 break; 16538 case 7: 16539 bf_set(lpfc_mbx_cq_create_set_eq_id7, 16540 &cq_set->u.request, eq->queue_id); 16541 break; 16542 case 8: 16543 bf_set(lpfc_mbx_cq_create_set_eq_id8, 16544 &cq_set->u.request, eq->queue_id); 16545 break; 16546 case 9: 16547 bf_set(lpfc_mbx_cq_create_set_eq_id9, 16548 &cq_set->u.request, eq->queue_id); 16549 break; 16550 case 10: 16551 bf_set(lpfc_mbx_cq_create_set_eq_id10, 16552 &cq_set->u.request, eq->queue_id); 16553 break; 16554 case 11: 16555 bf_set(lpfc_mbx_cq_create_set_eq_id11, 16556 &cq_set->u.request, eq->queue_id); 16557 break; 16558 case 12: 16559 bf_set(lpfc_mbx_cq_create_set_eq_id12, 16560 &cq_set->u.request, eq->queue_id); 16561 break; 16562 case 13: 16563 bf_set(lpfc_mbx_cq_create_set_eq_id13, 16564 &cq_set->u.request, eq->queue_id); 16565 break; 16566 case 14: 16567 bf_set(lpfc_mbx_cq_create_set_eq_id14, 16568 &cq_set->u.request, eq->queue_id); 16569 break; 16570 case 15: 16571 bf_set(lpfc_mbx_cq_create_set_eq_id15, 16572 &cq_set->u.request, eq->queue_id); 16573 break; 16574 } 16575 16576 /* link the cq onto the parent eq child list */ 16577 list_add_tail(&cq->list, &eq->child_list); 16578 /* Set up completion queue's type and subtype */ 16579 cq->type = type; 16580 cq->subtype = subtype; 16581 cq->assoc_qid = eq->queue_id; 16582 cq->assoc_qp = eq; 16583 cq->host_index = 0; 16584 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL; 16585 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, 16586 cq->entry_count); 16587 cq->chann = idx; 16588 16589 rc = 0; 16590 list_for_each_entry(dmabuf, &cq->page_list, list) { 16591 memset(dmabuf->virt, 0, hw_page_size); 16592 cnt = page_idx + dmabuf->buffer_tag; 16593 cq_set->u.request.page[cnt].addr_lo = 16594 putPaddrLow(dmabuf->phys); 16595 cq_set->u.request.page[cnt].addr_hi = 16596 putPaddrHigh(dmabuf->phys); 16597 rc++; 16598 } 16599 page_idx += rc; 16600 } 16601 16602 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16603 16604 /* The IOCTL status is embedded in the mailbox subheader. 
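 * For this non-embedded (NEMBED) mailbox command, the subheader is read
 * from the first external SGE buffer (mbox->sge_array->addr[0]) rather
 * than from the embedded MQE.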
 */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3119 CQ_CREATE_SET mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
	if (rc == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	for (idx = 0; idx < numcq; idx++) {
		cq = cqp[idx];
		cq->queue_id = rc + idx;
		if (cq->queue_id > phba->sli4_hba.cq_max)
			phba->sli4_hba.cq_max = cq->queue_id;
	}

out:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return status;
}

/**
 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 * @cq: The completion queue to associate with this mailbox queue.
 *
 * This function provides fallback (fb) functionality when the
 * mq_create_ext fails on older FW generations. Its purpose is otherwise
 * identical to mq_create_ext.
 *
 * This routine cannot fail as all attributes were previously accessed and
 * initialized in mq_create_ext.
 **/
static void
lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
		       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_dmabuf *dmabuf;
	int length;

	length = (sizeof(struct lpfc_mbx_mq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	mq_create = &mbox->u.mqe.un.mq_create;
	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
	       mq->page_count);
	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
	switch (mq->entry_count) {
	case 16:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);
		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}
}

/**
 * lpfc_mq_create - Create a mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to associate with this mailbox queue.
 * @subtype: The queue's subtype.
 *
 * This function creates a mailbox queue, as detailed in @mq, on a port,
 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The @cq
 * struct is used to get the entry count and entry size that are necessary
 * to determine the number of pages to allocate and use for this queue.
 * This function sends the MQ_CREATE mailbox command to the HBA to set up
 * the mailbox queue; the command is issued in polled mode, so the routine
 * waits for it to finish before continuing.
 *
 * On success this function returns zero. If unable to allocate enough
 * memory it returns -ENOMEM; if the queue-create mailbox command fails it
 * returns -ENXIO.
 **/
int32_t
lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_mbx_mq_create_ext *mq_create_ext;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!mq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
			 length, LPFC_SLI4_MBX_EMBED);

	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
	bf_set(lpfc_mbx_mq_create_ext_num_pages,
	       &mq_create_ext->u.request, mq->page_count);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.mqv);
	if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
		bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
		       cq->queue_id);
	else
		bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
		       cq->queue_id);
	switch (mq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0362 Unsupported MQ count. 
(%d)\n", 16768 mq->entry_count); 16769 if (mq->entry_count < 16) { 16770 status = -EINVAL; 16771 goto out; 16772 } 16773 fallthrough; /* otherwise default to smallest count */ 16774 case 16: 16775 bf_set(lpfc_mq_context_ring_size, 16776 &mq_create_ext->u.request.context, 16777 LPFC_MQ_RING_SIZE_16); 16778 break; 16779 case 32: 16780 bf_set(lpfc_mq_context_ring_size, 16781 &mq_create_ext->u.request.context, 16782 LPFC_MQ_RING_SIZE_32); 16783 break; 16784 case 64: 16785 bf_set(lpfc_mq_context_ring_size, 16786 &mq_create_ext->u.request.context, 16787 LPFC_MQ_RING_SIZE_64); 16788 break; 16789 case 128: 16790 bf_set(lpfc_mq_context_ring_size, 16791 &mq_create_ext->u.request.context, 16792 LPFC_MQ_RING_SIZE_128); 16793 break; 16794 } 16795 list_for_each_entry(dmabuf, &mq->page_list, list) { 16796 memset(dmabuf->virt, 0, hw_page_size); 16797 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = 16798 putPaddrLow(dmabuf->phys); 16799 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = 16800 putPaddrHigh(dmabuf->phys); 16801 } 16802 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16803 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 16804 &mq_create_ext->u.response); 16805 if (rc != MBX_SUCCESS) { 16806 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 16807 "2795 MQ_CREATE_EXT failed with " 16808 "status x%x. Failback to MQ_CREATE.\n", 16809 rc); 16810 lpfc_mq_create_fb_init(phba, mq, mbox, cq); 16811 mq_create = &mbox->u.mqe.un.mq_create; 16812 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16813 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; 16814 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 16815 &mq_create->u.response); 16816 } 16817 16818 /* The IOCTL status is embedded in the mailbox subheader. */ 16819 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16820 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16821 if (shdr_status || shdr_add_status || rc) { 16822 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16823 "2502 MQ_CREATE mailbox failed with " 16824 "status x%x add_status x%x, mbx status x%x\n", 16825 shdr_status, shdr_add_status, rc); 16826 status = -ENXIO; 16827 goto out; 16828 } 16829 if (mq->queue_id == 0xFFFF) { 16830 status = -ENXIO; 16831 goto out; 16832 } 16833 mq->type = LPFC_MQ; 16834 mq->assoc_qid = cq->queue_id; 16835 mq->subtype = subtype; 16836 mq->host_index = 0; 16837 mq->hba_index = 0; 16838 16839 /* link the mq onto the parent cq child list */ 16840 list_add_tail(&mq->list, &cq->child_list); 16841 out: 16842 mempool_free(mbox, phba->mbox_mem_pool); 16843 return status; 16844 } 16845 16846 /** 16847 * lpfc_wq_create - Create a Work Queue on the HBA 16848 * @phba: HBA structure that indicates port to create a queue on. 16849 * @wq: The queue structure to use to create the work queue. 16850 * @cq: The completion queue to bind this work queue to. 16851 * @subtype: The subtype of the work queue indicating its functionality. 16852 * 16853 * This function creates a work queue, as detailed in @wq, on a port, described 16854 * by @phba by sending a WQ_CREATE mailbox command to the HBA. 16855 * 16856 * The @phba struct is used to send mailbox command to HBA. The @wq struct 16857 * is used to get the entry count and entry size that are necessary to 16858 * determine the number of pages to allocate and use for this queue. The @cq 16859 * is used to indicate which completion queue to bind this work queue to. This 16860 * function will send the WQ_CREATE mailbox command to the HBA to setup the 16861 * work queue. 
 * The command is issued in polled mode, so the routine waits for it to
 * finish before continuing.
 *
 * On success this function returns zero. If unable to allocate enough
 * memory it returns -ENOMEM; if the queue-create mailbox command fails it
 * returns -ENXIO.
 **/
int
lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_wq_create *wq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	struct dma_address *page;
	void __iomem *bar_memmap_p;
	uint32_t db_offset;
	uint16_t pci_barset;
	uint8_t dpp_barset;
	uint32_t dpp_offset;
	uint8_t wq_create_version;
#ifdef CONFIG_X86
	unsigned long pg_addr;
#endif

	/* sanity check on queue memory */
	if (!wq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = wq->page_size;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_wq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	wq_create = &mbox->u.mqe.un.wq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
	       wq->page_count);
	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
	       cq->queue_id);

	/* wqv is the earliest version supported, NOT the latest */
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.wqv);

	if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
	    (wq->page_size > SLI4_PAGE_SIZE))
		wq_create_version = LPFC_Q_CREATE_VERSION_1;
	else
		wq_create_version = LPFC_Q_CREATE_VERSION_0;

	switch (wq_create_version) {
	case LPFC_Q_CREATE_VERSION_1:
		bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
		       wq->entry_count);
		bf_set(lpfc_mbox_hdr_version, &shdr->request,
		       LPFC_Q_CREATE_VERSION_1);

		switch (wq->entry_size) {
		default:
		case 64:
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_64);
			break;
		case 128:
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_128);
			break;
		}
		/* Request DPP by default */
		bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
		bf_set(lpfc_mbx_wq_create_page_size,
		       &wq_create->u.request_1,
		       (wq->page_size / SLI4_PAGE_SIZE));
		page = wq_create->u.request_1.page;
		break;
	default:
		page = wq_create->u.request.page;
		break;
	}

	list_for_each_entry(dmabuf, &wq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
		page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
	}

	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);

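	/* Issue WQ_CREATE in polled mode; the response layout (queue id,
	 * doorbell and DPP details) depends on the create version chosen
	 * above.
	 */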
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16963 /* The IOCTL status is embedded in the mailbox subheader. */ 16964 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16965 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16966 if (shdr_status || shdr_add_status || rc) { 16967 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16968 "2503 WQ_CREATE mailbox failed with " 16969 "status x%x add_status x%x, mbx status x%x\n", 16970 shdr_status, shdr_add_status, rc); 16971 status = -ENXIO; 16972 goto out; 16973 } 16974 16975 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) 16976 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, 16977 &wq_create->u.response); 16978 else 16979 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id, 16980 &wq_create->u.response_1); 16981 16982 if (wq->queue_id == 0xFFFF) { 16983 status = -ENXIO; 16984 goto out; 16985 } 16986 16987 wq->db_format = LPFC_DB_LIST_FORMAT; 16988 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) { 16989 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 16990 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format, 16991 &wq_create->u.response); 16992 if ((wq->db_format != LPFC_DB_LIST_FORMAT) && 16993 (wq->db_format != LPFC_DB_RING_FORMAT)) { 16994 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16995 "3265 WQ[%d] doorbell format " 16996 "not supported: x%x\n", 16997 wq->queue_id, wq->db_format); 16998 status = -EINVAL; 16999 goto out; 17000 } 17001 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set, 17002 &wq_create->u.response); 17003 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 17004 pci_barset); 17005 if (!bar_memmap_p) { 17006 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17007 "3263 WQ[%d] failed to memmap " 17008 "pci barset:x%x\n", 17009 wq->queue_id, pci_barset); 17010 status = -ENOMEM; 17011 goto out; 17012 } 17013 db_offset = wq_create->u.response.doorbell_offset; 17014 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) && 17015 (db_offset != LPFC_ULP1_WQ_DOORBELL)) { 17016 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17017 "3252 WQ[%d] doorbell offset " 17018 "not supported: x%x\n", 17019 wq->queue_id, db_offset); 17020 status = -EINVAL; 17021 goto out; 17022 } 17023 wq->db_regaddr = bar_memmap_p + db_offset; 17024 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 17025 "3264 WQ[%d]: barset:x%x, offset:x%x, " 17026 "format:x%x\n", wq->queue_id, 17027 pci_barset, db_offset, wq->db_format); 17028 } else 17029 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 17030 } else { 17031 /* Check if DPP was honored by the firmware */ 17032 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp, 17033 &wq_create->u.response_1); 17034 if (wq->dpp_enable) { 17035 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set, 17036 &wq_create->u.response_1); 17037 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 17038 pci_barset); 17039 if (!bar_memmap_p) { 17040 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17041 "3267 WQ[%d] failed to memmap " 17042 "pci barset:x%x\n", 17043 wq->queue_id, pci_barset); 17044 status = -ENOMEM; 17045 goto out; 17046 } 17047 db_offset = wq_create->u.response_1.doorbell_offset; 17048 wq->db_regaddr = bar_memmap_p + db_offset; 17049 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id, 17050 &wq_create->u.response_1); 17051 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar, 17052 &wq_create->u.response_1); 17053 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 17054 dpp_barset); 17055 if (!bar_memmap_p) { 17056 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17057 "3268 WQ[%d] failed to memmap " 17058 "pci barset:x%x\n", 
					wq->queue_id, dpp_barset);
				status = -ENOMEM;
				goto out;
			}
			dpp_offset = wq_create->u.response_1.dpp_offset;
			wq->dpp_regaddr = bar_memmap_p + dpp_offset;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3271 WQ[%d]: barset:x%x, offset:x%x, "
					"dpp_id:x%x dpp_barset:x%x "
					"dpp_offset:x%x\n",
					wq->queue_id, pci_barset, db_offset,
					wq->dpp_id, dpp_barset, dpp_offset);

#ifdef CONFIG_X86
			/* Enable combined writes for DPP aperture */
			pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
			rc = set_memory_wc(pg_addr, 1);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3272 Cannot setup Combined "
						"Write on WQ[%d] - disable DPP\n",
						wq->queue_id);
				phba->cfg_enable_dpp = 0;
			}
#else
			phba->cfg_enable_dpp = 0;
#endif
		} else
			wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
	}
	wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
	if (wq->pring == NULL) {
		status = -ENOMEM;
		goto out;
	}
	wq->type = LPFC_WQ;
	wq->assoc_qid = cq->queue_id;
	wq->subtype = subtype;
	wq->host_index = 0;
	wq->hba_index = 0;
	wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;

	/* link the wq onto the parent cq child list */
	list_add_tail(&wq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_rq_create - Create a Receive Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrq: The queue structure to use to create the header receive queue.
 * @drq: The queue structure to use to create the data receive queue.
 * @cq: The completion queue to bind this receive queue pair to.
 * @subtype: The subtype of the queues, indicating their functionality.
 *
 * This function creates a receive buffer queue pair, as detailed in @hrq
 * and @drq, on a port described by @phba, by sending a RQ_CREATE mailbox
 * command to the HBA.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The @hrq
 * and @drq structs are used to get the entry counts that are necessary to
 * determine the number of pages to use for each queue. The @cq indicates
 * which completion queue the buffers posted to these queues are bound to.
 * This function sends the RQ_CREATE mailbox command to the HBA to set up
 * the receive queue pair; the command is issued in polled mode, so the
 * routine waits for it to finish before continuing.
 *
 * On success this function returns zero. If unable to allocate enough
 * memory it returns -ENOMEM; if the queue-create mailbox command fails it
 * returns -ENXIO.
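 * If the header and data receive queues are not sized identically
 * (hrq->entry_count != drq->entry_count), it returns -EINVAL.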
17131 **/ 17132 int 17133 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, 17134 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) 17135 { 17136 struct lpfc_mbx_rq_create *rq_create; 17137 struct lpfc_dmabuf *dmabuf; 17138 LPFC_MBOXQ_t *mbox; 17139 int rc, length, status = 0; 17140 uint32_t shdr_status, shdr_add_status; 17141 union lpfc_sli4_cfg_shdr *shdr; 17142 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 17143 void __iomem *bar_memmap_p; 17144 uint32_t db_offset; 17145 uint16_t pci_barset; 17146 17147 /* sanity check on queue memory */ 17148 if (!hrq || !drq || !cq) 17149 return -ENODEV; 17150 if (!phba->sli4_hba.pc_sli4_params.supported) 17151 hw_page_size = SLI4_PAGE_SIZE; 17152 17153 if (hrq->entry_count != drq->entry_count) 17154 return -EINVAL; 17155 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 17156 if (!mbox) 17157 return -ENOMEM; 17158 length = (sizeof(struct lpfc_mbx_rq_create) - 17159 sizeof(struct lpfc_sli4_cfg_mhdr)); 17160 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 17161 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 17162 length, LPFC_SLI4_MBX_EMBED); 17163 rq_create = &mbox->u.mqe.un.rq_create; 17164 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 17165 bf_set(lpfc_mbox_hdr_version, &shdr->request, 17166 phba->sli4_hba.pc_sli4_params.rqv); 17167 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 17168 bf_set(lpfc_rq_context_rqe_count_1, 17169 &rq_create->u.request.context, 17170 hrq->entry_count); 17171 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; 17172 bf_set(lpfc_rq_context_rqe_size, 17173 &rq_create->u.request.context, 17174 LPFC_RQE_SIZE_8); 17175 bf_set(lpfc_rq_context_page_size, 17176 &rq_create->u.request.context, 17177 LPFC_RQ_PAGE_SIZE_4096); 17178 } else { 17179 switch (hrq->entry_count) { 17180 default: 17181 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17182 "2535 Unsupported RQ count. (%d)\n", 17183 hrq->entry_count); 17184 if (hrq->entry_count < 512) { 17185 status = -EINVAL; 17186 goto out; 17187 } 17188 fallthrough; /* otherwise default to smallest count */ 17189 case 512: 17190 bf_set(lpfc_rq_context_rqe_count, 17191 &rq_create->u.request.context, 17192 LPFC_RQ_RING_SIZE_512); 17193 break; 17194 case 1024: 17195 bf_set(lpfc_rq_context_rqe_count, 17196 &rq_create->u.request.context, 17197 LPFC_RQ_RING_SIZE_1024); 17198 break; 17199 case 2048: 17200 bf_set(lpfc_rq_context_rqe_count, 17201 &rq_create->u.request.context, 17202 LPFC_RQ_RING_SIZE_2048); 17203 break; 17204 case 4096: 17205 bf_set(lpfc_rq_context_rqe_count, 17206 &rq_create->u.request.context, 17207 LPFC_RQ_RING_SIZE_4096); 17208 break; 17209 } 17210 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 17211 LPFC_HDR_BUF_SIZE); 17212 } 17213 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 17214 cq->queue_id); 17215 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 17216 hrq->page_count); 17217 list_for_each_entry(dmabuf, &hrq->page_list, list) { 17218 memset(dmabuf->virt, 0, hw_page_size); 17219 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 17220 putPaddrLow(dmabuf->phys); 17221 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 17222 putPaddrHigh(dmabuf->phys); 17223 } 17224 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 17225 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 17226 17227 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 17228 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 17229 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 17230 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 17231 if (shdr_status || shdr_add_status || rc) { 17232 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17233 "2504 RQ_CREATE mailbox failed with " 17234 "status x%x add_status x%x, mbx status x%x\n", 17235 shdr_status, shdr_add_status, rc); 17236 status = -ENXIO; 17237 goto out; 17238 } 17239 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 17240 if (hrq->queue_id == 0xFFFF) { 17241 status = -ENXIO; 17242 goto out; 17243 } 17244 17245 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 17246 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format, 17247 &rq_create->u.response); 17248 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) && 17249 (hrq->db_format != LPFC_DB_RING_FORMAT)) { 17250 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17251 "3262 RQ [%d] doorbell format not " 17252 "supported: x%x\n", hrq->queue_id, 17253 hrq->db_format); 17254 status = -EINVAL; 17255 goto out; 17256 } 17257 17258 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set, 17259 &rq_create->u.response); 17260 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); 17261 if (!bar_memmap_p) { 17262 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17263 "3269 RQ[%d] failed to memmap pci " 17264 "barset:x%x\n", hrq->queue_id, 17265 pci_barset); 17266 status = -ENOMEM; 17267 goto out; 17268 } 17269 17270 db_offset = rq_create->u.response.doorbell_offset; 17271 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) && 17272 (db_offset != LPFC_ULP1_RQ_DOORBELL)) { 17273 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17274 "3270 RQ[%d] doorbell offset not " 17275 "supported: x%x\n", hrq->queue_id, 17276 db_offset); 17277 status = -EINVAL; 17278 goto out; 17279 } 17280 hrq->db_regaddr = bar_memmap_p + db_offset; 17281 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 17282 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, " 17283 "format:x%x\n", hrq->queue_id, pci_barset, 17284 db_offset, hrq->db_format); 17285 } else { 17286 hrq->db_format = LPFC_DB_RING_FORMAT; 17287 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 17288 } 17289 hrq->type = LPFC_HRQ; 17290 hrq->assoc_qid = cq->queue_id; 17291 hrq->subtype = subtype; 17292 hrq->host_index = 0; 17293 hrq->hba_index = 0; 17294 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 17295 17296 /* now create the data queue */ 17297 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 17298 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 17299 length, LPFC_SLI4_MBX_EMBED); 17300 bf_set(lpfc_mbox_hdr_version, &shdr->request, 17301 phba->sli4_hba.pc_sli4_params.rqv); 17302 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 17303 bf_set(lpfc_rq_context_rqe_count_1, 17304 &rq_create->u.request.context, hrq->entry_count); 17305 if (subtype == LPFC_NVMET) 17306 rq_create->u.request.context.buffer_size = 17307 LPFC_NVMET_DATA_BUF_SIZE; 17308 else 17309 rq_create->u.request.context.buffer_size = 17310 LPFC_DATA_BUF_SIZE; 17311 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, 17312 LPFC_RQE_SIZE_8); 17313 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, 17314 (PAGE_SIZE/SLI4_PAGE_SIZE)); 17315 } else { 17316 switch (drq->entry_count) { 17317 default: 17318 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17319 "2536 Unsupported RQ count. 
(%d)\n", 17320 drq->entry_count); 17321 if (drq->entry_count < 512) { 17322 status = -EINVAL; 17323 goto out; 17324 } 17325 fallthrough; /* otherwise default to smallest count */ 17326 case 512: 17327 bf_set(lpfc_rq_context_rqe_count, 17328 &rq_create->u.request.context, 17329 LPFC_RQ_RING_SIZE_512); 17330 break; 17331 case 1024: 17332 bf_set(lpfc_rq_context_rqe_count, 17333 &rq_create->u.request.context, 17334 LPFC_RQ_RING_SIZE_1024); 17335 break; 17336 case 2048: 17337 bf_set(lpfc_rq_context_rqe_count, 17338 &rq_create->u.request.context, 17339 LPFC_RQ_RING_SIZE_2048); 17340 break; 17341 case 4096: 17342 bf_set(lpfc_rq_context_rqe_count, 17343 &rq_create->u.request.context, 17344 LPFC_RQ_RING_SIZE_4096); 17345 break; 17346 } 17347 if (subtype == LPFC_NVMET) 17348 bf_set(lpfc_rq_context_buf_size, 17349 &rq_create->u.request.context, 17350 LPFC_NVMET_DATA_BUF_SIZE); 17351 else 17352 bf_set(lpfc_rq_context_buf_size, 17353 &rq_create->u.request.context, 17354 LPFC_DATA_BUF_SIZE); 17355 } 17356 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 17357 cq->queue_id); 17358 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 17359 drq->page_count); 17360 list_for_each_entry(dmabuf, &drq->page_list, list) { 17361 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 17362 putPaddrLow(dmabuf->phys); 17363 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 17364 putPaddrHigh(dmabuf->phys); 17365 } 17366 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 17367 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 17368 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 17369 /* The IOCTL status is embedded in the mailbox subheader. */ 17370 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 17371 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 17372 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 17373 if (shdr_status || shdr_add_status || rc) { 17374 status = -ENXIO; 17375 goto out; 17376 } 17377 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 17378 if (drq->queue_id == 0xFFFF) { 17379 status = -ENXIO; 17380 goto out; 17381 } 17382 drq->type = LPFC_DRQ; 17383 drq->assoc_qid = cq->queue_id; 17384 drq->subtype = subtype; 17385 drq->host_index = 0; 17386 drq->hba_index = 0; 17387 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 17388 17389 /* link the header and data RQs onto the parent cq child list */ 17390 list_add_tail(&hrq->list, &cq->child_list); 17391 list_add_tail(&drq->list, &cq->child_list); 17392 17393 out: 17394 mempool_free(mbox, phba->mbox_mem_pool); 17395 return status; 17396 } 17397 17398 /** 17399 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA 17400 * @phba: HBA structure that indicates port to create a queue on. 17401 * @hrqp: The queue structure array to use to create the header receive queues. 17402 * @drqp: The queue structure array to use to create the data receive queues. 17403 * @cqp: The completion queue array to bind these receive queues to. 17404 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc). 17405 * 17406 * This function creates a receive buffer queue pair , as detailed in @hrq and 17407 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 17408 * to the HBA. 17409 * 17410 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 17411 * struct is used to get the entry count that is necessary to determine the 17412 * number of pages to use for this queue. 
The @cq is used to indicate which 17413 * completion queue to bind received buffers that are posted to these queues to. 17414 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 17415 * receive queue pair. This function is asynchronous and will wait for the 17416 * mailbox command to finish before continuing. 17417 * 17418 * On success this function will return a zero. If unable to allocate enough 17419 * memory this function will return -ENOMEM. If the queue create mailbox command 17420 * fails this function will return -ENXIO. 17421 **/ 17422 int 17423 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, 17424 struct lpfc_queue **drqp, struct lpfc_queue **cqp, 17425 uint32_t subtype) 17426 { 17427 struct lpfc_queue *hrq, *drq, *cq; 17428 struct lpfc_mbx_rq_create_v2 *rq_create; 17429 struct lpfc_dmabuf *dmabuf; 17430 LPFC_MBOXQ_t *mbox; 17431 int rc, length, alloclen, status = 0; 17432 int cnt, idx, numrq, page_idx = 0; 17433 uint32_t shdr_status, shdr_add_status; 17434 union lpfc_sli4_cfg_shdr *shdr; 17435 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 17436 17437 numrq = phba->cfg_nvmet_mrq; 17438 /* sanity check on array memory */ 17439 if (!hrqp || !drqp || !cqp || !numrq) 17440 return -ENODEV; 17441 if (!phba->sli4_hba.pc_sli4_params.supported) 17442 hw_page_size = SLI4_PAGE_SIZE; 17443 17444 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 17445 if (!mbox) 17446 return -ENOMEM; 17447 17448 length = sizeof(struct lpfc_mbx_rq_create_v2); 17449 length += ((2 * numrq * hrqp[0]->page_count) * 17450 sizeof(struct dma_address)); 17451 17452 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 17453 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length, 17454 LPFC_SLI4_MBX_NEMBED); 17455 if (alloclen < length) { 17456 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17457 "3099 Allocated DMA memory size (%d) is " 17458 "less than the requested DMA memory size " 17459 "(%d)\n", alloclen, length); 17460 status = -ENOMEM; 17461 goto out; 17462 } 17463 17464 17465 17466 rq_create = mbox->sge_array->addr[0]; 17467 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr; 17468 17469 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2); 17470 cnt = 0; 17471 17472 for (idx = 0; idx < numrq; idx++) { 17473 hrq = hrqp[idx]; 17474 drq = drqp[idx]; 17475 cq = cqp[idx]; 17476 17477 /* sanity check on queue memory */ 17478 if (!hrq || !drq || !cq) { 17479 status = -ENODEV; 17480 goto out; 17481 } 17482 17483 if (hrq->entry_count != drq->entry_count) { 17484 status = -EINVAL; 17485 goto out; 17486 } 17487 17488 if (idx == 0) { 17489 bf_set(lpfc_mbx_rq_create_num_pages, 17490 &rq_create->u.request, 17491 hrq->page_count); 17492 bf_set(lpfc_mbx_rq_create_rq_cnt, 17493 &rq_create->u.request, (numrq * 2)); 17494 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request, 17495 1); 17496 bf_set(lpfc_rq_context_base_cq, 17497 &rq_create->u.request.context, 17498 cq->queue_id); 17499 bf_set(lpfc_rq_context_data_size, 17500 &rq_create->u.request.context, 17501 LPFC_NVMET_DATA_BUF_SIZE); 17502 bf_set(lpfc_rq_context_hdr_size, 17503 &rq_create->u.request.context, 17504 LPFC_HDR_BUF_SIZE); 17505 bf_set(lpfc_rq_context_rqe_count_1, 17506 &rq_create->u.request.context, 17507 hrq->entry_count); 17508 bf_set(lpfc_rq_context_rqe_size, 17509 &rq_create->u.request.context, 17510 LPFC_RQE_SIZE_8); 17511 bf_set(lpfc_rq_context_page_size, 17512 &rq_create->u.request.context, 17513 (PAGE_SIZE/SLI4_PAGE_SIZE)); 17514 } 17515 rc = 0; 17516 
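		/*
		 * Post the header and then the data pages for this RQ pair.
		 * Every queue in the set shares the one non-embedded page
		 * array, so page_idx carries the running offset into that
		 * array while dmabuf->buffer_tag indexes pages within the
		 * current queue.
		 */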
list_for_each_entry(dmabuf, &hrq->page_list, list) { 17517 memset(dmabuf->virt, 0, hw_page_size); 17518 cnt = page_idx + dmabuf->buffer_tag; 17519 rq_create->u.request.page[cnt].addr_lo = 17520 putPaddrLow(dmabuf->phys); 17521 rq_create->u.request.page[cnt].addr_hi = 17522 putPaddrHigh(dmabuf->phys); 17523 rc++; 17524 } 17525 page_idx += rc; 17526 17527 rc = 0; 17528 list_for_each_entry(dmabuf, &drq->page_list, list) { 17529 memset(dmabuf->virt, 0, hw_page_size); 17530 cnt = page_idx + dmabuf->buffer_tag; 17531 rq_create->u.request.page[cnt].addr_lo = 17532 putPaddrLow(dmabuf->phys); 17533 rq_create->u.request.page[cnt].addr_hi = 17534 putPaddrHigh(dmabuf->phys); 17535 rc++; 17536 } 17537 page_idx += rc; 17538 17539 hrq->db_format = LPFC_DB_RING_FORMAT; 17540 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 17541 hrq->type = LPFC_HRQ; 17542 hrq->assoc_qid = cq->queue_id; 17543 hrq->subtype = subtype; 17544 hrq->host_index = 0; 17545 hrq->hba_index = 0; 17546 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 17547 17548 drq->db_format = LPFC_DB_RING_FORMAT; 17549 drq->db_regaddr = phba->sli4_hba.RQDBregaddr; 17550 drq->type = LPFC_DRQ; 17551 drq->assoc_qid = cq->queue_id; 17552 drq->subtype = subtype; 17553 drq->host_index = 0; 17554 drq->hba_index = 0; 17555 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 17556 17557 list_add_tail(&hrq->list, &cq->child_list); 17558 list_add_tail(&drq->list, &cq->child_list); 17559 } 17560 17561 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 17562 /* The IOCTL status is embedded in the mailbox subheader. */ 17563 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 17564 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 17565 if (shdr_status || shdr_add_status || rc) { 17566 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17567 "3120 RQ_CREATE mailbox failed with " 17568 "status x%x add_status x%x, mbx status x%x\n", 17569 shdr_status, shdr_add_status, rc); 17570 status = -ENXIO; 17571 goto out; 17572 } 17573 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 17574 if (rc == 0xFFFF) { 17575 status = -ENXIO; 17576 goto out; 17577 } 17578 17579 /* Initialize all RQs with associated queue id */ 17580 for (idx = 0; idx < numrq; idx++) { 17581 hrq = hrqp[idx]; 17582 hrq->queue_id = rc + (2 * idx); 17583 drq = drqp[idx]; 17584 drq->queue_id = rc + (2 * idx) + 1; 17585 } 17586 17587 out: 17588 lpfc_sli4_mbox_cmd_free(phba, mbox); 17589 return status; 17590 } 17591 17592 /** 17593 * lpfc_eq_destroy - Destroy an event Queue on the HBA 17594 * @phba: HBA structure that indicates port to destroy a queue on. 17595 * @eq: The queue structure associated with the queue to destroy. 17596 * 17597 * This function destroys a queue, as detailed in @eq by sending an mailbox 17598 * command, specific to the type of queue, to the HBA. 17599 * 17600 * The @eq struct is used to get the queue ID of the queue to destroy. 17601 * 17602 * On success this function will return a zero. If the queue destroy mailbox 17603 * command fails this function will return -ENXIO. 
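 *
 * Example (illustrative sketch; assumes the EQ's child CQs were already
 * destroyed, as the driver's own teardown path does before releasing
 * the event queue):
 *
 *	rc = lpfc_eq_destroy(phba, eq);
 *	if (rc)
 *		...port rejected EQ_DESTROY; rc is -ENXIO...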
17604 **/ 17605 int 17606 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) 17607 { 17608 LPFC_MBOXQ_t *mbox; 17609 int rc, length, status = 0; 17610 uint32_t shdr_status, shdr_add_status; 17611 union lpfc_sli4_cfg_shdr *shdr; 17612 17613 /* sanity check on queue memory */ 17614 if (!eq) 17615 return -ENODEV; 17616 17617 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); 17618 if (!mbox) 17619 return -ENOMEM; 17620 length = (sizeof(struct lpfc_mbx_eq_destroy) - 17621 sizeof(struct lpfc_sli4_cfg_mhdr)); 17622 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 17623 LPFC_MBOX_OPCODE_EQ_DESTROY, 17624 length, LPFC_SLI4_MBX_EMBED); 17625 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, 17626 eq->queue_id); 17627 mbox->vport = eq->phba->pport; 17628 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 17629 17630 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); 17631 /* The IOCTL status is embedded in the mailbox subheader. */ 17632 shdr = (union lpfc_sli4_cfg_shdr *) 17633 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; 17634 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 17635 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 17636 if (shdr_status || shdr_add_status || rc) { 17637 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17638 "2505 EQ_DESTROY mailbox failed with " 17639 "status x%x add_status x%x, mbx status x%x\n", 17640 shdr_status, shdr_add_status, rc); 17641 status = -ENXIO; 17642 } 17643 17644 /* Remove eq from any list */ 17645 list_del_init(&eq->list); 17646 mempool_free(mbox, eq->phba->mbox_mem_pool); 17647 return status; 17648 } 17649 17650 /** 17651 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA 17652 * @phba: HBA structure that indicates port to destroy a queue on. 17653 * @cq: The queue structure associated with the queue to destroy. 17654 * 17655 * This function destroys a queue, as detailed in @cq by sending an mailbox 17656 * command, specific to the type of queue, to the HBA. 17657 * 17658 * The @cq struct is used to get the queue ID of the queue to destroy. 17659 * 17660 * On success this function will return a zero. If the queue destroy mailbox 17661 * command fails this function will return -ENXIO. 17662 **/ 17663 int 17664 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) 17665 { 17666 LPFC_MBOXQ_t *mbox; 17667 int rc, length, status = 0; 17668 uint32_t shdr_status, shdr_add_status; 17669 union lpfc_sli4_cfg_shdr *shdr; 17670 17671 /* sanity check on queue memory */ 17672 if (!cq) 17673 return -ENODEV; 17674 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); 17675 if (!mbox) 17676 return -ENOMEM; 17677 length = (sizeof(struct lpfc_mbx_cq_destroy) - 17678 sizeof(struct lpfc_sli4_cfg_mhdr)); 17679 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 17680 LPFC_MBOX_OPCODE_CQ_DESTROY, 17681 length, LPFC_SLI4_MBX_EMBED); 17682 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, 17683 cq->queue_id); 17684 mbox->vport = cq->phba->pport; 17685 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 17686 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); 17687 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 17688 shdr = (union lpfc_sli4_cfg_shdr *) 17689 &mbox->u.mqe.un.wq_create.header.cfg_shdr; 17690 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 17691 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 17692 if (shdr_status || shdr_add_status || rc) { 17693 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17694 "2506 CQ_DESTROY mailbox failed with " 17695 "status x%x add_status x%x, mbx status x%x\n", 17696 shdr_status, shdr_add_status, rc); 17697 status = -ENXIO; 17698 } 17699 /* Remove cq from any list */ 17700 list_del_init(&cq->list); 17701 mempool_free(mbox, cq->phba->mbox_mem_pool); 17702 return status; 17703 } 17704 17705 /** 17706 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA 17707 * @phba: HBA structure that indicates port to destroy a queue on. 17708 * @mq: The queue structure associated with the queue to destroy. 17709 * 17710 * This function destroys a queue, as detailed in @mq by sending an mailbox 17711 * command, specific to the type of queue, to the HBA. 17712 * 17713 * The @mq struct is used to get the queue ID of the queue to destroy. 17714 * 17715 * On success this function will return a zero. If the queue destroy mailbox 17716 * command fails this function will return -ENXIO. 17717 **/ 17718 int 17719 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) 17720 { 17721 LPFC_MBOXQ_t *mbox; 17722 int rc, length, status = 0; 17723 uint32_t shdr_status, shdr_add_status; 17724 union lpfc_sli4_cfg_shdr *shdr; 17725 17726 /* sanity check on queue memory */ 17727 if (!mq) 17728 return -ENODEV; 17729 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); 17730 if (!mbox) 17731 return -ENOMEM; 17732 length = (sizeof(struct lpfc_mbx_mq_destroy) - 17733 sizeof(struct lpfc_sli4_cfg_mhdr)); 17734 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 17735 LPFC_MBOX_OPCODE_MQ_DESTROY, 17736 length, LPFC_SLI4_MBX_EMBED); 17737 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, 17738 mq->queue_id); 17739 mbox->vport = mq->phba->pport; 17740 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 17741 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL); 17742 /* The IOCTL status is embedded in the mailbox subheader. */ 17743 shdr = (union lpfc_sli4_cfg_shdr *) 17744 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; 17745 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 17746 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 17747 if (shdr_status || shdr_add_status || rc) { 17748 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17749 "2507 MQ_DESTROY mailbox failed with " 17750 "status x%x add_status x%x, mbx status x%x\n", 17751 shdr_status, shdr_add_status, rc); 17752 status = -ENXIO; 17753 } 17754 /* Remove mq from any list */ 17755 list_del_init(&mq->list); 17756 mempool_free(mbox, mq->phba->mbox_mem_pool); 17757 return status; 17758 } 17759 17760 /** 17761 * lpfc_wq_destroy - Destroy a Work Queue on the HBA 17762 * @phba: HBA structure that indicates port to destroy a queue on. 17763 * @wq: The queue structure associated with the queue to destroy. 17764 * 17765 * This function destroys a queue, as detailed in @wq by sending an mailbox 17766 * command, specific to the type of queue, to the HBA. 17767 * 17768 * The @wq struct is used to get the queue ID of the queue to destroy. 17769 * 17770 * On success this function will return a zero. If the queue destroy mailbox 17771 * command fails this function will return -ENXIO. 
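 *
 * Example (illustrative sketch; mirrors the driver's teardown order, in
 * which a WQ is destroyed before the CQ it was bound to):
 *
 *	lpfc_wq_destroy(phba, wq);
 *	lpfc_cq_destroy(phba, cq);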
17772 **/ 17773 int 17774 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) 17775 { 17776 LPFC_MBOXQ_t *mbox; 17777 int rc, length, status = 0; 17778 uint32_t shdr_status, shdr_add_status; 17779 union lpfc_sli4_cfg_shdr *shdr; 17780 17781 /* sanity check on queue memory */ 17782 if (!wq) 17783 return -ENODEV; 17784 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); 17785 if (!mbox) 17786 return -ENOMEM; 17787 length = (sizeof(struct lpfc_mbx_wq_destroy) - 17788 sizeof(struct lpfc_sli4_cfg_mhdr)); 17789 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 17790 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, 17791 length, LPFC_SLI4_MBX_EMBED); 17792 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, 17793 wq->queue_id); 17794 mbox->vport = wq->phba->pport; 17795 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 17796 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); 17797 shdr = (union lpfc_sli4_cfg_shdr *) 17798 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; 17799 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 17800 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 17801 if (shdr_status || shdr_add_status || rc) { 17802 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17803 "2508 WQ_DESTROY mailbox failed with " 17804 "status x%x add_status x%x, mbx status x%x\n", 17805 shdr_status, shdr_add_status, rc); 17806 status = -ENXIO; 17807 } 17808 /* Remove wq from any list */ 17809 list_del_init(&wq->list); 17810 kfree(wq->pring); 17811 wq->pring = NULL; 17812 mempool_free(mbox, wq->phba->mbox_mem_pool); 17813 return status; 17814 } 17815 17816 /** 17817 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA 17818 * @phba: HBA structure that indicates port to destroy a queue on. 17819 * @hrq: The queue structure associated with the queue to destroy. 17820 * @drq: The queue structure associated with the queue to destroy. 17821 * 17822 * This function destroys a queue, as detailed in @rq by sending an mailbox 17823 * command, specific to the type of queue, to the HBA. 17824 * 17825 * The @rq struct is used to get the queue ID of the queue to destroy. 17826 * 17827 * On success this function will return a zero. If the queue destroy mailbox 17828 * command fails this function will return -ENXIO. 17829 **/ 17830 int 17831 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, 17832 struct lpfc_queue *drq) 17833 { 17834 LPFC_MBOXQ_t *mbox; 17835 int rc, length, status = 0; 17836 uint32_t shdr_status, shdr_add_status; 17837 union lpfc_sli4_cfg_shdr *shdr; 17838 17839 /* sanity check on queue memory */ 17840 if (!hrq || !drq) 17841 return -ENODEV; 17842 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); 17843 if (!mbox) 17844 return -ENOMEM; 17845 length = (sizeof(struct lpfc_mbx_rq_destroy) - 17846 sizeof(struct lpfc_sli4_cfg_mhdr)); 17847 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 17848 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, 17849 length, LPFC_SLI4_MBX_EMBED); 17850 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 17851 hrq->queue_id); 17852 mbox->vport = hrq->phba->pport; 17853 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 17854 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); 17855 /* The IOCTL status is embedded in the mailbox subheader. 
 */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2509 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		mempool_free(mbox, hrq->phba->mbox_mem_pool);
		return -ENXIO;
	}
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       drq->queue_id);
	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2510 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	list_del_init(&hrq->list);
	list_del_init(&drq->list);
	mempool_free(mbox, hrq->phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
 * @phba: pointer to lpfc hba data structure.
 * @pdma_phys_addr0: Physical address of the 1st SGL page.
 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
 * @xritag: the xritag that ties this io to the SGL pages.
 *
 * This routine will post the sgl pages for the IO that has the xritag
 * that is in the iocbq structure. The xritag is assigned during iocbq
 * creation and persists for as long as the driver is loaded.
 * If the caller has fewer than 256 scatter gather segments to map,
 * pdma_phys_addr1 should be 0.
 * If the caller needs to map more than 256 scatter gather segments,
 * pdma_phys_addr1 should be a valid physical address.
 * Physical addresses for SGLs must be 64 byte aligned.
 * If two SGL pages are mapped, the first must hold 256 entries; the
 * second can hold between 1 and 256 entries.
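 *
 * For example, with a 4KB SGL page and 16-byte SGEs, one page holds 256
 * entries, so an IO needing 300 scatter gather entries would pass the
 * first page in pdma_phys_addr0 and the page holding the remaining 44
 * entries in pdma_phys_addr1 (illustrative arithmetic; actual counts
 * depend on the configured SGL page and entry sizes).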
 *
 * Return codes:
 *	0 - Success
 *	-EINVAL, -ENOMEM, -ENXIO - Failure
 **/
int
lpfc_sli4_post_sgl(struct lpfc_hba *phba,
		   dma_addr_t pdma_phys_addr0,
		   dma_addr_t pdma_phys_addr1,
		   uint16_t xritag)
{
	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
	LPFC_MBOXQ_t *mbox;
	int rc;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;

	if (xritag == NO_XRI) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0364 Invalid param:\n");
		return -EINVAL;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
			sizeof(struct lpfc_mbx_post_sgl_pages) -
			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
				&mbox->u.mqe.un.post_sgl_pages;
	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (!phba->sli4_hba.intr_enable)
		mempool_free(mbox, phba->mbox_mem_pool);
	else if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2511 POST_SGL mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		/* Return the documented failure code instead of silently
		 * reporting success on a rejected POST_SGL.
		 */
		return -ENXIO;
	}
	return 0;
}

/**
 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the next free logical xri from
 * the xri_bmask bitmap maintained by the driver and mark it in use.
 *
 * Returns
 *	An available xri in the range 0 <= xri < max_xri if successful;
 *	NO_XRI if no xris are available.
 **/
static uint16_t
lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
{
	unsigned long xri;

	/*
	 * Fetch the next logical xri.  Because this index is logical,
	 * the driver starts at 0 each time.
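	 * Both the bitmap scan/set and the xri_used accounting are
	 * serialized by hbalock, so allocation is safe against a
	 * concurrent lpfc_sli4_free_xri().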
	 */
	spin_lock_irq(&phba->hbalock);
	xri = find_first_zero_bit(phba->sli4_hba.xri_bmask,
				  phba->sli4_hba.max_cfg_param.max_xri);
	if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
		spin_unlock_irq(&phba->hbalock);
		return NO_XRI;
	} else {
		set_bit(xri, phba->sli4_hba.xri_bmask);
		phba->sli4_hba.max_cfg_param.xri_used++;
	}
	spin_unlock_irq(&phba->hbalock);
	return xri;
}

/**
 * __lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver.
 **/
static void
__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
		phba->sli4_hba.max_cfg_param.xri_used--;
	}
}

/**
 * lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver.
 **/
void
lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_xri(phba, xri);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_next_xritag - Get an xritag for the io
 * @phba: Pointer to HBA context object.
 *
 * This function gets an xritag for the iocb. If there is no unused
 * xritag it will return NO_XRI (0xffff); otherwise it returns the
 * allocated xritag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
	uint16_t xri_index;

	xri_index = lpfc_sli4_alloc_xri(phba);
	if (xri_index == NO_XRI)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2004 Failed to allocate XRI. Last XRITAG is %d"
				" Max XRI is %d, Used XRI is %d\n",
				xri_index,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.xri_used);
	return xri_index;
}

/**
 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
 * @phba: pointer to lpfc hba data structure.
 * @post_sgl_list: pointer to els sgl entry list.
 * @post_cnt: number of els sgl entries on the list.
 *
 * This routine is invoked to post a block of driver's sgl pages to the
 * HBA using non-embedded mailbox command. No Lock is held. This routine
 * is only called when the driver is loading and after all IO has been
 * stopped.
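 *
 * Sizing note (sketch): each sgl_page_pairs entry is 16 bytes and the
 * whole request must fit in one SLI4_PAGE_SIZE mailbox page, so
 * @post_cnt is bounded to roughly 250 pairs per call; the exact limit
 * depends on the config header overhead.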
18079 **/ 18080 static int 18081 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba, 18082 struct list_head *post_sgl_list, 18083 int post_cnt) 18084 { 18085 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 18086 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 18087 struct sgl_page_pairs *sgl_pg_pairs; 18088 void *viraddr; 18089 LPFC_MBOXQ_t *mbox; 18090 uint32_t reqlen, alloclen, pg_pairs; 18091 uint32_t mbox_tmo; 18092 uint16_t xritag_start = 0; 18093 int rc = 0; 18094 uint32_t shdr_status, shdr_add_status; 18095 union lpfc_sli4_cfg_shdr *shdr; 18096 18097 reqlen = post_cnt * sizeof(struct sgl_page_pairs) + 18098 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 18099 if (reqlen > SLI4_PAGE_SIZE) { 18100 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 18101 "2559 Block sgl registration required DMA " 18102 "size (%d) great than a page\n", reqlen); 18103 return -ENOMEM; 18104 } 18105 18106 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18107 if (!mbox) 18108 return -ENOMEM; 18109 18110 /* Allocate DMA memory and set up the non-embedded mailbox command */ 18111 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 18112 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 18113 LPFC_SLI4_MBX_NEMBED); 18114 18115 if (alloclen < reqlen) { 18116 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 18117 "0285 Allocated DMA memory size (%d) is " 18118 "less than the requested DMA memory " 18119 "size (%d)\n", alloclen, reqlen); 18120 lpfc_sli4_mbox_cmd_free(phba, mbox); 18121 return -ENOMEM; 18122 } 18123 /* Set up the SGL pages in the non-embedded DMA pages */ 18124 viraddr = mbox->sge_array->addr[0]; 18125 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 18126 sgl_pg_pairs = &sgl->sgl_pg_pairs; 18127 18128 pg_pairs = 0; 18129 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) { 18130 /* Set up the sge entry */ 18131 sgl_pg_pairs->sgl_pg0_addr_lo = 18132 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 18133 sgl_pg_pairs->sgl_pg0_addr_hi = 18134 cpu_to_le32(putPaddrHigh(sglq_entry->phys)); 18135 sgl_pg_pairs->sgl_pg1_addr_lo = 18136 cpu_to_le32(putPaddrLow(0)); 18137 sgl_pg_pairs->sgl_pg1_addr_hi = 18138 cpu_to_le32(putPaddrHigh(0)); 18139 18140 /* Keep the first xritag on the list */ 18141 if (pg_pairs == 0) 18142 xritag_start = sglq_entry->sli4_xritag; 18143 sgl_pg_pairs++; 18144 pg_pairs++; 18145 } 18146 18147 /* Complete initialization and perform endian conversion. 
*/ 18148 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 18149 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt); 18150 sgl->word0 = cpu_to_le32(sgl->word0); 18151 18152 if (!phba->sli4_hba.intr_enable) 18153 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 18154 else { 18155 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 18156 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 18157 } 18158 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 18159 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 18160 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 18161 if (!phba->sli4_hba.intr_enable) 18162 lpfc_sli4_mbox_cmd_free(phba, mbox); 18163 else if (rc != MBX_TIMEOUT) 18164 lpfc_sli4_mbox_cmd_free(phba, mbox); 18165 if (shdr_status || shdr_add_status || rc) { 18166 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 18167 "2513 POST_SGL_BLOCK mailbox command failed " 18168 "status x%x add_status x%x mbx status x%x\n", 18169 shdr_status, shdr_add_status, rc); 18170 rc = -ENXIO; 18171 } 18172 return rc; 18173 } 18174 18175 /** 18176 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware 18177 * @phba: pointer to lpfc hba data structure. 18178 * @nblist: pointer to nvme buffer list. 18179 * @count: number of scsi buffers on the list. 18180 * 18181 * This routine is invoked to post a block of @count scsi sgl pages from a 18182 * SCSI buffer list @nblist to the HBA using non-embedded mailbox command. 18183 * No Lock is held. 18184 * 18185 **/ 18186 static int 18187 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist, 18188 int count) 18189 { 18190 struct lpfc_io_buf *lpfc_ncmd; 18191 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 18192 struct sgl_page_pairs *sgl_pg_pairs; 18193 void *viraddr; 18194 LPFC_MBOXQ_t *mbox; 18195 uint32_t reqlen, alloclen, pg_pairs; 18196 uint32_t mbox_tmo; 18197 uint16_t xritag_start = 0; 18198 int rc = 0; 18199 uint32_t shdr_status, shdr_add_status; 18200 dma_addr_t pdma_phys_bpl1; 18201 union lpfc_sli4_cfg_shdr *shdr; 18202 18203 /* Calculate the requested length of the dma memory */ 18204 reqlen = count * sizeof(struct sgl_page_pairs) + 18205 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 18206 if (reqlen > SLI4_PAGE_SIZE) { 18207 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 18208 "6118 Block sgl registration required DMA " 18209 "size (%d) great than a page\n", reqlen); 18210 return -ENOMEM; 18211 } 18212 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18213 if (!mbox) { 18214 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 18215 "6119 Failed to allocate mbox cmd memory\n"); 18216 return -ENOMEM; 18217 } 18218 18219 /* Allocate DMA memory and set up the non-embedded mailbox command */ 18220 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 18221 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 18222 reqlen, LPFC_SLI4_MBX_NEMBED); 18223 18224 if (alloclen < reqlen) { 18225 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 18226 "6120 Allocated DMA memory size (%d) is " 18227 "less than the requested DMA memory " 18228 "size (%d)\n", alloclen, reqlen); 18229 lpfc_sli4_mbox_cmd_free(phba, mbox); 18230 return -ENOMEM; 18231 } 18232 18233 /* Get the first SGE entry from the non-embedded DMA memory */ 18234 viraddr = mbox->sge_array->addr[0]; 18235 18236 /* Set up the SGL pages in the non-embedded DMA pages */ 18237 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 18238 sgl_pg_pairs = &sgl->sgl_pg_pairs; 18239 18240 pg_pairs = 0; 18241 list_for_each_entry(lpfc_ncmd, nblist, 
list) { 18242 /* Set up the sge entry */ 18243 sgl_pg_pairs->sgl_pg0_addr_lo = 18244 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl)); 18245 sgl_pg_pairs->sgl_pg0_addr_hi = 18246 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl)); 18247 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 18248 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl + 18249 SGL_PAGE_SIZE; 18250 else 18251 pdma_phys_bpl1 = 0; 18252 sgl_pg_pairs->sgl_pg1_addr_lo = 18253 cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 18254 sgl_pg_pairs->sgl_pg1_addr_hi = 18255 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 18256 /* Keep the first xritag on the list */ 18257 if (pg_pairs == 0) 18258 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag; 18259 sgl_pg_pairs++; 18260 pg_pairs++; 18261 } 18262 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 18263 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 18264 /* Perform endian conversion if necessary */ 18265 sgl->word0 = cpu_to_le32(sgl->word0); 18266 18267 if (!phba->sli4_hba.intr_enable) { 18268 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 18269 } else { 18270 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 18271 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 18272 } 18273 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr; 18274 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 18275 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 18276 if (!phba->sli4_hba.intr_enable) 18277 lpfc_sli4_mbox_cmd_free(phba, mbox); 18278 else if (rc != MBX_TIMEOUT) 18279 lpfc_sli4_mbox_cmd_free(phba, mbox); 18280 if (shdr_status || shdr_add_status || rc) { 18281 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 18282 "6125 POST_SGL_BLOCK mailbox command failed " 18283 "status x%x add_status x%x mbx status x%x\n", 18284 shdr_status, shdr_add_status, rc); 18285 rc = -ENXIO; 18286 } 18287 return rc; 18288 } 18289 18290 /** 18291 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list 18292 * @phba: pointer to lpfc hba data structure. 18293 * @post_nblist: pointer to the nvme buffer list. 18294 * @sb_count: number of nvme buffers. 18295 * 18296 * This routine walks a list of nvme buffers that was passed in. It attempts 18297 * to construct blocks of nvme buffer sgls which contains contiguous xris and 18298 * uses the non-embedded SGL block post mailbox commands to post to the port. 18299 * For single NVME buffer sgl with non-contiguous xri, if any, it shall use 18300 * embedded SGL post mailbox command for posting. The @post_nblist passed in 18301 * must be local list, thus no lock is needed when manipulate the list. 18302 * 18303 * Returns: 0 = failure, non-zero number of successfully posted buffers. 
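 *
 * Example (illustrative sketch; the caller owns @post_nblist, so no
 * lock is taken while the list is walked):
 *
 *	LIST_HEAD(post_nblist);
 *	...add newly allocated lpfc_io_buf entries to post_nblist...
 *	num_posted = lpfc_sli4_post_io_sgl_list(phba, &post_nblist, cnt);
 *	if (!num_posted)
 *		...every buffer failed to post and was flagged
 *		   LPFC_SBUF_NOT_POSTED...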
18304 **/ 18305 int 18306 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba, 18307 struct list_head *post_nblist, int sb_count) 18308 { 18309 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; 18310 int status, sgl_size; 18311 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; 18312 dma_addr_t pdma_phys_sgl1; 18313 int last_xritag = NO_XRI; 18314 int cur_xritag; 18315 LIST_HEAD(prep_nblist); 18316 LIST_HEAD(blck_nblist); 18317 LIST_HEAD(nvme_nblist); 18318 18319 /* sanity check */ 18320 if (sb_count <= 0) 18321 return -EINVAL; 18322 18323 sgl_size = phba->cfg_sg_dma_buf_size; 18324 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) { 18325 list_del_init(&lpfc_ncmd->list); 18326 block_cnt++; 18327 if ((last_xritag != NO_XRI) && 18328 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) { 18329 /* a hole in xri block, form a sgl posting block */ 18330 list_splice_init(&prep_nblist, &blck_nblist); 18331 post_cnt = block_cnt - 1; 18332 /* prepare list for next posting block */ 18333 list_add_tail(&lpfc_ncmd->list, &prep_nblist); 18334 block_cnt = 1; 18335 } else { 18336 /* prepare list for next posting block */ 18337 list_add_tail(&lpfc_ncmd->list, &prep_nblist); 18338 /* enough sgls for non-embed sgl mbox command */ 18339 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { 18340 list_splice_init(&prep_nblist, &blck_nblist); 18341 post_cnt = block_cnt; 18342 block_cnt = 0; 18343 } 18344 } 18345 num_posting++; 18346 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; 18347 18348 /* end of repost sgl list condition for NVME buffers */ 18349 if (num_posting == sb_count) { 18350 if (post_cnt == 0) { 18351 /* last sgl posting block */ 18352 list_splice_init(&prep_nblist, &blck_nblist); 18353 post_cnt = block_cnt; 18354 } else if (block_cnt == 1) { 18355 /* last single sgl with non-contiguous xri */ 18356 if (sgl_size > SGL_PAGE_SIZE) 18357 pdma_phys_sgl1 = 18358 lpfc_ncmd->dma_phys_sgl + 18359 SGL_PAGE_SIZE; 18360 else 18361 pdma_phys_sgl1 = 0; 18362 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; 18363 status = lpfc_sli4_post_sgl( 18364 phba, lpfc_ncmd->dma_phys_sgl, 18365 pdma_phys_sgl1, cur_xritag); 18366 if (status) { 18367 /* Post error. Buffer unavailable. */ 18368 lpfc_ncmd->flags |= 18369 LPFC_SBUF_NOT_POSTED; 18370 } else { 18371 /* Post success. Bffer available. */ 18372 lpfc_ncmd->flags &= 18373 ~LPFC_SBUF_NOT_POSTED; 18374 lpfc_ncmd->status = IOSTAT_SUCCESS; 18375 num_posted++; 18376 } 18377 /* success, put on NVME buffer sgl list */ 18378 list_add_tail(&lpfc_ncmd->list, &nvme_nblist); 18379 } 18380 } 18381 18382 /* continue until a nembed page worth of sgls */ 18383 if (post_cnt == 0) 18384 continue; 18385 18386 /* post block of NVME buffer list sgls */ 18387 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist, 18388 post_cnt); 18389 18390 /* don't reset xirtag due to hole in xri block */ 18391 if (block_cnt == 0) 18392 last_xritag = NO_XRI; 18393 18394 /* reset NVME buffer post count for next round of posting */ 18395 post_cnt = 0; 18396 18397 /* put posted NVME buffer-sgl posted on NVME buffer sgl list */ 18398 while (!list_empty(&blck_nblist)) { 18399 list_remove_head(&blck_nblist, lpfc_ncmd, 18400 struct lpfc_io_buf, list); 18401 if (status) { 18402 /* Post error. Mark buffer unavailable. */ 18403 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED; 18404 } else { 18405 /* Post success, Mark buffer available. 
*/ 18406 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED; 18407 lpfc_ncmd->status = IOSTAT_SUCCESS; 18408 num_posted++; 18409 } 18410 list_add_tail(&lpfc_ncmd->list, &nvme_nblist); 18411 } 18412 } 18413 /* Push NVME buffers with sgl posted to the available list */ 18414 lpfc_io_buf_replenish(phba, &nvme_nblist); 18415 18416 return num_posted; 18417 } 18418 18419 /** 18420 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 18421 * @phba: pointer to lpfc_hba struct that the frame was received on 18422 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 18423 * 18424 * This function checks the fields in the @fc_hdr to see if the FC frame is a 18425 * valid type of frame that the LPFC driver will handle. This function will 18426 * return a zero if the frame is a valid frame or a non zero value when the 18427 * frame does not pass the check. 18428 **/ 18429 static int 18430 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) 18431 { 18432 /* make rctl_names static to save stack space */ 18433 struct fc_vft_header *fc_vft_hdr; 18434 uint32_t *header = (uint32_t *) fc_hdr; 18435 18436 #define FC_RCTL_MDS_DIAGS 0xF4 18437 18438 switch (fc_hdr->fh_r_ctl) { 18439 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 18440 case FC_RCTL_DD_SOL_DATA: /* solicited data */ 18441 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ 18442 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ 18443 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ 18444 case FC_RCTL_DD_DATA_DESC: /* data descriptor */ 18445 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ 18446 case FC_RCTL_DD_CMD_STATUS: /* command status */ 18447 case FC_RCTL_ELS_REQ: /* extended link services request */ 18448 case FC_RCTL_ELS_REP: /* extended link services reply */ 18449 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ 18450 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ 18451 case FC_RCTL_BA_ABTS: /* basic link service abort */ 18452 case FC_RCTL_BA_RMC: /* remove connection */ 18453 case FC_RCTL_BA_ACC: /* basic accept */ 18454 case FC_RCTL_BA_RJT: /* basic reject */ 18455 case FC_RCTL_BA_PRMT: 18456 case FC_RCTL_ACK_1: /* acknowledge_1 */ 18457 case FC_RCTL_ACK_0: /* acknowledge_0 */ 18458 case FC_RCTL_P_RJT: /* port reject */ 18459 case FC_RCTL_F_RJT: /* fabric reject */ 18460 case FC_RCTL_P_BSY: /* port busy */ 18461 case FC_RCTL_F_BSY: /* fabric busy to data frame */ 18462 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ 18463 case FC_RCTL_LCR: /* link credit reset */ 18464 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */ 18465 case FC_RCTL_END: /* end */ 18466 break; 18467 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ 18468 fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 18469 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; 18470 return lpfc_fc_frame_check(phba, fc_hdr); 18471 case FC_RCTL_BA_NOP: /* basic link service NOP */ 18472 default: 18473 goto drop; 18474 } 18475 18476 switch (fc_hdr->fh_type) { 18477 case FC_TYPE_BLS: 18478 case FC_TYPE_ELS: 18479 case FC_TYPE_FCP: 18480 case FC_TYPE_CT: 18481 case FC_TYPE_NVME: 18482 break; 18483 case FC_TYPE_IP: 18484 case FC_TYPE_ILS: 18485 default: 18486 goto drop; 18487 } 18488 18489 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 18490 "2538 Received frame rctl:x%x, type:x%x, " 18491 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", 18492 fc_hdr->fh_r_ctl, fc_hdr->fh_type, 18493 be32_to_cpu(header[0]), be32_to_cpu(header[1]), 18494 be32_to_cpu(header[2]), be32_to_cpu(header[3]), 18495 be32_to_cpu(header[4]), 
be32_to_cpu(header[5]), 18496 be32_to_cpu(header[6])); 18497 return 0; 18498 drop: 18499 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 18500 "2539 Dropped frame rctl:x%x type:x%x\n", 18501 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 18502 return 1; 18503 } 18504 18505 /** 18506 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame 18507 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 18508 * 18509 * This function processes the FC header to retrieve the VFI from the VF 18510 * header, if one exists. This function will return the VFI if one exists 18511 * or 0 if no VSAN Header exists. 18512 **/ 18513 static uint32_t 18514 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) 18515 { 18516 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 18517 18518 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) 18519 return 0; 18520 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); 18521 } 18522 18523 /** 18524 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to 18525 * @phba: Pointer to the HBA structure to search for the vport on 18526 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 18527 * @fcfi: The FC Fabric ID that the frame came from 18528 * @did: Destination ID to match against 18529 * 18530 * This function searches the @phba for a vport that matches the content of the 18531 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the 18532 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function 18533 * returns the matching vport pointer or NULL if unable to match frame to a 18534 * vport. 18535 **/ 18536 static struct lpfc_vport * 18537 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, 18538 uint16_t fcfi, uint32_t did) 18539 { 18540 struct lpfc_vport **vports; 18541 struct lpfc_vport *vport = NULL; 18542 int i; 18543 18544 if (did == Fabric_DID) 18545 return phba->pport; 18546 if (test_bit(FC_PT2PT, &phba->pport->fc_flag) && 18547 phba->link_state != LPFC_HBA_READY) 18548 return phba->pport; 18549 18550 vports = lpfc_create_vport_work_array(phba); 18551 if (vports != NULL) { 18552 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 18553 if (phba->fcf.fcfi == fcfi && 18554 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && 18555 vports[i]->fc_myDID == did) { 18556 vport = vports[i]; 18557 break; 18558 } 18559 } 18560 } 18561 lpfc_destroy_vport_work_array(phba, vports); 18562 return vport; 18563 } 18564 18565 /** 18566 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp 18567 * @vport: The vport to work on. 18568 * 18569 * This function updates the receive sequence time stamp for this vport. The 18570 * receive sequence time stamp indicates the time that the last frame of the 18571 * the sequence that has been idle for the longest amount of time was received. 18572 * the driver uses this time stamp to indicate if any received sequences have 18573 * timed out. 18574 **/ 18575 static void 18576 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) 18577 { 18578 struct lpfc_dmabuf *h_buf; 18579 struct hbq_dmabuf *dmabuf = NULL; 18580 18581 /* get the oldest sequence on the rcv list */ 18582 h_buf = list_get_first(&vport->rcv_buffer_list, 18583 struct lpfc_dmabuf, list); 18584 if (!h_buf) 18585 return; 18586 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 18587 vport->rcv_buffer_time_stamp = dmabuf->time_stamp; 18588 } 18589 18590 /** 18591 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. 
18592 * @vport: The vport that the received sequences were sent to. 18593 * 18594 * This function cleans up all outstanding received sequences. This is called 18595 * by the driver when a link event or user action invalidates all the received 18596 * sequences. 18597 **/ 18598 void 18599 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) 18600 { 18601 struct lpfc_dmabuf *h_buf, *hnext; 18602 struct lpfc_dmabuf *d_buf, *dnext; 18603 struct hbq_dmabuf *dmabuf = NULL; 18604 18605 /* start with the oldest sequence on the rcv list */ 18606 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 18607 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 18608 list_del_init(&dmabuf->hbuf.list); 18609 list_for_each_entry_safe(d_buf, dnext, 18610 &dmabuf->dbuf.list, list) { 18611 list_del_init(&d_buf->list); 18612 lpfc_in_buf_free(vport->phba, d_buf); 18613 } 18614 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 18615 } 18616 } 18617 18618 /** 18619 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. 18620 * @vport: The vport that the received sequences were sent to. 18621 * 18622 * This function determines whether any received sequences have timed out by 18623 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp 18624 * indicates that there is at least one timed out sequence this routine will 18625 * go through the received sequences one at a time from most inactive to most 18626 * active to determine which ones need to be cleaned up. Once it has determined 18627 * that a sequence needs to be cleaned up it will simply free up the resources 18628 * without sending an abort. 18629 **/ 18630 void 18631 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) 18632 { 18633 struct lpfc_dmabuf *h_buf, *hnext; 18634 struct lpfc_dmabuf *d_buf, *dnext; 18635 struct hbq_dmabuf *dmabuf = NULL; 18636 unsigned long timeout; 18637 int abort_count = 0; 18638 18639 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 18640 vport->rcv_buffer_time_stamp); 18641 if (list_empty(&vport->rcv_buffer_list) || 18642 time_before(jiffies, timeout)) 18643 return; 18644 /* start with the oldest sequence on the rcv list */ 18645 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 18646 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 18647 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 18648 dmabuf->time_stamp); 18649 if (time_before(jiffies, timeout)) 18650 break; 18651 abort_count++; 18652 list_del_init(&dmabuf->hbuf.list); 18653 list_for_each_entry_safe(d_buf, dnext, 18654 &dmabuf->dbuf.list, list) { 18655 list_del_init(&d_buf->list); 18656 lpfc_in_buf_free(vport->phba, d_buf); 18657 } 18658 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 18659 } 18660 if (abort_count) 18661 lpfc_update_rcv_time_stamp(vport); 18662 } 18663 18664 /** 18665 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences 18666 * @vport: pointer to a vitural port 18667 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame 18668 * 18669 * This function searches through the existing incomplete sequences that have 18670 * been sent to this @vport. If the frame matches one of the incomplete 18671 * sequences then the dbuf in the @dmabuf is added to the list of frames that 18672 * make up that sequence. 
If no sequence is found that matches this frame then 18673 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list 18674 * This function returns a pointer to the first dmabuf in the sequence list that 18675 * the frame was linked to. 18676 **/ 18677 static struct hbq_dmabuf * 18678 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 18679 { 18680 struct fc_frame_header *new_hdr; 18681 struct fc_frame_header *temp_hdr; 18682 struct lpfc_dmabuf *d_buf; 18683 struct lpfc_dmabuf *h_buf; 18684 struct hbq_dmabuf *seq_dmabuf = NULL; 18685 struct hbq_dmabuf *temp_dmabuf = NULL; 18686 uint8_t found = 0; 18687 18688 INIT_LIST_HEAD(&dmabuf->dbuf.list); 18689 dmabuf->time_stamp = jiffies; 18690 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 18691 18692 /* Use the hdr_buf to find the sequence that this frame belongs to */ 18693 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 18694 temp_hdr = (struct fc_frame_header *)h_buf->virt; 18695 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 18696 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 18697 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 18698 continue; 18699 /* found a pending sequence that matches this frame */ 18700 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 18701 break; 18702 } 18703 if (!seq_dmabuf) { 18704 /* 18705 * This indicates first frame received for this sequence. 18706 * Queue the buffer on the vport's rcv_buffer_list. 18707 */ 18708 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 18709 lpfc_update_rcv_time_stamp(vport); 18710 return dmabuf; 18711 } 18712 temp_hdr = seq_dmabuf->hbuf.virt; 18713 if (be16_to_cpu(new_hdr->fh_seq_cnt) < 18714 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 18715 list_del_init(&seq_dmabuf->hbuf.list); 18716 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 18717 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 18718 lpfc_update_rcv_time_stamp(vport); 18719 return dmabuf; 18720 } 18721 /* move this sequence to the tail to indicate a young sequence */ 18722 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list); 18723 seq_dmabuf->time_stamp = jiffies; 18724 lpfc_update_rcv_time_stamp(vport); 18725 if (list_empty(&seq_dmabuf->dbuf.list)) { 18726 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 18727 return seq_dmabuf; 18728 } 18729 /* find the correct place in the sequence to insert this frame */ 18730 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list); 18731 while (!found) { 18732 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 18733 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt; 18734 /* 18735 * If the frame's sequence count is greater than the frame on 18736 * the list then insert the frame right after this frame 18737 */ 18738 if (be16_to_cpu(new_hdr->fh_seq_cnt) > 18739 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 18740 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); 18741 found = 1; 18742 break; 18743 } 18744 18745 if (&d_buf->list == &seq_dmabuf->dbuf.list) 18746 break; 18747 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list); 18748 } 18749 18750 if (found) 18751 return seq_dmabuf; 18752 return NULL; 18753 } 18754 18755 /** 18756 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence 18757 * @vport: pointer to a vitural port 18758 * @dmabuf: pointer to a dmabuf that describes the FC sequence 18759 * 18760 * This function tries to abort from the partially assembed sequence, described 18761 * by the information from 
the basic abort @dmabuf. It checks to see whether such a 18762 * partially assembled sequence is held by the driver. If so, it shall free up all 18763 * the frames from the partially assembled sequence. 18764 * 18765 * Return 18766 * true -- if a matching partially assembled sequence was present and all 18767 * of its frames were freed; 18768 * false -- if no matching partially assembled sequence was present, so 18769 * nothing was aborted in the lower layer driver 18770 **/ 18771 static bool 18772 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, 18773 struct hbq_dmabuf *dmabuf) 18774 { 18775 struct fc_frame_header *new_hdr; 18776 struct fc_frame_header *temp_hdr; 18777 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf; 18778 struct hbq_dmabuf *seq_dmabuf = NULL; 18779 18780 /* Use the hdr_buf to find the sequence that matches this frame */ 18781 INIT_LIST_HEAD(&dmabuf->dbuf.list); 18782 INIT_LIST_HEAD(&dmabuf->hbuf.list); 18783 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 18784 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 18785 temp_hdr = (struct fc_frame_header *)h_buf->virt; 18786 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 18787 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 18788 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 18789 continue; 18790 /* found a pending sequence that matches this frame */ 18791 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 18792 break; 18793 } 18794 18795 /* Free up all the frames from the partially assembled sequence */ 18796 if (seq_dmabuf) { 18797 list_for_each_entry_safe(d_buf, n_buf, 18798 &seq_dmabuf->dbuf.list, list) { 18799 list_del_init(&d_buf->list); 18800 lpfc_in_buf_free(vport->phba, d_buf); 18801 } 18802 return true; 18803 } 18804 return false; 18805 } 18806 18807 /** 18808 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp 18809 * @vport: pointer to a virtual port 18810 * @dmabuf: pointer to a dmabuf that describes the FC sequence 18811 * 18812 * This function tries to abort the assembled sequence handed to the upper level 18813 * protocol, described by the information from the basic abort @dmabuf. It 18814 * checks to see whether such a pending context exists at the upper level protocol. 18815 * If so, it shall clean up the pending context. 18816 * 18817 * Return 18818 * true -- if a matching pending context of the sequence was cleaned up 18819 * at the ulp; 18820 * false -- if no matching pending context of the sequence was present 18821 * at the ulp. 18822 **/ 18823 static bool 18824 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 18825 { 18826 struct lpfc_hba *phba = vport->phba; 18827 int handled; 18828 18829 /* Accepting abort at ulp with SLI4 only */ 18830 if (phba->sli_rev < LPFC_SLI_REV4) 18831 return false; 18832 18833 /* Give all interested upper level protocols a chance to handle the abort */ 18834 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf); 18835 if (handled) 18836 return true; 18837 18838 return false; 18839 } 18840 18841 /** 18842 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler 18843 * @phba: Pointer to HBA context object. 18844 * @cmd_iocbq: pointer to the command iocbq structure. 18845 * @rsp_iocbq: pointer to the response iocbq structure. 18846 * 18847 * This function handles the sequence abort response iocb command complete 18848 * event. It properly releases the memory allocated to the sequence abort 18849 * accept iocb.
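 *
 * A minimal sketch (drawn from lpfc_sli4_seq_abort_rsp() later in this
 * file, not a new code path) of how this completion is wired up:
 *
 *	ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
 *
 * On IOCB_ERROR the issuer itself drops the ndlp reference and releases
 * the iocbq; otherwise this handler does so at completion time.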
18850 **/ 18851 static void 18852 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, 18853 struct lpfc_iocbq *cmd_iocbq, 18854 struct lpfc_iocbq *rsp_iocbq) 18855 { 18856 if (cmd_iocbq) { 18857 lpfc_nlp_put(cmd_iocbq->ndlp); 18858 lpfc_sli_release_iocbq(phba, cmd_iocbq); 18859 } 18860 18861 /* Failure means BLS ABORT RSP did not get delivered to remote node */ 18862 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus) 18863 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 18864 "3154 BLS ABORT RSP failed, data: x%x/x%x\n", 18865 get_job_ulpstatus(phba, rsp_iocbq), 18866 get_job_word4(phba, rsp_iocbq)); 18867 } 18868 18869 /** 18870 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver. 18871 * @phba: Pointer to HBA context object. 18872 * @xri: xri id in transaction. 18873 * 18874 * This function validates that the xri maps to the known range of XRIs allocated and 18875 * used by the driver. 18876 **/ 18877 uint16_t 18878 lpfc_sli4_xri_inrange(struct lpfc_hba *phba, 18879 uint16_t xri) 18880 { 18881 uint16_t i; 18882 18883 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) { 18884 if (xri == phba->sli4_hba.xri_ids[i]) 18885 return i; 18886 } 18887 return NO_XRI; 18888 } 18889 18890 /** 18891 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort 18892 * @vport: pointer to a virtual port. 18893 * @fc_hdr: pointer to a FC frame header. 18894 * @aborted: was the partially assembled receive sequence successfully aborted 18895 * 18896 * This function sends a basic response to a previous unsol sequence abort 18897 * event after aborting the sequence handling. 18898 **/ 18899 void 18900 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, 18901 struct fc_frame_header *fc_hdr, bool aborted) 18902 { 18903 struct lpfc_hba *phba = vport->phba; 18904 struct lpfc_iocbq *ctiocb = NULL; 18905 struct lpfc_nodelist *ndlp; 18906 uint16_t oxid, rxid, xri, lxri; 18907 uint32_t sid, fctl; 18908 union lpfc_wqe128 *icmd; 18909 int rc; 18910 18911 if (!lpfc_is_link_up(phba)) 18912 return; 18913 18914 sid = sli4_sid_from_fc_hdr(fc_hdr); 18915 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 18916 rxid = be16_to_cpu(fc_hdr->fh_rx_id); 18917 18918 ndlp = lpfc_findnode_did(vport, sid); 18919 if (!ndlp) { 18920 ndlp = lpfc_nlp_init(vport, sid); 18921 if (!ndlp) { 18922 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 18923 "1268 Failed to allocate ndlp for " 18924 "oxid:x%x SID:x%x\n", oxid, sid); 18925 return; 18926 } 18927 /* Put ndlp onto vport node list */ 18928 lpfc_enqueue_node(vport, ndlp); 18929 } 18930 18931 /* Allocate buffer for rsp iocb */ 18932 ctiocb = lpfc_sli_get_iocbq(phba); 18933 if (!ctiocb) 18934 return; 18935 18936 icmd = &ctiocb->wqe; 18937 18938 /* Extract the F_CTL field from FC_HDR */ 18939 fctl = sli4_fctl_from_fc_hdr(fc_hdr); 18940 18941 ctiocb->ndlp = lpfc_nlp_get(ndlp); 18942 if (!ctiocb->ndlp) { 18943 lpfc_sli_release_iocbq(phba, ctiocb); 18944 return; 18945 } 18946 18947 ctiocb->vport = vport; 18948 ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; 18949 ctiocb->sli4_lxritag = NO_XRI; 18950 ctiocb->sli4_xritag = NO_XRI; 18951 ctiocb->abort_rctl = FC_RCTL_BA_ACC; 18952 18953 if (fctl & FC_FC_EX_CTX) 18954 /* Exchange responder sent the abort so we 18955 * own the oxid. 18956 */ 18957 xri = oxid; 18958 else 18959 xri = rxid; 18960 lxri = lpfc_sli4_xri_inrange(phba, xri); 18961 if (lxri != NO_XRI) 18962 lpfc_set_rrq_active(phba, ndlp, lxri, 18963 (xri == oxid) ?
rxid : oxid, 0); 18964 /* For BA_ABTS from exchange responder, if the logical xri with 18965 * the oxid maps to the FCP XRI range, the port no longer has 18966 * that exchange context, send a BLS_RJT. Override the IOCB for 18967 * a BA_RJT. 18968 */ 18969 if ((fctl & FC_FC_EX_CTX) && 18970 (lxri > lpfc_sli4_get_iocb_cnt(phba))) { 18971 ctiocb->abort_rctl = FC_RCTL_BA_RJT; 18972 bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0); 18973 bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp, 18974 FC_BA_RJT_INV_XID); 18975 bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp, 18976 FC_BA_RJT_UNABLE); 18977 } 18978 18979 /* If BA_ABTS failed to abort a partially assembled receive sequence, 18980 * the driver no longer has that exchange, send a BLS_RJT. Override 18981 * the IOCB for a BA_RJT. 18982 */ 18983 if (aborted == false) { 18984 ctiocb->abort_rctl = FC_RCTL_BA_RJT; 18985 bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0); 18986 bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp, 18987 FC_BA_RJT_INV_XID); 18988 bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp, 18989 FC_BA_RJT_UNABLE); 18990 } 18991 18992 if (fctl & FC_FC_EX_CTX) { 18993 /* ABTS sent by responder to CT exchange, construction 18994 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG 18995 * field and RX_ID from ABTS for RX_ID field. 18996 */ 18997 ctiocb->abort_bls = LPFC_ABTS_UNSOL_RSP; 18998 bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid); 18999 } else { 19000 /* ABTS sent by initiator to CT exchange, construction 19001 * of BA_ACC will need to allocate a new XRI as for the 19002 * XRI_TAG field. 19003 */ 19004 ctiocb->abort_bls = LPFC_ABTS_UNSOL_INT; 19005 } 19006 19007 /* OX_ID is invariant regardless of who sent the ABTS to the CT exchange */ 19008 bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, oxid); 19009 bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid); 19010 19011 /* Use CT=VPI */ 19012 bf_set(wqe_els_did, &icmd->xmit_bls_rsp.wqe_dest, 19013 ndlp->nlp_DID); 19014 bf_set(xmit_bls_rsp64_temprpi, &icmd->xmit_bls_rsp, 19015 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 19016 bf_set(wqe_cmnd, &icmd->generic.wqe_com, CMD_XMIT_BLS_RSP64_CX); 19017 19018 /* Xmit CT abts response on exchange <xid> */ 19019 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 19020 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", 19021 ctiocb->abort_rctl, oxid, phba->link_state); 19022 19023 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 19024 if (rc == IOCB_ERROR) { 19025 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 19026 "2925 Failed to issue CT ABTS RSP x%x on " 19027 "xri x%x, Data x%x\n", 19028 ctiocb->abort_rctl, oxid, 19029 phba->link_state); 19030 lpfc_nlp_put(ndlp); 19031 ctiocb->ndlp = NULL; 19032 lpfc_sli_release_iocbq(phba, ctiocb); 19033 } 19034 19035 /* if only usage of this nodelist is BLS response, release initial ref 19036 * to free ndlp when transmit completes 19037 */ 19038 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE && 19039 !(ndlp->nlp_flag & NLP_DROPPED) && 19040 !(ndlp->fc4_xpt_flags & (NVME_XPT_REGD | SCSI_XPT_REGD))) { 19041 ndlp->nlp_flag |= NLP_DROPPED; 19042 lpfc_nlp_put(ndlp); 19043 } 19044 } 19045 19046 /** 19047 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event 19048 * @vport: Pointer to the vport on which this sequence was received 19049 * @dmabuf: pointer to a dmabuf that describes the FC sequence 19050 * 19051 * This function handles an SLI-4 unsolicited abort event.
If the unsolicited 19052 * receive sequence is only partially assembled by the driver, it shall abort 19053 * the partially assembled frames for the sequence. Otherwise, if the 19054 * unsolicited receive sequence has been completely assembled and passed to 19055 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to indicate 19056 * that the unsolicited sequence has been aborted. After that, it will issue a 19057 * basic accept to acknowledge the abort. 19058 **/ 19059 static void 19060 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, 19061 struct hbq_dmabuf *dmabuf) 19062 { 19063 struct lpfc_hba *phba = vport->phba; 19064 struct fc_frame_header fc_hdr; 19065 uint32_t fctl; 19066 bool aborted; 19067 19068 /* Make a copy of fc_hdr before the dmabuf being released */ 19069 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); 19070 fctl = sli4_fctl_from_fc_hdr(&fc_hdr); 19071 19072 if (fctl & FC_FC_EX_CTX) { 19073 /* ABTS by responder to exchange, no cleanup needed */ 19074 aborted = true; 19075 } else { 19076 /* ABTS by initiator to exchange, need to do cleanup */ 19077 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf); 19078 if (aborted == false) 19079 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf); 19080 } 19081 lpfc_in_buf_free(phba, &dmabuf->dbuf); 19082 19083 if (phba->nvmet_support) { 19084 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr); 19085 return; 19086 } 19087 19088 /* Respond with BA_ACC or BA_RJT accordingly */ 19089 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted); 19090 } 19091 19092 /** 19093 * lpfc_seq_complete - Indicates if a sequence is complete 19094 * @dmabuf: pointer to a dmabuf that describes the FC sequence 19095 * 19096 * This function checks the sequence, starting with the frame described by 19097 * @dmabuf, to see if all the frames associated with this sequence are present. 19098 * The frames associated with this sequence are linked to the @dmabuf using the 19099 * dbuf list. This function looks for three major things: 1) that the first frame 19100 * has a sequence count of zero; 2) that there is a frame with the last frame of 19101 * sequence bit set; and 3) that there are no holes in the sequence count. The 19102 * function will return 1 when the sequence is complete, otherwise it will return 0. 19103 **/ 19104 static int 19105 lpfc_seq_complete(struct hbq_dmabuf *dmabuf) 19106 { 19107 struct fc_frame_header *hdr; 19108 struct lpfc_dmabuf *d_buf; 19109 struct hbq_dmabuf *seq_dmabuf; 19110 uint32_t fctl; 19111 int seq_count = 0; 19112 19113 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 19114 /* make sure the first frame of the sequence has a sequence count of zero */ 19115 if (hdr->fh_seq_cnt != seq_count) 19116 return 0; 19117 fctl = (hdr->fh_f_ctl[0] << 16 | 19118 hdr->fh_f_ctl[1] << 8 | 19119 hdr->fh_f_ctl[2]); 19120 /* If last frame of sequence we can return success. */ 19121 if (fctl & FC_FC_END_SEQ) 19122 return 1; 19123 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) { 19124 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 19125 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 19126 /* If there is a hole in the sequence count then fail. */ 19127 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt)) 19128 return 0; 19129 fctl = (hdr->fh_f_ctl[0] << 16 | 19130 hdr->fh_f_ctl[1] << 8 | 19131 hdr->fh_f_ctl[2]); 19132 /* If last frame of sequence we can return success.
*/ 19133 if (fctl & FC_FC_END_SEQ) 19134 return 1; 19135 } 19136 return 0; 19137 } 19138 19139 /** 19140 * lpfc_prep_seq - Prep sequence for ULP processing 19141 * @vport: Pointer to the vport on which this sequence was received 19142 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence 19143 * 19144 * This function takes a sequence, described by a list of frames, and creates 19145 * a list of iocbq structures to describe the sequence. This iocbq list will be 19146 * used to issue to the generic unsolicited sequence handler. This routine 19147 * returns a pointer to the first iocbq in the list. If the function is unable 19148 * to allocate an iocbq then it throws out the received frames that could not 19149 * be described and returns a pointer to the first iocbq. If unable to 19150 * allocate any iocbqs (including the first) this function will return NULL. 19151 **/ 19152 static struct lpfc_iocbq * 19153 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) 19154 { 19155 struct hbq_dmabuf *hbq_buf; 19156 struct lpfc_dmabuf *d_buf, *n_buf; 19157 struct lpfc_iocbq *first_iocbq, *iocbq; 19158 struct fc_frame_header *fc_hdr; 19159 uint32_t sid; 19160 uint32_t len, tot_len; 19161 19162 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 19163 /* remove from receive buffer list */ 19164 list_del_init(&seq_dmabuf->hbuf.list); 19165 lpfc_update_rcv_time_stamp(vport); 19166 /* get the Remote Port's SID */ 19167 sid = sli4_sid_from_fc_hdr(fc_hdr); 19168 tot_len = 0; 19169 /* Get an iocbq struct to fill in. */ 19170 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 19171 if (first_iocbq) { 19172 /* Initialize the first IOCB. */ 19173 first_iocbq->wcqe_cmpl.total_data_placed = 0; 19174 bf_set(lpfc_wcqe_c_status, &first_iocbq->wcqe_cmpl, 19175 IOSTAT_SUCCESS); 19176 first_iocbq->vport = vport; 19177 19178 /* Check FC Header to see what TYPE of frame we are rcv'ing */ 19179 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) { 19180 bf_set(els_rsp64_sid, &first_iocbq->wqe.xmit_els_rsp, 19181 sli4_did_from_fc_hdr(fc_hdr)); 19182 } 19183 19184 bf_set(wqe_ctxt_tag, &first_iocbq->wqe.xmit_els_rsp.wqe_com, 19185 NO_XRI); 19186 bf_set(wqe_rcvoxid, &first_iocbq->wqe.xmit_els_rsp.wqe_com, 19187 be16_to_cpu(fc_hdr->fh_ox_id)); 19188 19189 /* put the first buffer into the first iocb */ 19190 tot_len = bf_get(lpfc_rcqe_length, 19191 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 19192 19193 first_iocbq->cmd_dmabuf = &seq_dmabuf->dbuf; 19194 first_iocbq->bpl_dmabuf = NULL; 19195 /* Keep track of the BDE count */ 19196 first_iocbq->wcqe_cmpl.word3 = 1; 19197 19198 if (tot_len > LPFC_DATA_BUF_SIZE) 19199 first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = 19200 LPFC_DATA_BUF_SIZE; 19201 else 19202 first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = tot_len; 19203 19204 first_iocbq->wcqe_cmpl.total_data_placed = tot_len; 19205 bf_set(wqe_els_did, &first_iocbq->wqe.xmit_els_rsp.wqe_dest, 19206 sid); 19207 } 19208 iocbq = first_iocbq; 19209 /* 19210 * Each IOCBq can have two Buffers assigned, so go through the list 19211 * of buffers for this sequence and save two buffers in each IOCBq 19212 */ 19213 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { 19214 if (!iocbq) { 19215 lpfc_in_buf_free(vport->phba, d_buf); 19216 continue; 19217 } 19218 if (!iocbq->bpl_dmabuf) { 19219 iocbq->bpl_dmabuf = d_buf; 19220 iocbq->wcqe_cmpl.word3++; 19221 /* We need to get the size out of the right CQE */ 19222 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 19223 len = bf_get(lpfc_rcqe_length,
19224 &hbq_buf->cq_event.cqe.rcqe_cmpl); 19225 iocbq->unsol_rcv_len = len; 19226 iocbq->wcqe_cmpl.total_data_placed += len; 19227 tot_len += len; 19228 } else { 19229 iocbq = lpfc_sli_get_iocbq(vport->phba); 19230 if (!iocbq) { 19231 if (first_iocbq) { 19232 bf_set(lpfc_wcqe_c_status, 19233 &first_iocbq->wcqe_cmpl, 19234 IOSTAT_SUCCESS); 19235 first_iocbq->wcqe_cmpl.parameter = 19236 IOERR_NO_RESOURCES; 19237 } 19238 lpfc_in_buf_free(vport->phba, d_buf); 19239 continue; 19240 } 19241 /* We need to get the size out of the right CQE */ 19242 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 19243 len = bf_get(lpfc_rcqe_length, 19244 &hbq_buf->cq_event.cqe.rcqe_cmpl); 19245 iocbq->cmd_dmabuf = d_buf; 19246 iocbq->bpl_dmabuf = NULL; 19247 iocbq->wcqe_cmpl.word3 = 1; 19248 19249 if (len > LPFC_DATA_BUF_SIZE) 19250 iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize = 19251 LPFC_DATA_BUF_SIZE; 19252 else 19253 iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize = 19254 len; 19255 19256 tot_len += len; 19257 iocbq->wcqe_cmpl.total_data_placed = tot_len; 19258 bf_set(wqe_els_did, &iocbq->wqe.xmit_els_rsp.wqe_dest, 19259 sid); 19260 list_add_tail(&iocbq->list, &first_iocbq->list); 19261 } 19262 } 19263 /* Free the sequence's header buffer */ 19264 if (!first_iocbq) 19265 lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf); 19266 19267 return first_iocbq; 19268 } 19269 19270 static void 19271 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, 19272 struct hbq_dmabuf *seq_dmabuf) 19273 { 19274 struct fc_frame_header *fc_hdr; 19275 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb; 19276 struct lpfc_hba *phba = vport->phba; 19277 19278 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 19279 iocbq = lpfc_prep_seq(vport, seq_dmabuf); 19280 if (!iocbq) { 19281 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 19282 "2707 Ring %d handler: Failed to allocate " 19283 "iocb Rctl x%x Type x%x received\n", 19284 LPFC_ELS_RING, 19285 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 19286 return; 19287 } 19288 if (!lpfc_complete_unsol_iocb(phba, 19289 phba->sli4_hba.els_wq->pring, 19290 iocbq, fc_hdr->fh_r_ctl, 19291 fc_hdr->fh_type)) { 19292 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 19293 "2540 Ring %d handler: unexpected Rctl " 19294 "x%x Type x%x received\n", 19295 LPFC_ELS_RING, 19296 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 19297 lpfc_in_buf_free(phba, &seq_dmabuf->dbuf); 19298 } 19299 19300 /* Free iocb created in lpfc_prep_seq */ 19301 list_for_each_entry_safe(curr_iocb, next_iocb, 19302 &iocbq->list, list) { 19303 list_del_init(&curr_iocb->list); 19304 lpfc_sli_release_iocbq(phba, curr_iocb); 19305 } 19306 lpfc_sli_release_iocbq(phba, iocbq); 19307 } 19308 19309 static void 19310 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 19311 struct lpfc_iocbq *rspiocb) 19312 { 19313 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf; 19314 19315 if (pcmd && pcmd->virt) 19316 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); 19317 kfree(pcmd); 19318 lpfc_sli_release_iocbq(phba, cmdiocb); 19319 lpfc_drain_txq(phba); 19320 } 19321 19322 static void 19323 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, 19324 struct hbq_dmabuf *dmabuf) 19325 { 19326 struct fc_frame_header *fc_hdr; 19327 struct lpfc_hba *phba = vport->phba; 19328 struct lpfc_iocbq *iocbq = NULL; 19329 union lpfc_wqe128 *pwqe; 19330 struct lpfc_dmabuf *pcmd = NULL; 19331 uint32_t frame_len; 19332 int rc; 19333 unsigned long iflags; 19334 19335 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 19336 frame_len = 
bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl); 19337 19338 /* Send the received frame back */ 19339 iocbq = lpfc_sli_get_iocbq(phba); 19340 if (!iocbq) { 19341 /* Queue cq event and wakeup worker thread to process it */ 19342 spin_lock_irqsave(&phba->hbalock, iflags); 19343 list_add_tail(&dmabuf->cq_event.list, 19344 &phba->sli4_hba.sp_queue_event); 19345 spin_unlock_irqrestore(&phba->hbalock, iflags); 19346 set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag); 19347 lpfc_worker_wake_up(phba); 19348 return; 19349 } 19350 19351 /* Allocate buffer for command payload */ 19352 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 19353 if (pcmd) 19354 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, 19355 &pcmd->phys); 19356 if (!pcmd || !pcmd->virt) 19357 goto exit; 19358 19359 INIT_LIST_HEAD(&pcmd->list); 19360 19361 /* copyin the payload */ 19362 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len); 19363 19364 iocbq->cmd_dmabuf = pcmd; 19365 iocbq->vport = vport; 19366 iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK; 19367 iocbq->cmd_flag |= LPFC_USE_FCPWQIDX; 19368 iocbq->num_bdes = 0; 19369 19370 pwqe = &iocbq->wqe; 19371 /* fill in BDE's for command */ 19372 pwqe->gen_req.bde.addrHigh = putPaddrHigh(pcmd->phys); 19373 pwqe->gen_req.bde.addrLow = putPaddrLow(pcmd->phys); 19374 pwqe->gen_req.bde.tus.f.bdeSize = frame_len; 19375 pwqe->gen_req.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 19376 19377 pwqe->send_frame.frame_len = frame_len; 19378 pwqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((__be32 *)fc_hdr)); 19379 pwqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((__be32 *)fc_hdr + 1)); 19380 pwqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((__be32 *)fc_hdr + 2)); 19381 pwqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((__be32 *)fc_hdr + 3)); 19382 pwqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((__be32 *)fc_hdr + 4)); 19383 pwqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((__be32 *)fc_hdr + 5)); 19384 19385 pwqe->generic.wqe_com.word7 = 0; 19386 pwqe->generic.wqe_com.word10 = 0; 19387 19388 bf_set(wqe_cmnd, &pwqe->generic.wqe_com, CMD_SEND_FRAME); 19389 bf_set(wqe_sof, &pwqe->generic.wqe_com, 0x2E); /* SOF byte */ 19390 bf_set(wqe_eof, &pwqe->generic.wqe_com, 0x41); /* EOF byte */ 19391 bf_set(wqe_lenloc, &pwqe->generic.wqe_com, 1); 19392 bf_set(wqe_xbl, &pwqe->generic.wqe_com, 1); 19393 bf_set(wqe_dbde, &pwqe->generic.wqe_com, 1); 19394 bf_set(wqe_xc, &pwqe->generic.wqe_com, 1); 19395 bf_set(wqe_cmd_type, &pwqe->generic.wqe_com, 0xA); 19396 bf_set(wqe_cqid, &pwqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 19397 bf_set(wqe_xri_tag, &pwqe->generic.wqe_com, iocbq->sli4_xritag); 19398 bf_set(wqe_reqtag, &pwqe->generic.wqe_com, iocbq->iotag); 19399 bf_set(wqe_class, &pwqe->generic.wqe_com, CLASS3); 19400 pwqe->generic.wqe_com.abort_tag = iocbq->iotag; 19401 19402 iocbq->cmd_cmpl = lpfc_sli4_mds_loopback_cmpl; 19403 19404 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0); 19405 if (rc == IOCB_ERROR) 19406 goto exit; 19407 19408 lpfc_in_buf_free(phba, &dmabuf->dbuf); 19409 return; 19410 19411 exit: 19412 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 19413 "2023 Unable to process MDS loopback frame\n"); 19414 if (pcmd && pcmd->virt) 19415 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); 19416 kfree(pcmd); 19417 if (iocbq) 19418 lpfc_sli_release_iocbq(phba, iocbq); 19419 lpfc_in_buf_free(phba, &dmabuf->dbuf); 19420 } 19421 19422 /** 19423 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware 19424 * @phba: Pointer to HBA context object. 
19425 * @dmabuf: Pointer to a dmabuf that describes the FC sequence. 19426 * 19427 * This function is called with no lock held. This function processes all 19428 * the received buffers and gives them to upper layers when a received buffer 19429 * indicates that it is the final frame in the sequence. The interrupt 19430 * service routine processes received buffers at interrupt context. 19431 * The worker thread calls lpfc_sli4_handle_received_buffer, which will call the 19432 * appropriate receive function when the final frame in a sequence is received. 19433 **/ 19434 void 19435 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, 19436 struct hbq_dmabuf *dmabuf) 19437 { 19438 struct hbq_dmabuf *seq_dmabuf; 19439 struct fc_frame_header *fc_hdr; 19440 struct lpfc_vport *vport; 19441 uint32_t fcfi; 19442 uint32_t did; 19443 19444 /* Process each received buffer */ 19445 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 19446 19447 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS || 19448 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { 19449 vport = phba->pport; 19450 /* Handle MDS Loopback frames */ 19451 if (!test_bit(FC_UNLOADING, &phba->pport->load_flag)) 19452 lpfc_sli4_handle_mds_loopback(vport, dmabuf); 19453 else 19454 lpfc_in_buf_free(phba, &dmabuf->dbuf); 19455 return; 19456 } 19457 19458 /* check to see if this is a valid type of frame */ 19459 if (lpfc_fc_frame_check(phba, fc_hdr)) { 19460 lpfc_in_buf_free(phba, &dmabuf->dbuf); 19461 return; 19462 } 19463 19464 if ((bf_get(lpfc_cqe_code, 19465 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)) 19466 fcfi = bf_get(lpfc_rcqe_fcf_id_v1, 19467 &dmabuf->cq_event.cqe.rcqe_cmpl); 19468 else 19469 fcfi = bf_get(lpfc_rcqe_fcf_id, 19470 &dmabuf->cq_event.cqe.rcqe_cmpl); 19471 19472 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) { 19473 vport = phba->pport; 19474 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 19475 "2023 MDS Loopback %d bytes\n", 19476 bf_get(lpfc_rcqe_length, 19477 &dmabuf->cq_event.cqe.rcqe_cmpl)); 19478 /* Handle MDS Loopback frames */ 19479 lpfc_sli4_handle_mds_loopback(vport, dmabuf); 19480 return; 19481 } 19482 19483 /* d_id this frame is directed to */ 19484 did = sli4_did_from_fc_hdr(fc_hdr); 19485 19486 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did); 19487 if (!vport) { 19488 /* throw out the frame */ 19489 lpfc_in_buf_free(phba, &dmabuf->dbuf); 19490 return; 19491 } 19492 19493 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */ 19494 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) && 19495 (did != Fabric_DID)) { 19496 /* 19497 * Throw out the frame if we are not pt2pt. 19498 * The pt2pt protocol allows for discovery frames 19499 * to be received without a registered VPI. 19500 */ 19501 if (!test_bit(FC_PT2PT, &vport->fc_flag) || 19502 phba->link_state == LPFC_HBA_READY) { 19503 lpfc_in_buf_free(phba, &dmabuf->dbuf); 19504 return; 19505 } 19506 } 19507 19508 /* Handle the basic abort sequence (BA_ABTS) event */ 19509 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { 19510 lpfc_sli4_handle_unsol_abort(vport, dmabuf); 19511 return; 19512 } 19513 19514 /* Link this frame */ 19515 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); 19516 if (!seq_dmabuf) { 19517 /* unable to add frame to vport - throw it out */ 19518 lpfc_in_buf_free(phba, &dmabuf->dbuf); 19519 return; 19520 } 19521 /* If not last frame in sequence continue processing frames.
*/ 19522 if (!lpfc_seq_complete(seq_dmabuf)) 19523 return; 19524 19525 /* Send the complete sequence to the upper layer protocol */ 19526 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); 19527 } 19528 19529 /** 19530 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port 19531 * @phba: pointer to lpfc hba data structure. 19532 * 19533 * This routine is invoked to post rpi header templates to the 19534 * HBA consistent with the SLI-4 interface spec. This routine 19535 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 19536 * SLI4_PAGE_SIZE / 64 rpi context headers. 19537 * 19538 * This routine does not require any locks. Its usage is expected 19539 * to be driver load or reset recovery, when driver execution is 19540 * sequential. 19541 * 19542 * Return codes 19543 * 0 - successful 19544 * -EIO - The mailbox failed to complete successfully. 19545 * When this error occurs, the driver is not guaranteed 19546 * to have any rpi regions posted to the device and 19547 * must either attempt to repost the regions or take a 19548 * fatal error. 19549 **/ 19550 int 19551 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) 19552 { 19553 struct lpfc_rpi_hdr *rpi_page; 19554 uint32_t rc = 0; 19555 uint16_t lrpi = 0; 19556 19557 /* SLI4 ports that support extents do not require RPI headers. */ 19558 if (!phba->sli4_hba.rpi_hdrs_in_use) 19559 goto exit; 19560 if (phba->sli4_hba.extents_in_use) 19561 return -EIO; 19562 19563 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 19564 /* 19565 * Assign the rpi headers a physical rpi only if the driver 19566 * has not initialized those resources. A port reset only 19567 * needs the headers posted. 19568 */ 19569 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) != 19570 LPFC_RPI_RSRC_RDY) 19571 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 19572 19573 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); 19574 if (rc != MBX_SUCCESS) { 19575 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 19576 "2008 Error %d posting all rpi " 19577 "headers\n", rc); 19578 rc = -EIO; 19579 break; 19580 } 19581 } 19582 19583 exit: 19584 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 19585 LPFC_RPI_RSRC_RDY); 19586 return rc; 19587 } 19588 19589 /** 19590 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port 19591 * @phba: pointer to lpfc hba data structure. 19592 * @rpi_page: pointer to the rpi memory region. 19593 * 19594 * This routine is invoked to post a single rpi header to the 19595 * HBA consistent with the SLI-4 interface spec. This memory region 19596 * maps up to 64 rpi context regions. 19597 * 19598 * Return codes 19599 * 0 - successful 19600 * -ENOMEM - No available memory 19601 * -EIO - The mailbox failed to complete successfully. 19602 **/ 19603 int 19604 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) 19605 { 19606 LPFC_MBOXQ_t *mboxq; 19607 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; 19608 uint32_t rc = 0; 19609 uint32_t shdr_status, shdr_add_status; 19610 union lpfc_sli4_cfg_shdr *shdr; 19611 19612 /* SLI4 ports that support extents do not require RPI headers. */ 19613 if (!phba->sli4_hba.rpi_hdrs_in_use) 19614 return rc; 19615 if (phba->sli4_hba.extents_in_use) 19616 return -EIO; 19617 19618 /* The port is notified of the header region via a mailbox command.
*/ 19619 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 19620 if (!mboxq) { 19621 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 19622 "2001 Unable to allocate memory for issuing " 19623 "SLI_CONFIG_SPECIAL mailbox command\n"); 19624 return -ENOMEM; 19625 } 19626 19627 /* Post all rpi memory regions to the port. */ 19628 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl; 19629 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 19630 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, 19631 sizeof(struct lpfc_mbx_post_hdr_tmpl) - 19632 sizeof(struct lpfc_sli4_cfg_mhdr), 19633 LPFC_SLI4_MBX_EMBED); 19634 19635 19636 /* Post the physical rpi to the port for this rpi header. */ 19637 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, 19638 rpi_page->start_rpi); 19639 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, 19640 hdr_tmpl, rpi_page->page_count); 19641 19642 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); 19643 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); 19644 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 19645 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; 19646 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 19647 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 19648 mempool_free(mboxq, phba->mbox_mem_pool); 19649 if (shdr_status || shdr_add_status || rc) { 19650 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 19651 "2514 POST_RPI_HDR mailbox failed with " 19652 "status x%x add_status x%x, mbx status x%x\n", 19653 shdr_status, shdr_add_status, rc); 19654 rc = -ENXIO; 19655 } else { 19656 /* 19657 * The next_rpi stores the next logical modulo-64 rpi value used 19658 * to post physical rpis in subsequent rpi postings. 19659 */ 19660 spin_lock_irq(&phba->hbalock); 19661 phba->sli4_hba.next_rpi = rpi_page->next_rpi; 19662 spin_unlock_irq(&phba->hbalock); 19663 } 19664 return rc; 19665 } 19666 19667 /** 19668 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range 19669 * @phba: pointer to lpfc hba data structure. 19670 * 19671 * This routine is invoked to allocate the next available rpi from the 19672 * driver's rpi bitmask. When the pool of remaining rpis runs low, it 19673 * also grows the pool by posting another rpi header region to the 19674 * port. 19675 * 19676 * Returns 19677 * An rpi defined as rpi_base <= rpi < max_rpi if successful 19678 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 19679 **/ 19680 int 19681 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) 19682 { 19683 unsigned long rpi; 19684 uint16_t max_rpi, rpi_limit; 19685 uint16_t rpi_remaining, lrpi = 0; 19686 struct lpfc_rpi_hdr *rpi_hdr; 19687 unsigned long iflag; 19688 19689 /* 19690 * Fetch the next logical rpi. Because this index is logical, 19691 * the driver starts at 0 each time. 19692 */ 19693 spin_lock_irqsave(&phba->hbalock, iflag); 19694 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 19695 rpi_limit = phba->sli4_hba.next_rpi; 19696 19697 rpi = find_first_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit); 19698 if (rpi >= rpi_limit) 19699 rpi = LPFC_RPI_ALLOC_ERROR; 19700 else { 19701 set_bit(rpi, phba->sli4_hba.rpi_bmask); 19702 phba->sli4_hba.max_cfg_param.rpi_used++; 19703 phba->sli4_hba.rpi_count++; 19704 } 19705 lpfc_printf_log(phba, KERN_INFO, 19706 LOG_NODE | LOG_DISCOVERY, 19707 "0001 Allocated rpi:x%x max:x%x lim:x%x\n", 19708 (int) rpi, max_rpi, rpi_limit); 19709 19710 /* 19711 * Don't try to allocate more rpi header regions if the device limit 19712 * has been exhausted.
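 * In that case rpi already holds LPFC_RPI_ALLOC_ERROR and rpi_count has
 * reached max_rpi, so the error value is simply returned to the caller.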
19713 */ 19714 if ((rpi == LPFC_RPI_ALLOC_ERROR) && 19715 (phba->sli4_hba.rpi_count >= max_rpi)) { 19716 spin_unlock_irqrestore(&phba->hbalock, iflag); 19717 return rpi; 19718 } 19719 19720 /* 19721 * RPI header postings are not required for SLI4 ports capable of 19722 * extents. 19723 */ 19724 if (!phba->sli4_hba.rpi_hdrs_in_use) { 19725 spin_unlock_irqrestore(&phba->hbalock, iflag); 19726 return rpi; 19727 } 19728 19729 /* 19730 * If the driver is running low on rpi resources, allocate another 19731 * page now. Note that the next_rpi value is used because 19732 * it represents how many are actually in use whereas max_rpi notes 19733 * the maximum supported by the device. 19734 */ 19735 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count; 19736 spin_unlock_irqrestore(&phba->hbalock, iflag); 19737 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { 19738 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 19739 if (!rpi_hdr) { 19740 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 19741 "2002 Error Could not grow rpi " 19742 "count\n"); 19743 } else { 19744 lrpi = rpi_hdr->start_rpi; 19745 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 19746 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr); 19747 } 19748 } 19749 19750 return rpi; 19751 } 19752 19753 /** 19754 * __lpfc_sli4_free_rpi - Release an rpi for reuse. 19755 * @phba: pointer to lpfc hba data structure. 19756 * @rpi: rpi to free 19757 * 19758 * This routine is invoked to release an rpi to the pool of 19759 * available rpis maintained by the driver. 19760 **/ 19761 static void 19762 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 19763 { 19764 /* 19765 * if the rpi value indicates a prior unreg has already 19766 * been done, skip the unreg. 19767 */ 19768 if (rpi == LPFC_RPI_ALLOC_ERROR) 19769 return; 19770 19771 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) { 19772 phba->sli4_hba.rpi_count--; 19773 phba->sli4_hba.max_cfg_param.rpi_used--; 19774 } else { 19775 lpfc_printf_log(phba, KERN_INFO, 19776 LOG_NODE | LOG_DISCOVERY, 19777 "2016 rpi %x not inuse\n", 19778 rpi); 19779 } 19780 } 19781 19782 /** 19783 * lpfc_sli4_free_rpi - Release an rpi for reuse. 19784 * @phba: pointer to lpfc hba data structure. 19785 * @rpi: rpi to free 19786 * 19787 * This routine is invoked to release an rpi to the pool of 19788 * available rpis maintained by the driver. 19789 **/ 19790 void 19791 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 19792 { 19793 spin_lock_irq(&phba->hbalock); 19794 __lpfc_sli4_free_rpi(phba, rpi); 19795 spin_unlock_irq(&phba->hbalock); 19796 } 19797 19798 /** 19799 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region 19800 * @phba: pointer to lpfc hba data structure. 19801 * 19802 * This routine is invoked to remove the memory region that 19803 * provided rpis via a bitmask. 19804 **/ 19805 void 19806 lpfc_sli4_remove_rpis(struct lpfc_hba *phba) 19807 { 19808 kfree(phba->sli4_hba.rpi_bmask); 19809 kfree(phba->sli4_hba.rpi_ids); 19810 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 19811 } 19812 19813 /** 19814 * lpfc_sli4_resume_rpi - Resume an rpi with the port 19815 * @ndlp: pointer to lpfc nodelist data structure. 19816 * @cmpl: completion call-back. 19817 * @iocbq: data to load as mbox ctx_u information 19818 * 19819 * This routine is invoked to issue a RESUME_RPI mailbox command to resume 19820 * traffic on the rpi associated with @ndlp.
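 *
 * Illustrative call pattern (a hedged sketch of existing usage, not a
 * new interface): callers that need no post-processing pass NULL for
 * both @cmpl and @iocbq and get the default mailbox completion,
 *
 *	rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
 *
 * while callers that supply a @cmpl routine have @iocbq stashed in
 * mboxq->ctx_u.save_iocb for that handler to use.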
19821 **/ 19822 int 19823 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp, 19824 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), 19825 struct lpfc_iocbq *iocbq) 19826 { 19827 LPFC_MBOXQ_t *mboxq; 19828 struct lpfc_hba *phba = ndlp->phba; 19829 int rc; 19830 19831 /* The port is notified of the header region via a mailbox command. */ 19832 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 19833 if (!mboxq) 19834 return -ENOMEM; 19835 19836 /* If cmpl assigned, then this nlp_get pairs with 19837 * lpfc_mbx_cmpl_resume_rpi. 19838 * 19839 * Else cmpl is NULL, then this nlp_get pairs with 19840 * lpfc_sli_def_mbox_cmpl. 19841 */ 19842 if (!lpfc_nlp_get(ndlp)) { 19843 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 19844 "2122 %s: Failed to get nlp ref\n", 19845 __func__); 19846 mempool_free(mboxq, phba->mbox_mem_pool); 19847 return -EIO; 19848 } 19849 19850 /* Post all rpi memory regions to the port. */ 19851 lpfc_resume_rpi(mboxq, ndlp); 19852 if (cmpl) { 19853 mboxq->mbox_cmpl = cmpl; 19854 mboxq->ctx_u.save_iocb = iocbq; 19855 } else 19856 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 19857 mboxq->ctx_ndlp = ndlp; 19858 mboxq->vport = ndlp->vport; 19859 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 19860 if (rc == MBX_NOT_FINISHED) { 19861 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 19862 "2010 Resume RPI Mailbox failed " 19863 "status %d, mbxStatus x%x\n", rc, 19864 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 19865 lpfc_nlp_put(ndlp); 19866 mempool_free(mboxq, phba->mbox_mem_pool); 19867 return -EIO; 19868 } 19869 return 0; 19870 } 19871 19872 /** 19873 * lpfc_sli4_init_vpi - Initialize a vpi with the port 19874 * @vport: Pointer to the vport for which the vpi is being initialized 19875 * 19876 * This routine is invoked to activate a vpi with the port. 19877 * 19878 * Returns: 19879 * 0 success 19880 * -Evalue otherwise 19881 **/ 19882 int 19883 lpfc_sli4_init_vpi(struct lpfc_vport *vport) 19884 { 19885 LPFC_MBOXQ_t *mboxq; 19886 int rc = 0; 19887 int retval = MBX_SUCCESS; 19888 uint32_t mbox_tmo; 19889 struct lpfc_hba *phba = vport->phba; 19890 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 19891 if (!mboxq) 19892 return -ENOMEM; 19893 lpfc_init_vpi(phba, mboxq, vport->vpi); 19894 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 19895 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 19896 if (rc != MBX_SUCCESS) { 19897 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 19898 "2022 INIT VPI Mailbox failed " 19899 "status %d, mbxStatus x%x\n", rc, 19900 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 19901 retval = -EIO; 19902 } 19903 if (rc != MBX_TIMEOUT) 19904 mempool_free(mboxq, vport->phba->mbox_mem_pool); 19905 19906 return retval; 19907 } 19908 19909 /** 19910 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. 19911 * @phba: pointer to lpfc hba data structure. 19912 * @mboxq: Pointer to mailbox object. 19913 * 19914 * This routine is invoked to manually add a single FCF record. The caller 19915 * must pass a completely initialized FCF_Record. This routine takes 19916 * care of the nonembedded mailbox operations. 19917 **/ 19918 static void 19919 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 19920 { 19921 void *virt_addr; 19922 union lpfc_sli4_cfg_shdr *shdr; 19923 uint32_t shdr_status, shdr_add_status; 19924 19925 virt_addr = mboxq->sge_array->addr[0]; 19926 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 19927 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; 19928 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 19929 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 19930 19931 if ((shdr_status || shdr_add_status) && 19932 (shdr_status != STATUS_FCF_IN_USE)) 19933 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 19934 "2558 ADD_FCF_RECORD mailbox failed with " 19935 "status x%x add_status x%x\n", 19936 shdr_status, shdr_add_status); 19937 19938 lpfc_sli4_mbox_cmd_free(phba, mboxq); 19939 } 19940 19941 /** 19942 * lpfc_sli4_add_fcf_record - Manually add an FCF Record. 19943 * @phba: pointer to lpfc hba data structure. 19944 * @fcf_record: pointer to the initialized fcf record to add. 19945 * 19946 * This routine is invoked to manually add a single FCF record. The caller 19947 * must pass a completely initialized FCF_Record. This routine takes 19948 * care of the nonembedded mailbox operations. 19949 **/ 19950 int 19951 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) 19952 { 19953 int rc = 0; 19954 LPFC_MBOXQ_t *mboxq; 19955 uint8_t *bytep; 19956 void *virt_addr; 19957 struct lpfc_mbx_sge sge; 19958 uint32_t alloc_len, req_len; 19959 uint32_t fcfindex; 19960 19961 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 19962 if (!mboxq) { 19963 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 19964 "2009 Failed to allocate mbox for ADD_FCF cmd\n"); 19965 return -ENOMEM; 19966 } 19967 19968 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + 19969 sizeof(uint32_t); 19970 19971 /* Allocate DMA memory and set up the non-embedded mailbox command */ 19972 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 19973 LPFC_MBOX_OPCODE_FCOE_ADD_FCF, 19974 req_len, LPFC_SLI4_MBX_NEMBED); 19975 if (alloc_len < req_len) { 19976 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 19977 "2523 Allocated DMA memory size (x%x) is " 19978 "less than the requested DMA memory " 19979 "size (x%x)\n", alloc_len, req_len); 19980 lpfc_sli4_mbox_cmd_free(phba, mboxq); 19981 return -ENOMEM; 19982 } 19983 19984 /* 19985 * Get the first SGE entry from the non-embedded DMA memory. This 19986 * routine only uses a single SGE. 19987 */ 19988 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 19989 virt_addr = mboxq->sge_array->addr[0]; 19990 /* 19991 * Configure the FCF record for FCFI 0. This is the driver's 19992 * hardcoded default and gets used in nonFIP mode. 19993 */ 19994 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); 19995 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 19996 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); 19997 19998 /* 19999 * Copy the fcf_index and the FCF Record Data. The data starts after 20000 * the FCoE header plus word10. The data copy needs to be endian 20001 * correct. 20002 */ 20003 bytep += sizeof(uint32_t); 20004 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); 20005 mboxq->vport = phba->pport; 20006 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; 20007 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 20008 if (rc == MBX_NOT_FINISHED) { 20009 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 20010 "2515 ADD_FCF_RECORD mailbox failed with " 20011 "status 0x%x\n", rc); 20012 lpfc_sli4_mbox_cmd_free(phba, mboxq); 20013 rc = -EIO; 20014 } else 20015 rc = 0; 20016 20017 return rc; 20018 } 20019 20020 /** 20021 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. 20022 * @phba: pointer to lpfc hba data structure. 
20023 * @fcf_record: pointer to the fcf record to write the default data. 20024 * @fcf_index: FCF table entry index. 20025 * 20026 * This routine is invoked to build the driver's default FCF record. The 20027 * values used are hardcoded. This routine handles memory initialization. 20028 * 20029 **/ 20030 void 20031 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, 20032 struct fcf_record *fcf_record, 20033 uint16_t fcf_index) 20034 { 20035 memset(fcf_record, 0, sizeof(struct fcf_record)); 20036 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE; 20037 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER; 20038 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY; 20039 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]); 20040 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]); 20041 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]); 20042 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3); 20043 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4); 20044 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5); 20045 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]); 20046 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); 20047 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); 20048 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); 20049 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1); 20050 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); 20051 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, 20052 LPFC_FCF_FPMA | LPFC_FCF_SPMA); 20053 /* Set the VLAN bit map */ 20054 if (phba->valid_vlan) { 20055 fcf_record->vlan_bitmap[phba->vlan_id / 8] 20056 = 1 << (phba->vlan_id % 8); 20057 } 20058 } 20059 20060 /** 20061 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan. 20062 * @phba: pointer to lpfc hba data structure. 20063 * @fcf_index: FCF table entry offset. 20064 * 20065 * This routine is invoked to scan the entire FCF table by reading FCF 20066 * records and processing them one at a time starting from the @fcf_index 20067 * for initial FCF discovery or fast FCF failover rediscovery. 20068 * 20069 * Return 0 if the mailbox command is submitted successfully, nonzero 20070 * otherwise.
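 *
 * Typical usage (sketch): an initial table scan starts from the first
 * entry, and the completion handler re-drives the scan with the next
 * index until the whole table has been walked,
 *
 *	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);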
20071 **/ 20072 int 20073 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 20074 { 20075 int rc = 0, error; 20076 LPFC_MBOXQ_t *mboxq; 20077 20078 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; 20079 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; 20080 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 20081 if (!mboxq) { 20082 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 20083 "2000 Failed to allocate mbox for " 20084 "READ_FCF cmd\n"); 20085 error = -ENOMEM; 20086 goto fail_fcf_scan; 20087 } 20088 /* Construct the read FCF record mailbox command */ 20089 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 20090 if (rc) { 20091 error = -EINVAL; 20092 goto fail_fcf_scan; 20093 } 20094 /* Issue the mailbox command asynchronously */ 20095 mboxq->vport = phba->pport; 20096 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 20097 20098 set_bit(FCF_TS_INPROG, &phba->hba_flag); 20099 20100 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 20101 if (rc == MBX_NOT_FINISHED) 20102 error = -EIO; 20103 else { 20104 /* Reset eligible FCF count for new scan */ 20105 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 20106 phba->fcf.eligible_fcf_cnt = 0; 20107 error = 0; 20108 } 20109 fail_fcf_scan: 20110 if (error) { 20111 if (mboxq) 20112 lpfc_sli4_mbox_cmd_free(phba, mboxq); 20113 /* FCF scan failed, clear FCF_TS_INPROG flag */ 20114 clear_bit(FCF_TS_INPROG, &phba->hba_flag); 20115 } 20116 return error; 20117 } 20118 20119 /** 20120 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. 20121 * @phba: pointer to lpfc hba data structure. 20122 * @fcf_index: FCF table entry offset. 20123 * 20124 * This routine is invoked to read an FCF record indicated by @fcf_index 20125 * and to use it for FLOGI roundrobin FCF failover. 20126 * 20127 * Return 0 if the mailbox command is submitted successfully, nonzero 20128 * otherwise. 20129 **/ 20130 int 20131 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 20132 { 20133 int rc = 0, error; 20134 LPFC_MBOXQ_t *mboxq; 20135 20136 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 20137 if (!mboxq) { 20138 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 20139 "2763 Failed to allocate mbox for " 20140 "READ_FCF cmd\n"); 20141 error = -ENOMEM; 20142 goto fail_fcf_read; 20143 } 20144 /* Construct the read FCF record mailbox command */ 20145 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 20146 if (rc) { 20147 error = -EINVAL; 20148 goto fail_fcf_read; 20149 } 20150 /* Issue the mailbox command asynchronously */ 20151 mboxq->vport = phba->pport; 20152 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; 20153 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 20154 if (rc == MBX_NOT_FINISHED) 20155 error = -EIO; 20156 else 20157 error = 0; 20158 20159 fail_fcf_read: 20160 if (error && mboxq) 20161 lpfc_sli4_mbox_cmd_free(phba, mboxq); 20162 return error; 20163 } 20164 20165 /** 20166 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. 20167 * @phba: pointer to lpfc hba data structure. 20168 * @fcf_index: FCF table entry offset. 20169 * 20170 * This routine is invoked to read an FCF record indicated by @fcf_index to 20171 * determine whether it's eligible for the FLOGI roundrobin failover list. 20172 * 20173 * Return 0 if the mailbox command is submitted successfully, nonzero 20174 * otherwise.
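 *
 * For example (sketch), a caller wanting to re-evaluate one entry for
 * the roundrobin bmask would issue
 *
 *	rc = lpfc_sli4_read_fcf_rec(phba, fcf_index);
 *
 * and let lpfc_mbx_cmpl_read_fcf_rec process the record asynchronously;
 * a nonzero return means no mailbox was left pending.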
20175 **/ 20176 int 20177 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 20178 { 20179 int rc = 0, error; 20180 LPFC_MBOXQ_t *mboxq; 20181 20182 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 20183 if (!mboxq) { 20184 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 20185 "2758 Failed to allocate mbox for " 20186 "READ_FCF cmd\n"); 20187 error = -ENOMEM; 20188 goto fail_fcf_read; 20189 } 20190 /* Construct the read FCF record mailbox command */ 20191 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 20192 if (rc) { 20193 error = -EINVAL; 20194 goto fail_fcf_read; 20195 } 20196 /* Issue the mailbox command asynchronously */ 20197 mboxq->vport = phba->pport; 20198 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec; 20199 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 20200 if (rc == MBX_NOT_FINISHED) 20201 error = -EIO; 20202 else 20203 error = 0; 20204 20205 fail_fcf_read: 20206 if (error && mboxq) 20207 lpfc_sli4_mbox_cmd_free(phba, mboxq); 20208 return error; 20209 } 20210 20211 /** 20212 * lpfc_check_next_fcf_pri_level - Determine the next FCF priority level to use 20213 * @phba: pointer to the lpfc_hba struct for this port. 20214 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get 20215 * routine when the rr_bmask is empty. The FCF indices are put into the 20216 * rr_bmask based on their priority level, starting from the highest priority 20217 * down to the lowest. The most likely FCF candidate will be in the highest 20218 * priority group. When this routine is called it searches the fcf_pri list for 20219 * the next lowest priority group and repopulates the rr_bmask with only those 20220 * fcf_indexes. 20221 * returns: 20222 * 1=success 0=failure 20223 **/ 20224 static int 20225 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba) 20226 { 20227 uint16_t next_fcf_pri; 20228 uint16_t last_index; 20229 struct lpfc_fcf_pri *fcf_pri; 20230 int rc; 20231 int ret = 0; 20232 20233 last_index = find_first_bit(phba->fcf.fcf_rr_bmask, 20234 LPFC_SLI4_FCF_TBL_INDX_MAX); 20235 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 20236 "3060 Last IDX %d\n", last_index); 20237 20238 /* Verify the priority list has 2 or more entries */ 20239 spin_lock_irq(&phba->hbalock); 20240 if (list_empty(&phba->fcf.fcf_pri_list) || 20241 list_is_singular(&phba->fcf.fcf_pri_list)) { 20242 spin_unlock_irq(&phba->hbalock); 20243 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 20244 "3061 Last IDX %d\n", last_index); 20245 return 0; /* Empty rr list */ 20246 } 20247 spin_unlock_irq(&phba->hbalock); 20248 20249 next_fcf_pri = 0; 20250 /* 20251 * Clear the rr_bmask and set all of the bits that are at this 20252 * priority. 20253 */ 20254 memset(phba->fcf.fcf_rr_bmask, 0, 20255 sizeof(*phba->fcf.fcf_rr_bmask)); 20256 spin_lock_irq(&phba->hbalock); 20257 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 20258 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED) 20259 continue; 20260 /* 20261 * The first priority that has not failed FLOGI 20262 * will be the highest. 20263 */ 20264 if (!next_fcf_pri) 20265 next_fcf_pri = fcf_pri->fcf_rec.priority; 20266 spin_unlock_irq(&phba->hbalock); 20267 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 20268 rc = lpfc_sli4_fcf_rr_index_set(phba, 20269 fcf_pri->fcf_rec.fcf_index); 20270 if (rc) 20271 return 0; 20272 } 20273 spin_lock_irq(&phba->hbalock); 20274 } 20275 /* 20276 * If next_fcf_pri was not set above and the list is not empty, then 20277 * we have failed FLOGIs on all of them. So reset the FLOGI failed flag 20278 * and start at the beginning.
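 * The second pass below does exactly that: it clears LPFC_FCF_FLOGI_FAILED
 * on every entry and rebuilds the rr_bmask from the highest priority that
 * remains.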
20279 */ 20280 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) { 20281 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 20282 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED; 20283 /* 20284 * The first priority that has not failed FLOGI 20285 * will be the highest. 20286 */ 20287 if (!next_fcf_pri) 20288 next_fcf_pri = fcf_pri->fcf_rec.priority; 20289 spin_unlock_irq(&phba->hbalock); 20290 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 20291 rc = lpfc_sli4_fcf_rr_index_set(phba, 20292 fcf_pri->fcf_rec.fcf_index); 20293 if (rc) 20294 return 0; 20295 } 20296 spin_lock_irq(&phba->hbalock); 20297 } 20298 } else 20299 ret = 1; 20300 spin_unlock_irq(&phba->hbalock); 20301 20302 return ret; 20303 } 20304 /** 20305 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index 20306 * @phba: pointer to lpfc hba data structure. 20307 * 20308 * This routine is to get the next eligible FCF record index in a round 20309 * robin fashion. If the next eligible FCF record index equals to the 20310 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 20311 * shall be returned, otherwise, the next eligible FCF record's index 20312 * shall be returned. 20313 **/ 20314 uint16_t 20315 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) 20316 { 20317 uint16_t next_fcf_index; 20318 20319 initial_priority: 20320 /* Search start from next bit of currently registered FCF index */ 20321 next_fcf_index = phba->fcf.current_rec.fcf_indx; 20322 20323 next_priority: 20324 /* Determine the next fcf index to check */ 20325 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX; 20326 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 20327 LPFC_SLI4_FCF_TBL_INDX_MAX, 20328 next_fcf_index); 20329 20330 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 20331 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 20332 /* 20333 * If we have wrapped then we need to clear the bits that 20334 * have been tested so that we can detect when we should 20335 * change the priority level. 20336 */ 20337 next_fcf_index = find_first_bit(phba->fcf.fcf_rr_bmask, 20338 LPFC_SLI4_FCF_TBL_INDX_MAX); 20339 } 20340 20341 20342 /* Check roundrobin failover list empty condition */ 20343 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX || 20344 next_fcf_index == phba->fcf.current_rec.fcf_indx) { 20345 /* 20346 * If the next fcf index is not found, check if there are lower 20347 * priority level fcf's in the fcf_pri list. 20348 * Set up the rr_bmask with all of the available fcf bits 20349 * at that level and continue the selection process. 20350 */ 20351 if (lpfc_check_next_fcf_pri_level(phba)) 20352 goto initial_priority; 20353 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 20354 "2844 No roundrobin failover FCF available\n"); 20355 20356 return LPFC_FCOE_FCF_NEXT_NONE; 20357 } 20358 20359 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX && 20360 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag & 20361 LPFC_FCF_FLOGI_FAILED) { 20362 if (list_is_singular(&phba->fcf.fcf_pri_list)) 20363 return LPFC_FCOE_FCF_NEXT_NONE; 20364 20365 goto next_priority; 20366 } 20367 20368 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 20369 "2845 Get next roundrobin failover FCF (x%x)\n", 20370 next_fcf_index); 20371 20372 return next_fcf_index; 20373 } 20374 20375 /** 20376 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index 20377 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index into the FCF table to 'set'
 *
 * This routine sets the FCF record index into the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before setting the bit.
 *
 * Returns 0 if the index bit is successfully set; otherwise, it returns
 * -EINVAL.
 **/
int
lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
{
	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2610 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return -EINVAL;
	}
	/* Set the eligible FCF record index bmask */
	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2790 Set FCF (x%x) to roundrobin FCF failover "
			"bmask\n", fcf_index);

	return 0;
}

/**
 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index into the FCF table to 'clear'
 *
 * This routine clears the FCF record index from the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before clearing the bit.
 **/
void
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;

	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2762 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return;
	}
	/* Clear the eligible FCF record index bmask */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
				 list) {
		if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
			list_del_init(&fcf_pri->list);
			break;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2791 Clear FCF (x%x) from roundrobin failover "
			"bmask\n", fcf_index);
}

/**
 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
 * @phba: pointer to lpfc hba data structure.
 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 *
 * This routine is the completion routine for the rediscover FCF table mailbox
 * command. If the mailbox command returned failure, it will try to stop the
 * FCF rediscover wait timer.
20454 **/ 20455 static void 20456 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 20457 { 20458 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 20459 uint32_t shdr_status, shdr_add_status; 20460 20461 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 20462 20463 shdr_status = bf_get(lpfc_mbox_hdr_status, 20464 &redisc_fcf->header.cfg_shdr.response); 20465 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 20466 &redisc_fcf->header.cfg_shdr.response); 20467 if (shdr_status || shdr_add_status) { 20468 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 20469 "2746 Requesting for FCF rediscovery failed " 20470 "status x%x add_status x%x\n", 20471 shdr_status, shdr_add_status); 20472 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { 20473 spin_lock_irq(&phba->hbalock); 20474 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 20475 spin_unlock_irq(&phba->hbalock); 20476 /* 20477 * CVL event triggered FCF rediscover request failed, 20478 * last resort to re-try current registered FCF entry. 20479 */ 20480 lpfc_retry_pport_discovery(phba); 20481 } else { 20482 spin_lock_irq(&phba->hbalock); 20483 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 20484 spin_unlock_irq(&phba->hbalock); 20485 /* 20486 * DEAD FCF event triggered FCF rediscover request 20487 * failed, last resort to fail over as a link down 20488 * to FCF registration. 20489 */ 20490 lpfc_sli4_fcf_dead_failthrough(phba); 20491 } 20492 } else { 20493 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 20494 "2775 Start FCF rediscover quiescent timer\n"); 20495 /* 20496 * Start FCF rediscovery wait timer for pending FCF 20497 * before rescan FCF record table. 20498 */ 20499 lpfc_fcf_redisc_wait_start_timer(phba); 20500 } 20501 20502 mempool_free(mbox, phba->mbox_mem_pool); 20503 } 20504 20505 /** 20506 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port. 20507 * @phba: pointer to lpfc hba data structure. 20508 * 20509 * This routine is invoked to request for rediscovery of the entire FCF table 20510 * by the port. 20511 **/ 20512 int 20513 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) 20514 { 20515 LPFC_MBOXQ_t *mbox; 20516 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 20517 int rc, length; 20518 20519 /* Cancel retry delay timers to all vports before FCF rediscover */ 20520 lpfc_cancel_all_vport_retry_delay_timer(phba); 20521 20522 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 20523 if (!mbox) { 20524 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 20525 "2745 Failed to allocate mbox for " 20526 "requesting FCF rediscover.\n"); 20527 return -ENOMEM; 20528 } 20529 20530 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) - 20531 sizeof(struct lpfc_sli4_cfg_mhdr)); 20532 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 20533 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF, 20534 length, LPFC_SLI4_MBX_EMBED); 20535 20536 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 20537 /* Set count to 0 for invalidating the entire FCF database */ 20538 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0); 20539 20540 /* Issue the mailbox command asynchronously */ 20541 mbox->vport = phba->pport; 20542 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table; 20543 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 20544 20545 if (rc == MBX_NOT_FINISHED) { 20546 mempool_free(mbox, phba->mbox_mem_pool); 20547 return -EIO; 20548 } 20549 return 0; 20550 } 20551 20552 /** 20553 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event 20554 * @phba: pointer to lpfc hba data structure. 
20555 * 20556 * This function is the failover routine as a last resort to the FCF DEAD 20557 * event when driver failed to perform fast FCF failover. 20558 **/ 20559 void 20560 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) 20561 { 20562 uint32_t link_state; 20563 20564 /* 20565 * Last resort as FCF DEAD event failover will treat this as 20566 * a link down, but save the link state because we don't want 20567 * it to be changed to Link Down unless it is already down. 20568 */ 20569 link_state = phba->link_state; 20570 lpfc_linkdown(phba); 20571 phba->link_state = link_state; 20572 20573 /* Unregister FCF if no devices connected to it */ 20574 lpfc_unregister_unused_fcf(phba); 20575 } 20576 20577 /** 20578 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data. 20579 * @phba: pointer to lpfc hba data structure. 20580 * @rgn23_data: pointer to configure region 23 data. 20581 * 20582 * This function gets SLI3 port configure region 23 data through memory dump 20583 * mailbox command. When it successfully retrieves data, the size of the data 20584 * will be returned, otherwise, 0 will be returned. 20585 **/ 20586 static uint32_t 20587 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 20588 { 20589 LPFC_MBOXQ_t *pmb = NULL; 20590 MAILBOX_t *mb; 20591 uint32_t offset = 0; 20592 int rc; 20593 20594 if (!rgn23_data) 20595 return 0; 20596 20597 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 20598 if (!pmb) { 20599 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 20600 "2600 failed to allocate mailbox memory\n"); 20601 return 0; 20602 } 20603 mb = &pmb->u.mb; 20604 20605 do { 20606 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23); 20607 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 20608 20609 if (rc != MBX_SUCCESS) { 20610 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 20611 "2601 failed to read config " 20612 "region 23, rc 0x%x Status 0x%x\n", 20613 rc, mb->mbxStatus); 20614 mb->un.varDmp.word_cnt = 0; 20615 } 20616 /* 20617 * dump mem may return a zero when finished or we got a 20618 * mailbox error, either way we are done. 20619 */ 20620 if (mb->un.varDmp.word_cnt == 0) 20621 break; 20622 20623 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset) 20624 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset; 20625 20626 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 20627 rgn23_data + offset, 20628 mb->un.varDmp.word_cnt); 20629 offset += mb->un.varDmp.word_cnt; 20630 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE); 20631 20632 mempool_free(pmb, phba->mbox_mem_pool); 20633 return offset; 20634 } 20635 20636 /** 20637 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data. 20638 * @phba: pointer to lpfc hba data structure. 20639 * @rgn23_data: pointer to configure region 23 data. 20640 * 20641 * This function gets SLI4 port configure region 23 data through memory dump 20642 * mailbox command. When it successfully retrieves data, the size of the data 20643 * will be returned, otherwise, 0 will be returned. 
 **/
static uint32_t
lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_dmabuf *mp = NULL;
	struct lpfc_mqe *mqe;
	uint32_t data_length = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3105 failed to allocate mailbox memory\n");
		return 0;
	}

	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
		goto out;
	mqe = &mboxq->u.mqe;
	mp = mboxq->ctx_buf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc)
		goto out;
	data_length = mqe->un.mb_words[5];
	if (data_length == 0)
		goto out;
	if (data_length > DMP_RGN23_SIZE) {
		data_length = 0;
		goto out;
	}
	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
out:
	lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
	return data_length;
}

/**
 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads region 23 and parses the TLV for port status to
 * decide if the user disabled the port. If the TLV indicates that the
 * port is disabled, the hba_flag is set accordingly.
 **/
void
lpfc_sli_read_link_ste(struct lpfc_hba *phba)
{
	uint8_t *rgn23_data = NULL;
	uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
	uint32_t offset = 0;

	/* Get adapter Region 23 data */
	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
	if (!rgn23_data)
		goto out;

	if (phba->sli_rev < LPFC_SLI_REV4)
		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
	else {
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
			goto out;
		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
	}

	if (!data_size)
		goto out;

	/* Check the region signature first */
	if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2619 Config region 23 has bad signature\n");
		goto out;
	}
	offset += 4;

	/* Check the data structure version */
	if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2620 Config region 23 has bad version\n");
		goto out;
	}
	offset += 4;

	/* Parse TLV entries in the region */
	while (offset < data_size) {
		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
			break;
		/*
		 * If the TLV is not a driver specific TLV or the driver id
		 * is not the linux driver id, skip the record.
		 */
		if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
		    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
		    (rgn23_data[offset + 3] != 0)) {
			offset += rgn23_data[offset + 1] * 4 + 4;
			continue;
		}

		/* Driver found a driver specific TLV in the config region */
		sub_tlv_len = rgn23_data[offset + 1] * 4;
		offset += 4;
		tlv_offset = 0;

		/*
		 * Search for the configured port state sub-TLV.
20755 */ 20756 while ((offset < data_size) && 20757 (tlv_offset < sub_tlv_len)) { 20758 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) { 20759 offset += 4; 20760 tlv_offset += 4; 20761 break; 20762 } 20763 if (rgn23_data[offset] != PORT_STE_TYPE) { 20764 offset += rgn23_data[offset + 1] * 4 + 4; 20765 tlv_offset += rgn23_data[offset + 1] * 4 + 4; 20766 continue; 20767 } 20768 20769 /* This HBA contains PORT_STE configured */ 20770 if (!rgn23_data[offset + 2]) 20771 set_bit(LINK_DISABLED, &phba->hba_flag); 20772 20773 goto out; 20774 } 20775 } 20776 20777 out: 20778 kfree(rgn23_data); 20779 return; 20780 } 20781 20782 /** 20783 * lpfc_log_fw_write_cmpl - logs firmware write completion status 20784 * @phba: pointer to lpfc hba data structure 20785 * @shdr_status: wr_object rsp's status field 20786 * @shdr_add_status: wr_object rsp's add_status field 20787 * @shdr_add_status_2: wr_object rsp's add_status_2 field 20788 * @shdr_change_status: wr_object rsp's change_status field 20789 * @shdr_csf: wr_object rsp's csf bit 20790 * 20791 * This routine is intended to be called after a firmware write completes. 20792 * It will log next action items to be performed by the user to instantiate 20793 * the newly downloaded firmware or reason for incompatibility. 20794 **/ 20795 static void 20796 lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status, 20797 u32 shdr_add_status, u32 shdr_add_status_2, 20798 u32 shdr_change_status, u32 shdr_csf) 20799 { 20800 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 20801 "4198 %s: flash_id x%02x, asic_rev x%02x, " 20802 "status x%02x, add_status x%02x, add_status_2 x%02x, " 20803 "change_status x%02x, csf %01x\n", __func__, 20804 phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev, 20805 shdr_status, shdr_add_status, shdr_add_status_2, 20806 shdr_change_status, shdr_csf); 20807 20808 if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) { 20809 switch (shdr_add_status_2) { 20810 case LPFC_ADD_STATUS_2_INCOMPAT_FLASH: 20811 lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 20812 "4199 Firmware write failed: " 20813 "image incompatible with flash x%02x\n", 20814 phba->sli4_hba.flash_id); 20815 break; 20816 case LPFC_ADD_STATUS_2_INCORRECT_ASIC: 20817 lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 20818 "4200 Firmware write failed: " 20819 "image incompatible with ASIC " 20820 "architecture x%02x\n", 20821 phba->sli4_hba.asic_rev); 20822 break; 20823 default: 20824 lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 20825 "4210 Firmware write failed: " 20826 "add_status_2 x%02x\n", 20827 shdr_add_status_2); 20828 break; 20829 } 20830 } else if (!shdr_status && !shdr_add_status) { 20831 if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET || 20832 shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) { 20833 if (shdr_csf) 20834 shdr_change_status = 20835 LPFC_CHANGE_STATUS_PCI_RESET; 20836 } 20837 20838 switch (shdr_change_status) { 20839 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET): 20840 lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, 20841 "3198 Firmware write complete: System " 20842 "reboot required to instantiate\n"); 20843 break; 20844 case (LPFC_CHANGE_STATUS_FW_RESET): 20845 lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, 20846 "3199 Firmware write complete: " 20847 "Firmware reset required to " 20848 "instantiate\n"); 20849 break; 20850 case (LPFC_CHANGE_STATUS_PORT_MIGRATION): 20851 lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, 20852 "3200 Firmware write complete: Port " 20853 "Migration or PCI Reset required to " 20854 
"instantiate\n"); 20855 break; 20856 case (LPFC_CHANGE_STATUS_PCI_RESET): 20857 lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, 20858 "3201 Firmware write complete: PCI " 20859 "Reset required to instantiate\n"); 20860 break; 20861 default: 20862 break; 20863 } 20864 } 20865 } 20866 20867 /** 20868 * lpfc_wr_object - write an object to the firmware 20869 * @phba: HBA structure that indicates port to create a queue on. 20870 * @dmabuf_list: list of dmabufs to write to the port. 20871 * @size: the total byte value of the objects to write to the port. 20872 * @offset: the current offset to be used to start the transfer. 20873 * 20874 * This routine will create a wr_object mailbox command to send to the port. 20875 * the mailbox command will be constructed using the dma buffers described in 20876 * @dmabuf_list to create a list of BDEs. This routine will fill in as many 20877 * BDEs that the imbedded mailbox can support. The @offset variable will be 20878 * used to indicate the starting offset of the transfer and will also return 20879 * the offset after the write object mailbox has completed. @size is used to 20880 * determine the end of the object and whether the eof bit should be set. 20881 * 20882 * Return 0 is successful and offset will contain the new offset to use 20883 * for the next write. 20884 * Return negative value for error cases. 20885 **/ 20886 int 20887 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, 20888 uint32_t size, uint32_t *offset) 20889 { 20890 struct lpfc_mbx_wr_object *wr_object; 20891 LPFC_MBOXQ_t *mbox; 20892 int rc = 0, i = 0; 20893 int mbox_status = 0; 20894 uint32_t shdr_status, shdr_add_status, shdr_add_status_2; 20895 uint32_t shdr_change_status = 0, shdr_csf = 0; 20896 uint32_t mbox_tmo; 20897 struct lpfc_dmabuf *dmabuf; 20898 uint32_t written = 0; 20899 bool check_change_status = false; 20900 20901 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 20902 if (!mbox) 20903 return -ENOMEM; 20904 20905 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 20906 LPFC_MBOX_OPCODE_WRITE_OBJECT, 20907 sizeof(struct lpfc_mbx_wr_object) - 20908 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); 20909 20910 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object; 20911 wr_object->u.request.write_offset = *offset; 20912 sprintf((uint8_t *)wr_object->u.request.object_name, "/"); 20913 wr_object->u.request.object_name[0] = 20914 cpu_to_le32(wr_object->u.request.object_name[0]); 20915 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0); 20916 list_for_each_entry(dmabuf, dmabuf_list, list) { 20917 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size) 20918 break; 20919 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys); 20920 wr_object->u.request.bde[i].addrHigh = 20921 putPaddrHigh(dmabuf->phys); 20922 if (written + SLI4_PAGE_SIZE >= size) { 20923 wr_object->u.request.bde[i].tus.f.bdeSize = 20924 (size - written); 20925 written += (size - written); 20926 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1); 20927 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1); 20928 check_change_status = true; 20929 } else { 20930 wr_object->u.request.bde[i].tus.f.bdeSize = 20931 SLI4_PAGE_SIZE; 20932 written += SLI4_PAGE_SIZE; 20933 } 20934 i++; 20935 } 20936 wr_object->u.request.bde_count = i; 20937 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written); 20938 if (!phba->sli4_hba.intr_enable) 20939 mbox_status = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 20940 else { 20941 mbox_tmo = 
			lpfc_mbox_tmo_val(phba, mbox);
		mbox_status = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}

	/* The mbox status needs to be maintained to detect MBOX_TIMEOUT. */
	rc = mbox_status;

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &wr_object->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
				 &wr_object->header.cfg_shdr.response);
	shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2,
				   &wr_object->header.cfg_shdr.response);
	if (check_change_status) {
		shdr_change_status = bf_get(lpfc_wr_object_change_status,
					    &wr_object->u.response);
		shdr_csf = bf_get(lpfc_wr_object_csf,
				  &wr_object->u.response);
	}

	if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3025 Write Object mailbox failed with "
				"status x%x add_status x%x, add_status_2 x%x, "
				"mbx status x%x\n",
				shdr_status, shdr_add_status, shdr_add_status_2,
				rc);
		rc = -ENXIO;
		*offset = shdr_add_status;
	} else {
		*offset += wr_object->u.response.actual_write_length;
	}

	if (rc || check_change_status)
		lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
				       shdr_add_status_2, shdr_change_status,
				       shdr_csf);

	if (!phba->sli4_hba.intr_enable)
		mempool_free(mbox, phba->mbox_mem_pool);
	else if (mbox_status != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);

	return rc;
}

/**
 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
 * @vport: pointer to vport data structure.
 *
 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 * and REG_VPI mailbox commands associated with the vport. This function
 * is called when the driver wants to restart discovery of the vport due to
 * a Clear Virtual Link event.
 **/
void
lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *act_mbx_ndlp = NULL;
	LIST_HEAD(mbox_cmd_list);
	uint8_t restart_loop;

	/* Clean up internally queued mailbox commands with the vport */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if (mb->vport != vport)
			continue;

		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
		    (mb->u.mb.mbxCommand != MBX_REG_VPI))
			continue;

		list_move_tail(&mb->list, &mbox_cmd_list);
	}
	/* Clean up active mailbox command with the vport */
	mb = phba->sli.mbox_active;
	if (mb && (mb->vport == vport)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
		    (mb->u.mb.mbxCommand == MBX_REG_VPI))
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			act_mbx_ndlp = mb->ctx_ndlp;

			/* This reference is local to this routine. The
			 * reference is removed at routine exit.
			 */
			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);

			/* Unregister the RPI when mailbox complete */
			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
		}
	}
	/* Cleanup any mailbox completions which are not yet processed */
	do {
		restart_loop = 0;
		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
			/*
			 * If this mailbox is already processed or it is
			 * for another vport, ignore it.
			 */
			if ((mb->vport != vport) ||
			    (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
				continue;

			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
			    (mb->u.mb.mbxCommand != MBX_REG_VPI))
				continue;

			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
				ndlp = mb->ctx_ndlp;
				/* Unregister the RPI when mailbox complete */
				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
				restart_loop = 1;
				spin_unlock_irq(&phba->hbalock);
				spin_lock(&ndlp->lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(&ndlp->lock);
				spin_lock_irq(&phba->hbalock);
				break;
			}
		}
	} while (restart_loop);

	spin_unlock_irq(&phba->hbalock);

	/* Release the cleaned-up mailbox commands */
	while (!list_empty(&mbox_cmd_list)) {
		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			ndlp = mb->ctx_ndlp;
			mb->ctx_ndlp = NULL;
			if (ndlp) {
				spin_lock(&ndlp->lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(&ndlp->lock);
				lpfc_nlp_put(ndlp);
			}
		}
		lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_UNLOCKED);
	}

	/* Release the ndlp with the cleaned-up active mailbox command */
	if (act_mbx_ndlp) {
		spin_lock(&act_mbx_ndlp->lock);
		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock(&act_mbx_ndlp->lock);
		lpfc_nlp_put(act_mbx_ndlp);
	}
}

/**
 * lpfc_drain_txq - Drain the txq
 * @phba: Pointer to HBA context object.
 *
 * This function attempts to submit IOCBs on the txq
 * to the adapter. For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no SGLs. This congestion can occur with large
 * vport counts during node discovery.
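 *
 * Returns the number of IOCBs left on the txq when the routine stops:
 * zero when the queue was fully drained, nonzero when issuing stopped
 * early (for example, because the work queue returned IOCB_BUSY).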
21105 **/ 21106 21107 uint32_t 21108 lpfc_drain_txq(struct lpfc_hba *phba) 21109 { 21110 LIST_HEAD(completions); 21111 struct lpfc_sli_ring *pring; 21112 struct lpfc_iocbq *piocbq = NULL; 21113 unsigned long iflags = 0; 21114 char *fail_msg = NULL; 21115 uint32_t txq_cnt = 0; 21116 struct lpfc_queue *wq; 21117 int ret = 0; 21118 21119 if (phba->link_flag & LS_MDS_LOOPBACK) { 21120 /* MDS WQE are posted only to first WQ*/ 21121 wq = phba->sli4_hba.hdwq[0].io_wq; 21122 if (unlikely(!wq)) 21123 return 0; 21124 pring = wq->pring; 21125 } else { 21126 wq = phba->sli4_hba.els_wq; 21127 if (unlikely(!wq)) 21128 return 0; 21129 pring = lpfc_phba_elsring(phba); 21130 } 21131 21132 if (unlikely(!pring) || list_empty(&pring->txq)) 21133 return 0; 21134 21135 spin_lock_irqsave(&pring->ring_lock, iflags); 21136 list_for_each_entry(piocbq, &pring->txq, list) { 21137 txq_cnt++; 21138 } 21139 21140 if (txq_cnt > pring->txq_max) 21141 pring->txq_max = txq_cnt; 21142 21143 spin_unlock_irqrestore(&pring->ring_lock, iflags); 21144 21145 while (!list_empty(&pring->txq)) { 21146 spin_lock_irqsave(&pring->ring_lock, iflags); 21147 21148 piocbq = lpfc_sli_ringtx_get(phba, pring); 21149 if (!piocbq) { 21150 spin_unlock_irqrestore(&pring->ring_lock, iflags); 21151 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 21152 "2823 txq empty and txq_cnt is %d\n ", 21153 txq_cnt); 21154 break; 21155 } 21156 txq_cnt--; 21157 21158 ret = __lpfc_sli_issue_iocb(phba, pring->ringno, piocbq, 0); 21159 21160 if (ret && ret != IOCB_BUSY) { 21161 fail_msg = " - Cannot send IO "; 21162 piocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED; 21163 } 21164 if (fail_msg) { 21165 piocbq->cmd_flag |= LPFC_DRIVER_ABORTED; 21166 /* Failed means we can't issue and need to cancel */ 21167 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 21168 "2822 IOCB failed %s iotag 0x%x " 21169 "xri 0x%x %d flg x%x\n", 21170 fail_msg, piocbq->iotag, 21171 piocbq->sli4_xritag, ret, 21172 piocbq->cmd_flag); 21173 list_add_tail(&piocbq->list, &completions); 21174 fail_msg = NULL; 21175 } 21176 spin_unlock_irqrestore(&pring->ring_lock, iflags); 21177 if (txq_cnt == 0 || ret == IOCB_BUSY) 21178 break; 21179 } 21180 /* Cancel all the IOCBs that cannot be issued */ 21181 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 21182 IOERR_SLI_ABORTED); 21183 21184 return txq_cnt; 21185 } 21186 21187 /** 21188 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl. 21189 * @phba: Pointer to HBA context object. 21190 * @pwqeq: Pointer to command WQE. 21191 * @sglq: Pointer to the scatter gather queue object. 21192 * 21193 * This routine converts the bpl or bde that is in the WQE 21194 * to a sgl list for the sli4 hardware. The physical address 21195 * of the bpl/bde is converted back to a virtual address. 21196 * If the WQE contains a BPL then the list of BDE's is 21197 * converted to sli4_sge's. If the WQE contains a single 21198 * BDE then it is converted to a single sli_sge. 21199 * The WQE is still in cpu endianness so the contents of 21200 * the bpl can be used without byte swapping. 21201 * 21202 * Returns valid XRI = Success, NO_XRI = Failure. 
21203 */ 21204 static uint16_t 21205 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq, 21206 struct lpfc_sglq *sglq) 21207 { 21208 uint16_t xritag = NO_XRI; 21209 struct ulp_bde64 *bpl = NULL; 21210 struct ulp_bde64 bde; 21211 struct sli4_sge *sgl = NULL; 21212 struct lpfc_dmabuf *dmabuf; 21213 union lpfc_wqe128 *wqe; 21214 int numBdes = 0; 21215 int i = 0; 21216 uint32_t offset = 0; /* accumulated offset in the sg request list */ 21217 int inbound = 0; /* number of sg reply entries inbound from firmware */ 21218 uint32_t cmd; 21219 21220 if (!pwqeq || !sglq) 21221 return xritag; 21222 21223 sgl = (struct sli4_sge *)sglq->sgl; 21224 wqe = &pwqeq->wqe; 21225 pwqeq->iocb.ulpIoTag = pwqeq->iotag; 21226 21227 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com); 21228 if (cmd == CMD_XMIT_BLS_RSP64_WQE) 21229 return sglq->sli4_xritag; 21230 numBdes = pwqeq->num_bdes; 21231 if (numBdes) { 21232 /* The addrHigh and addrLow fields within the WQE 21233 * have not been byteswapped yet so there is no 21234 * need to swap them back. 21235 */ 21236 if (pwqeq->bpl_dmabuf) 21237 dmabuf = pwqeq->bpl_dmabuf; 21238 else 21239 return xritag; 21240 21241 bpl = (struct ulp_bde64 *)dmabuf->virt; 21242 if (!bpl) 21243 return xritag; 21244 21245 for (i = 0; i < numBdes; i++) { 21246 /* Should already be byte swapped. */ 21247 sgl->addr_hi = bpl->addrHigh; 21248 sgl->addr_lo = bpl->addrLow; 21249 21250 sgl->word2 = le32_to_cpu(sgl->word2); 21251 if ((i+1) == numBdes) 21252 bf_set(lpfc_sli4_sge_last, sgl, 1); 21253 else 21254 bf_set(lpfc_sli4_sge_last, sgl, 0); 21255 /* swap the size field back to the cpu so we 21256 * can assign it to the sgl. 21257 */ 21258 bde.tus.w = le32_to_cpu(bpl->tus.w); 21259 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 21260 /* The offsets in the sgl need to be accumulated 21261 * separately for the request and reply lists. 21262 * The request is always first, the reply follows. 21263 */ 21264 switch (cmd) { 21265 case CMD_GEN_REQUEST64_WQE: 21266 /* add up the reply sg entries */ 21267 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 21268 inbound++; 21269 /* first inbound? reset the offset */ 21270 if (inbound == 1) 21271 offset = 0; 21272 bf_set(lpfc_sli4_sge_offset, sgl, offset); 21273 bf_set(lpfc_sli4_sge_type, sgl, 21274 LPFC_SGE_TYPE_DATA); 21275 offset += bde.tus.f.bdeSize; 21276 break; 21277 case CMD_FCP_TRSP64_WQE: 21278 bf_set(lpfc_sli4_sge_offset, sgl, 0); 21279 bf_set(lpfc_sli4_sge_type, sgl, 21280 LPFC_SGE_TYPE_DATA); 21281 break; 21282 case CMD_FCP_TSEND64_WQE: 21283 case CMD_FCP_TRECEIVE64_WQE: 21284 bf_set(lpfc_sli4_sge_type, sgl, 21285 bpl->tus.f.bdeFlags); 21286 if (i < 3) 21287 offset = 0; 21288 else 21289 offset += bde.tus.f.bdeSize; 21290 bf_set(lpfc_sli4_sge_offset, sgl, offset); 21291 break; 21292 } 21293 sgl->word2 = cpu_to_le32(sgl->word2); 21294 bpl++; 21295 sgl++; 21296 } 21297 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) { 21298 /* The addrHigh and addrLow fields of the BDE have not 21299 * been byteswapped yet so they need to be swapped 21300 * before putting them in the sgl. 
21301 */ 21302 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh); 21303 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow); 21304 sgl->word2 = le32_to_cpu(sgl->word2); 21305 bf_set(lpfc_sli4_sge_last, sgl, 1); 21306 sgl->word2 = cpu_to_le32(sgl->word2); 21307 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize); 21308 } 21309 return sglq->sli4_xritag; 21310 } 21311 21312 /** 21313 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE) 21314 * @phba: Pointer to HBA context object. 21315 * @qp: Pointer to HDW queue. 21316 * @pwqe: Pointer to command WQE. 21317 **/ 21318 int 21319 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, 21320 struct lpfc_iocbq *pwqe) 21321 { 21322 union lpfc_wqe128 *wqe = &pwqe->wqe; 21323 struct lpfc_async_xchg_ctx *ctxp; 21324 struct lpfc_queue *wq; 21325 struct lpfc_sglq *sglq; 21326 struct lpfc_sli_ring *pring; 21327 unsigned long iflags; 21328 uint32_t ret = 0; 21329 21330 /* NVME_LS and NVME_LS ABTS requests. */ 21331 if (pwqe->cmd_flag & LPFC_IO_NVME_LS) { 21332 pring = phba->sli4_hba.nvmels_wq->pring; 21333 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, 21334 qp, wq_access); 21335 sglq = __lpfc_sli_get_els_sglq(phba, pwqe); 21336 if (!sglq) { 21337 spin_unlock_irqrestore(&pring->ring_lock, iflags); 21338 return WQE_BUSY; 21339 } 21340 pwqe->sli4_lxritag = sglq->sli4_lxritag; 21341 pwqe->sli4_xritag = sglq->sli4_xritag; 21342 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) { 21343 spin_unlock_irqrestore(&pring->ring_lock, iflags); 21344 return WQE_ERROR; 21345 } 21346 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, 21347 pwqe->sli4_xritag); 21348 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe); 21349 if (ret) { 21350 spin_unlock_irqrestore(&pring->ring_lock, iflags); 21351 return ret; 21352 } 21353 21354 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 21355 spin_unlock_irqrestore(&pring->ring_lock, iflags); 21356 21357 lpfc_sli4_poll_eq(qp->hba_eq); 21358 return 0; 21359 } 21360 21361 /* NVME_FCREQ and NVME_ABTS requests */ 21362 if (pwqe->cmd_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) { 21363 /* Get the IO distribution (hba_wqidx) for WQ assignment. */ 21364 wq = qp->io_wq; 21365 pring = wq->pring; 21366 21367 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map); 21368 21369 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, 21370 qp, wq_access); 21371 ret = lpfc_sli4_wq_put(wq, wqe); 21372 if (ret) { 21373 spin_unlock_irqrestore(&pring->ring_lock, iflags); 21374 return ret; 21375 } 21376 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 21377 spin_unlock_irqrestore(&pring->ring_lock, iflags); 21378 21379 lpfc_sli4_poll_eq(qp->hba_eq); 21380 return 0; 21381 } 21382 21383 /* NVMET requests */ 21384 if (pwqe->cmd_flag & LPFC_IO_NVMET) { 21385 /* Get the IO distribution (hba_wqidx) for WQ assignment. 
*/ 21386 wq = qp->io_wq; 21387 pring = wq->pring; 21388 21389 ctxp = pwqe->context_un.axchg; 21390 sglq = ctxp->ctxbuf->sglq; 21391 if (pwqe->sli4_xritag == NO_XRI) { 21392 pwqe->sli4_lxritag = sglq->sli4_lxritag; 21393 pwqe->sli4_xritag = sglq->sli4_xritag; 21394 } 21395 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, 21396 pwqe->sli4_xritag); 21397 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map); 21398 21399 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, 21400 qp, wq_access); 21401 ret = lpfc_sli4_wq_put(wq, wqe); 21402 if (ret) { 21403 spin_unlock_irqrestore(&pring->ring_lock, iflags); 21404 return ret; 21405 } 21406 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 21407 spin_unlock_irqrestore(&pring->ring_lock, iflags); 21408 21409 lpfc_sli4_poll_eq(qp->hba_eq); 21410 return 0; 21411 } 21412 return WQE_ERROR; 21413 } 21414 21415 /** 21416 * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort 21417 * @phba: Pointer to HBA context object. 21418 * @cmdiocb: Pointer to driver command iocb object. 21419 * @cmpl: completion function. 21420 * 21421 * Fill the appropriate fields for the abort WQE and call 21422 * internal routine lpfc_sli4_issue_wqe to send the WQE 21423 * This function is called with hbalock held and no ring_lock held. 21424 * 21425 * RETURNS 0 - SUCCESS 21426 **/ 21427 21428 int 21429 lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 21430 void *cmpl) 21431 { 21432 struct lpfc_vport *vport = cmdiocb->vport; 21433 struct lpfc_iocbq *abtsiocb = NULL; 21434 union lpfc_wqe128 *abtswqe; 21435 struct lpfc_io_buf *lpfc_cmd; 21436 int retval = IOCB_ERROR; 21437 u16 xritag = cmdiocb->sli4_xritag; 21438 21439 /* 21440 * The scsi command can not be in txq and it is in flight because the 21441 * pCmd is still pointing at the SCSI command we have to abort. There 21442 * is no need to search the txcmplq. Just send an abort to the FW. 21443 */ 21444 21445 abtsiocb = __lpfc_sli_get_iocbq(phba); 21446 if (!abtsiocb) 21447 return WQE_NORESOURCE; 21448 21449 /* Indicate the IO is being aborted by the driver. 
*/ 21450 cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED; 21451 21452 abtswqe = &abtsiocb->wqe; 21453 memset(abtswqe, 0, sizeof(*abtswqe)); 21454 21455 if (!lpfc_is_link_up(phba) || (phba->link_flag & LS_EXTERNAL_LOOPBACK)) 21456 bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1); 21457 bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG); 21458 abtswqe->abort_cmd.rsrvd5 = 0; 21459 abtswqe->abort_cmd.wqe_com.abort_tag = xritag; 21460 bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag); 21461 bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 21462 bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0); 21463 bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1); 21464 bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); 21465 bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND); 21466 21467 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 21468 abtsiocb->hba_wqidx = cmdiocb->hba_wqidx; 21469 abtsiocb->cmd_flag |= LPFC_USE_FCPWQIDX; 21470 if (cmdiocb->cmd_flag & LPFC_IO_FCP) 21471 abtsiocb->cmd_flag |= LPFC_IO_FCP; 21472 if (cmdiocb->cmd_flag & LPFC_IO_NVME) 21473 abtsiocb->cmd_flag |= LPFC_IO_NVME; 21474 if (cmdiocb->cmd_flag & LPFC_IO_FOF) 21475 abtsiocb->cmd_flag |= LPFC_IO_FOF; 21476 abtsiocb->vport = vport; 21477 abtsiocb->cmd_cmpl = cmpl; 21478 21479 lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq); 21480 retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb); 21481 21482 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP, 21483 "0359 Abort xri x%x, original iotag x%x, " 21484 "abort cmd iotag x%x retval x%x\n", 21485 xritag, cmdiocb->iotag, abtsiocb->iotag, retval); 21486 21487 if (retval) { 21488 cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED; 21489 __lpfc_sli_release_iocbq(phba, abtsiocb); 21490 } 21491 21492 return retval; 21493 } 21494 21495 #ifdef LPFC_MXP_STAT 21496 /** 21497 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count 21498 * @phba: pointer to lpfc hba data structure. 21499 * @hwqid: belong to which HWQ. 21500 * 21501 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count 21502 * 15 seconds after a test case is running. 21503 * 21504 * The user should call lpfc_debugfs_multixripools_write before running a test 21505 * case to clear stat_snapshot_taken. Then the user starts a test case. During 21506 * test case is running, stat_snapshot_taken is incremented by 1 every time when 21507 * this routine is called from heartbeat timer. When stat_snapshot_taken is 21508 * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken. 
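 *
 * Example timeline (hypothetical one-second heartbeat and a snapshot
 * constant of 15): clear stat_snapshot_taken through debugfs at t=0; the
 * heartbeat then increments the counter once per call, and the pbl/pvt/busy
 * counts are latched on the 15th tick, roughly 15 seconds into the run.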
 **/
void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_pbl_pool *pbl_pool;
	u32 txcmplq_cnt;

	qp = &phba->sli4_hba.hdwq[hwqid];
	multixri_pool = qp->p_multixri_pool;
	if (!multixri_pool)
		return;

	if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
		pvt_pool = &qp->p_multixri_pool->pvt_pool;
		pbl_pool = &qp->p_multixri_pool->pbl_pool;
		txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;

		multixri_pool->stat_pbl_count = pbl_pool->count;
		multixri_pool->stat_pvt_count = pvt_pool->count;
		multixri_pool->stat_busy_count = txcmplq_cnt;
	}

	multixri_pool->stat_snapshot_taken++;
}
#endif

/**
 * lpfc_adjust_pvt_pool_count - Adjust private pool count
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine moves some XRIs from the private to the public pool when the
 * private pool is not busy.
 **/
void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_multixri_pool *multixri_pool;
	u32 io_req_count;
	u32 prev_io_req_count;

	multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
	if (!multixri_pool)
		return;
	io_req_count = multixri_pool->io_req_count;
	prev_io_req_count = multixri_pool->prev_io_req_count;

	if (prev_io_req_count != io_req_count) {
		/* Private pool is busy */
		multixri_pool->prev_io_req_count = io_req_count;
	} else {
		/* Private pool is not busy.
		 * Move XRIs from private to public pool.
		 */
		lpfc_move_xri_pvt_to_pbl(phba, hwqid);
	}
}

/**
 * lpfc_adjust_high_watermark - Adjust high watermark
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine sets the high watermark to the number of outstanding XRIs,
 * but makes sure the new value stays between xri_limit/2 and xri_limit.
 **/
void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
{
	u32 new_watermark;
	u32 watermark_max;
	u32 watermark_min;
	u32 xri_limit;
	u32 txcmplq_cnt;
	u32 abts_io_bufs;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_sli4_hdw_queue *qp;

	qp = &phba->sli4_hba.hdwq[hwqid];
	multixri_pool = qp->p_multixri_pool;
	if (!multixri_pool)
		return;
	xri_limit = multixri_pool->xri_limit;

	watermark_max = xri_limit;
	watermark_min = xri_limit / 2;

	txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
	abts_io_bufs = qp->abts_scsi_io_bufs;
	abts_io_bufs += qp->abts_nvme_io_bufs;

	new_watermark = txcmplq_cnt + abts_io_bufs;
	new_watermark = min(watermark_max, new_watermark);
	new_watermark = max(watermark_min, new_watermark);
	multixri_pool->pvt_pool.high_watermark = new_watermark;

#ifdef LPFC_MXP_STAT
	multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
					  new_watermark);
#endif
}

/**
 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine is called from the heartbeat timer when pvt_pool is idle.
21617 * All free XRIs are moved from private to public pool on hwqid with 2 steps. 21618 * The first step moves (all - low_watermark) amount of XRIs. 21619 * The second step moves the rest of XRIs. 21620 **/ 21621 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid) 21622 { 21623 struct lpfc_pbl_pool *pbl_pool; 21624 struct lpfc_pvt_pool *pvt_pool; 21625 struct lpfc_sli4_hdw_queue *qp; 21626 struct lpfc_io_buf *lpfc_ncmd; 21627 struct lpfc_io_buf *lpfc_ncmd_next; 21628 unsigned long iflag; 21629 struct list_head tmp_list; 21630 u32 tmp_count; 21631 21632 qp = &phba->sli4_hba.hdwq[hwqid]; 21633 pbl_pool = &qp->p_multixri_pool->pbl_pool; 21634 pvt_pool = &qp->p_multixri_pool->pvt_pool; 21635 tmp_count = 0; 21636 21637 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool); 21638 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool); 21639 21640 if (pvt_pool->count > pvt_pool->low_watermark) { 21641 /* Step 1: move (all - low_watermark) from pvt_pool 21642 * to pbl_pool 21643 */ 21644 21645 /* Move low watermark of bufs from pvt_pool to tmp_list */ 21646 INIT_LIST_HEAD(&tmp_list); 21647 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 21648 &pvt_pool->list, list) { 21649 list_move_tail(&lpfc_ncmd->list, &tmp_list); 21650 tmp_count++; 21651 if (tmp_count >= pvt_pool->low_watermark) 21652 break; 21653 } 21654 21655 /* Move all bufs from pvt_pool to pbl_pool */ 21656 list_splice_init(&pvt_pool->list, &pbl_pool->list); 21657 21658 /* Move all bufs from tmp_list to pvt_pool */ 21659 list_splice(&tmp_list, &pvt_pool->list); 21660 21661 pbl_pool->count += (pvt_pool->count - tmp_count); 21662 pvt_pool->count = tmp_count; 21663 } else { 21664 /* Step 2: move the rest from pvt_pool to pbl_pool */ 21665 list_splice_init(&pvt_pool->list, &pbl_pool->list); 21666 pbl_pool->count += pvt_pool->count; 21667 pvt_pool->count = 0; 21668 } 21669 21670 spin_unlock(&pvt_pool->lock); 21671 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 21672 } 21673 21674 /** 21675 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool 21676 * @phba: pointer to lpfc hba data structure 21677 * @qp: pointer to HDW queue 21678 * @pbl_pool: specified public free XRI pool 21679 * @pvt_pool: specified private free XRI pool 21680 * @count: number of XRIs to move 21681 * 21682 * This routine tries to move some free common bufs from the specified pbl_pool 21683 * to the specified pvt_pool. It might move less than count XRIs if there's not 21684 * enough in public pool. 
21685 * 21686 * Return: 21687 * true - if XRIs are successfully moved from the specified pbl_pool to the 21688 * specified pvt_pool 21689 * false - if the specified pbl_pool is empty or locked by someone else 21690 **/ 21691 static bool 21692 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, 21693 struct lpfc_pbl_pool *pbl_pool, 21694 struct lpfc_pvt_pool *pvt_pool, u32 count) 21695 { 21696 struct lpfc_io_buf *lpfc_ncmd; 21697 struct lpfc_io_buf *lpfc_ncmd_next; 21698 unsigned long iflag; 21699 int ret; 21700 21701 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag); 21702 if (ret) { 21703 if (pbl_pool->count) { 21704 /* Move a batch of XRIs from public to private pool */ 21705 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool); 21706 list_for_each_entry_safe(lpfc_ncmd, 21707 lpfc_ncmd_next, 21708 &pbl_pool->list, 21709 list) { 21710 list_move_tail(&lpfc_ncmd->list, 21711 &pvt_pool->list); 21712 pvt_pool->count++; 21713 pbl_pool->count--; 21714 count--; 21715 if (count == 0) 21716 break; 21717 } 21718 21719 spin_unlock(&pvt_pool->lock); 21720 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 21721 return true; 21722 } 21723 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 21724 } 21725 21726 return false; 21727 } 21728 21729 /** 21730 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool 21731 * @phba: pointer to lpfc hba data structure. 21732 * @hwqid: belong to which HWQ. 21733 * @count: number of XRIs to move 21734 * 21735 * This routine tries to find some free common bufs in one of public pools with 21736 * Round Robin method. The search always starts from local hwqid, then the next 21737 * HWQ which was found last time (rrb_next_hwqid). Once a public pool is found, 21738 * a batch of free common bufs are moved to private pool on hwqid. 21739 * It might move less than count XRIs if there's not enough in public pool. 
 **/
void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
{
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_multixri_pool *next_multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_sli4_hdw_queue *qp;
	u32 next_hwqid;
	u32 hwq_count;
	int ret;

	qp = &phba->sli4_hba.hdwq[hwqid];
	multixri_pool = qp->p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;
	pbl_pool = &multixri_pool->pbl_pool;

	/* Check if local pbl_pool is available */
	ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
	if (ret) {
#ifdef LPFC_MXP_STAT
		multixri_pool->local_pbl_hit_count++;
#endif
		return;
	}

	hwq_count = phba->cfg_hdw_queue;

	/* Get the next hwqid which was found last time */
	next_hwqid = multixri_pool->rrb_next_hwqid;

	do {
		/* Go to next hwq */
		next_hwqid = (next_hwqid + 1) % hwq_count;

		next_multixri_pool =
			phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
		pbl_pool = &next_multixri_pool->pbl_pool;

		/* Check if the public free xri pool is available */
		ret = _lpfc_move_xri_pbl_to_pvt(
			phba, qp, pbl_pool, pvt_pool, count);

		/* Exit while-loop if success or all hwqid are checked */
	} while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);

	/* Starting point for the next time */
	multixri_pool->rrb_next_hwqid = next_hwqid;

	if (!ret) {
		/* stats: all public pools are empty */
		multixri_pool->pbl_empty_count++;
	}

#ifdef LPFC_MXP_STAT
	if (ret) {
		if (next_hwqid == hwqid)
			multixri_pool->local_pbl_hit_count++;
		else
			multixri_pool->other_pbl_hit_count++;
	}
#endif
}

/**
 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine gets a batch of XRIs from pbl_pool if pvt_pool is below
 * the low watermark.
 **/
void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;

	multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;

	if (pvt_pool->count < pvt_pool->low_watermark)
		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
}

/**
 * lpfc_release_io_buf - Return one IO buf back to free pool
 * @phba: pointer to lpfc hba data structure.
 * @lpfc_ncmd: IO buf to be returned.
 * @qp: belong to which HWQ.
 *
 * This routine returns one IO buf back to the free pool. If this is an
 * urgent IO, the IO buf is returned to the expedite pool. If
 * cfg_xri_rebalancing==1, the IO buf is returned to pbl_pool or pvt_pool
 * based on watermark and xri_limit. If cfg_xri_rebalancing==0, the IO buf
 * is returned to lpfc_io_buf_list_put.
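 *
 * For example (hypothetical counts): with xri_limit = 512, low_watermark =
 * 16 and high_watermark = 256, a freed buffer goes back to pvt_pool while
 * pvt_pool holds fewer than 16 bufs, or while the total owned XRIs stay
 * below 512 and pvt_pool holds fewer than 256; otherwise it is returned
 * to pbl_pool.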
21835 **/ 21836 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd, 21837 struct lpfc_sli4_hdw_queue *qp) 21838 { 21839 unsigned long iflag; 21840 struct lpfc_pbl_pool *pbl_pool; 21841 struct lpfc_pvt_pool *pvt_pool; 21842 struct lpfc_epd_pool *epd_pool; 21843 u32 txcmplq_cnt; 21844 u32 xri_owned; 21845 u32 xri_limit; 21846 u32 abts_io_bufs; 21847 21848 /* MUST zero fields if buffer is reused by another protocol */ 21849 lpfc_ncmd->nvmeCmd = NULL; 21850 lpfc_ncmd->cur_iocbq.cmd_cmpl = NULL; 21851 21852 if (phba->cfg_xpsgl && !phba->nvmet_support && 21853 !list_empty(&lpfc_ncmd->dma_sgl_xtra_list)) 21854 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); 21855 21856 if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list)) 21857 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); 21858 21859 if (phba->cfg_xri_rebalancing) { 21860 if (lpfc_ncmd->expedite) { 21861 /* Return to expedite pool */ 21862 epd_pool = &phba->epd_pool; 21863 spin_lock_irqsave(&epd_pool->lock, iflag); 21864 list_add_tail(&lpfc_ncmd->list, &epd_pool->list); 21865 epd_pool->count++; 21866 spin_unlock_irqrestore(&epd_pool->lock, iflag); 21867 return; 21868 } 21869 21870 /* Avoid invalid access if an IO sneaks in and is being rejected 21871 * just _after_ xri pools are destroyed in lpfc_offline. 21872 * Nothing much can be done at this point. 21873 */ 21874 if (!qp->p_multixri_pool) 21875 return; 21876 21877 pbl_pool = &qp->p_multixri_pool->pbl_pool; 21878 pvt_pool = &qp->p_multixri_pool->pvt_pool; 21879 21880 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt; 21881 abts_io_bufs = qp->abts_scsi_io_bufs; 21882 abts_io_bufs += qp->abts_nvme_io_bufs; 21883 21884 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs; 21885 xri_limit = qp->p_multixri_pool->xri_limit; 21886 21887 #ifdef LPFC_MXP_STAT 21888 if (xri_owned <= xri_limit) 21889 qp->p_multixri_pool->below_limit_count++; 21890 else 21891 qp->p_multixri_pool->above_limit_count++; 21892 #endif 21893 21894 /* XRI goes to either public or private free xri pool 21895 * based on watermark and xri_limit 21896 */ 21897 if ((pvt_pool->count < pvt_pool->low_watermark) || 21898 (xri_owned < xri_limit && 21899 pvt_pool->count < pvt_pool->high_watermark)) { 21900 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, 21901 qp, free_pvt_pool); 21902 list_add_tail(&lpfc_ncmd->list, 21903 &pvt_pool->list); 21904 pvt_pool->count++; 21905 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 21906 } else { 21907 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, 21908 qp, free_pub_pool); 21909 list_add_tail(&lpfc_ncmd->list, 21910 &pbl_pool->list); 21911 pbl_pool->count++; 21912 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 21913 } 21914 } else { 21915 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag, 21916 qp, free_xri); 21917 list_add_tail(&lpfc_ncmd->list, 21918 &qp->lpfc_io_buf_list_put); 21919 qp->put_io_bufs++; 21920 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, 21921 iflag); 21922 } 21923 } 21924 21925 /** 21926 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool 21927 * @phba: pointer to lpfc hba data structure. 21928 * @qp: pointer to HDW queue 21929 * @pvt_pool: pointer to private pool data structure. 21930 * @ndlp: pointer to lpfc nodelist data structure. 21931 * 21932 * This routine tries to get one free IO buf from private pool. 
21933 * 21934 * Return: 21935 * pointer to one free IO buf - if private pool is not empty 21936 * NULL - if private pool is empty 21937 **/ 21938 static struct lpfc_io_buf * 21939 lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba, 21940 struct lpfc_sli4_hdw_queue *qp, 21941 struct lpfc_pvt_pool *pvt_pool, 21942 struct lpfc_nodelist *ndlp) 21943 { 21944 struct lpfc_io_buf *lpfc_ncmd; 21945 struct lpfc_io_buf *lpfc_ncmd_next; 21946 unsigned long iflag; 21947 21948 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool); 21949 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 21950 &pvt_pool->list, list) { 21951 if (lpfc_test_rrq_active( 21952 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag)) 21953 continue; 21954 list_del(&lpfc_ncmd->list); 21955 pvt_pool->count--; 21956 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 21957 return lpfc_ncmd; 21958 } 21959 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 21960 21961 return NULL; 21962 } 21963 21964 /** 21965 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool 21966 * @phba: pointer to lpfc hba data structure. 21967 * 21968 * This routine tries to get one free IO buf from expedite pool. 21969 * 21970 * Return: 21971 * pointer to one free IO buf - if expedite pool is not empty 21972 * NULL - if expedite pool is empty 21973 **/ 21974 static struct lpfc_io_buf * 21975 lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba) 21976 { 21977 struct lpfc_io_buf *lpfc_ncmd = NULL, *iter; 21978 struct lpfc_io_buf *lpfc_ncmd_next; 21979 unsigned long iflag; 21980 struct lpfc_epd_pool *epd_pool; 21981 21982 epd_pool = &phba->epd_pool; 21983 21984 spin_lock_irqsave(&epd_pool->lock, iflag); 21985 if (epd_pool->count > 0) { 21986 list_for_each_entry_safe(iter, lpfc_ncmd_next, 21987 &epd_pool->list, list) { 21988 list_del(&iter->list); 21989 epd_pool->count--; 21990 lpfc_ncmd = iter; 21991 break; 21992 } 21993 } 21994 spin_unlock_irqrestore(&epd_pool->lock, iflag); 21995 21996 return lpfc_ncmd; 21997 } 21998 21999 /** 22000 * lpfc_get_io_buf_from_multixri_pools - Get one free IO bufs 22001 * @phba: pointer to lpfc hba data structure. 22002 * @ndlp: pointer to lpfc nodelist data structure. 22003 * @hwqid: belong to which HWQ 22004 * @expedite: 1 means this request is urgent. 22005 * 22006 * This routine will do the following actions and then return a pointer to 22007 * one free IO buf. 22008 * 22009 * 1. If private free xri count is empty, move some XRIs from public to 22010 * private pool. 22011 * 2. Get one XRI from private free xri pool. 22012 * 3. If we fail to get one from pvt_pool and this is an expedite request, 22013 * get one free xri from expedite pool. 22014 * 22015 * Note: ndlp is only used on SCSI side for RRQ testing. 22016 * The caller should pass NULL for ndlp on NVME side. 
22017 * 22018 * Return: 22019 * pointer to one free IO buf - if private pool is not empty 22020 * NULL - if private pool is empty 22021 **/ 22022 static struct lpfc_io_buf * 22023 lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba, 22024 struct lpfc_nodelist *ndlp, 22025 int hwqid, int expedite) 22026 { 22027 struct lpfc_sli4_hdw_queue *qp; 22028 struct lpfc_multixri_pool *multixri_pool; 22029 struct lpfc_pvt_pool *pvt_pool; 22030 struct lpfc_io_buf *lpfc_ncmd; 22031 22032 qp = &phba->sli4_hba.hdwq[hwqid]; 22033 lpfc_ncmd = NULL; 22034 if (!qp) { 22035 lpfc_printf_log(phba, KERN_INFO, 22036 LOG_SLI | LOG_NVME_ABTS | LOG_FCP, 22037 "5556 NULL qp for hwqid x%x\n", hwqid); 22038 return lpfc_ncmd; 22039 } 22040 multixri_pool = qp->p_multixri_pool; 22041 if (!multixri_pool) { 22042 lpfc_printf_log(phba, KERN_INFO, 22043 LOG_SLI | LOG_NVME_ABTS | LOG_FCP, 22044 "5557 NULL multixri for hwqid x%x\n", hwqid); 22045 return lpfc_ncmd; 22046 } 22047 pvt_pool = &multixri_pool->pvt_pool; 22048 if (!pvt_pool) { 22049 lpfc_printf_log(phba, KERN_INFO, 22050 LOG_SLI | LOG_NVME_ABTS | LOG_FCP, 22051 "5558 NULL pvt_pool for hwqid x%x\n", hwqid); 22052 return lpfc_ncmd; 22053 } 22054 multixri_pool->io_req_count++; 22055 22056 /* If pvt_pool is empty, move some XRIs from public to private pool */ 22057 if (pvt_pool->count == 0) 22058 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH); 22059 22060 /* Get one XRI from private free xri pool */ 22061 lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp); 22062 22063 if (lpfc_ncmd) { 22064 lpfc_ncmd->hdwq = qp; 22065 lpfc_ncmd->hdwq_no = hwqid; 22066 } else if (expedite) { 22067 /* If we fail to get one from pvt_pool and this is an expedite 22068 * request, get one free xri from expedite pool. 22069 */ 22070 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba); 22071 } 22072 22073 return lpfc_ncmd; 22074 } 22075 22076 static inline struct lpfc_io_buf * 22077 lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx) 22078 { 22079 struct lpfc_sli4_hdw_queue *qp; 22080 struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next; 22081 22082 qp = &phba->sli4_hba.hdwq[idx]; 22083 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, 22084 &qp->lpfc_io_buf_list_get, list) { 22085 if (lpfc_test_rrq_active(phba, ndlp, 22086 lpfc_cmd->cur_iocbq.sli4_lxritag)) 22087 continue; 22088 22089 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED) 22090 continue; 22091 22092 list_del_init(&lpfc_cmd->list); 22093 qp->get_io_bufs--; 22094 lpfc_cmd->hdwq = qp; 22095 lpfc_cmd->hdwq_no = idx; 22096 return lpfc_cmd; 22097 } 22098 return NULL; 22099 } 22100 22101 /** 22102 * lpfc_get_io_buf - Get one IO buffer from free pool 22103 * @phba: The HBA for which this call is being executed. 22104 * @ndlp: pointer to lpfc nodelist data structure. 22105 * @hwqid: belong to which HWQ 22106 * @expedite: 1 means this request is urgent. 22107 * 22108 * This routine gets one IO buffer from free pool. If cfg_xri_rebalancing==1, 22109 * removes a IO buffer from multiXRI pools. If cfg_xri_rebalancing==0, removes 22110 * a IO buffer from head of @hdwq io_buf_list and returns to caller. 22111 * 22112 * Note: ndlp is only used on SCSI side for RRQ testing. 22113 * The caller should pass NULL for ndlp on NVME side. 
/**
 * lpfc_get_io_buf - Get one IO buffer from free pool
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: index of the HWQ this request belongs to
 * @expedite: 1 means this request is urgent.
 *
 * This routine gets one IO buffer from the free pool. If cfg_xri_rebalancing
 * is 1, it removes an IO buffer from the multiXRI pools. If
 * cfg_xri_rebalancing is 0, it removes an IO buffer from the head of the
 * @hwqid io_buf_list and returns it to the caller.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on NVME side.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/
struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    u32 hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag;
	struct lpfc_io_buf *lpfc_cmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_cmd = NULL;
	if (!qp) {
		lpfc_printf_log(phba, KERN_WARNING,
				LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
				"5555 NULL qp for hwqid x%x\n", hwqid);
		return lpfc_cmd;
	}

	if (phba->cfg_xri_rebalancing)
		lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
			phba, ndlp, hwqid, expedite);
	else {
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
					  qp, alloc_xri_get);
		if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
			lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		if (!lpfc_cmd) {
			lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
					  qp, alloc_xri_put);
			list_splice(&qp->lpfc_io_buf_list_put,
				    &qp->lpfc_io_buf_list_get);
			qp->get_io_bufs += qp->put_io_bufs;
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
			qp->put_io_bufs = 0;
			spin_unlock(&qp->io_buf_list_put_lock);
			if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
			    expedite)
				lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		}
		spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
	}

	return lpfc_cmd;
}
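/*
 * Editor's illustrative sketch (not part of the driver build): the
 * release-side counterpart of lpfc_get_io_buf(). Completed IOs land on
 * the put list under their own lock, so getters and putters rarely
 * contend on the same spinlock; lpfc_get_io_buf() splices put -> get
 * only when the get list runs dry. The snippet mirrors what the
 * driver's release path is assumed to do.
 */
#if 0
	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
	list_add_tail(&lpfc_cmd->list, &qp->lpfc_io_buf_list_put);
	qp->put_io_bufs++;
	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
#endif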
/**
 * lpfc_read_object - Retrieve object data from HBA
 * @phba: The HBA for which this call is being executed.
 * @rdobject: Pathname of object data we want to read.
 * @datap: Pointer to where data will be copied to.
 * @datasz: size of data area
 *
 * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less.
 * The data will be truncated if datasz is not large enough.
 * Version 1 is not supported with Embedded mbox cmd, so we must use version 0.
 * Returns the actual bytes read from the object.
 *
 * This routine is hard coded to use a poll completion. Unlike other
 * sli4_config mailboxes, it uses lpfc_mbuf memory which is not
 * cleaned up in lpfc_sli4_cmd_mbox_free. If this routine is modified
 * to use interrupt-based completions, code is needed to fully cleanup
 * the memory.
 */
int
lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
		 uint32_t datasz)
{
	struct lpfc_mbx_read_object *read_object;
	LPFC_MBOXQ_t *mbox;
	int rc, length, eof, j, byte_cnt = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_dmabuf *pcmd;
	u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_read_object) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_READ_OBJECT,
			 length, LPFC_SLI4_MBX_EMBED);
	read_object = &mbox->u.mqe.un.read_object;
	shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr;

	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0);
	bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz);
	read_object->u.request.rd_object_offset = 0;
	read_object->u.request.rd_object_cnt = 1;

	memset((void *)read_object->u.request.rd_object_name, 0,
	       LPFC_OBJ_NAME_SZ);
	/* Use an explicit "%s" so a caller-supplied pathname can never be
	 * interpreted as a format string.
	 */
	scnprintf((char *)rd_object_name, sizeof(rd_object_name),
		  "%s", rdobject);
	for (j = 0; j < strlen(rdobject); j++)
		read_object->u.request.rd_object_name[j] =
			cpu_to_le32(rd_object_name[j]);

	pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
	if (!pcmd || !pcmd->virt) {
		kfree(pcmd);
		mempool_free(mbox, phba->mbox_mem_pool);
		return -ENOMEM;
	}
	memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE);
	read_object->u.request.rd_object_hbuf[0].pa_lo =
		putPaddrLow(pcmd->phys);
	read_object->u.request.rd_object_hbuf[0].pa_hi =
		putPaddrHigh(pcmd->phys);
	read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE;

	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->ctx_ndlp = NULL;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	if (shdr_status == STATUS_FAILED &&
	    shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
				"4674 No port cfg file in FW.\n");
		byte_cnt = -ENOENT;
	} else if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
				"2625 READ_OBJECT mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		byte_cnt = -ENXIO;
	} else {
		/* Success */
		length = read_object->u.response.rd_object_actual_rlen;
		eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response);
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT,
				"2626 READ_OBJECT Success len %d:%d, EOF %d\n",
				length, datasz, eof);

		/* Detect the port config file exists but is empty */
		if (!length && eof) {
			byte_cnt = 0;
			goto exit;
		}

		byte_cnt = length;
		lpfc_sli_pcimem_bcopy(pcmd->virt, datap, byte_cnt);
	}

 exit:
	/* This is an embedded SLI4 mailbox with an external buffer allocated.
	 * Free the pcmd and then cleanup with the correct routine.
	 */
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(pcmd);
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return byte_cnt;
}
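/*
 * Editor's illustrative sketch (not part of the driver build): how a
 * caller might consume lpfc_read_object(). The object pathname below is
 * hypothetical; real callers in this driver read firmware-defined
 * object names.
 */
#if 0
	u32 data[LPFC_BPL_SIZE / sizeof(u32)];
	int len;

	len = lpfc_read_object(phba, "/driver/example.cfg",
			       data, sizeof(data));
	if (len == -ENOENT) {
		/* object does not exist in FW */
	} else if (len < 0) {
		/* mailbox or resource failure */
	} else {
		/* len bytes of object data are now in data[] */
	}
#endif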
/**
 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to append the SGL chunk
 *
 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
 * and will allocate an SGL chunk if the pool is empty.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to sli4_hybrid_sgl - Success
 **/
struct sli4_hybrid_sgl *
lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
{
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	struct sli4_hybrid_sgl *allocated_sgl = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->sgl_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(buf_list))) {
		/* break off 1 chunk from the sgl_list */
		list_for_each_entry_safe(list_entry, tmp,
					 buf_list, list_node) {
			list_move_tail(&list_entry->list_node,
				       &lpfc_buf->dma_sgl_xtra_list);
			break;
		}
	} else {
		/* allocate more */
		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
				   cpu_to_node(hdwq->io_wq->chann));
		if (!tmp) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8353 error kmalloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			return NULL;
		}

		tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
					      GFP_ATOMIC, &tmp->dma_phys_sgl);
		if (!tmp->dma_sgl) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8354 error pool_alloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			kfree(tmp);
			return NULL;
		}

		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
		list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
	}

	allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
					struct sli4_hybrid_sgl,
					list_node);

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);

	return allocated_sgl;
}
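/*
 * Editor's illustrative sketch (not part of the driver build): assumed
 * per-IO lifecycle of an extra SGL chunk - borrow one for a large
 * scatter list, chain its addresses into the SGE list, and return it
 * with lpfc_put_sgl_per_hdwq() when the IO completes.
 */
#if 0
	struct sli4_hybrid_sgl *sgl;

	sgl = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
	if (!sgl)
		return -ENOMEM;
	/* chain sgl->dma_sgl (virtual) / sgl->dma_phys_sgl (DMA) into
	 * the IO's SGE list here, then on completion:
	 */
	lpfc_put_sgl_per_hdwq(phba, lpfc_cmd);
#endif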
 *
 * Return codes:
 *   0 - Success
 *   -EINVAL - Error
 **/
int
lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
{
	int rc = 0;
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->sgl_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
		list_for_each_entry_safe(list_entry, tmp,
					 &lpfc_buf->dma_sgl_xtra_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       buf_list);
		}
	} else {
		rc = -EINVAL;
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
	return rc;
}

/**
 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to clean up SGL buffer resources on
 *
 * This routine frees all SGL chunks of hdwq's SGL chunk pool.
 *
 * Return codes:
 *   None
 **/
void
lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
		       struct lpfc_sli4_hdw_queue *hdwq)
{
	struct list_head *buf_list = &hdwq->sgl_list;
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	/* Free sgl pool */
	list_for_each_entry_safe(list_entry, tmp,
				 buf_list, list_node) {
		list_del(&list_entry->list_node);
		dma_pool_free(phba->lpfc_sg_dma_buf_pool,
			      list_entry->dma_sgl,
			      list_entry->dma_phys_sgl);
		kfree(list_entry);
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}

/**
 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
 *
 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
 * and will allocate a CMD/RSP buffer if the pool is empty.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to fcp_cmd_rsp_buf - Success
 **/
struct fcp_cmd_rsp_buf *
lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_buf)
{
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	struct fcp_cmd_rsp_buf *allocated_buf = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(buf_list))) {
		/* break off 1 chunk from the list */
		list_for_each_entry_safe(list_entry, tmp,
					 buf_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       &lpfc_buf->dma_cmd_rsp_list);
			break;
		}
	} else {
		/* allocate more */
		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
				   cpu_to_node(hdwq->io_wq->chann));
		if (!tmp) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8355 error kmalloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			return NULL;
		}

		tmp->fcp_cmnd = dma_pool_zalloc(phba->lpfc_cmd_rsp_buf_pool,
						GFP_ATOMIC,
						&tmp->fcp_cmd_rsp_dma_handle);

		if (!tmp->fcp_cmnd) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8356 error pool_alloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			kfree(tmp);
			return NULL;
		}

		tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
				sizeof(struct fcp_cmnd32));

		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
		list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
	}

	allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
					struct fcp_cmd_rsp_buf,
					list_node);

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);

	return allocated_buf;
}

/**
 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the CMD/RSP buf
 *
 * This routine puts one CMD/RSP buffer into hdwq's CMD/RSP buffer pool.
 *
 * Return codes:
 *   0 - Success
 *   -EINVAL - Error
 **/
int
lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_buf)
{
	int rc = 0;
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
		list_for_each_entry_safe(list_entry, tmp,
					 &lpfc_buf->dma_cmd_rsp_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       buf_list);
		}
	} else {
		rc = -EINVAL;
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
	return rc;
}
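/*
 * Editor's note on the pool element built by
 * lpfc_get_cmd_rsp_buf_per_hdwq(): one DMA-coherent allocation holds the
 * FCP_CMND IU immediately followed by the FCP_RSP IU, so fcp_rsp is a
 * derived pointer into the same mapping rather than a second allocation:
 *
 *   fcp_cmnd -> +-------------------+ <- fcp_cmd_rsp_dma_handle
 *               | struct fcp_cmnd32 |
 *   fcp_rsp --> +-------------------+ <- handle + sizeof(struct fcp_cmnd32)
 *               | struct fcp_rsp    |
 *               +-------------------+
 */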
/**
 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to clean up CMD/RSP buffer resources on
 *
 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buffer pool.
 *
 * Return codes:
 *   None
 **/
void
lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			       struct lpfc_sli4_hdw_queue *hdwq)
{
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	/* Free cmd_rsp buf pool */
	list_for_each_entry_safe(list_entry, tmp,
				 buf_list,
				 list_node) {
		list_del(&list_entry->list_node);
		dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
			      list_entry->fcp_cmnd,
			      list_entry->fcp_cmd_rsp_dma_handle);
		kfree(list_entry);
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}
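/*
 * Editor's illustrative sketch (not part of the driver build): assumed
 * ordering for these pools - per-IO chunks are first returned (put) to
 * the hdwq pools as IOs complete, and only then are the pools themselves
 * drained (free) at teardown; freeing while IOs still hold chunks would
 * leak those entries.
 */
#if 0
	/* per IO, on completion */
	lpfc_put_sgl_per_hdwq(phba, lpfc_buf);
	lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_buf);

	/* per hdwq, at teardown */
	lpfc_free_sgl_per_hdwq(phba, hdwq);
	lpfc_free_cmd_rsp_buf_per_hdwq(phba, hdwq);
#endif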
/**
 * lpfc_sli_prep_wqe - Prepare WQE for the command to be posted
 * @phba: phba object
 * @job: job entry of the command to be posted.
 *
 * Fill in the common fields of the WQE for the command to be posted.
 *
 * Return codes:
 *   None
 **/
void
lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
{
	u8 cmnd;
	u32 *pcmd;
	u32 if_type = 0;
	u32 abort_tag;
	bool fip;
	struct lpfc_nodelist *ndlp = NULL;
	union lpfc_wqe128 *wqe = &job->wqe;
	u8 command_type = ELS_COMMAND_NON_FIP;

	fip = test_bit(HBA_FIP_SUPPORT, &phba->hba_flag);
	/* The fcp commands will set command type */
	if (job->cmd_flag & LPFC_IO_FCP)
		command_type = FCP_COMMAND;
	else if (fip && (job->cmd_flag & LPFC_FIP_ELS_ID_MASK))
		command_type = ELS_COMMAND_FIP;
	else
		command_type = ELS_COMMAND_NON_FIP;

	abort_tag = job->iotag;
	cmnd = bf_get(wqe_cmnd, &wqe->els_req.wqe_com);

	switch (cmnd) {
	case CMD_ELS_REQUEST64_WQE:
		ndlp = job->ndlp;

		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
			pcmd = (u32 *)job->cmd_dmabuf->virt;
			if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
				     *pcmd == ELS_CMD_SCR ||
				     *pcmd == ELS_CMD_RDF ||
				     *pcmd == ELS_CMD_EDC ||
				     *pcmd == ELS_CMD_RSCN_XMT ||
				     *pcmd == ELS_CMD_FDISC ||
				     *pcmd == ELS_CMD_LOGO ||
				     *pcmd == ELS_CMD_QFPA ||
				     *pcmd == ELS_CMD_UVEM ||
				     *pcmd == ELS_CMD_PLOGI)) {
				bf_set(els_req64_sp, &wqe->els_req, 1);
				bf_set(els_req64_sid, &wqe->els_req,
				       job->vport->fc_myDID);

				if ((*pcmd == ELS_CMD_FLOGI) &&
				    !(phba->fc_topology ==
				      LPFC_TOPOLOGY_LOOP))
					bf_set(els_req64_sid, &wqe->els_req, 0);

				bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
				       phba->vpi_ids[job->vport->vpi]);
			} else if (pcmd) {
				bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
				       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
			}
		}

		bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);

		bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
		break;
	case CMD_XMIT_ELS_RSP64_WQE:
		ndlp = job->ndlp;

		/* word4 */
		wqe->xmit_els_rsp.word4 = 0;

		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
			if (test_bit(FC_PT2PT, &job->vport->fc_flag)) {
				bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
				bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
				       job->vport->fc_myDID);
				if (job->vport->fc_myDID == Fabric_DID) {
					bf_set(wqe_els_did,
					       &wqe->xmit_els_rsp.wqe_dest, 0);
				}
			}
		}

		bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);

		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
			bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
			       job->vport->fc_myDID);
			bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
		}

		if (phba->sli_rev == LPFC_SLI_REV4) {
			bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
			       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);

			if (bf_get(wqe_ct, &wqe->xmit_els_rsp.wqe_com))
				bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
				       phba->vpi_ids[job->vport->vpi]);
		}
		command_type = OTHER_COMMAND;
		break;
	case CMD_GEN_REQUEST64_WQE:
		/* Word 10 */
		bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_SEQUENCE64_WQE:
		if (phba->link_flag & LS_LOOPBACK_MODE)
			bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);

		wqe->xmit_sequence.rsvd3 = 0;
		bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
		bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_LENLOC_WORD12);
		bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_BLS_RSP64_WQE:
		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
		bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
		       phba->vpi_ids[phba->pport->vpi]);
		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		/* Overwrite the pre-set command type with OTHER_COMMAND */
		command_type = OTHER_COMMAND;
		break;
	case CMD_FCP_ICMND64_WQE:	/* task mgmt commands */
	case CMD_ABORT_XRI_WQE:		/* abort iotag */
	case CMD_SEND_FRAME:		/* mds loopback */
		/* cases already formatted for sli4 wqe - no changes needed */
		return;
	default:
		dump_stack();
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6207 Invalid command 0x%x\n",
				cmnd);
		break;
	}

	wqe->generic.wqe_com.abort_tag = abort_tag;
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, job->iotag);
	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
}
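/*
 * Editor's illustrative sketch (not part of the driver build): assumed
 * submission flow around lpfc_sli_prep_wqe(). The caller formats the
 * command-specific words of job->wqe first; this routine then stamps the
 * common fields (request tag, abort tag, command type, CQ id) before the
 * WQE is posted to the work queue, e.g. via lpfc_sli4_issue_wqe().
 */
#if 0
	/* command-specific words of job->wqe already filled in */
	lpfc_sli_prep_wqe(phba, job);
	rc = lpfc_sli4_issue_wqe(phba, qp, job);
#endif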