/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>
#include <linux/dmi.h>
#include <linux/of.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/crash_dump.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
        LPFC_UNKNOWN_IOCB,
        LPFC_UNSOL_IOCB,
        LPFC_SOL_IOCB,
        LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
                                  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
                              uint8_t *, uint32_t *);
static struct lpfc_iocbq *
lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
                                  struct lpfc_iocbq *rspiocbq);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
                                      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
                                          struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
                                    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
                                   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
                                     struct lpfc_queue *eq,
                                     struct lpfc_eqe *eqe,
                                     enum lpfc_poll_mode poll_mode);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
                                    struct lpfc_queue *cq,
                                    struct lpfc_cqe *cqe);
static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba,
                                 struct lpfc_iocbq *pwqeq,
                                 struct lpfc_sglq *sglq);

union lpfc_wqe128 lpfc_iread_cmd_template;
union lpfc_wqe128 lpfc_iwrite_cmd_template;
union lpfc_wqe128 lpfc_icmnd_cmd_template;

/* Setup WQE templates for IOs */
void lpfc_wqe_cmd_template(void)
{
        union lpfc_wqe128 *wqe;

        /* IREAD template */
        wqe = &lpfc_iread_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - cmd_buff_len, payload_offset_len is zero */

        /* Word 4 - total_xfer_len is variable */

        /* Word 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
        bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
        bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */

        /* Word 9 - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
        bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
        bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
        bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

        /* Word 11 - pbde is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
        bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);

        /* Word 12 - is zero */

        /* Word 13, 14, 15 - PBDE is variable */

        /* IWRITE template */
        wqe = &lpfc_iwrite_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - cmd_buff_len, payload_offset_len is zero */

        /* Word 4 - total_xfer_len is variable */

        /* Word 5 - initial_xfer_len is variable */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
        bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
        bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);
        /* Word 8 - abort_tag is variable */

        /* Word 9 - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
        bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
        bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
        bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

        /* Word 11 - pbde is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
        bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);

        /* Word 12 - is zero */

        /* Word 13, 14, 15 - PBDE is variable */

        /* ICMND template */
        wqe = &lpfc_icmnd_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - payload_offset_len is variable */

        /* Word 4, 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
        bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
        bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */

        /* Word 9 - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
        bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
        bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
        bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

        /* Word 11 */
        bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
        bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

        /* Word 12, 13, 14, 15 - is zero */
}

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy.
 *       Must be a multiple of sizeof(uint64_t).
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
        uint64_t *src = srcp;
        uint64_t *dest = destp;
        int i;

        for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
                *dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will
 * return -EBUSY; if the queue memory is invalid it returns -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
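 *
 * A minimal calling sketch (illustrative only; the local names wq, wqe,
 * iflags and rc are assumptions, and the real callers in this file build
 * the WQE and select the queue first):
 *
 *      spin_lock_irqsave(&phba->hbalock, iflags);
 *      rc = lpfc_sli4_wq_put(wq, &wqe);
 *      spin_unlock_irqrestore(&phba->hbalock, iflags);
 *
 * A return of -EBUSY here means the queue was full and the post should be
 * retried later.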
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
        union lpfc_wqe *temp_wqe;
        struct lpfc_register doorbell;
        uint32_t host_index;
        uint32_t idx;
        uint32_t i = 0;
        uint8_t *tmp;
        u32 if_type;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;

        temp_wqe = lpfc_sli4_qe(q, q->host_index);

        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->host_index + 1) % q->entry_count);
        if (idx == q->hba_index) {
                q->WQ_overflow++;
                return -EBUSY;
        }
        q->WQ_posted++;
        /* set consumption flag every once in a while */
        if (!((q->host_index + 1) % q->notify_interval))
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
        else
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
        if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
                bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
        lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
        if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                /* write to DPP aperture taking advantage of Combined Writes */
                tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
                for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
                        __raw_writeq(*((uint64_t *)(tmp + i)),
                                     q->dpp_regaddr + i);
#else
                for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
                        __raw_writel(*((uint32_t *)(tmp + i)),
                                     q->dpp_regaddr + i);
#endif
        }
        /* ensure WQE bcopy and DPP flushed before doorbell write */
        wmb();

        /* Update the host index before invoking device */
        host_index = q->host_index;

        q->host_index = idx;

        /* Ring Doorbell */
        doorbell.word0 = 0;
        if (q->db_format == LPFC_DB_LIST_FORMAT) {
                if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                        bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
                               q->dpp_id);
                        bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
                               q->queue_id);
                } else {
                        bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

                        /* Leave bits <23:16> clear for if_type 6 dpp */
                        if_type = bf_get(lpfc_sli_intf_if_type,
                                         &q->phba->sli4_hba.sli_intf);
                        if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
                                bf_set(lpfc_wq_db_list_fm_index, &doorbell,
                                       host_index);
                }
        } else if (q->db_format == LPFC_DB_RING_FORMAT) {
                bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
                bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
        } else {
                return -EINVAL;
        }
        writel(doorbell.word0, q->db_regaddr);

        return 0;
}

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers.
 **/
static void
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
        /* sanity check on queue memory */
        if (unlikely(!q))
                return;

        q->hba_index = index;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
        struct lpfc_mqe *temp_mqe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;
        temp_mqe = lpfc_sli4_qe(q, q->host_index);

        /* If the host has not yet processed the next entry then we are done */
        if (((q->host_index + 1) % q->entry_count) == q->hba_index)
                return -ENOMEM;
        lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
        /* Save off the mailbox pointer for completion */
        q->phba->mbox = (MAILBOX_t *)temp_mqe;

        /* Update the host index before invoking device */
        q->host_index = ((q->host_index + 1) % q->entry_count);

        /* Ring Doorbell */
        doorbell.word0 = 0;
        bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
        bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
        return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        /* Clear the mailbox pointer for completion */
        q->phba->mbox = NULL;
        q->hba_index = ((q->hba_index + 1) % q->entry_count);
        return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
        struct lpfc_eqe *eqe;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        eqe = lpfc_sli4_qe(q, q->host_index);

        /* If the next EQE is not valid then we are done */
        if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
                return NULL;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Speculative instructions were allowing a bcopy at the start
         * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
         * after our return, to copy data before the valid bit check above
         * was done. As such, some of the copied data was stale. The barrier
         * ensures the check is before any data is copied.
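         *
         * Note: the strict requirement here is a read barrier ordering the
         * valid-bit load ahead of the subsequent copy; the full mb() is
         * the conservative choice.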
         */
        mb();
        return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
               (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                      uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm) {
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
                bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        }
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
               (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
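 *
 * Note: unlike the doorbell layout used by lpfc_sli4_write_eq_db(), the
 * if_type 6 doorbell carries the full EQ ID in a single field, so no
 * eqid_hi/eqid_lo split is needed.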
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                          uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
}

static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
                        struct lpfc_eqe *eqe)
{
        if (!phba->sli4_hba.pc_sli4_params.eqav)
                bf_set_le32(lpfc_eqe_valid, eqe, 0);

        eq->host_index = ((eq->host_index + 1) % eq->entry_count);

        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
                eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}

static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
        struct lpfc_eqe *eqe = NULL;
        u32 eq_count = 0, cq_count = 0;
        struct lpfc_cqe *cqe = NULL;
        struct lpfc_queue *cq = NULL, *childq = NULL;
        int cqid = 0;

        /* walk all the EQ entries and drop on the floor */
        eqe = lpfc_sli4_eq_get(eq);
        while (eqe) {
                /* Get the reference to the corresponding CQ */
                cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
                cq = NULL;

                list_for_each_entry(childq, &eq->child_list, list) {
                        if (childq->queue_id == cqid) {
                                cq = childq;
                                break;
                        }
                }
                /* If CQ is valid, iterate through it and drop all the CQEs */
                if (cq) {
                        cqe = lpfc_sli4_cq_get(cq);
                        while (cqe) {
                                __lpfc_sli4_consume_cqe(phba, cq, cqe);
                                cq_count++;
                                cqe = lpfc_sli4_cq_get(cq);
                        }
                        /* Clear and re-arm the CQ */
                        phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
                                                        LPFC_QUEUE_REARM);
                        cq_count = 0;
                }
                __lpfc_sli4_consume_eqe(phba, eq, eqe);
                eq_count++;
                eqe = lpfc_sli4_eq_get(eq);
        }

        /* Clear and re-arm the EQ */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}

static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
                     u8 rearm, enum lpfc_poll_mode poll_mode)
{
        struct lpfc_eqe *eqe;
        int count = 0, consumed = 0;

        if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
                goto rearm_and_exit;

        eqe = lpfc_sli4_eq_get(eq);
        while (eqe) {
                lpfc_sli4_hba_handle_eqe(phba, eq, eqe, poll_mode);
                __lpfc_sli4_consume_eqe(phba, eq, eqe);

                consumed++;
                if (!(++count % eq->max_proc_limit))
                        break;

                if (!(count % eq->notify_interval)) {
                        phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
                                                        LPFC_QUEUE_NOARM);
                        consumed = 0;
                }

                eqe = lpfc_sli4_eq_get(eq);
        }
        eq->EQ_processed += count;

        /* Track the max number of EQEs processed in 1 intr */
        if (count > eq->EQ_max_eqe)
                eq->EQ_max_eqe = count;

        xchg(&eq->queue_claimed, 0);

rearm_and_exit:
        /* Always clear the EQ.
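         * On the early-exit path (queue already claimed by another
         * context) @consumed is still 0, so this write only applies the
         * @rearm policy; on the normal path it acknowledges any EQEs not
         * already reported at a notify_interval boundary above.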
         */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

        return count;
}

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
        struct lpfc_cqe *cqe;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        cqe = lpfc_sli4_qe(q, q->host_index);

        /* If the next CQE is not valid then we are done */
        if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
                return NULL;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Given what was seen in lpfc_sli4_eq_get() of speculative
         * instructions allowing action on content before valid bit checked,
         * add barrier here as well. May not be needed as "content" is a
         * single 32-bit entity here (vs multi word structure for cq's).
         */
        mb();
        return cqe;
}

static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                        struct lpfc_cqe *cqe)
{
        if (!phba->sli4_hba.pc_sli4_params.cqav)
                bf_set_le32(lpfc_cqe_valid, cqe, 0);

        cq->host_index = ((cq->host_index + 1) % cq->entry_count);

        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
                cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}

/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                      uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
        bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
               (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                          uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/*
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on the header queue @hq and the data queue @dq. It will
 * then ring the Receive Queue Doorbell to signal the HBA to start processing
 * the Receive Queue Entries. This function returns the index that the rqe
 * was copied to if successful. If no entries are available on @hq then this
 * function will return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
        struct lpfc_rqe *temp_hrqe;
        struct lpfc_rqe *temp_drqe;
        struct lpfc_register doorbell;
        int hq_put_index;
        int dq_put_index;

        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return -ENOMEM;
        hq_put_index = hq->host_index;
        dq_put_index = dq->host_index;
        temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
        temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

        if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
                return -EINVAL;
        if (hq_put_index != dq_put_index)
                return -EINVAL;
        /* If the host has not yet processed the next entry then we are done */
        if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
                return -EBUSY;
        lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
        lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

        /* Update the host index to point to the next slot */
        hq->host_index = ((hq_put_index + 1) % hq->entry_count);
        dq->host_index = ((dq_put_index + 1) % dq->entry_count);
        hq->RQ_buf_posted++;

        /* Ring The Header Receive Queue Doorbell */
        if (!(hq->host_index % hq->notify_interval)) {
                doorbell.word0 = 0;
                if (hq->db_format == LPFC_DB_RING_FORMAT) {
                        bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
                               hq->notify_interval);
                        bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
                } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
                        bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
                               hq->notify_interval);
                        bf_set(lpfc_rq_db_list_fm_index, &doorbell,
                               hq->host_index);
                        bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
                } else {
                        return -EINVAL;
                }
                writel(doorbell.word0, hq->db_regaddr);
        }
        return hq_put_index;
}

/*
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA.
 * When the HBA indicates that it has consumed an entry the host calls this
 * function to update the queue's internal pointers. This routine returns the
 * number of entries that were consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return 0;

        if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
                return 0;
        hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
        dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
        return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
                           pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
                           pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        struct lpfc_iocbq *iocbq = NULL;

        lockdep_assert_held(&phba->hbalock);

        list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
        if (iocbq)
                phba->iocb_cnt++;
        if (phba->iocb_cnt > phba->iocb_max)
                phba->iocb_max = phba->iocb_cnt;
        return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
        return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
                    uint16_t xritag,
                    struct lpfc_node_rrq *rrq)
{
        struct lpfc_nodelist *ndlp = NULL;

        /* Lookup did to verify if did is still active on this vport */
        if (rrq->vport)
                ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

        if (!ndlp)
                goto out;

        if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
                rrq->send_rrq = 0;
                rrq->xritag = 0;
                rrq->rrq_stop_time = 0;
        }
out:
        mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if an RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. It checks if
 * stop_time (ratov from setting rrq active) has been reached; if it
 * has and the send_rrq flag is set then it will call lpfc_send_rrq.
 * If the send_rrq flag is not set then it will just call the routine
 * to clear the rrq and free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long next_time;
        unsigned long iflags;
        LIST_HEAD(send_rrq);

        clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
        next_time = jiffies + secs_to_jiffies(phba->fc_ratov + 1);
        spin_lock_irqsave(&phba->rrq_list_lock, iflags);
        list_for_each_entry_safe(rrq, nextrrq,
                                 &phba->active_rrq_list, list) {
                if (time_after(jiffies, rrq->rrq_stop_time))
                        list_move(&rrq->list, &send_rrq);
                else if (time_before(rrq->rrq_stop_time, next_time))
                        next_time = rrq->rrq_stop_time;
        }
        spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
        if ((!list_empty(&phba->active_rrq_list)) &&
            (!test_bit(FC_UNLOADING, &phba->pport->load_flag)))
                mod_timer(&phba->rrq_tmr, next_time);
        list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
                list_del(&rrq->list);
                if (!rrq->send_rrq) {
                        /* this call will free the rrq */
                        lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
                } else if (lpfc_send_rrq(phba, rrq)) {
                        /* if we send the rrq then the completion handler
                         * will clear the bit in the xribitmap.
                         */
                        lpfc_clr_rrq_active(phba, rrq->xritag,
                                            rrq);
                }
        }
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;

        if (phba->sli_rev != LPFC_SLI_REV4)
                return NULL;
        spin_lock_irqsave(&phba->rrq_list_lock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
                if (rrq->vport == vport && rrq->xritag == xri &&
                    rrq->nlp_DID == did) {
                        list_del(&rrq->list);
                        spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
                        return rrq;
                }
        }
        spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
        return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If @ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If @ndlp is not NULL, then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;
        LIST_HEAD(rrq_list);

        if (phba->sli_rev != LPFC_SLI_REV4)
                return;
        if (!ndlp) {
                lpfc_sli4_vport_delete_els_xri_aborted(vport);
                lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
        }
        spin_lock_irqsave(&phba->rrq_list_lock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
                if (rrq->vport != vport)
                        continue;

                if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
                        list_move(&rrq->list, &rrq_list);
        }
        spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);

        list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
                list_del(&rrq->list);
                lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
        }
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                     uint16_t xritag)
{
        if (!ndlp)
                return 0;
        if (!ndlp->active_rrqs_xri_bitmap)
                return 0;
        if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                return 1;
        else
                return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * Returns 0 if the rrq was activated for this xri, or
 * < 0 on no memory or invalid ndlp.
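 *
 * Typical use (an assumption based on other completion paths in this
 * driver, not a requirement of this routine): when an exchange completes
 * with the exchange-busy (XB) indication set, the completion path calls
 * this to quarantine the XRI for RATOV before the tag is reused.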
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
        unsigned long iflags;
        struct lpfc_node_rrq *rrq;
        int empty;

        if (!ndlp)
                return -EINVAL;

        if (!phba->cfg_enable_rrq)
                return -EINVAL;

        if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
                clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
                goto outnl;
        }

        spin_lock_irqsave(&phba->hbalock, iflags);
        if (ndlp->vport && test_bit(FC_UNLOADING, &ndlp->vport->load_flag))
                goto out;

        if (!ndlp->active_rrqs_xri_bitmap)
                goto out;

        if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                goto out;

        spin_unlock_irqrestore(&phba->hbalock, iflags);
        rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
        if (!rrq) {
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
                                " DID:0x%x Send:%d\n",
                                xritag, rxid, ndlp->nlp_DID, send_rrq);
                return -EINVAL;
        }
        if (phba->cfg_enable_rrq == 1)
                rrq->send_rrq = send_rrq;
        else
                rrq->send_rrq = 0;
        rrq->xritag = xritag;
        rrq->rrq_stop_time = jiffies + secs_to_jiffies(phba->fc_ratov + 1);
        rrq->nlp_DID = ndlp->nlp_DID;
        rrq->vport = ndlp->vport;
        rrq->rxid = rxid;

        spin_lock_irqsave(&phba->rrq_list_lock, iflags);
        empty = list_empty(&phba->active_rrq_list);
        list_add_tail(&rrq->list, &phba->active_rrq_list);
        spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
        set_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
        if (empty)
                lpfc_worker_wake_up(phba);
        return 0;
out:
        spin_unlock_irqrestore(&phba->hbalock, iflags);
outnl:
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "2921 Can't set rrq active xri:0x%x rxid:0x%x"
                        " DID:0x%x Send:%d\n",
                        xritag, rxid, ndlp->nlp_DID, send_rrq);
        return -EINVAL;
}

/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty, the allocation is successful and a pointer to the newly allocated
 * sglq object is returned; otherwise it returns NULL.
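 *
 * Note that an sglq whose XRI still has an RRQ outstanding for the target
 * DID is put back on the tail of the list and another candidate is tried;
 * if the search wraps around to the first sglq removed, NULL is returned.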
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
        struct lpfc_sglq *sglq = NULL;
        struct lpfc_sglq *start_sglq = NULL;
        struct lpfc_io_buf *lpfc_cmd;
        struct lpfc_nodelist *ndlp;
        int found = 0;
        u8 cmnd;

        cmnd = get_job_cmnd(phba, piocbq);

        if (piocbq->cmd_flag & LPFC_IO_FCP) {
                lpfc_cmd = piocbq->io_buf;
                ndlp = lpfc_cmd->rdata->pnode;
        } else if ((cmnd == CMD_GEN_REQUEST64_CR) &&
                   !(piocbq->cmd_flag & LPFC_IO_LIBDFC)) {
                ndlp = piocbq->ndlp;
        } else if (piocbq->cmd_flag & LPFC_IO_LIBDFC) {
                if (piocbq->cmd_flag & LPFC_IO_LOOPBACK)
                        ndlp = NULL;
                else
                        ndlp = piocbq->ndlp;
        } else {
                ndlp = piocbq->ndlp;
        }

        spin_lock(&phba->sli4_hba.sgl_list_lock);
        list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
        start_sglq = sglq;
        while (!found) {
                if (!sglq)
                        break;
                if (ndlp && ndlp->active_rrqs_xri_bitmap &&
                    test_bit(sglq->sli4_lxritag,
                             ndlp->active_rrqs_xri_bitmap)) {
                        /* This xri has an rrq outstanding for this DID.
                         * put it back in the list and get another xri.
                         */
                        list_add_tail(&sglq->list, lpfc_els_sgl_list);
                        sglq = NULL;
                        list_remove_head(lpfc_els_sgl_list, sglq,
                                         struct lpfc_sglq, list);
                        if (sglq == start_sglq) {
                                list_add_tail(&sglq->list, lpfc_els_sgl_list);
                                sglq = NULL;
                                break;
                        } else
                                continue;
                }
                sglq->ndlp = ndlp;
                found = 1;
                phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
                sglq->state = SGL_ALLOCATED;
        }
        spin_unlock(&phba->sli4_hba.sgl_list_lock);
        return sglq;
}

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty, the allocation is successful and a pointer to the newly allocated
 * sglq object is returned; otherwise it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_nvmet_sgl_list;
        struct lpfc_sglq *sglq = NULL;

        lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

        lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

        list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
        if (!sglq)
                return NULL;
        phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
        sglq->state = SGL_ALLOCATED;
        return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
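 *
 * A minimal usage sketch (illustrative only):
 *
 *      iocbq = lpfc_sli_get_iocbq(phba);
 *      if (!iocbq)
 *              return NULL;
 *      (build and issue the command; on completion or error:)
 *      lpfc_sli_release_iocbq(phba, iocbq);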
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *iocbq = NULL;
        unsigned long iflags;

        spin_lock_irqsave(&phba->hbalock, iflags);
        iocbq = __lpfc_sli_get_iocbq(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object
 * to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
 * asserted held in the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        struct lpfc_sglq *sglq;
        unsigned long iflag = 0;
        struct lpfc_sli_ring *pring;

        if (iocbq->sli4_xritag == NO_XRI)
                sglq = NULL;
        else
                sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);


        if (sglq) {
                if (iocbq->cmd_flag & LPFC_IO_NVMET) {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                      &phba->sli4_hba.lpfc_nvmet_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                        goto out;
                }

                if ((iocbq->cmd_flag & LPFC_EXCHANGE_BUSY) &&
                    (!(unlikely(pci_channel_offline(phba->pcidev)))) &&
                    sglq->state != SGL_XRI_ABORTED) {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);

                        /* Check if we can get a reference on ndlp */
                        if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
                                sglq->ndlp = NULL;

                        list_add(&sglq->list,
                                 &phba->sli4_hba.lpfc_abts_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                } else {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                      &phba->sli4_hba.lpfc_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                        pring = lpfc_phba_elsring(phba);
                        /* Check if TXQ queue needs to be serviced */
                        if (pring && (!list_empty(&pring->txq)))
                                lpfc_worker_wake_up(phba);
                }
        }

out:
        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset_startat(iocbq, 0, wqe);
        iocbq->sli4_lxritag = NO_XRI;
        iocbq->sli4_xritag = NO_XRI;
        iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
                             LPFC_IO_NVME_LS);
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}


/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object to the
 * iocb pool. The iotag in the iocb object does not change for each
 * use of the iocb object. This function clears all other fields of
 * the iocb object when it is freed. The hbalock is asserted held in
 * the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset_startat(iocbq, 0, iocb);
        iocbq->sli4_xritag = NO_XRI;
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        lockdep_assert_held(&phba->hbalock);

        phba->__lpfc_sli_release_iocbq(phba, iocbq);
        phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        unsigned long iflags;

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        spin_lock_irqsave(&phba->hbalock, iflags);
        __lpfc_sli_release_iocbq(phba, iocbq);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
                      uint32_t ulpstatus, uint32_t ulpWord4)
{
        struct lpfc_iocbq *piocb;

        while (!list_empty(iocblist)) {
                list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
                if (piocb->cmd_cmpl) {
                        if (piocb->cmd_flag & LPFC_IO_NVME) {
                                lpfc_nvme_cancel_iocb(phba, piocb,
                                                      ulpstatus, ulpWord4);
                        } else {
                                if (phba->sli_rev == LPFC_SLI_REV4) {
                                        bf_set(lpfc_wcqe_c_status,
                                               &piocb->wcqe_cmpl, ulpstatus);
                                        piocb->wcqe_cmpl.parameter = ulpWord4;
                                } else {
                                        piocb->iocb.ulpStatus = ulpstatus;
                                        piocb->iocb.un.ulpWord[4] = ulpWord4;
                                }
                                (piocb->cmd_cmpl)(phba, piocb, piocb);
                        }
                } else {
                        lpfc_sli_release_iocbq(phba, piocb);
                }
        }
        return;
}

/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by the ring event handler function to get the
 * iocb type. It translates the iocb command to an iocb command type used
 * to decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB if it is a solicited iocb completion
 * LPFC_ABORT_IOCB if it is an abort iocb
 * LPFC_UNSOL_IOCB if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
        lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

        if (iocb_cmnd > CMD_MAX_IOCB_CMD)
                return LPFC_UNKNOWN_IOCB;

        switch (iocb_cmnd) {
        case CMD_XMIT_SEQUENCE_CR:
        case CMD_XMIT_SEQUENCE_CX:
        case CMD_XMIT_BCAST_CN:
        case CMD_XMIT_BCAST_CX:
        case CMD_ELS_REQUEST_CR:
        case CMD_ELS_REQUEST_CX:
        case CMD_CREATE_XRI_CR:
        case CMD_CREATE_XRI_CX:
        case CMD_GET_RPI_CN:
        case CMD_XMIT_ELS_RSP_CX:
        case CMD_GET_RPI_CR:
        case CMD_FCP_IWRITE_CR:
        case CMD_FCP_IWRITE_CX:
        case CMD_FCP_IREAD_CR:
        case CMD_FCP_IREAD_CX:
        case CMD_FCP_ICMND_CR:
        case CMD_FCP_ICMND_CX:
        case CMD_FCP_TSEND_CX:
        case CMD_FCP_TRSP_CX:
        case CMD_FCP_TRECEIVE_CX:
        case CMD_FCP_AUTO_TRSP_CX:
        case CMD_ADAPTER_MSG:
        case CMD_ADAPTER_DUMP:
        case CMD_XMIT_SEQUENCE64_CR:
        case CMD_XMIT_SEQUENCE64_CX:
        case CMD_XMIT_BCAST64_CN:
        case CMD_XMIT_BCAST64_CX:
        case CMD_ELS_REQUEST64_CR:
        case CMD_ELS_REQUEST64_CX:
        case CMD_FCP_IWRITE64_CR:
        case CMD_FCP_IWRITE64_CX:
        case CMD_FCP_IREAD64_CR:
        case CMD_FCP_IREAD64_CX:
        case CMD_FCP_ICMND64_CR:
        case CMD_FCP_ICMND64_CX:
        case CMD_FCP_TSEND64_CX:
        case CMD_FCP_TRSP64_CX:
        case CMD_FCP_TRECEIVE64_CX:
        case CMD_GEN_REQUEST64_CR:
        case CMD_GEN_REQUEST64_CX:
        case CMD_XMIT_ELS_RSP64_CX:
        case DSSCMD_IWRITE64_CR:
        case DSSCMD_IWRITE64_CX:
        case DSSCMD_IREAD64_CR:
        case DSSCMD_IREAD64_CX:
        case CMD_SEND_FRAME:
                type = LPFC_SOL_IOCB;
                break;
        case CMD_ABORT_XRI_CN:
        case CMD_ABORT_XRI_CX:
        case CMD_CLOSE_XRI_CN:
        case CMD_CLOSE_XRI_CX:
        case CMD_XRI_ABORTED_CX:
        case CMD_ABORT_MXRI64_CN:
        case CMD_XMIT_BLS_RSP64_CX:
                type = LPFC_ABORT_IOCB;
                break;
        case CMD_RCV_SEQUENCE_CX:
        case CMD_RCV_ELS_REQ_CX:
        case CMD_RCV_SEQUENCE64_CX:
        case CMD_RCV_ELS_REQ64_CX:
        case CMD_ASYNC_STATUS:
        case CMD_IOCB_RCV_SEQ64_CX:
        case CMD_IOCB_RCV_ELS64_CX:
        case CMD_IOCB_RCV_CONT64_CX:
        case CMD_IOCB_RET_XRI64_CX:
                type = LPFC_UNSOL_IOCB;
                break;
        case CMD_IOCB_XMIT_MSEQ64_CR:
        case CMD_IOCB_XMIT_MSEQ64_CX:
        case CMD_IOCB_RCV_SEQ_LIST64_CX:
        case CMD_IOCB_RCV_ELS_LIST64_CX:
        case CMD_IOCB_CLOSE_EXTENDED_CN:
        case CMD_IOCB_ABORT_EXTENDED_CN:
        case CMD_IOCB_RET_HBQE64_CN:
        case CMD_IOCB_FCP_IBIDIR64_CR:
        case CMD_IOCB_FCP_IBIDIR64_CX:
        case CMD_IOCB_FCP_ITASKMGT64_CX:
        case CMD_IOCB_LOGENTRY_CN:
        case CMD_IOCB_LOGENTRY_ASYNC_CN:
                printk("%s - Unhandled SLI-3 Command x%x\n",
                       __func__, iocb_cmnd);
                type = LPFC_UNKNOWN_IOCB;
                break;
        default:
                type = LPFC_UNKNOWN_IOCB;
                break;
        }

        return type;
}

/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *pmbox;
        int i, rc, ret = 0;

        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb)
                return -ENOMEM;
        pmbox = &pmb->u.mb;
        phba->link_state = LPFC_INIT_MBX_CMDS;
        for (i = 0; i < psli->num_rings; i++) {
                lpfc_config_ring(phba, i, pmb);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "0446 Adapter failed to init (%d), "
                                        "mbxCmd x%x CFG_RING, mbxStatus x%x, "
                                        "ring %d\n",
                                        rc, pmbox->mbxCommand,
                                        pmbox->mbxStatus, i);
                        phba->link_state = LPFC_HBA_ERROR;
                        ret = -ENXIO;
                        break;
                }
        }
        mempool_free(pmb, phba->mbox_mem_pool);
        return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * The driver calls this function with the hbalock held for SLI3 ports or
 * the ring lock held for SLI4 ports. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        struct lpfc_iocbq *piocb)
{
        u32 ulp_command = 0;

        BUG_ON(!piocb);
        ulp_command = get_job_cmnd(phba, piocb);

        list_add_tail(&piocb->list, &pring->txcmplq);
        piocb->cmd_flag |= LPFC_IO_ON_TXCMPLQ;
        pring->txcmplq_cnt++;
        if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
            (ulp_command != CMD_ABORT_XRI_WQE) &&
            (ulp_command != CMD_ABORT_XRI_CN) &&
            (ulp_command != CMD_CLOSE_XRI_CN)) {
                BUG_ON(!piocb->vport);
                if (!test_bit(FC_UNLOADING, &piocb->vport->load_flag))
                        mod_timer(&piocb->vport->els_tmofunc,
                                  jiffies + secs_to_jiffies(phba->fc_ratov << 1));
        }

        return 0;
}

/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        struct lpfc_iocbq *cmd_iocb;

        lockdep_assert_held(&phba->hbalock);

        list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
        return cmd_iocb;
}

/**
 * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * This routine will inform the driver of any BW adjustments we need
 * to make. These changes will be picked up during the next CMF
 * timer interrupt. In addition, any BW changes will be logged
 * with LOG_CGN_MGMT.
 **/
static void
lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		   struct lpfc_iocbq *rspiocb)
{
	union lpfc_wqe128 *wqe;
	uint32_t status, info;
	struct lpfc_wcqe_complete *wcqe = &rspiocb->wcqe_cmpl;
	uint64_t bw, bwdif, slop;
	uint64_t pcent, bwpcent;
	int asig, afpin, sigcnt, fpincnt;
	int wsigmax, wfpinmax, cg, tdp;
	char *s;

	/* First check for error */
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	if (status) {
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"6211 CMF_SYNC_WQE Error "
				"req_tag x%x status x%x hwstatus x%x "
				"tdatap x%x parm x%x\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe),
				bf_get(lpfc_wcqe_c_status, wcqe),
				bf_get(lpfc_wcqe_c_hw_status, wcqe),
				wcqe->total_data_placed,
				wcqe->parameter);
		goto out;
	}

	/* Gather congestion information on a successful cmpl */
	info = wcqe->parameter;
	phba->cmf_active_info = info;

	/* See if firmware info count is valid or has changed */
	if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info)
		info = 0;
	else
		phba->cmf_info_per_interval = info;

	tdp = bf_get(lpfc_wcqe_c_cmf_bw, wcqe);
	cg = bf_get(lpfc_wcqe_c_cmf_cg, wcqe);

	/* Get BW requirement from firmware */
	bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE;
	if (!bw) {
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"6212 CMF_SYNC_WQE x%x: NULL bw\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		goto out;
	}

	/* Gather information needed for logging if a BW change is required */
	wqe = &cmdiocb->wqe;
	asig = bf_get(cmf_sync_asig, &wqe->cmf_sync);
	afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync);
	fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync);
	sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync);
	if (phba->cmf_max_bytes_per_interval != bw ||
	    (asig || afpin || sigcnt || fpincnt)) {
		/* Are we increasing or decreasing BW */
		if (phba->cmf_max_bytes_per_interval < bw) {
			bwdif = bw - phba->cmf_max_bytes_per_interval;
			s = "Increase";
		} else {
			bwdif = phba->cmf_max_bytes_per_interval - bw;
			s = "Decrease";
		}

		/* What is the change percentage */
		slop = div_u64(phba->cmf_link_byte_count, 200); /*For rounding*/
		pcent = div64_u64(bwdif * 100 + slop,
				  phba->cmf_link_byte_count);
		bwpcent = div64_u64(bw * 100 + slop,
				    phba->cmf_link_byte_count);
		/* Because of bytes adjustment due to shorter timer in
		 * lpfc_cmf_timer() the cmf_link_byte_count can be shorter and
		 * may seem like BW is above 100%.
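		 * For that reason bwpcent is clamped to 100 just below.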
1854 */ 1855 if (bwpcent > 100) 1856 bwpcent = 100; 1857 1858 if (phba->cmf_max_bytes_per_interval < bw && 1859 bwpcent > 95) 1860 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 1861 "6208 Congestion bandwidth " 1862 "limits removed\n"); 1863 else if ((phba->cmf_max_bytes_per_interval > bw) && 1864 ((bwpcent + pcent) <= 100) && ((bwpcent + pcent) > 95)) 1865 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 1866 "6209 Congestion bandwidth " 1867 "limits in effect\n"); 1868 1869 if (asig) { 1870 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 1871 "6237 BW Threshold %lld%% (%lld): " 1872 "%lld%% %s: Signal Alarm: cg:%d " 1873 "Info:%u\n", 1874 bwpcent, bw, pcent, s, cg, 1875 phba->cmf_active_info); 1876 } else if (afpin) { 1877 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 1878 "6238 BW Threshold %lld%% (%lld): " 1879 "%lld%% %s: FPIN Alarm: cg:%d " 1880 "Info:%u\n", 1881 bwpcent, bw, pcent, s, cg, 1882 phba->cmf_active_info); 1883 } else if (sigcnt) { 1884 wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync); 1885 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 1886 "6239 BW Threshold %lld%% (%lld): " 1887 "%lld%% %s: Signal Warning: " 1888 "Cnt %d Max %d: cg:%d Info:%u\n", 1889 bwpcent, bw, pcent, s, sigcnt, 1890 wsigmax, cg, phba->cmf_active_info); 1891 } else if (fpincnt) { 1892 wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync); 1893 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 1894 "6240 BW Threshold %lld%% (%lld): " 1895 "%lld%% %s: FPIN Warning: " 1896 "Cnt %d Max %d: cg:%d Info:%u\n", 1897 bwpcent, bw, pcent, s, fpincnt, 1898 wfpinmax, cg, phba->cmf_active_info); 1899 } else { 1900 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 1901 "6241 BW Threshold %lld%% (%lld): " 1902 "CMF %lld%% %s: cg:%d Info:%u\n", 1903 bwpcent, bw, pcent, s, cg, 1904 phba->cmf_active_info); 1905 } 1906 } else if (info) { 1907 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 1908 "6246 Info Threshold %u\n", info); 1909 } 1910 1911 /* Save BW change to be picked up during next timer interrupt */ 1912 phba->cmf_last_sync_bw = bw; 1913 out: 1914 lpfc_sli_release_iocbq(phba, cmdiocb); 1915 } 1916 1917 /** 1918 * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE 1919 * @phba: Pointer to HBA context object. 1920 * @ms: ms to set in WQE interval, 0 means use init op 1921 * @total: Total rcv bytes for this interval 1922 * 1923 * This routine is called every CMF timer interrupt. Its purpose is 1924 * to issue a CMF_SYNC_WQE to the firmware to inform it of any events 1925 * that may indicate we have congestion (FPINs or Signals). Upon 1926 * completion, the firmware will indicate any BW restrictions the 1927 * driver may need to take. 
 **/
int
lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
{
	union lpfc_wqe128 *wqe;
	struct lpfc_iocbq *sync_buf;
	unsigned long iflags;
	u32 ret_val, cgn_sig_freq;
	u32 atot, wtot, max;
	u8 warn_sync_period = 0;

	/* First address any alarm / warning activity */
	atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
	wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);

	spin_lock_irqsave(&phba->hbalock, iflags);

	/* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */
	if (phba->cmf_active_mode != LPFC_CFG_MANAGED ||
	    phba->link_state < LPFC_LINK_UP) {
		ret_val = 0;
		goto out_unlock;
	}

	sync_buf = __lpfc_sli_get_iocbq(phba);
	if (!sync_buf) {
		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
				"6244 No available WQEs for CMF_SYNC_WQE\n");
		ret_val = ENOMEM;
		goto out_unlock;
	}

	wqe = &sync_buf->wqe;

	/* WQEs are reused. Clear stale data and set key fields to zero */
	memset(wqe, 0, sizeof(*wqe));

	/* If this is the very first CMF_SYNC_WQE, issue an init operation */
	if (!ms) {
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"6441 CMF Init %d - CMF_SYNC_WQE\n",
				phba->fc_eventTag);
		bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */
		bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL);
		goto initpath;
	}

	bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */
	bf_set(cmf_sync_interval, &wqe->cmf_sync, ms);

	/* Check for alarms / warnings */
	if (atot) {
		if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
			/* We hit a Signal alarm condition */
			bf_set(cmf_sync_asig, &wqe->cmf_sync, 1);
		} else {
			/* We hit a FPIN alarm condition */
			bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1);
		}
	} else if (wtot) {
		if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
		    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
			cgn_sig_freq = phba->cgn_sig_freq ? phba->cgn_sig_freq :
				lpfc_fabric_cgn_frequency;
			/* We hit a Signal warning condition */
			max = LPFC_SEC_TO_MSEC / cgn_sig_freq *
				lpfc_acqe_cgn_frequency;
			bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
			bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot);
			warn_sync_period = lpfc_acqe_cgn_frequency;
		} else {
			/* We hit a FPIN warning condition */
			bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1);
			bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1);
			if (phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ)
				warn_sync_period =
					LPFC_MSECS_TO_SECS(phba->cgn_fpin_frequency);
		}
	}

	/* Update total read blocks during previous timer interval */
	wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE);

initpath:
	bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER);
	wqe->cmf_sync.event_tag = phba->fc_eventTag;
	bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE);

	/* Setup reqtag to match the wqe completion. */
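	/* lpfc_cmf_sync_cmpl() sees this same tag again in the WCQE
	 * (lpfc_wcqe_c_request_tag) and reports it in its log messages.
	 */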
	bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag);

	bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1);
	bf_set(cmf_sync_period, &wqe->cmf_sync, warn_sync_period);

	bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND);
	bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1);
	bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT);

	sync_buf->vport = phba->pport;
	sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl;
	sync_buf->cmd_dmabuf = NULL;
	sync_buf->rsp_dmabuf = NULL;
	sync_buf->bpl_dmabuf = NULL;
	sync_buf->sli4_xritag = NO_XRI;

	sync_buf->cmd_flag |= LPFC_IO_CMF;
	ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
	if (ret_val) {
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"6214 Cannot issue CMF_SYNC_WQE: x%x\n",
				ret_val);
		__lpfc_sli_release_iocbq(phba, sync_buf);
	}
out_unlock:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret_val;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock, the
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns a pointer to the next available iocb slot if there
 * is an available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		     pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}

/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
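 * For example, if iocbq_lookup_len is 1024 and every iotag up to 1023 is in
 * use, the lookup array is reallocated with LPFC_IOCBQ_LOOKUP_INCREMENT more
 * entries, the old entries are copied across, and iotag 1024 is handed out.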
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}

/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 *            posted to firmware.
 *
 * This function is called to post a new iocb to the firmware. This
 * function copies the new iocb to ring iocb slot and updates the
 * ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object. The hbalock is asserted held in the code path calling
 * this routine.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->cmd_cmpl) ? nextiocb->iotag : 0;


	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, cmd_cmpl MUST be NULL.
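	 * Otherwise the iocb is parked on the txcmplq until its response
	 * arrives and lpfc_sli_iocbq_lookup() matches it back by iotag.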
2223 */ 2224 if (nextiocb->cmd_cmpl) 2225 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 2226 else 2227 __lpfc_sli_release_iocbq(phba, nextiocb); 2228 2229 /* 2230 * Let the HBA know what IOCB slot will be the next one the 2231 * driver will put a command into. 2232 */ 2233 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx; 2234 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); 2235 } 2236 2237 /** 2238 * lpfc_sli_update_full_ring - Update the chip attention register 2239 * @phba: Pointer to HBA context object. 2240 * @pring: Pointer to driver SLI ring object. 2241 * 2242 * The caller is not required to hold any lock for calling this function. 2243 * This function updates the chip attention bits for the ring to inform firmware 2244 * that there are pending work to be done for this ring and requests an 2245 * interrupt when there is space available in the ring. This function is 2246 * called when the driver is unable to post more iocbs to the ring due 2247 * to unavailability of space in the ring. 2248 **/ 2249 static void 2250 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 2251 { 2252 int ringno = pring->ringno; 2253 2254 pring->flag |= LPFC_CALL_RING_AVAILABLE; 2255 2256 wmb(); 2257 2258 /* 2259 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register. 2260 * The HBA will tell us when an IOCB entry is available. 2261 */ 2262 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr); 2263 readl(phba->CAregaddr); /* flush */ 2264 2265 pring->stats.iocb_cmd_full++; 2266 } 2267 2268 /** 2269 * lpfc_sli_update_ring - Update chip attention register 2270 * @phba: Pointer to HBA context object. 2271 * @pring: Pointer to driver SLI ring object. 2272 * 2273 * This function updates the chip attention register bit for the 2274 * given ring to inform HBA that there is more work to be done 2275 * in this ring. The caller is not required to hold any lock. 2276 **/ 2277 static void 2278 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 2279 { 2280 int ringno = pring->ringno; 2281 2282 /* 2283 * Tell the HBA that there is work to do in this ring. 2284 */ 2285 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) { 2286 wmb(); 2287 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); 2288 readl(phba->CAregaddr); /* flush */ 2289 } 2290 } 2291 2292 /** 2293 * lpfc_sli_resume_iocb - Process iocbs in the txq 2294 * @phba: Pointer to HBA context object. 2295 * @pring: Pointer to driver SLI ring object. 2296 * 2297 * This function is called with hbalock held to post pending iocbs 2298 * in the txq to the firmware. This function is called when driver 2299 * detects space available in the ring. 2300 **/ 2301 static void 2302 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 2303 { 2304 IOCB_t *iocb; 2305 struct lpfc_iocbq *nextiocb; 2306 2307 lockdep_assert_held(&phba->hbalock); 2308 2309 /* 2310 * Check to see if: 2311 * (a) there is anything on the txq to send 2312 * (b) link is up 2313 * (c) link attention events can be processed (fcp ring only) 2314 * (d) IOCB processing is not blocked by the outstanding mbox command. 
2315 */ 2316 2317 if (lpfc_is_link_up(phba) && 2318 (!list_empty(&pring->txq)) && 2319 (pring->ringno != LPFC_FCP_RING || 2320 phba->sli.sli_flag & LPFC_PROCESS_LA)) { 2321 2322 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 2323 (nextiocb = lpfc_sli_ringtx_get(phba, pring))) 2324 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 2325 2326 if (iocb) 2327 lpfc_sli_update_ring(phba, pring); 2328 else 2329 lpfc_sli_update_full_ring(phba, pring); 2330 } 2331 2332 return; 2333 } 2334 2335 /** 2336 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ 2337 * @phba: Pointer to HBA context object. 2338 * @hbqno: HBQ number. 2339 * 2340 * This function is called with hbalock held to get the next 2341 * available slot for the given HBQ. If there is free slot 2342 * available for the HBQ it will return pointer to the next available 2343 * HBQ entry else it will return NULL. 2344 **/ 2345 static struct lpfc_hbq_entry * 2346 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) 2347 { 2348 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 2349 2350 lockdep_assert_held(&phba->hbalock); 2351 2352 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && 2353 ++hbqp->next_hbqPutIdx >= hbqp->entry_count) 2354 hbqp->next_hbqPutIdx = 0; 2355 2356 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { 2357 uint32_t raw_index = phba->hbq_get[hbqno]; 2358 uint32_t getidx = le32_to_cpu(raw_index); 2359 2360 hbqp->local_hbqGetIdx = getidx; 2361 2362 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { 2363 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2364 "1802 HBQ %d: local_hbqGetIdx " 2365 "%u is > than hbqp->entry_count %u\n", 2366 hbqno, hbqp->local_hbqGetIdx, 2367 hbqp->entry_count); 2368 2369 phba->link_state = LPFC_HBA_ERROR; 2370 return NULL; 2371 } 2372 2373 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) 2374 return NULL; 2375 } 2376 2377 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt + 2378 hbqp->hbqPutIdx; 2379 } 2380 2381 /** 2382 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers 2383 * @phba: Pointer to HBA context object. 2384 * 2385 * This function is called with no lock held to free all the 2386 * hbq buffers while uninitializing the SLI interface. It also 2387 * frees the HBQ buffers returned by the firmware but not yet 2388 * processed by the upper layers. 2389 **/ 2390 void 2391 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) 2392 { 2393 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 2394 struct hbq_dmabuf *hbq_buf; 2395 unsigned long flags; 2396 int i, hbq_count; 2397 2398 hbq_count = lpfc_sli_hbq_count(); 2399 /* Return all memory used by all HBQs */ 2400 spin_lock_irqsave(&phba->hbalock, flags); 2401 for (i = 0; i < hbq_count; ++i) { 2402 list_for_each_entry_safe(dmabuf, next_dmabuf, 2403 &phba->hbqs[i].hbq_buffer_list, list) { 2404 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 2405 list_del(&hbq_buf->dbuf.list); 2406 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); 2407 } 2408 phba->hbqs[i].buffer_count = 0; 2409 } 2410 2411 /* Mark the HBQs not in use */ 2412 phba->hbq_in_use = 0; 2413 spin_unlock_irqrestore(&phba->hbalock, flags); 2414 } 2415 2416 /** 2417 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware 2418 * @phba: Pointer to HBA context object. 2419 * @hbqno: HBQ number. 2420 * @hbq_buf: Pointer to HBQ buffer. 2421 * 2422 * This function is called with the hbalock held to post a 2423 * hbq buffer to the firmware. If the function finds an empty 2424 * slot in the HBQ, it will post the buffer. 
 * The function returns zero if it successfully posts the buffer;
 * otherwise it returns an error code.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	lockdep_assert_held(&phba->hbalock);
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}

/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a hbq buffer to the
 * firmware. If the function finds an empty slot in the HBQ, it will post the
 * buffer and place it on the hbq_buffer_list. The function will return zero
 * if it successfully posts the buffer; otherwise it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	lockdep_assert_held(&phba->hbalock);
	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
		/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
		/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	} else
		return -ENOMEM;
}

/**
 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an RQE to the SLI4
 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
 * the hbq_buffer_list and return zero, otherwise it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;

	if (hbqno != LPFC_ELS_HBQ)
		return 1;
	hrq = phba->sli4_hba.hdr_rq;
	drq = phba->sli4_hba.dat_rq;

	lockdep_assert_held(&phba->hbalock);
	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
	if (rc < 0)
		return rc;
	hbq_buf->tag = (rc | (hbqno << 16));
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}

/* HBQ for ELS and CT traffic. */
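/* entry_count bounds the number of buffers the HBQ may hold; init_count and
 * add_count are the batch sizes used by lpfc_sli_hbqbuf_init_hbqs() and
 * lpfc_sli_hbqbuf_add_hbqs() respectively when posting buffers.
 */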
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 256,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,
	.init_count = 40,
	.add_count = 40,
};

/* Array of HBQs */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
};

/**
 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @count: Number of HBQ buffers to be posted.
 *
 * This function is called with no lock held to post more hbq buffers to the
 * given HBQ. The function returns the number of HBQ buffers successfully
 * posted.
 **/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, posted = 0;
	unsigned long flags;
	struct hbq_dmabuf *hbq_buffer;
	LIST_HEAD(hbq_buf_list);

	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
		return 0;

	if ((phba->hbqs[hbqno].buffer_count + count) >
	    lpfc_hbq_defs[hbqno]->entry_count)
		count = lpfc_hbq_defs[hbqno]->entry_count -
					phba->hbqs[hbqno].buffer_count;
	if (!count)
		return 0;
	/* Allocate HBQ entries */
	for (i = 0; i < count; i++) {
		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
		if (!hbq_buffer)
			break;
		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
	}
	/* Check whether HBQ is still in use */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (!phba->hbq_in_use)
		goto err;
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
				   (hbqno << 16));
		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
			phba->hbqs[hbqno].buffer_count++;
			posted++;
		} else
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return posted;
err:
	spin_unlock_irqrestore(&phba->hbalock, flags);
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	return 0;
}

/**
 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
 * @phba: Pointer to HBA context object.
 * @qno: HBQ number.
 *
 * This function posts more buffers to the HBQ. This function
 * is called with no lock held. The function returns the number of HBQ entries
 * successfully allocated.
 **/
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return 0;
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
						 lpfc_hbq_defs[qno]->add_count);
}

/**
 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
 * @phba: Pointer to HBA context object.
 * @qno: HBQ queue number.
 *
 * This function is called from SLI initialization code path with
 * no lock held to post initial HBQ buffers to firmware. The
 * function returns the number of HBQ entries successfully allocated.
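 * For the single ELS HBQ defined above this means entry_count (256) buffers
 * on an SLI4 port and init_count (40) on an SLI3 port.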
 **/
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
						 lpfc_hbq_defs[qno]->entry_count);
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
						 lpfc_hbq_defs[qno]->init_count);
}

/*
 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
 *
 * This function removes the first hbq buffer on an hbq list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_get(struct list_head *rb_list)
{
	struct lpfc_dmabuf *d_buf;

	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
	if (!d_buf)
		return NULL;
	return container_of(d_buf, struct hbq_dmabuf, dbuf);
}

/**
 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
 * @phba: Pointer to HBA context object.
 * @hrq: Pointer to the header receive queue the buffers are posted to.
 *
 * This function removes the first RQ buffer on an RQ buffer list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct rqb_dmabuf *
lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
{
	struct lpfc_dmabuf *h_buf;
	struct lpfc_rqb *rqbp;

	rqbp = hrq->rqbp;
	list_remove_head(&rqbp->rqb_buffer_list, h_buf,
			 struct lpfc_dmabuf, list);
	if (!h_buf)
		return NULL;
	rqbp->buffer_count--;
	return container_of(h_buf, struct rqb_dmabuf, hbuf);
}

/**
 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
 * @phba: Pointer to HBA context object.
 * @tag: Tag of the hbq buffer.
 *
 * This function searches for the hbq buffer associated with the given tag in
 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
 * otherwise it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;
	uint32_t hbqno;

	hbqno = tag >> 16;
	if (hbqno >= LPFC_MAX_HBQS)
		return NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if (hbq_buf->tag == tag) {
			spin_unlock_irq(&phba->hbalock);
			return hbq_buf;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"1803 Bad hbq tag. Data: x%x x%x\n",
			tag, phba->hbqs[tag >> 16].buffer_count);
	return NULL;
}

/**
 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbq_buffer: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held. This function gives back
 * the hbq buffer to firmware. If the HBQ does not have space to
 * post the buffer, it will free the buffer.
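 *
 * Illustrative recycling sketch (hypothetical caller; note that
 * lpfc_sli_hbqbuf_find() takes the hbalock itself, while this function
 * expects the caller to hold it, and a NULL hbq_buf is tolerated):
 *
 *	hbq_buf = lpfc_sli_hbqbuf_find(phba, tag);
 *	spin_lock_irqsave(&phba->hbalock, flags);
 *	lpfc_sli_free_hbq(phba, hbq_buf);
 *	spin_unlock_irqrestore(&phba->hbalock, flags);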
 **/
void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
	uint32_t hbqno;

	if (hbq_buffer) {
		hbqno = hbq_buffer->tag >> 16;
		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
}

/**
 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
 * @mbxCommand: mailbox command code.
 *
 * This function is called by the mailbox event handler function to verify
 * that the completed mailbox command is a legitimate mailbox command. If the
 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
 * and the mailbox event handler will take the HBA offline.
 **/
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
	uint8_t ret;

	switch (mbxCommand) {
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_RUN_BIU_DIAG:
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_SPARM:
	case MBX_READ_STATUS:
	case MBX_READ_RPI:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_REG_LOGIN:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_UPDATE_CFG:
	case MBX_DOWN_LOAD:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_SET_MASK:
	case MBX_SET_VARIABLE:
	case MBX_UNREG_D_ID:
	case MBX_KILL_BOARD:
	case MBX_CONFIG_FARP:
	case MBX_BEACON:
	case MBX_LOAD_AREA:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_READ_RPI64:
	case MBX_REG_LOGIN64:
	case MBX_READ_TOPOLOGY:
	case MBX_WRITE_WWN:
	case MBX_SET_DEBUG:
	case MBX_LOAD_EXP_ROM:
	case MBX_ASYNCEVT_ENABLE:
	case MBX_REG_VPI:
	case MBX_UNREG_VPI:
	case MBX_HEARTBEAT:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_SLI4_CONFIG:
	case MBX_SLI4_REQ_FTRS:
	case MBX_REG_FCFI:
	case MBX_UNREG_FCFI:
	case MBX_REG_VFI:
	case MBX_UNREG_VFI:
	case MBX_INIT_VPI:
	case MBX_INIT_VFI:
	case MBX_RESUME_RPI:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_READ_EVENT_LOG:
	case MBX_SECURITY_MGMT:
	case MBX_AUTH_PORT:
	case MBX_ACCESS_VDATA:
		ret = mbxCommand;
		break;
	default:
		ret = MBX_SHUTDOWN;
		break;
	}
	return ret;
}

/**
 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler for mailbox commands issued from the
 * lpfc_sli_issue_mbox_wait function. This function is called by the
 * mailbox event handler function with no lock held. This function
 * will wake up the thread waiting on the wait queue pointed by context1
 * of the mailbox.
 **/
void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;
	struct completion *pmbox_done;

	/*
	 * If pmbox_done is empty, the driver thread gave up waiting and
	 * continued running.
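	 * In that case ctx_u.mbox_wait will be NULL below and the
	 * complete() is skipped.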
2836 */ 2837 pmboxq->mbox_flag |= LPFC_MBX_WAKE; 2838 spin_lock_irqsave(&phba->hbalock, drvr_flag); 2839 pmbox_done = pmboxq->ctx_u.mbox_wait; 2840 if (pmbox_done) 2841 complete(pmbox_done); 2842 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2843 return; 2844 } 2845 2846 /** 2847 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler 2848 * @phba: Pointer to HBA context object. 2849 * @pmb: Pointer to mailbox object. 2850 * 2851 * This function is the default mailbox completion handler. It 2852 * frees the memory resources associated with the completed mailbox 2853 * command. If the completed command is a REG_LOGIN mailbox command, 2854 * this function will issue a UREG_LOGIN to re-claim the RPI. 2855 **/ 2856 void 2857 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2858 { 2859 struct lpfc_vport *vport = pmb->vport; 2860 struct lpfc_dmabuf *mp; 2861 struct lpfc_nodelist *ndlp; 2862 struct Scsi_Host *shost; 2863 uint16_t rpi, vpi; 2864 int rc; 2865 2866 /* 2867 * If a REG_LOGIN succeeded after node is destroyed or node 2868 * is in re-discovery driver need to cleanup the RPI. 2869 */ 2870 if (!test_bit(FC_UNLOADING, &phba->pport->load_flag) && 2871 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 && 2872 !pmb->u.mb.mbxStatus) { 2873 mp = pmb->ctx_buf; 2874 if (mp) { 2875 pmb->ctx_buf = NULL; 2876 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2877 kfree(mp); 2878 } 2879 rpi = pmb->u.mb.un.varWords[0]; 2880 vpi = pmb->u.mb.un.varRegLogin.vpi; 2881 if (phba->sli_rev == LPFC_SLI_REV4) 2882 vpi -= phba->sli4_hba.max_cfg_param.vpi_base; 2883 lpfc_unreg_login(phba, vpi, rpi, pmb); 2884 pmb->vport = vport; 2885 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2886 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2887 if (rc != MBX_NOT_FINISHED) 2888 return; 2889 } 2890 2891 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && 2892 !test_bit(FC_UNLOADING, &phba->pport->load_flag) && 2893 !pmb->u.mb.mbxStatus) { 2894 shost = lpfc_shost_from_vport(vport); 2895 spin_lock_irq(shost->host_lock); 2896 vport->vpi_state |= LPFC_VPI_REGISTERED; 2897 spin_unlock_irq(shost->host_lock); 2898 clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag); 2899 } 2900 2901 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 2902 ndlp = pmb->ctx_ndlp; 2903 lpfc_nlp_put(ndlp); 2904 } 2905 2906 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { 2907 ndlp = pmb->ctx_ndlp; 2908 2909 /* Check to see if there are any deferred events to process */ 2910 if (ndlp) { 2911 lpfc_printf_vlog( 2912 vport, 2913 KERN_INFO, LOG_MBOX | LOG_DISCOVERY, 2914 "1438 UNREG cmpl deferred mbox x%x " 2915 "on NPort x%x Data: x%lx x%x x%px x%lx x%x\n", 2916 ndlp->nlp_rpi, ndlp->nlp_DID, 2917 ndlp->nlp_flag, ndlp->nlp_defer_did, 2918 ndlp, vport->load_flag, kref_read(&ndlp->kref)); 2919 2920 if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag) && 2921 ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING) { 2922 clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); 2923 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; 2924 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 2925 } else { 2926 clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); 2927 } 2928 2929 /* The unreg_login mailbox is complete and had a 2930 * reference that has to be released. The PLOGI 2931 * got its own ref. 
2932 */ 2933 lpfc_nlp_put(ndlp); 2934 pmb->ctx_ndlp = NULL; 2935 } 2936 } 2937 2938 /* This nlp_put pairs with lpfc_sli4_resume_rpi */ 2939 if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) { 2940 ndlp = pmb->ctx_ndlp; 2941 lpfc_nlp_put(ndlp); 2942 } 2943 2944 /* Check security permission status on INIT_LINK mailbox command */ 2945 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) && 2946 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) 2947 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2948 "2860 SLI authentication is required " 2949 "for INIT_LINK but has not done yet\n"); 2950 2951 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) 2952 lpfc_sli4_mbox_cmd_free(phba, pmb); 2953 else 2954 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 2955 } 2956 /** 2957 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler 2958 * @phba: Pointer to HBA context object. 2959 * @pmb: Pointer to mailbox object. 2960 * 2961 * This function is the unreg rpi mailbox completion handler. It 2962 * frees the memory resources associated with the completed mailbox 2963 * command. An additional reference is put on the ndlp to prevent 2964 * lpfc_nlp_release from freeing the rpi bit in the bitmask before 2965 * the unreg mailbox command completes, this routine puts the 2966 * reference back. 2967 * 2968 **/ 2969 void 2970 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2971 { 2972 struct lpfc_vport *vport = pmb->vport; 2973 struct lpfc_nodelist *ndlp; 2974 bool unreg_inp; 2975 2976 ndlp = pmb->ctx_ndlp; 2977 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { 2978 if (phba->sli_rev == LPFC_SLI_REV4 && 2979 (bf_get(lpfc_sli_intf_if_type, 2980 &phba->sli4_hba.sli_intf) >= 2981 LPFC_SLI_INTF_IF_TYPE_2)) { 2982 if (ndlp) { 2983 lpfc_printf_vlog( 2984 vport, KERN_INFO, 2985 LOG_MBOX | LOG_SLI | LOG_NODE, 2986 "0010 UNREG_LOGIN vpi:x%x " 2987 "rpi:%x DID:%x defer x%x flg x%lx " 2988 "x%px\n", 2989 vport->vpi, ndlp->nlp_rpi, 2990 ndlp->nlp_DID, ndlp->nlp_defer_did, 2991 ndlp->nlp_flag, 2992 ndlp); 2993 2994 /* Cleanup the nlp_flag now that the UNREG RPI 2995 * has completed. 2996 */ 2997 unreg_inp = test_and_clear_bit(NLP_UNREG_INP, 2998 &ndlp->nlp_flag); 2999 clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); 3000 3001 /* Check to see if there are any deferred 3002 * events to process 3003 */ 3004 if (unreg_inp && 3005 ndlp->nlp_defer_did != 3006 NLP_EVT_NOTHING_PENDING) { 3007 lpfc_printf_vlog( 3008 vport, KERN_INFO, 3009 LOG_MBOX | LOG_SLI | LOG_NODE, 3010 "4111 UNREG cmpl deferred " 3011 "clr x%x on " 3012 "NPort x%x Data: x%x x%px\n", 3013 ndlp->nlp_rpi, ndlp->nlp_DID, 3014 ndlp->nlp_defer_did, ndlp); 3015 ndlp->nlp_defer_did = 3016 NLP_EVT_NOTHING_PENDING; 3017 lpfc_issue_els_plogi( 3018 vport, ndlp->nlp_DID, 0); 3019 } 3020 3021 lpfc_nlp_put(ndlp); 3022 } 3023 } 3024 } 3025 3026 mempool_free(pmb, phba->mbox_mem_pool); 3027 } 3028 3029 /** 3030 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware 3031 * @phba: Pointer to HBA context object. 3032 * 3033 * This function is called with no lock held. This function processes all 3034 * the completed mailbox commands and gives it to upper layers. The interrupt 3035 * service routine processes mailbox completion interrupt and adds completed 3036 * mailbox commands to the mboxq_cmpl queue and signals the worker thread. 3037 * Worker thread call lpfc_sli_handle_mb_event, which will return the 3038 * completed mailbox commands in mboxq_cmpl queue to the upper layers. 
 * This function returns the mailbox commands to the upper layer by calling
 * the completion handler function of each mailbox.
 **/
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	int rc;
	LIST_HEAD(cmplq);

	phba->sli.slistat.mbox_event++;

	/* Get all completed mailbox buffers into the cmplq */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
	spin_unlock_irq(&phba->hbalock);

	/* Get a Mailbox buffer to setup mailbox commands for callback */
	do {
		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
		if (pmb == NULL)
			break;

		pmbox = &pmb->u.mb;

		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
			if (pmb->vport) {
				lpfc_debugfs_disc_trc(pmb->vport,
					LPFC_DISC_TRC_MBOX_VPORT,
					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			} else {
				lpfc_debugfs_disc_trc(phba->pport,
					LPFC_DISC_TRC_MBOX,
					"MBOX cmpl: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
		}

		/*
		 * It is a fatal error if an unknown mailbox command completes.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {
			/* Unknown mailbox command compl */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"(%d):0323 Unknown Mailbox command "
					"x%x (x%x/x%x) Cmpl\n",
					pmb->vport ? pmb->vport->vpi :
					LPFC_VPORT_UNKNOWN,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb));
			phba->link_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			continue;
		}

		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing */
				lpfc_printf_log(phba, KERN_INFO,
						LOG_MBOX | LOG_SLI,
						"(%d):0305 Mbox cmd cmpl "
						"error - RETRYing Data: x%x "
						"(x%x/x%x) x%x x%x x%x\n",
						pmb->vport ? pmb->vport->vpi :
						LPFC_VPORT_UNKNOWN,
						pmbox->mbxCommand,
						lpfc_sli_config_mbox_subsys_get(phba,
										pmb),
						lpfc_sli_config_mbox_opcode_get(phba,
										pmb),
						pmbox->mbxStatus,
						pmbox->un.varWords[0],
						pmb->vport ? pmb->vport->port_state :
						LPFC_VPORT_UNKNOWN);
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc != MBX_NOT_FINISHED)
					continue;
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
				"x%x x%x x%x\n",
				pmb->vport ?
				pmb->vport->vpi : 0,
				pmbox->mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, pmb),
				lpfc_sli_config_mbox_opcode_get(phba, pmb),
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7],
				pmbox->un.varWords[8],
				pmbox->un.varWords[9],
				pmbox->un.varWords[10]);

		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	} while (1);
	return 0;
}

/**
 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: buffer tag.
 *
 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
 * is set in the tag, the buffer is posted for a particular exchange and
 * the function will return the buffer without replacing the buffer.
 * If the buffer is for unsolicited ELS or CT traffic, this function
 * returns the buffer and also posts another buffer to the firmware.
 **/
static struct lpfc_dmabuf *
lpfc_sli_get_buff(struct lpfc_hba *phba,
		  struct lpfc_sli_ring *pring,
		  uint32_t tag)
{
	struct hbq_dmabuf *hbq_entry;

	if (tag & QUE_BUFTAG_BIT)
		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
	if (!hbq_entry)
		return NULL;
	return &hbq_entry->dbuf;
}

/**
 * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
 *                              containing a NVME LS request.
 * @phba: pointer to lpfc hba data structure.
 * @piocb: pointer to the iocbq struct representing the sequence starting
 *         frame.
 *
 * This routine initially validates the NVME LS, validates there is a login
 * with the port that sent the LS, and then calls the appropriate nvme host
 * or target LS request handler.
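 *
 * On any validation failure the LS is dropped and logged: the receive
 * buffer is recycled and, if the frame opened a new exchange, an abort
 * is issued for it.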
 **/
static void
lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *nvmebuf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_async_xchg_ctx *axchg = NULL;
	char *failwhy = NULL;
	uint32_t oxid, sid, did, fctl, size;
	int ret = 1;

	d_buf = piocb->cmd_dmabuf;

	nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
	fc_hdr = nvmebuf->hbuf.virt;
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	sid = sli4_sid_from_fc_hdr(fc_hdr);
	did = sli4_did_from_fc_hdr(fc_hdr);
	fctl = (fc_hdr->fh_f_ctl[0] << 16 |
		fc_hdr->fh_f_ctl[1] << 8 |
		fc_hdr->fh_f_ctl[2]);
	size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);

	lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
			 oxid, size, sid);

	if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
		failwhy = "Driver Unloading";
	} else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
		failwhy = "NVME FC4 Disabled";
	} else if (!phba->nvmet_support && !phba->pport->localport) {
		failwhy = "No Localport";
	} else if (phba->nvmet_support && !phba->targetport) {
		failwhy = "No Targetport";
	} else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
		failwhy = "Bad NVME LS R_CTL";
	} else if (unlikely((fctl & 0x00FF0000) !=
			    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
		failwhy = "Bad NVME LS F_CTL";
	} else {
		axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
		if (!axchg)
			failwhy = "No CTX memory";
	}

	if (unlikely(failwhy)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
				sid, oxid, failwhy);
		goto out_fail;
	}

	/* validate the source of the LS is logged in */
	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6216 NVME Unsol rcv: No ndlp: "
				"NPort_ID x%x oxid x%x\n",
				sid, oxid);
		goto out_fail;
	}

	axchg->phba = phba;
	axchg->ndlp = ndlp;
	axchg->size = size;
	axchg->oxid = oxid;
	axchg->sid = sid;
	axchg->wqeq = NULL;
	axchg->state = LPFC_NVME_STE_LS_RCV;
	axchg->entry_cnt = 1;
	axchg->rqb_buffer = (void *)nvmebuf;
	axchg->hdwq = &phba->sli4_hba.hdwq[0];
	axchg->payload = nvmebuf->dbuf.virt;
	INIT_LIST_HEAD(&axchg->list);

	if (phba->nvmet_support) {
		ret = lpfc_nvmet_handle_lsreq(phba, axchg);
		spin_lock_irq(&ndlp->lock);
		if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
			ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
			spin_unlock_irq(&ndlp->lock);

			/* This reference is a single occurrence to hold the
			 * node valid until the nvmet transport calls
			 * host_release.
			 */
			if (!lpfc_nlp_get(ndlp))
				goto out_fail;

			lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
					"6206 NVMET unsol ls_req ndlp x%px "
					"DID x%x xflags x%x refcnt %d\n",
					ndlp, ndlp->nlp_DID,
					ndlp->fc4_xpt_flags,
					kref_read(&ndlp->kref));
		} else {
			spin_unlock_irq(&ndlp->lock);
		}
	} else {
		ret = lpfc_nvme_handle_lsreq(phba, axchg);
	}

	/* if zero, LS was successfully handled. If non-zero, LS not handled */
	if (!ret)
		return;

out_fail:
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
			"NVMe%s handler failed %d\n",
			did, sid, oxid,
			(phba->nvmet_support) ? "T" : "I", ret);

	/* recycle receive buffer */
	lpfc_in_buf_free(phba, &nvmebuf->dbuf);

	/* If start of new exchange, abort it */
	if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
		ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);

	if (ret)
		kfree(axchg);
}

/**
 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
 * @fch_type: the type for the first frame of the sequence.
 *
 * This function is called with no lock held. This function uses the r_ctl and
 * type of the received sequence to find the correct callback function to call
 * to process the sequence.
 **/
static int
lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
			 uint32_t fch_type)
{
	int i;

	switch (fch_type) {
	case FC_TYPE_NVME:
		lpfc_nvme_unsol_ls_handler(phba, saveq);
		return 1;
	default:
		break;
	}

	/* unSolicited Responses */
	if (pring->prt[0].profile) {
		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
								  saveq);
		return 1;
	}
	/* We must search, based on rctl / type
	   for the right routine */
	for (i = 0; i < pring->num_mask; i++) {
		if ((pring->prt[i].rctl == fch_r_ctl) &&
		    (pring->prt[i].type == fch_type)) {
			if (pring->prt[i].lpfc_sli_rcv_unsol_event)
				(pring->prt[i].lpfc_sli_rcv_unsol_event)
					(phba, pring, saveq);
			return 1;
		}
	}
	return 0;
}

static void
lpfc_sli_prep_unsol_wqe(struct lpfc_hba *phba,
			struct lpfc_iocbq *saveq)
{
	IOCB_t *irsp;
	union lpfc_wqe128 *wqe;
	u16 i = 0;

	irsp = &saveq->iocb;
	wqe = &saveq->wqe;

	/* Fill wcqe with the IOCB status fields */
	bf_set(lpfc_wcqe_c_status, &saveq->wcqe_cmpl, irsp->ulpStatus);
	saveq->wcqe_cmpl.word3 = irsp->ulpBdeCount;
	saveq->wcqe_cmpl.parameter = irsp->un.ulpWord[4];
	saveq->wcqe_cmpl.total_data_placed = irsp->unsli3.rcvsli3.acc_len;

	/* Source ID */
	bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, irsp->un.rcvels.parmRo);

	/* rx-id of the response frame */
	bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, irsp->ulpContext);

	/* ox-id of the frame */
	bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
	       irsp->unsli3.rcvsli3.ox_id);

	/* DID */
	bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
	       irsp->un.rcvels.remoteID);

	/* unsol data len */
	for (i = 0; i < irsp->ulpBdeCount; i++) {
		struct lpfc_hbq_entry *hbqe = NULL;

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			if (i == 0) {
				hbqe = (struct lpfc_hbq_entry *)
					&irsp->un.ulpWord[0];
				saveq->wqe.gen_req.bde.tus.f.bdeSize =
					hbqe->bde.tus.f.bdeSize;
			} else if (i == 1) {
				hbqe = (struct lpfc_hbq_entry *)
					&irsp->unsli3.sli3Words[4];
				saveq->unsol_rcv_len = hbqe->bde.tus.f.bdeSize;
			}
		}
	}
}

/**
 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the unsolicited iocb.
 *
 * This function is called with no lock held by the ring event handler
 * when there is an unsolicited iocb posted to the response ring by the
 * firmware. This function gets the buffer associated with the iocbs
 * and calls the event handler for the ring. This function handles both
 * qring buffers and hbq buffers.
 * When the function returns 1 the caller can free the iocb object otherwise
 * upper layer functions will free the iocb objects.
 **/
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t *irsp;
	WORD5 *w5p;
	dma_addr_t paddr;
	uint32_t Rctl, Type;
	struct lpfc_iocbq *iocbq;
	struct lpfc_dmabuf *dmzbuf;

	irsp = &saveq->iocb;
	saveq->vport = phba->pport;

	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
		if (pring->lpfc_sli_rcv_async_status)
			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
		else
			lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"0316 Ring %d handler: unexpected "
					"ASYNC_STATUS iocb received evt_code "
					"0x%x\n",
					pring->ringno,
					irsp->un.asyncstat.evt_code);
		return 1;
	}

	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
	    (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
		if (irsp->ulpBdeCount > 0) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->un.ulpWord[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 1) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->unsli3.sli3Words[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 2) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->unsli3.sli3Words[7]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		return 1;
	}

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		if (irsp->ulpBdeCount != 0) {
			saveq->cmd_dmabuf = lpfc_sli_get_buff(phba, pring,
							      irsp->un.ulpWord[3]);
			if (!saveq->cmd_dmabuf)
				lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0341 Ring %d Cannot find buffer for "
						"an unsolicited iocb. tag 0x%x\n",
						pring->ringno,
						irsp->un.ulpWord[3]);
		}
		if (irsp->ulpBdeCount == 2) {
			saveq->bpl_dmabuf = lpfc_sli_get_buff(phba, pring,
							      irsp->unsli3.sli3Words[7]);
			if (!saveq->bpl_dmabuf)
				lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0342 Ring %d Cannot find buffer for an"
						" unsolicited iocb. tag 0x%x\n",
						pring->ringno,
						irsp->unsli3.sli3Words[7]);
		}
		list_for_each_entry(iocbq, &saveq->list, list) {
			irsp = &iocbq->iocb;
			if (irsp->ulpBdeCount != 0) {
				iocbq->cmd_dmabuf = lpfc_sli_get_buff(phba,
								      pring,
								      irsp->un.ulpWord[3]);
				if (!iocbq->cmd_dmabuf)
					lpfc_printf_log(phba,
							KERN_ERR,
							LOG_SLI,
							"0343 Ring %d Cannot find "
							"buffer for an unsolicited iocb"
tag 0x%x\n", pring->ringno, 3530 irsp->un.ulpWord[3]); 3531 } 3532 if (irsp->ulpBdeCount == 2) { 3533 iocbq->bpl_dmabuf = lpfc_sli_get_buff(phba, 3534 pring, 3535 irsp->unsli3.sli3Words[7]); 3536 if (!iocbq->bpl_dmabuf) 3537 lpfc_printf_log(phba, 3538 KERN_ERR, 3539 LOG_SLI, 3540 "0344 Ring %d Cannot find " 3541 "buffer for an unsolicited " 3542 "iocb. tag 0x%x\n", 3543 pring->ringno, 3544 irsp->unsli3.sli3Words[7]); 3545 } 3546 } 3547 } else { 3548 paddr = getPaddr(irsp->un.cont64[0].addrHigh, 3549 irsp->un.cont64[0].addrLow); 3550 saveq->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, 3551 paddr); 3552 if (irsp->ulpBdeCount == 2) { 3553 paddr = getPaddr(irsp->un.cont64[1].addrHigh, 3554 irsp->un.cont64[1].addrLow); 3555 saveq->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba, 3556 pring, 3557 paddr); 3558 } 3559 } 3560 3561 if (irsp->ulpBdeCount != 0 && 3562 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || 3563 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { 3564 int found = 0; 3565 3566 /* search continue save q for same XRI */ 3567 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { 3568 if (iocbq->iocb.unsli3.rcvsli3.ox_id == 3569 saveq->iocb.unsli3.rcvsli3.ox_id) { 3570 list_add_tail(&saveq->list, &iocbq->list); 3571 found = 1; 3572 break; 3573 } 3574 } 3575 if (!found) 3576 list_add_tail(&saveq->clist, 3577 &pring->iocb_continue_saveq); 3578 3579 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { 3580 list_del_init(&iocbq->clist); 3581 saveq = iocbq; 3582 irsp = &saveq->iocb; 3583 } else { 3584 return 0; 3585 } 3586 } 3587 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || 3588 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || 3589 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { 3590 Rctl = FC_RCTL_ELS_REQ; 3591 Type = FC_TYPE_ELS; 3592 } else { 3593 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); 3594 Rctl = w5p->hcsw.Rctl; 3595 Type = w5p->hcsw.Type; 3596 3597 /* Firmware Workaround */ 3598 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 3599 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 3600 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 3601 Rctl = FC_RCTL_ELS_REQ; 3602 Type = FC_TYPE_ELS; 3603 w5p->hcsw.Rctl = Rctl; 3604 w5p->hcsw.Type = Type; 3605 } 3606 } 3607 3608 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3609 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX || 3610 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 3611 if (irsp->unsli3.rcvsli3.vpi == 0xffff) 3612 saveq->vport = phba->pport; 3613 else 3614 saveq->vport = lpfc_find_vport_by_vpid(phba, 3615 irsp->unsli3.rcvsli3.vpi); 3616 } 3617 3618 /* Prepare WQE with Unsol frame */ 3619 lpfc_sli_prep_unsol_wqe(phba, saveq); 3620 3621 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) 3622 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3623 "0313 Ring %d handler: unexpected Rctl x%x " 3624 "Type x%x received\n", 3625 pring->ringno, Rctl, Type); 3626 3627 return 1; 3628 } 3629 3630 /** 3631 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb 3632 * @phba: Pointer to HBA context object. 3633 * @pring: Pointer to driver SLI ring object. 3634 * @prspiocb: Pointer to response iocb object. 3635 * 3636 * This function looks up the iocb_lookup table to get the command iocb 3637 * corresponding to the given response iocb using the iotag of the 3638 * response iocb. The driver calls this function with the hbalock held 3639 * for SLI3 ports or the ring lock held for SLI4 ports. 3640 * This function returns the command iocb object if it finds the command 3641 * iocb else returns NULL. 
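 *
 * Illustrative sketch only (not driver code; shortened names assumed): the
 * lookup reduces to a bounds-checked array index keyed by the iotag the
 * port echoes back in the response, roughly:
 *
 *	iotag = (sli_rev == LPFC_SLI_REV4) ? get_wqe_reqtag(rsp)
 *					   : rsp->iocb.ulpIoTag;
 *	if (iotag && iotag <= sli->last_iotag)
 *		cmd = sli->iocbq_lookup[iotag];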
3642 **/ 3643 static struct lpfc_iocbq * 3644 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, 3645 struct lpfc_sli_ring *pring, 3646 struct lpfc_iocbq *prspiocb) 3647 { 3648 struct lpfc_iocbq *cmd_iocb = NULL; 3649 u16 iotag; 3650 3651 if (phba->sli_rev == LPFC_SLI_REV4) 3652 iotag = get_wqe_reqtag(prspiocb); 3653 else 3654 iotag = prspiocb->iocb.ulpIoTag; 3655 3656 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 3657 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 3658 if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) { 3659 /* remove from txcmpl queue list */ 3660 list_del_init(&cmd_iocb->list); 3661 cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; 3662 pring->txcmplq_cnt--; 3663 return cmd_iocb; 3664 } 3665 } 3666 3667 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3668 "0317 iotag x%x is out of " 3669 "range: max iotag x%x\n", 3670 iotag, phba->sli.last_iotag); 3671 return NULL; 3672 } 3673 3674 /** 3675 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag 3676 * @phba: Pointer to HBA context object. 3677 * @pring: Pointer to driver SLI ring object. 3678 * @iotag: IOCB tag. 3679 * 3680 * This function looks up the iocb_lookup table to get the command iocb 3681 * corresponding to the given iotag. The driver calls this function with 3682 * the ring lock held because this function is an SLI4 port only helper. 3683 * This function returns the command iocb object if it finds the command 3684 * iocb else returns NULL. 3685 **/ 3686 static struct lpfc_iocbq * 3687 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, 3688 struct lpfc_sli_ring *pring, uint16_t iotag) 3689 { 3690 struct lpfc_iocbq *cmd_iocb = NULL; 3691 3692 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 3693 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 3694 if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) { 3695 /* remove from txcmpl queue list */ 3696 list_del_init(&cmd_iocb->list); 3697 cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; 3698 pring->txcmplq_cnt--; 3699 return cmd_iocb; 3700 } 3701 } 3702 3703 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3704 "0372 iotag x%x lookup error: max iotag (x%x) " 3705 "cmd_flag x%x\n", 3706 iotag, phba->sli.last_iotag, 3707 cmd_iocb ? cmd_iocb->cmd_flag : 0xffff); 3708 return NULL; 3709 } 3710 3711 /** 3712 * lpfc_sli_process_sol_iocb - process solicited iocb completion 3713 * @phba: Pointer to HBA context object. 3714 * @pring: Pointer to driver SLI ring object. 3715 * @saveq: Pointer to the response iocb to be processed. 3716 * 3717 * This function is called by the ring event handler for non-fcp 3718 * rings when there is a new response iocb in the response ring. 3719 * The caller is not required to hold any locks. This function 3720 * gets the command iocb associated with the response iocb and 3721 * calls the completion handler for the command iocb. If there 3722 * is no completion handler, the function will free the resources 3723 * associated with command iocb. If the response iocb is for 3724 * an already aborted command iocb, the status of the completion 3725 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. 3726 * This function always returns 1. 
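 *
 * Sketch of the abort status rewrite (mirrors the code below; shown here
 * for orientation only): a completion for a driver-aborted command is
 * reported upward as a local reject rather than its raw status:
 *
 *	set_job_ulpstatus(cmdiocbp, IOSTAT_LOCAL_REJECT);
 *	set_job_ulpword4(cmdiocbp, IOERR_ABORT_REQUESTED);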
3727 **/ 3728 static int 3729 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3730 struct lpfc_iocbq *saveq) 3731 { 3732 struct lpfc_iocbq *cmdiocbp; 3733 unsigned long iflag; 3734 u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag; 3735 3736 if (phba->sli_rev == LPFC_SLI_REV4) 3737 spin_lock_irqsave(&pring->ring_lock, iflag); 3738 else 3739 spin_lock_irqsave(&phba->hbalock, iflag); 3740 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 3741 if (phba->sli_rev == LPFC_SLI_REV4) 3742 spin_unlock_irqrestore(&pring->ring_lock, iflag); 3743 else 3744 spin_unlock_irqrestore(&phba->hbalock, iflag); 3745 3746 ulp_command = get_job_cmnd(phba, saveq); 3747 ulp_status = get_job_ulpstatus(phba, saveq); 3748 ulp_word4 = get_job_word4(phba, saveq); 3749 ulp_context = get_job_ulpcontext(phba, saveq); 3750 if (phba->sli_rev == LPFC_SLI_REV4) 3751 iotag = get_wqe_reqtag(saveq); 3752 else 3753 iotag = saveq->iocb.ulpIoTag; 3754 3755 if (cmdiocbp) { 3756 ulp_command = get_job_cmnd(phba, cmdiocbp); 3757 if (cmdiocbp->cmd_cmpl) { 3758 /* 3759 * If an ELS command failed send an event to mgmt 3760 * application. 3761 */ 3762 if (ulp_status && 3763 (pring->ringno == LPFC_ELS_RING) && 3764 (ulp_command == CMD_ELS_REQUEST64_CR)) 3765 lpfc_send_els_failure_event(phba, 3766 cmdiocbp, saveq); 3767 3768 /* 3769 * Post all ELS completions to the worker thread. 3770 * All other are passed to the completion callback. 3771 */ 3772 if (pring->ringno == LPFC_ELS_RING) { 3773 if ((phba->sli_rev < LPFC_SLI_REV4) && 3774 (cmdiocbp->cmd_flag & 3775 LPFC_DRIVER_ABORTED)) { 3776 spin_lock_irqsave(&phba->hbalock, 3777 iflag); 3778 cmdiocbp->cmd_flag &= 3779 ~LPFC_DRIVER_ABORTED; 3780 spin_unlock_irqrestore(&phba->hbalock, 3781 iflag); 3782 saveq->iocb.ulpStatus = 3783 IOSTAT_LOCAL_REJECT; 3784 saveq->iocb.un.ulpWord[4] = 3785 IOERR_SLI_ABORTED; 3786 3787 /* Firmware could still be in progress 3788 * of DMAing payload, so don't free data 3789 * buffer till after a hbeat. 3790 */ 3791 spin_lock_irqsave(&phba->hbalock, 3792 iflag); 3793 saveq->cmd_flag |= LPFC_DELAY_MEM_FREE; 3794 spin_unlock_irqrestore(&phba->hbalock, 3795 iflag); 3796 } 3797 if (phba->sli_rev == LPFC_SLI_REV4) { 3798 if (saveq->cmd_flag & 3799 LPFC_EXCHANGE_BUSY) { 3800 /* Set cmdiocb flag for the 3801 * exchange busy so sgl (xri) 3802 * will not be released until 3803 * the abort xri is received 3804 * from hba. 3805 */ 3806 spin_lock_irqsave( 3807 &phba->hbalock, iflag); 3808 cmdiocbp->cmd_flag |= 3809 LPFC_EXCHANGE_BUSY; 3810 spin_unlock_irqrestore( 3811 &phba->hbalock, iflag); 3812 } 3813 if (cmdiocbp->cmd_flag & 3814 LPFC_DRIVER_ABORTED) { 3815 /* 3816 * Clear LPFC_DRIVER_ABORTED 3817 * bit in case it was driver 3818 * initiated abort. 3819 */ 3820 spin_lock_irqsave( 3821 &phba->hbalock, iflag); 3822 cmdiocbp->cmd_flag &= 3823 ~LPFC_DRIVER_ABORTED; 3824 spin_unlock_irqrestore( 3825 &phba->hbalock, iflag); 3826 set_job_ulpstatus(cmdiocbp, 3827 IOSTAT_LOCAL_REJECT); 3828 set_job_ulpword4(cmdiocbp, 3829 IOERR_ABORT_REQUESTED); 3830 /* 3831 * For SLI4, irspiocb contains 3832 * NO_XRI in sli_xritag, it 3833 * shall not affect releasing 3834 * sgl (xri) process. 
3835 */ 3836 set_job_ulpstatus(saveq, 3837 IOSTAT_LOCAL_REJECT); 3838 set_job_ulpword4(saveq, 3839 IOERR_SLI_ABORTED); 3840 spin_lock_irqsave( 3841 &phba->hbalock, iflag); 3842 saveq->cmd_flag |= 3843 LPFC_DELAY_MEM_FREE; 3844 spin_unlock_irqrestore( 3845 &phba->hbalock, iflag); 3846 } 3847 } 3848 } 3849 cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq); 3850 } else 3851 lpfc_sli_release_iocbq(phba, cmdiocbp); 3852 } else { 3853 /* 3854 * Unknown initiating command based on the response iotag. 3855 * This could be the case on the ELS ring because of 3856 * lpfc_els_abort(). 3857 */ 3858 if (pring->ringno != LPFC_ELS_RING) { 3859 /* 3860 * Ring <ringno> handler: unexpected completion IoTag 3861 * <IoTag> 3862 */ 3863 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3864 "0322 Ring %d handler: " 3865 "unexpected completion IoTag x%x " 3866 "Data: x%x x%x x%x x%x\n", 3867 pring->ringno, iotag, ulp_status, 3868 ulp_word4, ulp_command, ulp_context); 3869 } 3870 } 3871 3872 return 1; 3873 } 3874 3875 /** 3876 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler 3877 * @phba: Pointer to HBA context object. 3878 * @pring: Pointer to driver SLI ring object. 3879 * 3880 * This function is called from the iocb ring event handlers when the 3881 * put pointer is ahead of the get pointer for a ring. This function signals 3882 * an error attention condition to the worker thread and the worker 3883 * thread will transition the HBA to offline state. 3884 **/ 3885 static void 3886 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3887 { 3888 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 3889 /* 3890 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3891 * rsp ring <portRspMax> 3892 */ 3893 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3894 "0312 Ring %d handler: portRspPut %d " 3895 "is bigger than rsp ring %d\n", 3896 pring->ringno, le32_to_cpu(pgp->rspPutInx), 3897 pring->sli.sli3.numRiocb); 3898 3899 phba->link_state = LPFC_HBA_ERROR; 3900 3901 /* 3902 * All error attention handlers are posted to 3903 * the worker thread 3904 */ 3905 phba->work_ha |= HA_ERATT; 3906 phba->work_hs = HS_FFER3; 3907 3908 lpfc_worker_wake_up(phba); 3909 3910 return; 3911 } 3912 3913 /** 3914 * lpfc_poll_eratt - Error attention polling timer timeout handler 3915 * @t: Context to fetch pointer to address of HBA context object from. 3916 * 3917 * This function is invoked by the Error Attention polling timer when the 3918 * timer times out. It will check the SLI Error Attention register for 3919 * possible attention events. If so, it will post an Error Attention event 3920 * and wake up the worker thread to process it. Otherwise, it will set up the 3921 * Error Attention polling timer for the next poll.
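 *
 * The interrupts-per-second bookkeeping below must tolerate the 64-bit
 * interrupt counter wrapping; a minimal sketch of the same arithmetic
 * (names illustrative):
 *
 *	if (prev > curr)
 *		cnt = ((u64)(-1) - prev) + curr;	/* counter wrapped */
 *	else
 *		cnt = curr - prev;
 *	do_div(cnt, poll_interval);	/* 64-by-32 divide, safe on 32-bit */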
3922 **/ 3923 void lpfc_poll_eratt(struct timer_list *t) 3924 { 3925 struct lpfc_hba *phba; 3926 uint32_t eratt = 0; 3927 uint64_t sli_intr, cnt; 3928 3929 phba = timer_container_of(phba, t, eratt_poll); 3930 3931 if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) 3932 return; 3933 3934 if (phba->sli_rev == LPFC_SLI_REV4 && 3935 !test_bit(HBA_SETUP, &phba->hba_flag)) { 3936 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3937 "0663 HBA still initializing 0x%lx, restart " 3938 "timer\n", 3939 phba->hba_flag); 3940 goto restart_timer; 3941 } 3942 3943 /* Here we will also keep track of interrupts per sec of the hba */ 3944 sli_intr = phba->sli.slistat.sli_intr; 3945 3946 if (phba->sli.slistat.sli_prev_intr > sli_intr) 3947 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) + 3948 sli_intr); 3949 else 3950 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr); 3951 3952 /* 64-bit integer division not supported on 32-bit x86 - use do_div */ 3953 do_div(cnt, phba->eratt_poll_interval); 3954 phba->sli.slistat.sli_ips = cnt; 3955 3956 phba->sli.slistat.sli_prev_intr = sli_intr; 3957 3958 /* Check chip HA register for error event */ 3959 eratt = lpfc_sli_check_eratt(phba); 3960 3961 if (eratt) { 3962 /* Tell the worker thread there is work to do */ 3963 lpfc_worker_wake_up(phba); 3964 return; 3965 } 3966 3967 restart_timer: 3968 /* Restart the timer for next eratt poll */ 3969 mod_timer(&phba->eratt_poll, 3970 jiffies + secs_to_jiffies(phba->eratt_poll_interval)); 3971 return; 3972 } 3973 3974 3975 /** 3976 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring 3977 * @phba: Pointer to HBA context object. 3978 * @pring: Pointer to driver SLI ring object. 3979 * @mask: Host attention register mask for this ring. 3980 * 3981 * This function is called from the interrupt context when there is a ring 3982 * event for the fcp ring. The caller does not hold any lock. 3983 * The function processes each response iocb in the response ring until it 3984 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with 3985 * LE bit set. The function will call the completion handler of the command iocb 3986 * if the response iocb indicates a completion for a command iocb or it is 3987 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 3988 * function if this is an unsolicited iocb. 3989 * This routine presumes LPFC_FCP_RING handling and doesn't bother 3990 * to check it explicitly. 3991 */ 3992 int 3993 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 3994 struct lpfc_sli_ring *pring, uint32_t mask) 3995 { 3996 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 3997 IOCB_t *irsp = NULL; 3998 IOCB_t *entry = NULL; 3999 struct lpfc_iocbq *cmdiocbq = NULL; 4000 struct lpfc_iocbq rspiocbq; 4001 uint32_t status; 4002 uint32_t portRspPut, portRspMax; 4003 int rc = 1; 4004 lpfc_iocb_type type; 4005 unsigned long iflag; 4006 uint32_t rsp_cmpl = 0; 4007 4008 spin_lock_irqsave(&phba->hbalock, iflag); 4009 pring->stats.iocb_event++; 4010 4011 /* 4012 * The next available response entry should never exceed the maximum 4013 * entries. If it does, treat it as an adapter hardware error. 
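 *
 * The consumption loop below follows the classic circular-ring pattern;
 * a minimal sketch with illustrative names:
 *
 *	while (get != put) {
 *		entry = ring[get];
 *		if (++get >= max)
 *			get = 0;	/* wrap around */
 *		process(entry);
 *	}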
4014 */ 4015 portRspMax = pring->sli.sli3.numRiocb; 4016 portRspPut = le32_to_cpu(pgp->rspPutInx); 4017 if (unlikely(portRspPut >= portRspMax)) { 4018 lpfc_sli_rsp_pointers_error(phba, pring); 4019 spin_unlock_irqrestore(&phba->hbalock, iflag); 4020 return 1; 4021 } 4022 if (phba->fcp_ring_in_use) { 4023 spin_unlock_irqrestore(&phba->hbalock, iflag); 4024 return 1; 4025 } else 4026 phba->fcp_ring_in_use = 1; 4027 4028 rmb(); 4029 while (pring->sli.sli3.rspidx != portRspPut) { 4030 /* 4031 * Fetch an entry off the ring and copy it into a local data 4032 * structure. The copy involves a byte-swap since the 4033 * network byte order and pci byte orders are different. 4034 */ 4035 entry = lpfc_resp_iocb(phba, pring); 4036 phba->last_completion_time = jiffies; 4037 4038 if (++pring->sli.sli3.rspidx >= portRspMax) 4039 pring->sli.sli3.rspidx = 0; 4040 4041 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 4042 (uint32_t *) &rspiocbq.iocb, 4043 phba->iocb_rsp_size); 4044 INIT_LIST_HEAD(&(rspiocbq.list)); 4045 irsp = &rspiocbq.iocb; 4046 4047 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 4048 pring->stats.iocb_rsp++; 4049 rsp_cmpl++; 4050 4051 if (unlikely(irsp->ulpStatus)) { 4052 /* 4053 * If resource errors reported from HBA, reduce 4054 * queuedepths of the SCSI device. 4055 */ 4056 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 4057 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 4058 IOERR_NO_RESOURCES)) { 4059 spin_unlock_irqrestore(&phba->hbalock, iflag); 4060 phba->lpfc_rampdown_queue_depth(phba); 4061 spin_lock_irqsave(&phba->hbalock, iflag); 4062 } 4063 4064 /* Rsp ring <ringno> error: IOCB */ 4065 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 4066 "0336 Rsp Ring %d error: IOCB Data: " 4067 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 4068 pring->ringno, 4069 irsp->un.ulpWord[0], 4070 irsp->un.ulpWord[1], 4071 irsp->un.ulpWord[2], 4072 irsp->un.ulpWord[3], 4073 irsp->un.ulpWord[4], 4074 irsp->un.ulpWord[5], 4075 *(uint32_t *)&irsp->un1, 4076 *((uint32_t *)&irsp->un1 + 1)); 4077 } 4078 4079 switch (type) { 4080 case LPFC_ABORT_IOCB: 4081 case LPFC_SOL_IOCB: 4082 /* 4083 * Idle exchange closed via ABTS from port. No iocb 4084 * resources need to be recovered. 4085 */ 4086 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 4087 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4088 "0333 IOCB cmd 0x%x" 4089 " processed. 
Skipping" 4090 " completion\n", 4091 irsp->ulpCommand); 4092 break; 4093 } 4094 4095 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 4096 &rspiocbq); 4097 if (unlikely(!cmdiocbq)) 4098 break; 4099 if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) 4100 cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED; 4101 if (cmdiocbq->cmd_cmpl) { 4102 spin_unlock_irqrestore(&phba->hbalock, iflag); 4103 cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq); 4104 spin_lock_irqsave(&phba->hbalock, iflag); 4105 } 4106 break; 4107 case LPFC_UNSOL_IOCB: 4108 spin_unlock_irqrestore(&phba->hbalock, iflag); 4109 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 4110 spin_lock_irqsave(&phba->hbalock, iflag); 4111 break; 4112 default: 4113 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 4114 char adaptermsg[LPFC_MAX_ADPTMSG]; 4115 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 4116 memcpy(&adaptermsg[0], (uint8_t *) irsp, 4117 MAX_MSG_DATA); 4118 dev_warn(&((phba->pcidev)->dev), 4119 "lpfc%d: %s\n", 4120 phba->brd_no, adaptermsg); 4121 } else { 4122 /* Unknown IOCB command */ 4123 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4124 "0334 Unknown IOCB command " 4125 "Data: x%x, x%x x%x x%x x%x\n", 4126 type, irsp->ulpCommand, 4127 irsp->ulpStatus, 4128 irsp->ulpIoTag, 4129 irsp->ulpContext); 4130 } 4131 break; 4132 } 4133 4134 /* 4135 * The response IOCB has been processed. Update the ring 4136 * pointer in SLIM. If the port response put pointer has not 4137 * been updated, sync the pgp->rspPutInx and fetch the new port 4138 * response put pointer. 4139 */ 4140 writel(pring->sli.sli3.rspidx, 4141 &phba->host_gp[pring->ringno].rspGetInx); 4142 4143 if (pring->sli.sli3.rspidx == portRspPut) 4144 portRspPut = le32_to_cpu(pgp->rspPutInx); 4145 } 4146 4147 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) { 4148 pring->stats.iocb_rsp_full++; 4149 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 4150 writel(status, phba->CAregaddr); 4151 readl(phba->CAregaddr); 4152 } 4153 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 4154 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 4155 pring->stats.iocb_cmd_empty++; 4156 4157 /* Force update of the local copy of cmdGetInx */ 4158 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 4159 lpfc_sli_resume_iocb(phba, pring); 4160 4161 if ((pring->lpfc_sli_cmd_available)) 4162 (pring->lpfc_sli_cmd_available) (phba, pring); 4163 4164 } 4165 4166 phba->fcp_ring_in_use = 0; 4167 spin_unlock_irqrestore(&phba->hbalock, iflag); 4168 return rc; 4169 } 4170 4171 /** 4172 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb 4173 * @phba: Pointer to HBA context object. 4174 * @pring: Pointer to driver SLI ring object. 4175 * @rspiocbp: Pointer to driver response IOCB object. 4176 * 4177 * This function is called from the worker thread when there is a slow-path 4178 * response IOCB to process. This function chains all the response iocbs until 4179 * seeing the iocb with the LE bit set. The function will call 4180 * lpfc_sli_process_sol_iocb function if the response iocb indicates a 4181 * completion of a command iocb. The function will call the 4182 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. 4183 * The function frees the resources or calls the completion handler if this 4184 * iocb is an abort completion. The function returns NULL when the response 4185 * iocb has the LE bit set and all the chained iocbs are processed, otherwise 4186 * this function shall chain the iocb on to the iocb_continueq and return the 4187 * response iocb passed in. 
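 *
 * Chaining sketch (illustrative pseudo-code, not driver code): entries
 * are accumulated until the LE-bit entry closes the sequence:
 *
 *	queue rsp on pring->iocb_continueq;
 *	if (rsp does not carry the LE bit)
 *		return rsp;		/* keep accumulating */
 *	saveq = first entry of iocb_continueq;
 *	process saveq and every iocb chained to it;
 *	return NULL;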
4188 **/ 4189 static struct lpfc_iocbq * 4190 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 4191 struct lpfc_iocbq *rspiocbp) 4192 { 4193 struct lpfc_iocbq *saveq; 4194 struct lpfc_iocbq *cmdiocb; 4195 struct lpfc_iocbq *next_iocb; 4196 IOCB_t *irsp; 4197 uint32_t free_saveq; 4198 u8 cmd_type; 4199 lpfc_iocb_type type; 4200 unsigned long iflag; 4201 u32 ulp_status = get_job_ulpstatus(phba, rspiocbp); 4202 u32 ulp_word4 = get_job_word4(phba, rspiocbp); 4203 u32 ulp_command = get_job_cmnd(phba, rspiocbp); 4204 int rc; 4205 4206 spin_lock_irqsave(&phba->hbalock, iflag); 4207 /* First add the response iocb to the continueq list */ 4208 list_add_tail(&rspiocbp->list, &pring->iocb_continueq); 4209 pring->iocb_continueq_cnt++; 4210 4211 /* 4212 * By default, the driver expects to free all resources 4213 * associated with this iocb completion. 4214 */ 4215 free_saveq = 1; 4216 saveq = list_get_first(&pring->iocb_continueq, 4217 struct lpfc_iocbq, list); 4218 list_del_init(&pring->iocb_continueq); 4219 pring->iocb_continueq_cnt = 0; 4220 4221 pring->stats.iocb_rsp++; 4222 4223 /* 4224 * If resource errors reported from HBA, reduce 4225 * queuedepths of the SCSI device. 4226 */ 4227 if (ulp_status == IOSTAT_LOCAL_REJECT && 4228 ((ulp_word4 & IOERR_PARAM_MASK) == 4229 IOERR_NO_RESOURCES)) { 4230 spin_unlock_irqrestore(&phba->hbalock, iflag); 4231 phba->lpfc_rampdown_queue_depth(phba); 4232 spin_lock_irqsave(&phba->hbalock, iflag); 4233 } 4234 4235 if (ulp_status) { 4236 /* Rsp ring <ringno> error: IOCB */ 4237 if (phba->sli_rev < LPFC_SLI_REV4) { 4238 irsp = &rspiocbp->iocb; 4239 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 4240 "0328 Rsp Ring %d error: ulp_status x%x " 4241 "IOCB Data: " 4242 "x%08x x%08x x%08x x%08x " 4243 "x%08x x%08x x%08x x%08x " 4244 "x%08x x%08x x%08x x%08x " 4245 "x%08x x%08x x%08x x%08x\n", 4246 pring->ringno, ulp_status, 4247 get_job_ulpword(rspiocbp, 0), 4248 get_job_ulpword(rspiocbp, 1), 4249 get_job_ulpword(rspiocbp, 2), 4250 get_job_ulpword(rspiocbp, 3), 4251 get_job_ulpword(rspiocbp, 4), 4252 get_job_ulpword(rspiocbp, 5), 4253 *(((uint32_t *)irsp) + 6), 4254 *(((uint32_t *)irsp) + 7), 4255 *(((uint32_t *)irsp) + 8), 4256 *(((uint32_t *)irsp) + 9), 4257 *(((uint32_t *)irsp) + 10), 4258 *(((uint32_t *)irsp) + 11), 4259 *(((uint32_t *)irsp) + 12), 4260 *(((uint32_t *)irsp) + 13), 4261 *(((uint32_t *)irsp) + 14), 4262 *(((uint32_t *)irsp) + 15)); 4263 } else { 4264 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 4265 "0321 Rsp Ring %d error: " 4266 "IOCB Data: " 4267 "x%x x%x x%x x%x\n", 4268 pring->ringno, 4269 rspiocbp->wcqe_cmpl.word0, 4270 rspiocbp->wcqe_cmpl.total_data_placed, 4271 rspiocbp->wcqe_cmpl.parameter, 4272 rspiocbp->wcqe_cmpl.word3); 4273 } 4274 } 4275 4276 4277 /* 4278 * Fetch the iocb command type and call the correct completion 4279 * routine. Solicited and Unsolicited IOCBs on the ELS ring 4280 * get freed back to the lpfc_iocb_list by the discovery 4281 * kernel thread.
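 *
 * Dispatch sketch (illustrative): the low opcode bits select the
 * completion path, e.g.:
 *
 *	switch (lpfc_sli_iocb_cmd_type(ulp_command & CMD_IOCB_MASK)) {
 *	case LPFC_SOL_IOCB:	-> lpfc_sli_process_sol_iocb()
 *	case LPFC_UNSOL_IOCB:	-> lpfc_sli_process_unsol_iocb()
 *	case LPFC_ABORT_IOCB:	-> complete or release the aborted cmd
 *	case LPFC_UNKNOWN_IOCB:	-> log adapter message or error
 *	}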
4282 */ 4283 cmd_type = ulp_command & CMD_IOCB_MASK; 4284 type = lpfc_sli_iocb_cmd_type(cmd_type); 4285 switch (type) { 4286 case LPFC_SOL_IOCB: 4287 spin_unlock_irqrestore(&phba->hbalock, iflag); 4288 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); 4289 spin_lock_irqsave(&phba->hbalock, iflag); 4290 break; 4291 case LPFC_UNSOL_IOCB: 4292 spin_unlock_irqrestore(&phba->hbalock, iflag); 4293 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); 4294 spin_lock_irqsave(&phba->hbalock, iflag); 4295 if (!rc) 4296 free_saveq = 0; 4297 break; 4298 case LPFC_ABORT_IOCB: 4299 cmdiocb = NULL; 4300 if (ulp_command != CMD_XRI_ABORTED_CX) 4301 cmdiocb = lpfc_sli_iocbq_lookup(phba, pring, 4302 saveq); 4303 if (cmdiocb) { 4304 /* Call the specified completion routine */ 4305 if (cmdiocb->cmd_cmpl) { 4306 spin_unlock_irqrestore(&phba->hbalock, iflag); 4307 cmdiocb->cmd_cmpl(phba, cmdiocb, saveq); 4308 spin_lock_irqsave(&phba->hbalock, iflag); 4309 } else { 4310 __lpfc_sli_release_iocbq(phba, cmdiocb); 4311 } 4312 } 4313 break; 4314 case LPFC_UNKNOWN_IOCB: 4315 if (ulp_command == CMD_ADAPTER_MSG) { 4316 char adaptermsg[LPFC_MAX_ADPTMSG]; 4317 4318 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 4319 memcpy(&adaptermsg[0], (uint8_t *)&rspiocbp->wqe, 4320 MAX_MSG_DATA); 4321 dev_warn(&((phba->pcidev)->dev), 4322 "lpfc%d: %s\n", 4323 phba->brd_no, adaptermsg); 4324 } else { 4325 /* Unknown command */ 4326 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4327 "0335 Unknown IOCB " 4328 "command Data: x%x " 4329 "x%x x%x x%x\n", 4330 ulp_command, 4331 ulp_status, 4332 get_wqe_reqtag(rspiocbp), 4333 get_job_ulpcontext(phba, rspiocbp)); 4334 } 4335 break; 4336 } 4337 4338 if (free_saveq) { 4339 list_for_each_entry_safe(rspiocbp, next_iocb, 4340 &saveq->list, list) { 4341 list_del_init(&rspiocbp->list); 4342 __lpfc_sli_release_iocbq(phba, rspiocbp); 4343 } 4344 __lpfc_sli_release_iocbq(phba, saveq); 4345 } 4346 rspiocbp = NULL; 4347 spin_unlock_irqrestore(&phba->hbalock, iflag); 4348 return rspiocbp; 4349 } 4350 4351 /** 4352 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs 4353 * @phba: Pointer to HBA context object. 4354 * @pring: Pointer to driver SLI ring object. 4355 * @mask: Host attention register mask for this ring. 4356 * 4357 * This routine wraps the actual slow_ring event process routine from the 4358 * API jump table function pointer from the lpfc_hba struct. 4359 **/ 4360 void 4361 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 4362 struct lpfc_sli_ring *pring, uint32_t mask) 4363 { 4364 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); 4365 } 4366 4367 /** 4368 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings 4369 * @phba: Pointer to HBA context object. 4370 * @pring: Pointer to driver SLI ring object. 4371 * @mask: Host attention register mask for this ring. 4372 * 4373 * This function is called from the worker thread when there is a ring event 4374 * for non-fcp rings. The caller does not hold any lock. The function will 4375 * remove each response iocb in the response ring and calls the handle 4376 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 
4377 **/ 4378 static void 4379 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, 4380 struct lpfc_sli_ring *pring, uint32_t mask) 4381 { 4382 struct lpfc_pgp *pgp; 4383 IOCB_t *entry; 4384 IOCB_t *irsp = NULL; 4385 struct lpfc_iocbq *rspiocbp = NULL; 4386 uint32_t portRspPut, portRspMax; 4387 unsigned long iflag; 4388 uint32_t status; 4389 4390 pgp = &phba->port_gp[pring->ringno]; 4391 spin_lock_irqsave(&phba->hbalock, iflag); 4392 pring->stats.iocb_event++; 4393 4394 /* 4395 * The next available response entry should never exceed the maximum 4396 * entries. If it does, treat it as an adapter hardware error. 4397 */ 4398 portRspMax = pring->sli.sli3.numRiocb; 4399 portRspPut = le32_to_cpu(pgp->rspPutInx); 4400 if (portRspPut >= portRspMax) { 4401 /* 4402 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 4403 * rsp ring <portRspMax> 4404 */ 4405 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4406 "0303 Ring %d handler: portRspPut %d " 4407 "is bigger than rsp ring %d\n", 4408 pring->ringno, portRspPut, portRspMax); 4409 4410 phba->link_state = LPFC_HBA_ERROR; 4411 spin_unlock_irqrestore(&phba->hbalock, iflag); 4412 4413 phba->work_hs = HS_FFER3; 4414 lpfc_handle_eratt(phba); 4415 4416 return; 4417 } 4418 4419 rmb(); 4420 while (pring->sli.sli3.rspidx != portRspPut) { 4421 /* 4422 * Build a completion list and call the appropriate handler. 4423 * The process is to get the next available response iocb, get 4424 * a free iocb from the list, copy the response data into the 4425 * free iocb, insert to the continuation list, and update the 4426 * next response index to slim. This process makes response 4427 * iocbs in the ring available to DMA as fast as possible but 4428 * pays a penalty for a copy operation. Since the iocb is 4429 * only 32 bytes, this penalty is considered small relative to 4430 * the PCI reads for register values and a slim write. When 4431 * the ulpLe field is set, the entire Command has been 4432 * received. 4433 */ 4434 entry = lpfc_resp_iocb(phba, pring); 4435 4436 phba->last_completion_time = jiffies; 4437 rspiocbp = __lpfc_sli_get_iocbq(phba); 4438 if (rspiocbp == NULL) { 4439 printk(KERN_ERR "%s: out of buffers! Failing " 4440 "completion.\n", __func__); 4441 break; 4442 } 4443 4444 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, 4445 phba->iocb_rsp_size); 4446 irsp = &rspiocbp->iocb; 4447 4448 if (++pring->sli.sli3.rspidx >= portRspMax) 4449 pring->sli.sli3.rspidx = 0; 4450 4451 if (pring->ringno == LPFC_ELS_RING) { 4452 lpfc_debugfs_slow_ring_trc(phba, 4453 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x", 4454 *(((uint32_t *) irsp) + 4), 4455 *(((uint32_t *) irsp) + 6), 4456 *(((uint32_t *) irsp) + 7)); 4457 } 4458 4459 writel(pring->sli.sli3.rspidx, 4460 &phba->host_gp[pring->ringno].rspGetInx); 4461 4462 spin_unlock_irqrestore(&phba->hbalock, iflag); 4463 /* Handle the response IOCB */ 4464 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); 4465 spin_lock_irqsave(&phba->hbalock, iflag); 4466 4467 /* 4468 * If the port response put pointer has not been updated, sync 4469 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port 4470 * response put pointer.
4471 */ 4472 if (pring->sli.sli3.rspidx == portRspPut) { 4473 portRspPut = le32_to_cpu(pgp->rspPutInx); 4474 } 4475 } /* while (pring->sli.sli3.rspidx != portRspPut) */ 4476 4477 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { 4478 /* At least one response entry has been freed */ 4479 pring->stats.iocb_rsp_full++; 4480 /* SET RxRE_RSP in Chip Att register */ 4481 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 4482 writel(status, phba->CAregaddr); 4483 readl(phba->CAregaddr); /* flush */ 4484 } 4485 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 4486 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 4487 pring->stats.iocb_cmd_empty++; 4488 4489 /* Force update of the local copy of cmdGetInx */ 4490 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 4491 lpfc_sli_resume_iocb(phba, pring); 4492 4493 if ((pring->lpfc_sli_cmd_available)) 4494 (pring->lpfc_sli_cmd_available) (phba, pring); 4495 4496 } 4497 4498 spin_unlock_irqrestore(&phba->hbalock, iflag); 4499 return; 4500 } 4501 4502 /** 4503 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events 4504 * @phba: Pointer to HBA context object. 4505 * @pring: Pointer to driver SLI ring object. 4506 * @mask: Host attention register mask for this ring. 4507 * 4508 * This function is called from the worker thread when there is a pending 4509 * ELS response iocb on the driver internal slow-path response iocb worker 4510 * queue. The caller does not hold any lock. The function will remove each 4511 * response iocb from the response worker queue and call the handle 4512 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 4513 **/ 4514 static void 4515 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, 4516 struct lpfc_sli_ring *pring, uint32_t mask) 4517 { 4518 struct lpfc_iocbq *irspiocbq; 4519 struct hbq_dmabuf *dmabuf; 4520 struct lpfc_cq_event *cq_event; 4521 unsigned long iflag; 4522 int count = 0; 4523 4524 clear_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag); 4525 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 4526 /* Get the response iocb from the head of work queue */ 4527 spin_lock_irqsave(&phba->hbalock, iflag); 4528 list_remove_head(&phba->sli4_hba.sp_queue_event, 4529 cq_event, struct lpfc_cq_event, list); 4530 spin_unlock_irqrestore(&phba->hbalock, iflag); 4531 4532 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 4533 case CQE_CODE_COMPL_WQE: 4534 irspiocbq = container_of(cq_event, struct lpfc_iocbq, 4535 cq_event); 4536 /* Translate ELS WCQE to response IOCBQ */ 4537 irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba, 4538 irspiocbq); 4539 if (irspiocbq) 4540 lpfc_sli_sp_handle_rspiocb(phba, pring, 4541 irspiocbq); 4542 count++; 4543 break; 4544 case CQE_CODE_RECEIVE: 4545 case CQE_CODE_RECEIVE_V1: 4546 dmabuf = container_of(cq_event, struct hbq_dmabuf, 4547 cq_event); 4548 lpfc_sli4_handle_received_buffer(phba, dmabuf); 4549 count++; 4550 break; 4551 default: 4552 break; 4553 } 4554 4555 /* Limit the number of events to 64 to avoid soft lockups */ 4556 if (count == 64) 4557 break; 4558 } 4559 } 4560 4561 /** 4562 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring 4563 * @phba: Pointer to HBA context object. 4564 * @pring: Pointer to driver SLI ring object. 4565 * 4566 * This function aborts all iocbs in the given ring and frees all the iocb 4567 * objects in txq. This function issues an abort iocb for all the iocb commands 4568 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before 4569 * the return of this function.
The caller is not required to hold any locks. 4570 **/ 4571 void 4572 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 4573 { 4574 LIST_HEAD(tx_completions); 4575 LIST_HEAD(txcmplq_completions); 4576 struct lpfc_iocbq *iocb, *next_iocb; 4577 int offline; 4578 4579 if (pring->ringno == LPFC_ELS_RING) { 4580 lpfc_fabric_abort_hba(phba); 4581 } 4582 offline = pci_channel_offline(phba->pcidev); 4583 4584 /* Error everything on txq and txcmplq 4585 * First do the txq. 4586 */ 4587 if (phba->sli_rev >= LPFC_SLI_REV4) { 4588 spin_lock_irq(&pring->ring_lock); 4589 list_splice_init(&pring->txq, &tx_completions); 4590 pring->txq_cnt = 0; 4591 4592 if (offline) { 4593 list_splice_init(&pring->txcmplq, 4594 &txcmplq_completions); 4595 } else { 4596 /* Next issue ABTS for everything on the txcmplq */ 4597 list_for_each_entry_safe(iocb, next_iocb, 4598 &pring->txcmplq, list) 4599 lpfc_sli_issue_abort_iotag(phba, pring, 4600 iocb, NULL); 4601 } 4602 spin_unlock_irq(&pring->ring_lock); 4603 } else { 4604 spin_lock_irq(&phba->hbalock); 4605 list_splice_init(&pring->txq, &tx_completions); 4606 pring->txq_cnt = 0; 4607 4608 if (offline) { 4609 list_splice_init(&pring->txcmplq, &txcmplq_completions); 4610 } else { 4611 /* Next issue ABTS for everything on the txcmplq */ 4612 list_for_each_entry_safe(iocb, next_iocb, 4613 &pring->txcmplq, list) 4614 lpfc_sli_issue_abort_iotag(phba, pring, 4615 iocb, NULL); 4616 } 4617 spin_unlock_irq(&phba->hbalock); 4618 } 4619 4620 if (offline) { 4621 /* Cancel all the IOCBs from the completions list */ 4622 lpfc_sli_cancel_iocbs(phba, &txcmplq_completions, 4623 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 4624 } else { 4625 /* Make sure HBA is alive */ 4626 lpfc_issue_hb_tmo(phba); 4627 } 4628 /* Cancel all the IOCBs from the completions list */ 4629 lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT, 4630 IOERR_SLI_ABORTED); 4631 } 4632 4633 /** 4634 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings 4635 * @phba: Pointer to HBA context object. 4636 * 4637 * This function aborts all iocbs in FCP rings and frees all the iocb 4638 * objects in txq. This function issues an abort iocb for all the iocb commands 4639 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before 4640 * the return of this function. The caller is not required to hold any locks. 4641 **/ 4642 void 4643 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba) 4644 { 4645 struct lpfc_sli *psli = &phba->sli; 4646 struct lpfc_sli_ring *pring; 4647 uint32_t i; 4648 4649 /* Look on all the FCP Rings for the iotag */ 4650 if (phba->sli_rev >= LPFC_SLI_REV4) { 4651 for (i = 0; i < phba->cfg_hdw_queue; i++) { 4652 pring = phba->sli4_hba.hdwq[i].io_wq->pring; 4653 lpfc_sli_abort_iocb_ring(phba, pring); 4654 } 4655 } else { 4656 pring = &psli->sli3_ring[LPFC_FCP_RING]; 4657 lpfc_sli_abort_iocb_ring(phba, pring); 4658 } 4659 } 4660 4661 /** 4662 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring 4663 * @phba: Pointer to HBA context object. 4664 * 4665 * This function flushes all iocbs in the IO ring and frees all the iocb 4666 * objects in txq and txcmplq. This function will not issue abort iocbs 4667 * for all the iocb commands in txcmplq, they will just be returned with 4668 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI 4669 * slot has been permanently disabled.
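 *
 * Flush sketch (mirrors the code below; illustrative only): each queue
 * is spliced to a local list under the appropriate lock, then every
 * iocb is completed with an error status:
 *
 *	LIST_HEAD(txq);
 *	spin_lock_irq(&pring->ring_lock);
 *	list_splice_init(&pring->txq, &txq);
 *	spin_unlock_irq(&pring->ring_lock);
 *	lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
 *			      IOERR_SLI_DOWN);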
4670 **/ 4671 void 4672 lpfc_sli_flush_io_rings(struct lpfc_hba *phba) 4673 { 4674 LIST_HEAD(txq); 4675 LIST_HEAD(txcmplq); 4676 struct lpfc_sli *psli = &phba->sli; 4677 struct lpfc_sli_ring *pring; 4678 uint32_t i; 4679 struct lpfc_iocbq *piocb, *next_iocb; 4680 4681 /* Indicate the I/O queues are flushed */ 4682 set_bit(HBA_IOQ_FLUSH, &phba->hba_flag); 4683 4684 /* Look on all the FCP Rings for the iotag */ 4685 if (phba->sli_rev >= LPFC_SLI_REV4) { 4686 for (i = 0; i < phba->cfg_hdw_queue; i++) { 4687 if (!phba->sli4_hba.hdwq || 4688 !phba->sli4_hba.hdwq[i].io_wq) { 4689 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4690 "7777 hdwq's deleted %lx " 4691 "%lx %x %x\n", 4692 phba->pport->load_flag, 4693 phba->hba_flag, 4694 phba->link_state, 4695 phba->sli.sli_flag); 4696 return; 4697 } 4698 pring = phba->sli4_hba.hdwq[i].io_wq->pring; 4699 4700 spin_lock_irq(&pring->ring_lock); 4701 /* Retrieve everything on txq */ 4702 list_splice_init(&pring->txq, &txq); 4703 list_for_each_entry_safe(piocb, next_iocb, 4704 &pring->txcmplq, list) 4705 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; 4706 /* Retrieve everything on the txcmplq */ 4707 list_splice_init(&pring->txcmplq, &txcmplq); 4708 pring->txq_cnt = 0; 4709 pring->txcmplq_cnt = 0; 4710 spin_unlock_irq(&pring->ring_lock); 4711 4712 /* Flush the txq */ 4713 lpfc_sli_cancel_iocbs(phba, &txq, 4714 IOSTAT_LOCAL_REJECT, 4715 IOERR_SLI_DOWN); 4716 /* Flush the txcmplq */ 4717 lpfc_sli_cancel_iocbs(phba, &txcmplq, 4718 IOSTAT_LOCAL_REJECT, 4719 IOERR_SLI_DOWN); 4720 if (unlikely(pci_channel_offline(phba->pcidev))) 4721 lpfc_sli4_io_xri_aborted(phba, NULL, 0); 4722 } 4723 } else { 4724 pring = &psli->sli3_ring[LPFC_FCP_RING]; 4725 4726 spin_lock_irq(&phba->hbalock); 4727 /* Retrieve everything on txq */ 4728 list_splice_init(&pring->txq, &txq); 4729 list_for_each_entry_safe(piocb, next_iocb, 4730 &pring->txcmplq, list) 4731 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; 4732 /* Retrieve everything on the txcmplq */ 4733 list_splice_init(&pring->txcmplq, &txcmplq); 4734 pring->txq_cnt = 0; 4735 pring->txcmplq_cnt = 0; 4736 spin_unlock_irq(&phba->hbalock); 4737 4738 /* Flush the txq */ 4739 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, 4740 IOERR_SLI_DOWN); 4741 /* Flush the txcmplq */ 4742 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, 4743 IOERR_SLI_DOWN); 4744 } 4745 } 4746 4747 /** 4748 * lpfc_sli_brdready_s3 - Check for sli3 host ready status 4749 * @phba: Pointer to HBA context object. 4750 * @mask: Bit mask to be checked. 4751 * 4752 * This function reads the host status register and compares 4753 * with the provided bit mask to check if the HBA completed 4754 * the restart. This function will wait in a loop for the 4755 * HBA to complete restart. If the HBA does not restart within 4756 * 15 iterations, the function will reset the HBA again. The 4757 * function returns 1 when the HBA fails to restart, otherwise 4758 * returns zero. 4759 **/ 4760 static int 4761 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) 4762 { 4763 uint32_t status; 4764 int i = 0; 4765 int retval = 0; 4766 4767 /* Read the HBA Host Status Register */ 4768 if (lpfc_readl(phba->HSregaddr, &status)) 4769 return 1; 4770 4771 set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag); 4772 4773 /* 4774 * Check status register every 100ms for 5 retries, then every 4775 * 500ms for 5, then every 2.5 sec for 5, then reset board and 4776 * every 2.5 sec for 4. 4777 * Break out of the loop if errors occurred during init. 4778 */ 4779 while (((status & mask) != mask) && 4780 !(status & HS_FFERM) && 4781 i++ < 20) { 4782 4783 if (i <= 5) 4784 msleep(10); 4785 else if (i <= 10) 4786 msleep(500); 4787 else 4788 msleep(2500); 4789 4790 if (i == 15) { 4791 /* Do post */ 4792 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4793 lpfc_sli_brdrestart(phba); 4794 } 4795 /* Read the HBA Host Status Register */ 4796 if (lpfc_readl(phba->HSregaddr, &status)) { 4797 retval = 1; 4798 break; 4799 } 4800 } 4801 4802 /* Check to see if any errors occurred during init */ 4803 if ((status & HS_FFERM) || (i >= 20)) { 4804 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4805 "2751 Adapter failed to restart, " 4806 "status reg x%x, FW Data: A8 x%x AC x%x\n", 4807 status, 4808 readl(phba->MBslimaddr + 0xa8), 4809 readl(phba->MBslimaddr + 0xac)); 4810 phba->link_state = LPFC_HBA_ERROR; 4811 retval = 1; 4812 } 4813 4814 return retval; 4815 } 4816 4817 /** 4818 * lpfc_sli_brdready_s4 - Check for sli4 host ready status 4819 * @phba: Pointer to HBA context object. 4820 * @mask: Bit mask to be checked. 4821 * 4822 * This function checks the host status register to check if the HBA is 4823 * ready. This function will wait in a loop for the HBA to be ready. 4824 * If the HBA is not ready, the function will reset the HBA PCI 4825 * function again. The function returns 1 when the HBA fails to be 4826 * ready, otherwise returns zero. 4827 **/ 4828 static int 4829 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask) 4830 { 4831 uint32_t status; 4832 int retval = 0; 4833 4834 /* Read the HBA Host Status Register */ 4835 status = lpfc_sli4_post_status_check(phba); 4836 4837 if (status) { 4838 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4839 lpfc_sli_brdrestart(phba); 4840 status = lpfc_sli4_post_status_check(phba); 4841 } 4842 4843 /* Check to see if any errors occurred during init */ 4844 if (status) { 4845 phba->link_state = LPFC_HBA_ERROR; 4846 retval = 1; 4847 } else 4848 phba->sli4_hba.intr_enable = 0; 4849 4850 clear_bit(HBA_SETUP, &phba->hba_flag); 4851 return retval; 4852 } 4853 4854 /** 4855 * lpfc_sli_brdready - Wrapper func for checking the hba readiness 4856 * @phba: Pointer to HBA context object. 4857 * @mask: Bit mask to be checked. 4858 * 4859 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine 4860 * from the API jump table function pointer from the lpfc_hba struct. 4861 **/ 4862 int 4863 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) 4864 { 4865 return phba->lpfc_sli_brdready(phba, mask); 4866 } 4867 4868 #define BARRIER_TEST_PATTERN (0xdeadbeef) 4869 4870 /** 4871 * lpfc_reset_barrier - Make HBA ready for HBA reset 4872 * @phba: Pointer to HBA context object. 4873 * 4874 * This function is called before resetting an HBA. This function is called 4875 * with hbalock held and requests HBA to quiesce DMAs before a reset. 4876 **/ 4877 void lpfc_reset_barrier(struct lpfc_hba *phba) 4878 { 4879 uint32_t __iomem *resp_buf; 4880 uint32_t __iomem *mbox_buf; 4881 volatile struct MAILBOX_word0 mbox; 4882 uint32_t hc_copy, ha_copy, resp_data; 4883 int i; 4884 uint8_t hdrtype; 4885 4886 lockdep_assert_held(&phba->hbalock); 4887 4888 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); 4889 if (hdrtype != PCI_HEADER_TYPE_MFD || 4890 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID && 4891 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID)) 4892 return; 4893 4894 /* 4895 * Tell the other part of the chip to suspend temporarily all 4896 * its DMA activity.
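 *
 * Handshake sketch (mirrors the code below; illustrative only): a
 * MBX_KILL_BOARD mailbox word owned by the chip is written to SLIM,
 * and a known test pattern in the next word is polled until the chip
 * flips it to its bitwise complement:
 *
 *	writel(BARRIER_TEST_PATTERN, resp_buf + 1);
 *	writel(mbox.word0, mbox_buf);	/* MBX_KILL_BOARD, OWN_CHIP */
 *	poll until readl(resp_buf + 1) == ~BARRIER_TEST_PATTERN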
4897 */ 4898 resp_buf = phba->MBslimaddr; 4899 4900 /* Disable the error attention */ 4901 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 4902 return; 4903 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 4904 readl(phba->HCregaddr); /* flush */ 4905 phba->link_flag |= LS_IGNORE_ERATT; 4906 4907 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4908 return; 4909 if (ha_copy & HA_ERATT) { 4910 /* Clear Chip error bit */ 4911 writel(HA_ERATT, phba->HAregaddr); 4912 phba->pport->stopped = 1; 4913 } 4914 4915 mbox.word0 = 0; 4916 mbox.mbxCommand = MBX_KILL_BOARD; 4917 mbox.mbxOwner = OWN_CHIP; 4918 4919 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 4920 mbox_buf = phba->MBslimaddr; 4921 writel(mbox.word0, mbox_buf); 4922 4923 for (i = 0; i < 50; i++) { 4924 if (lpfc_readl((resp_buf + 1), &resp_data)) 4925 return; 4926 if (resp_data != ~(BARRIER_TEST_PATTERN)) 4927 mdelay(1); 4928 else 4929 break; 4930 } 4931 resp_data = 0; 4932 if (lpfc_readl((resp_buf + 1), &resp_data)) 4933 return; 4934 if (resp_data != ~(BARRIER_TEST_PATTERN)) { 4935 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || 4936 phba->pport->stopped) 4937 goto restore_hc; 4938 else 4939 goto clear_errat; 4940 } 4941 4942 mbox.mbxOwner = OWN_HOST; 4943 resp_data = 0; 4944 for (i = 0; i < 500; i++) { 4945 if (lpfc_readl(resp_buf, &resp_data)) 4946 return; 4947 if (resp_data != mbox.word0) 4948 mdelay(1); 4949 else 4950 break; 4951 } 4952 4953 clear_errat: 4954 4955 while (++i < 500) { 4956 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4957 return; 4958 if (!(ha_copy & HA_ERATT)) 4959 mdelay(1); 4960 else 4961 break; 4962 } 4963 4964 if (readl(phba->HAregaddr) & HA_ERATT) { 4965 writel(HA_ERATT, phba->HAregaddr); 4966 phba->pport->stopped = 1; 4967 } 4968 4969 restore_hc: 4970 phba->link_flag &= ~LS_IGNORE_ERATT; 4971 writel(hc_copy, phba->HCregaddr); 4972 readl(phba->HCregaddr); /* flush */ 4973 } 4974 4975 /** 4976 * lpfc_sli_brdkill - Issue a kill_board mailbox command 4977 * @phba: Pointer to HBA context object. 4978 * 4979 * This function issues a kill_board mailbox command and waits for 4980 * the error attention interrupt. This function is called for stopping 4981 * the firmware processing. The caller is not required to hold any 4982 * locks. This function calls lpfc_hba_down_post function to free 4983 * any pending commands after the kill. The function will return 1 when it 4984 * fails to kill the board else will return 0. 
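 *
 * Wait sketch (mirrors the code below; illustrative only): KILL_BOARD
 * produces no mailbox completion, so the host attention register is
 * polled for the error attention bit instead:
 *
 *	while (i++ < 30 && !(ha_copy & HA_ERATT)) {
 *		mdelay(100);
 *		lpfc_readl(phba->HAregaddr, &ha_copy);
 *	}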
4985 **/ 4986 int 4987 lpfc_sli_brdkill(struct lpfc_hba *phba) 4988 { 4989 struct lpfc_sli *psli; 4990 LPFC_MBOXQ_t *pmb; 4991 uint32_t status; 4992 uint32_t ha_copy; 4993 int retval; 4994 int i = 0; 4995 4996 psli = &phba->sli; 4997 4998 /* Kill HBA */ 4999 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5000 "0329 Kill HBA Data: x%x x%x\n", 5001 phba->pport->port_state, psli->sli_flag); 5002 5003 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5004 if (!pmb) 5005 return 1; 5006 5007 /* Disable the error attention */ 5008 spin_lock_irq(&phba->hbalock); 5009 if (lpfc_readl(phba->HCregaddr, &status)) { 5010 spin_unlock_irq(&phba->hbalock); 5011 mempool_free(pmb, phba->mbox_mem_pool); 5012 return 1; 5013 } 5014 status &= ~HC_ERINT_ENA; 5015 writel(status, phba->HCregaddr); 5016 readl(phba->HCregaddr); /* flush */ 5017 phba->link_flag |= LS_IGNORE_ERATT; 5018 spin_unlock_irq(&phba->hbalock); 5019 5020 lpfc_kill_board(phba, pmb); 5021 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 5022 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 5023 5024 if (retval != MBX_SUCCESS) { 5025 if (retval != MBX_BUSY) 5026 mempool_free(pmb, phba->mbox_mem_pool); 5027 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5028 "2752 KILL_BOARD command failed retval %d\n", 5029 retval); 5030 spin_lock_irq(&phba->hbalock); 5031 phba->link_flag &= ~LS_IGNORE_ERATT; 5032 spin_unlock_irq(&phba->hbalock); 5033 return 1; 5034 } 5035 5036 spin_lock_irq(&phba->hbalock); 5037 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 5038 spin_unlock_irq(&phba->hbalock); 5039 5040 mempool_free(pmb, phba->mbox_mem_pool); 5041 5042 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 5043 * attention every 100ms for 3 seconds. If we don't get ERATT after 5044 * 3 seconds we still set HBA_ERROR state because the status of the 5045 * board is now undefined. 5046 */ 5047 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 5048 return 1; 5049 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 5050 mdelay(100); 5051 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 5052 return 1; 5053 } 5054 5055 timer_delete_sync(&psli->mbox_tmo); 5056 if (ha_copy & HA_ERATT) { 5057 writel(HA_ERATT, phba->HAregaddr); 5058 phba->pport->stopped = 1; 5059 } 5060 spin_lock_irq(&phba->hbalock); 5061 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 5062 psli->mbox_active = NULL; 5063 phba->link_flag &= ~LS_IGNORE_ERATT; 5064 spin_unlock_irq(&phba->hbalock); 5065 5066 lpfc_hba_down_post(phba); 5067 phba->link_state = LPFC_HBA_ERROR; 5068 5069 return ha_copy & HA_ERATT ? 0 : 1; 5070 } 5071 5072 /** 5073 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA 5074 * @phba: Pointer to HBA context object. 5075 * 5076 * This function resets the HBA by writing HC_INITFF to the control 5077 * register. After the HBA resets, this function resets all the iocb ring 5078 * indices. This function disables PCI layer parity checking during 5079 * the reset. 5080 * This function returns 0 always. 5081 * The caller is not required to hold any locks. 5082 **/ 5083 int 5084 lpfc_sli_brdreset(struct lpfc_hba *phba) 5085 { 5086 struct lpfc_sli *psli; 5087 struct lpfc_sli_ring *pring; 5088 uint16_t cfg_value; 5089 int i; 5090 5091 psli = &phba->sli; 5092 5093 /* Reset HBA */ 5094 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5095 "0325 Reset HBA Data: x%x x%x\n", 5096 (phba->pport) ? 
phba->pport->port_state : 0, 5097 psli->sli_flag); 5098 5099 /* perform board reset */ 5100 phba->fc_eventTag = 0; 5101 phba->link_events = 0; 5102 set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag); 5103 if (phba->pport) { 5104 phba->pport->fc_myDID = 0; 5105 phba->pport->fc_prevDID = 0; 5106 } 5107 5108 /* Turn off parity checking and serr during the physical reset */ 5109 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) 5110 return -EIO; 5111 5112 pci_write_config_word(phba->pcidev, PCI_COMMAND, 5113 (cfg_value & 5114 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 5115 5116 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA); 5117 5118 /* Now toggle INITFF bit in the Host Control Register */ 5119 writel(HC_INITFF, phba->HCregaddr); 5120 mdelay(1); 5121 readl(phba->HCregaddr); /* flush */ 5122 writel(0, phba->HCregaddr); 5123 readl(phba->HCregaddr); /* flush */ 5124 5125 /* Restore PCI cmd register */ 5126 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 5127 5128 /* Initialize relevant SLI info */ 5129 for (i = 0; i < psli->num_rings; i++) { 5130 pring = &psli->sli3_ring[i]; 5131 pring->flag = 0; 5132 pring->sli.sli3.rspidx = 0; 5133 pring->sli.sli3.next_cmdidx = 0; 5134 pring->sli.sli3.local_getidx = 0; 5135 pring->sli.sli3.cmdidx = 0; 5136 pring->missbufcnt = 0; 5137 } 5138 5139 phba->link_state = LPFC_WARM_START; 5140 return 0; 5141 } 5142 5143 /** 5144 * lpfc_sli4_brdreset - Reset a sli-4 HBA 5145 * @phba: Pointer to HBA context object. 5146 * 5147 * This function resets a SLI4 HBA. This function disables PCI layer parity 5148 * checking while it resets the device. The caller is not required to hold 5149 * any locks. 5150 * 5151 * This function returns 0 on success, else returns a negative error code. 5152 **/ 5153 int 5154 lpfc_sli4_brdreset(struct lpfc_hba *phba) 5155 { 5156 struct lpfc_sli *psli = &phba->sli; 5157 uint16_t cfg_value; 5158 int rc = 0; 5159 5160 /* Reset HBA */ 5161 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5162 "0295 Reset HBA Data: x%x x%x x%lx\n", 5163 phba->pport->port_state, psli->sli_flag, 5164 phba->hba_flag); 5165 5166 /* perform board reset */ 5167 phba->fc_eventTag = 0; 5168 phba->link_events = 0; 5169 phba->pport->fc_myDID = 0; 5170 phba->pport->fc_prevDID = 0; 5171 5172 spin_lock_irq(&phba->hbalock); 5173 psli->sli_flag &= ~(LPFC_PROCESS_LA); 5174 phba->fcf.fcf_flag = 0; 5175 spin_unlock_irq(&phba->hbalock); 5176 5177 /* Now physically reset the device */ 5178 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5179 "0389 Performing PCI function reset!\n"); 5180 5181 /* Turn off parity checking and serr during the physical reset */ 5182 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) { 5183 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5184 "3205 PCI read Config failed\n"); 5185 return -EIO; 5186 } 5187 5188 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & 5189 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 5190 5191 /* Perform FCoE PCI function reset before freeing queue memory */ 5192 rc = lpfc_pci_function_reset(phba); 5193 5194 /* Restore PCI cmd register */ 5195 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 5196 5197 return rc; 5198 } 5199 5200 /** 5201 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba 5202 * @phba: Pointer to HBA context object. 5203 * 5204 * This function is called in the SLI initialization code path to 5205 * restart the HBA. The caller is not required to hold any lock. 5206 * This function writes MBX_RESTART mailbox command to the SLIM and 5207 * resets the HBA.
At the end of the function, it calls lpfc_hba_down_post 5208 * function to free any pending commands. The function enables 5209 * POST only during the first initialization. The function returns zero. 5210 * The function does not guarantee completion of MBX_RESTART mailbox 5211 * command before the return of this function. 5212 **/ 5213 static int 5214 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) 5215 { 5216 volatile struct MAILBOX_word0 mb; 5217 struct lpfc_sli *psli; 5218 void __iomem *to_slim; 5219 5220 spin_lock_irq(&phba->hbalock); 5221 5222 psli = &phba->sli; 5223 5224 /* Restart HBA */ 5225 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5226 "0337 Restart HBA Data: x%x x%x\n", 5227 (phba->pport) ? phba->pport->port_state : 0, 5228 psli->sli_flag); 5229 5230 mb.word0 = 0; 5231 mb.mbxCommand = MBX_RESTART; 5232 mb.mbxHc = 1; 5233 5234 lpfc_reset_barrier(phba); 5235 5236 to_slim = phba->MBslimaddr; 5237 writel(mb.word0, to_slim); 5238 readl(to_slim); /* flush */ 5239 5240 /* Only skip post after fc_ffinit is completed */ 5241 if (phba->pport && phba->pport->port_state) 5242 mb.word0 = 1; /* This is really setting up word1 */ 5243 else 5244 mb.word0 = 0; /* This is really setting up word1 */ 5245 to_slim = phba->MBslimaddr + sizeof (uint32_t); 5246 writel(mb.word0, to_slim); 5247 readl(to_slim); /* flush */ 5248 5249 lpfc_sli_brdreset(phba); 5250 if (phba->pport) 5251 phba->pport->stopped = 0; 5252 phba->link_state = LPFC_INIT_START; 5253 phba->hba_flag = 0; 5254 spin_unlock_irq(&phba->hbalock); 5255 5256 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 5257 psli->stats_start = ktime_get_seconds(); 5258 5259 /* Give the INITFF and Post time to settle. */ 5260 mdelay(100); 5261 5262 lpfc_hba_down_post(phba); 5263 5264 return 0; 5265 } 5266 5267 /** 5268 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba 5269 * @phba: Pointer to HBA context object. 5270 * 5271 * This function is called in the SLI initialization code path to restart 5272 * a SLI4 HBA. The caller is not required to hold any lock. 5273 * At the end of the function, it calls lpfc_hba_down_post function to 5274 * free any pending commands. 5275 **/ 5276 static int 5277 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) 5278 { 5279 struct lpfc_sli *psli = &phba->sli; 5280 int rc; 5281 5282 /* Restart HBA */ 5283 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5284 "0296 Restart HBA Data: x%x x%x\n", 5285 phba->pport->port_state, psli->sli_flag); 5286 5287 clear_bit(HBA_SETUP, &phba->hba_flag); 5288 lpfc_sli4_queue_unset(phba); 5289 5290 rc = lpfc_sli4_brdreset(phba); 5291 if (rc) { 5292 phba->link_state = LPFC_HBA_ERROR; 5293 goto hba_down_queue; 5294 } 5295 5296 spin_lock_irq(&phba->hbalock); 5297 phba->pport->stopped = 0; 5298 phba->link_state = LPFC_INIT_START; 5299 phba->hba_flag = 0; 5300 /* Preserve FA-PWWN expectation */ 5301 phba->sli4_hba.fawwpn_flag &= LPFC_FAWWPN_FABRIC; 5302 spin_unlock_irq(&phba->hbalock); 5303 5304 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 5305 psli->stats_start = ktime_get_seconds(); 5306 5307 hba_down_queue: 5308 lpfc_hba_down_post(phba); 5309 lpfc_sli4_queue_destroy(phba); 5310 5311 return rc; 5312 } 5313 5314 /** 5315 * lpfc_sli_brdrestart - Wrapper func for restarting hba 5316 * @phba: Pointer to HBA context object. 5317 * 5318 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the 5319 * API jump table function pointer from the lpfc_hba struct. 
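 *
 * Binding sketch (illustrative; the exact setup site is elsewhere in
 * the driver): the per-revision entry point is bound once during
 * initialization and invoked indirectly thereafter:
 *
 *	phba->lpfc_sli_brdrestart = (sli_rev == LPFC_SLI_REV4)
 *			? lpfc_sli_brdrestart_s4
 *			: lpfc_sli_brdrestart_s3;
 *	...
 *	return phba->lpfc_sli_brdrestart(phba);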
 **/
int
lpfc_sli_brdrestart(struct lpfc_hba *phba)
{
	return phba->lpfc_sli_brdrestart(phba);
}

/**
 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
 * @phba: Pointer to HBA context object.
 *
 * This function is called after a HBA restart to wait for successful
 * restart of the HBA. Successful restart of the HBA is indicated by
 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 150
 * iterations, the function will restart the HBA again. The function returns
 * zero if the HBA restarts successfully, else a negative error code.
 **/
int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
	uint32_t status, i = 0;

	/* Read the HBA Host Status Register */
	if (lpfc_readl(phba->HSregaddr, &status))
		return -EIO;

	/* Check status register to see what current state is */
	i = 0;
	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {

		/* Check every 10ms for 10 retries, then every 100ms for 90
		 * retries, then every 1 sec for 50 retries, for a total of
		 * ~60 seconds before resetting the board again, after which
		 * it checks every 1 sec for 50 more retries. Up to 60
		 * seconds before board ready is required for Falcon FIPS
		 * zeroization to complete; any board reset in between would
		 * restart the zeroization and further delay board ready.
		 */
		if (i++ >= 200) {
			/* Adapter failed to init, timeout, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0436 Adapter failed to init, "
					"timeout, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -ETIMEDOUT;
		}

		/* Check to see if any errors occurred during init */
		if (status & HS_FFERM) {
			/* ERROR: During chipset initialization */
			/* Adapter failed to init, chipset, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0437 Adapter failed to init, "
					"chipset, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -EIO;
		}

		if (i <= 10)
			msleep(10);
		else if (i <= 100)
			msleep(100);
		else
			msleep(1000);

		if (i == 150) {
			/* Do post */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		if (lpfc_readl(phba->HSregaddr, &status))
			return -EIO;
	}

	/* Check to see if any errors occurred during init */
	if (status & HS_FFERM) {
		/* ERROR: During chipset initialization */
		/* Adapter failed to init, chipset, status reg <status> */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0438 Adapter failed to init, chipset, "
				"status reg x%x, "
				"FW Data: A8 x%x AC x%x\n", status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		return -EIO;
	}

	set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);

	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* setup host attn register */
	writel(0xffffffff, phba->HAregaddr);
readl(phba->HAregaddr); /* flush */ 5426 return 0; 5427 } 5428 5429 /** 5430 * lpfc_sli_hbq_count - Get the number of HBQs to be configured 5431 * 5432 * This function calculates and returns the number of HBQs required to be 5433 * configured. 5434 **/ 5435 int 5436 lpfc_sli_hbq_count(void) 5437 { 5438 return ARRAY_SIZE(lpfc_hbq_defs); 5439 } 5440 5441 /** 5442 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries 5443 * 5444 * This function adds the number of hbq entries in every HBQ to get 5445 * the total number of hbq entries required for the HBA and returns 5446 * the total count. 5447 **/ 5448 static int 5449 lpfc_sli_hbq_entry_count(void) 5450 { 5451 int hbq_count = lpfc_sli_hbq_count(); 5452 int count = 0; 5453 int i; 5454 5455 for (i = 0; i < hbq_count; ++i) 5456 count += lpfc_hbq_defs[i]->entry_count; 5457 return count; 5458 } 5459 5460 /** 5461 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries 5462 * 5463 * This function calculates amount of memory required for all hbq entries 5464 * to be configured and returns the total memory required. 5465 **/ 5466 int 5467 lpfc_sli_hbq_size(void) 5468 { 5469 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 5470 } 5471 5472 /** 5473 * lpfc_sli_hbq_setup - configure and initialize HBQs 5474 * @phba: Pointer to HBA context object. 5475 * 5476 * This function is called during the SLI initialization to configure 5477 * all the HBQs and post buffers to the HBQ. The caller is not 5478 * required to hold any locks. This function will return zero if successful 5479 * else it will return negative error code. 5480 **/ 5481 static int 5482 lpfc_sli_hbq_setup(struct lpfc_hba *phba) 5483 { 5484 int hbq_count = lpfc_sli_hbq_count(); 5485 LPFC_MBOXQ_t *pmb; 5486 MAILBOX_t *pmbox; 5487 uint32_t hbqno; 5488 uint32_t hbq_entry_index; 5489 5490 /* Get a Mailbox buffer to setup mailbox 5491 * commands for HBA initialization 5492 */ 5493 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5494 5495 if (!pmb) 5496 return -ENOMEM; 5497 5498 pmbox = &pmb->u.mb; 5499 5500 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 5501 phba->link_state = LPFC_INIT_MBX_CMDS; 5502 phba->hbq_in_use = 1; 5503 5504 hbq_entry_index = 0; 5505 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 5506 phba->hbqs[hbqno].next_hbqPutIdx = 0; 5507 phba->hbqs[hbqno].hbqPutIdx = 0; 5508 phba->hbqs[hbqno].local_hbqGetIdx = 0; 5509 phba->hbqs[hbqno].entry_count = 5510 lpfc_hbq_defs[hbqno]->entry_count; 5511 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], 5512 hbq_entry_index, pmb); 5513 hbq_entry_index += phba->hbqs[hbqno].entry_count; 5514 5515 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 5516 /* Adapter failed to init, mbxCmd <cmd> CFG_RING, 5517 mbxStatus <status>, ring <num> */ 5518 5519 lpfc_printf_log(phba, KERN_ERR, 5520 LOG_SLI | LOG_VPORT, 5521 "1805 Adapter failed to init. " 5522 "Data: x%x x%x x%x\n", 5523 pmbox->mbxCommand, 5524 pmbox->mbxStatus, hbqno); 5525 5526 phba->link_state = LPFC_HBA_ERROR; 5527 mempool_free(pmb, phba->mbox_mem_pool); 5528 return -ENXIO; 5529 } 5530 } 5531 phba->hbq_count = hbq_count; 5532 5533 mempool_free(pmb, phba->mbox_mem_pool); 5534 5535 /* Initially populate or replenish the HBQs */ 5536 for (hbqno = 0; hbqno < hbq_count; ++hbqno) 5537 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno); 5538 return 0; 5539 } 5540 5541 /** 5542 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA 5543 * @phba: Pointer to HBA context object. 
 *
 * This function is called during SLI initialization to configure the
 * ELS receive buffer queue and post buffers to it. The caller is not
 * required to hold any locks. This function always returns zero.
 **/
static int
lpfc_sli4_rb_setup(struct lpfc_hba *phba)
{
	phba->hbq_in_use = 1;
	/*
	 * Specific case when MDS diagnostics is enabled and supported.
	 * The receive buffer count is truncated to manage the incoming
	 * traffic.
	 */
	if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
		phba->hbqs[LPFC_ELS_HBQ].entry_count =
			lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
	else
		phba->hbqs[LPFC_ELS_HBQ].entry_count =
			lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
	phba->hbq_count = 1;
	/* Initially populate or replenish the HBQs */
	lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
	return 0;
}

/**
 * lpfc_sli_config_port - Issue config port mailbox command
 * @phba: Pointer to HBA context object.
 * @sli_mode: sli mode - 2/3
 *
 * This function is called by the sli initialization code path
 * to issue config_port mailbox command. This function restarts the
 * HBA firmware and issues a config_port mailbox command to configure
 * the SLI interface in the sli mode specified by sli_mode
 * variable. The caller is not required to hold any locks.
 * The function returns 0 if successful, else returns negative error
 * code.
 **/
int
lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
{
	LPFC_MBOXQ_t *pmb;
	uint32_t resetcount = 0, rc = 0, done = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	phba->sli_rev = sli_mode;
	while (resetcount < 2 && !done) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		rc = lpfc_sli_chipset_init(phba);
		if (rc)
			break;

		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		resetcount++;

		/* Call pre CONFIG_PORT mailbox command initialization. A
		 * value of 0 means the call was successful. Any nonzero
		 * value is a failure, but if ERESTART is returned,
		 * the driver may reset the HBA and try again.
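		 *
		 * Hedged sketch of that contract as coded just below
		 * (resetcount bounds the loop to two chipset resets):
		 *
		 *	rc = lpfc_config_port_prep(phba);
		 *	if (rc == -ERESTART)
		 *		continue;	reset the HBA, retry once
		 *	else if (rc)
		 *		break;		hard failure, abandon setup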
5616 */ 5617 rc = lpfc_config_port_prep(phba); 5618 if (rc == -ERESTART) { 5619 phba->link_state = LPFC_LINK_UNKNOWN; 5620 continue; 5621 } else if (rc) 5622 break; 5623 5624 phba->link_state = LPFC_INIT_MBX_CMDS; 5625 lpfc_config_port(phba, pmb); 5626 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 5627 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 5628 LPFC_SLI3_HBQ_ENABLED | 5629 LPFC_SLI3_CRP_ENABLED | 5630 LPFC_SLI3_DSS_ENABLED); 5631 if (rc != MBX_SUCCESS) { 5632 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5633 "0442 Adapter failed to init, mbxCmd x%x " 5634 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 5635 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); 5636 spin_lock_irq(&phba->hbalock); 5637 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 5638 spin_unlock_irq(&phba->hbalock); 5639 rc = -ENXIO; 5640 } else { 5641 /* Allow asynchronous mailbox command to go through */ 5642 spin_lock_irq(&phba->hbalock); 5643 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 5644 spin_unlock_irq(&phba->hbalock); 5645 done = 1; 5646 5647 if ((pmb->u.mb.un.varCfgPort.casabt == 1) && 5648 (pmb->u.mb.un.varCfgPort.gasabt == 0)) 5649 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5650 "3110 Port did not grant ASABT\n"); 5651 } 5652 } 5653 if (!done) { 5654 rc = -EINVAL; 5655 goto do_prep_failed; 5656 } 5657 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { 5658 if (!pmb->u.mb.un.varCfgPort.cMA) { 5659 rc = -ENXIO; 5660 goto do_prep_failed; 5661 } 5662 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { 5663 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 5664 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; 5665 phba->max_vports = (phba->max_vpi > phba->max_vports) ? 5666 phba->max_vpi : phba->max_vports; 5667 5668 } else 5669 phba->max_vpi = 0; 5670 if (pmb->u.mb.un.varCfgPort.gerbm) 5671 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 5672 if (pmb->u.mb.un.varCfgPort.gcrp) 5673 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 5674 5675 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; 5676 phba->port_gp = phba->mbox->us.s3_pgp.port; 5677 5678 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 5679 if (pmb->u.mb.un.varCfgPort.gbg == 0) { 5680 phba->cfg_enable_bg = 0; 5681 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 5682 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5683 "0443 Adapter did not grant " 5684 "BlockGuard\n"); 5685 } 5686 } 5687 } else { 5688 phba->hbq_get = NULL; 5689 phba->port_gp = phba->mbox->us.s2.port; 5690 phba->max_vpi = 0; 5691 } 5692 do_prep_failed: 5693 mempool_free(pmb, phba->mbox_mem_pool); 5694 return rc; 5695 } 5696 5697 5698 /** 5699 * lpfc_sli_hba_setup - SLI initialization function 5700 * @phba: Pointer to HBA context object. 5701 * 5702 * This function is the main SLI initialization function. This function 5703 * is called by the HBA initialization code, HBA reset code and HBA 5704 * error attention handler code. Caller is not required to hold any 5705 * locks. This function issues config_port mailbox command to configure 5706 * the SLI, setup iocb rings and HBQ rings. In the end the function 5707 * calls the config_port_post function to issue init_link mailbox 5708 * command and to start the discovery. The function will return zero 5709 * if successful, else it will return negative error code. 
 **/
int
lpfc_sli_hba_setup(struct lpfc_hba *phba)
{
	uint32_t rc;
	int i;
	int longs;

	/* Enable ISR already does config_port because of config_msi mbx */
	if (test_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag)) {
		rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
		if (rc)
			return -EIO;
		clear_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
	}
	phba->fcp_embed_io = 0;	/* SLI4 FC support only */

	if (phba->sli_rev == 3) {
		phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
	} else {
		phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
		phba->sli3_options = 0;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0444 Firmware in SLI %x mode. Max_vpi %d\n",
			phba->sli_rev, phba->max_vpi);
	rc = lpfc_sli_ring_map(phba);

	if (rc)
		goto lpfc_sli_hba_setup_error;

	/* Initialize VPIs. */
	if (phba->sli_rev == LPFC_SLI_REV3) {
		/*
		 * The VPI bitmask and physical ID array are allocated
		 * and initialized once only - at driver load. A port
		 * reset doesn't need to reinitialize this memory.
		 */
		if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
			longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
			phba->vpi_bmask = kcalloc(longs,
						  sizeof(unsigned long),
						  GFP_KERNEL);
			if (!phba->vpi_bmask) {
				rc = -ENOMEM;
				goto lpfc_sli_hba_setup_error;
			}

			phba->vpi_ids = kcalloc(phba->max_vpi + 1,
						sizeof(uint16_t),
						GFP_KERNEL);
			if (!phba->vpi_ids) {
				kfree(phba->vpi_bmask);
				rc = -ENOMEM;
				goto lpfc_sli_hba_setup_error;
			}
			for (i = 0; i < phba->max_vpi; i++)
				phba->vpi_ids[i] = i;
		}
	}

	/* Init HBQs */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		rc = lpfc_sli_hbq_setup(phba);
		if (rc)
			goto lpfc_sli_hba_setup_error;
	}
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_PROCESS_LA;
	spin_unlock_irq(&phba->hbalock);

	rc = lpfc_config_port_post(phba);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	return rc;

lpfc_sli_hba_setup_error:
	phba->link_state = LPFC_HBA_ERROR;
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0445 Firmware initialization failed\n");
	return rc;
}

/**
 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
 * @phba: Pointer to HBA context object.
 *
 * This function issues a DUMP mailbox command to read config region
 * 23, parses the records in the region, and populates the driver
 * data structure.
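 *
 * Flow sketch, summarizing the body below: build the region 23 DUMP via
 * lpfc_sli4_dump_cfg_rg23(), issue it MBX_POLL, bound the returned length
 * by DMP_RGN23_SIZE, then hand the buffer to lpfc_parse_fcoe_conf().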
 **/
static int
lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_dmabuf *mp;
	struct lpfc_mqe *mqe;
	uint32_t data_length;
	int rc;

	/* Program the default value of vlan_id and fc_map */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	mqe = &mboxq->u.mqe;
	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
		rc = -ENOMEM;
		goto out_free_mboxq;
	}

	mp = mboxq->ctx_buf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):2571 Mailbox cmd x%x Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
			"x%x x%x x%x x%x x%x x%x x%x x%x x%x "
			"CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_command, mqe),
			bf_get(lpfc_mqe_status, mqe),
			mqe->un.mb_words[0], mqe->un.mb_words[1],
			mqe->un.mb_words[2], mqe->un.mb_words[3],
			mqe->un.mb_words[4], mqe->un.mb_words[5],
			mqe->un.mb_words[6], mqe->un.mb_words[7],
			mqe->un.mb_words[8], mqe->un.mb_words[9],
			mqe->un.mb_words[10], mqe->un.mb_words[11],
			mqe->un.mb_words[12], mqe->un.mb_words[13],
			mqe->un.mb_words[14], mqe->un.mb_words[15],
			mqe->un.mb_words[16], mqe->un.mb_words[50],
			mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);

	if (rc) {
		rc = -EIO;
		goto out_free_mboxq;
	}
	data_length = mqe->un.mb_words[5];
	if (data_length > DMP_RGN23_SIZE) {
		rc = -EIO;
		goto out_free_mboxq;
	}

	lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
	rc = 0;

out_free_mboxq:
	lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
	return rc;
}

/**
 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
 * @vpd: pointer to the memory to hold resulting port vpd data.
 * @vpd_size: On input, the number of bytes allocated to @vpd.
 *            On output, the number of data bytes in @vpd.
 *
 * This routine executes a READ_REV SLI4 mailbox command. In
 * addition, this routine gets the port vpd data.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
 **/
static int
lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		   uint8_t *vpd, uint32_t *vpd_size)
{
	int rc = 0;
	uint32_t dma_size;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_mqe *mqe;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * Get a DMA buffer for the vpd data resulting from the READ_REV
	 * mailbox command.
	 */
	dma_size = *vpd_size;
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
					  &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}

	/*
	 * The SLI4 implementation of READ_REV conflicts at word1,
	 * bits 31:16 and SLI4 adds vpd functionality not present
	 * in SLI3. This code corrects the conflicts.
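	 *
	 * Concretely (illustrative value): the mask below keeps only bits
	 * 15:0, so an initial word1 of 0xABCD1234 goes out as 0x00001234
	 * before the vpd request bit and available length are set.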
5916 */ 5917 lpfc_read_rev(phba, mboxq); 5918 mqe = &mboxq->u.mqe; 5919 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); 5920 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); 5921 mqe->un.read_rev.word1 &= 0x0000FFFF; 5922 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); 5923 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); 5924 5925 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5926 if (rc) { 5927 dma_free_coherent(&phba->pcidev->dev, dma_size, 5928 dmabuf->virt, dmabuf->phys); 5929 kfree(dmabuf); 5930 return -EIO; 5931 } 5932 5933 /* 5934 * The available vpd length cannot be bigger than the 5935 * DMA buffer passed to the port. Catch the less than 5936 * case and update the caller's size. 5937 */ 5938 if (mqe->un.read_rev.avail_vpd_len < *vpd_size) 5939 *vpd_size = mqe->un.read_rev.avail_vpd_len; 5940 5941 memcpy(vpd, dmabuf->virt, *vpd_size); 5942 5943 dma_free_coherent(&phba->pcidev->dev, dma_size, 5944 dmabuf->virt, dmabuf->phys); 5945 kfree(dmabuf); 5946 return 0; 5947 } 5948 5949 /** 5950 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes 5951 * @phba: pointer to lpfc hba data structure. 5952 * 5953 * This routine retrieves SLI4 device physical port name this PCI function 5954 * is attached to. 5955 * 5956 * Return codes 5957 * 0 - successful 5958 * otherwise - failed to retrieve controller attributes 5959 **/ 5960 static int 5961 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba) 5962 { 5963 LPFC_MBOXQ_t *mboxq; 5964 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr; 5965 struct lpfc_controller_attribute *cntl_attr; 5966 void *virtaddr = NULL; 5967 uint32_t alloclen, reqlen; 5968 uint32_t shdr_status, shdr_add_status; 5969 union lpfc_sli4_cfg_shdr *shdr; 5970 int rc; 5971 5972 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5973 if (!mboxq) 5974 return -ENOMEM; 5975 5976 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */ 5977 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes); 5978 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5979 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen, 5980 LPFC_SLI4_MBX_NEMBED); 5981 5982 if (alloclen < reqlen) { 5983 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5984 "3084 Allocated DMA memory size (%d) is " 5985 "less than the requested DMA memory size " 5986 "(%d)\n", alloclen, reqlen); 5987 rc = -ENOMEM; 5988 goto out_free_mboxq; 5989 } 5990 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5991 virtaddr = mboxq->sge_array->addr[0]; 5992 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr; 5993 shdr = &mbx_cntl_attr->cfg_shdr; 5994 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 5995 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5996 if (shdr_status || shdr_add_status || rc) { 5997 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5998 "3085 Mailbox x%x (x%x/x%x) failed, " 5999 "rc:x%x, status:x%x, add_status:x%x\n", 6000 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 6001 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 6002 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 6003 rc, shdr_status, shdr_add_status); 6004 rc = -ENXIO; 6005 goto out_free_mboxq; 6006 } 6007 6008 cntl_attr = &mbx_cntl_attr->cntl_attr; 6009 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 6010 phba->sli4_hba.lnk_info.lnk_tp = 6011 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr); 6012 phba->sli4_hba.lnk_info.lnk_no = 6013 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr); 6014 phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr); 6015 
phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr); 6016 6017 memcpy(phba->BIOSVersion, cntl_attr->bios_ver_str, 6018 sizeof(phba->BIOSVersion)); 6019 phba->BIOSVersion[sizeof(phba->BIOSVersion) - 1] = '\0'; 6020 6021 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6022 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, " 6023 "flash_id: x%02x, asic_rev: x%02x\n", 6024 phba->sli4_hba.lnk_info.lnk_tp, 6025 phba->sli4_hba.lnk_info.lnk_no, 6026 phba->BIOSVersion, phba->sli4_hba.flash_id, 6027 phba->sli4_hba.asic_rev); 6028 out_free_mboxq: 6029 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) 6030 lpfc_sli4_mbox_cmd_free(phba, mboxq); 6031 else 6032 mempool_free(mboxq, phba->mbox_mem_pool); 6033 return rc; 6034 } 6035 6036 /** 6037 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name 6038 * @phba: pointer to lpfc hba data structure. 6039 * 6040 * This routine retrieves SLI4 device physical port name this PCI function 6041 * is attached to. 6042 * 6043 * Return codes 6044 * 0 - successful 6045 * otherwise - failed to retrieve physical port name 6046 **/ 6047 static int 6048 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba) 6049 { 6050 LPFC_MBOXQ_t *mboxq; 6051 struct lpfc_mbx_get_port_name *get_port_name; 6052 uint32_t shdr_status, shdr_add_status; 6053 union lpfc_sli4_cfg_shdr *shdr; 6054 char cport_name = 0; 6055 int rc; 6056 6057 /* We assume nothing at this point */ 6058 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 6059 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON; 6060 6061 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6062 if (!mboxq) 6063 return -ENOMEM; 6064 /* obtain link type and link number via READ_CONFIG */ 6065 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 6066 lpfc_sli4_read_config(phba); 6067 6068 if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) 6069 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC; 6070 6071 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) 6072 goto retrieve_ppname; 6073 6074 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */ 6075 rc = lpfc_sli4_get_ctl_attr(phba); 6076 if (rc) 6077 goto out_free_mboxq; 6078 6079 retrieve_ppname: 6080 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 6081 LPFC_MBOX_OPCODE_GET_PORT_NAME, 6082 sizeof(struct lpfc_mbx_get_port_name) - 6083 sizeof(struct lpfc_sli4_cfg_mhdr), 6084 LPFC_SLI4_MBX_EMBED); 6085 get_port_name = &mboxq->u.mqe.un.get_port_name; 6086 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr; 6087 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1); 6088 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request, 6089 phba->sli4_hba.lnk_info.lnk_tp); 6090 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6091 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6092 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6093 if (shdr_status || shdr_add_status || rc) { 6094 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6095 "3087 Mailbox x%x (x%x/x%x) failed: " 6096 "rc:x%x, status:x%x, add_status:x%x\n", 6097 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 6098 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 6099 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 6100 rc, shdr_status, shdr_add_status); 6101 rc = -ENXIO; 6102 goto out_free_mboxq; 6103 } 6104 switch (phba->sli4_hba.lnk_info.lnk_no) { 6105 case LPFC_LINK_NUMBER_0: 6106 cport_name = bf_get(lpfc_mbx_get_port_name_name0, 6107 &get_port_name->u.response); 6108 
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_1:
		cport_name = bf_get(lpfc_mbx_get_port_name_name1,
				    &get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_2:
		cport_name = bf_get(lpfc_mbx_get_port_name_name2,
				    &get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_3:
		cport_name = bf_get(lpfc_mbx_get_port_name_name3,
				    &get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	default:
		break;
	}

	if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
		phba->Port[0] = cport_name;
		phba->Port[1] = '\0';
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3091 SLI get port name: %s\n", phba->Port);
	}

out_free_mboxq:
	if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	else
		mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to explicitly arm the SLI4 device's completion and
 * event queues.
 **/
static void
lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
{
	int qidx;
	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_queue *eq;

	sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
	sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
	if (sli4_hba->nvmels_cq)
		sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
					   LPFC_QUEUE_REARM);

	if (sli4_hba->hdwq) {
		/* Loop thru all Hardware Queues */
		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
			qp = &sli4_hba->hdwq[qidx];
			/* ARM the corresponding CQ */
			sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
						   LPFC_QUEUE_REARM);
		}

		/* Loop thru all IRQ vectors */
		for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
			eq = sli4_hba->hba_eq_hdl[qidx].eq;
			/* ARM the corresponding EQ */
			sli4_hba->sli4_write_eq_db(phba, eq,
						   0, LPFC_QUEUE_REARM);
		}
	}

	if (phba->nvmet_support) {
		for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
			sli4_hba->sli4_write_cq_db(phba,
				sli4_hba->nvmet_cqset[qidx], 0,
				LPFC_QUEUE_REARM);
		}
	}
}

/**
 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_count: buffer to hold port available extent count.
 * @extnt_size: buffer to hold element count per extent.
 *
 * This function calls the port and retrieves the number of available
 * extents and their size for a particular extent type.
 *
 * Returns: 0 if successful. Nonzero otherwise.
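 *
 * Caller-side usage sketch (locals are hypothetical, for illustration):
 *
 *	uint16_t cnt, size;
 *
 *	if (!lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &cnt, &size))
 *		total ids of this type = cnt * size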
 **/
int
lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
			       uint16_t *extnt_count, uint16_t *extnt_size)
{
	int rc = 0;
	uint32_t length;
	uint32_t mbox_tmo;
	struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
	LPFC_MBOXQ_t *mbox;

	*extnt_count = 0;
	*extnt_size = 0;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Find out how many extents are available for this resource type */
	length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Send an extents count of 0 - the GET doesn't use it. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
	if (bf_get(lpfc_mbox_hdr_status,
		   &rsrc_info->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2930 Failed to get resource extents "
				"Status 0x%x Add'l Status 0x%x\n",
				bf_get(lpfc_mbox_hdr_status,
				       &rsrc_info->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				       &rsrc_info->header.cfg_shdr.response));
		rc = -EIO;
		goto err_exit;
	}

	*extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
			      &rsrc_info->u.rsp);
	*extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
			     &rsrc_info->u.rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3162 Retrieved extents type-%d from port: count:%d, "
			"size:%d\n", type, *extnt_count, *extnt_size);

err_exit:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 * @type: The extent type to check.
 *
 * This function reads the current available extents from the port and checks
 * if the extent count or extent size has changed since the last access.
 * Callers use this routine post port reset to understand if there is an
 * extent reprovisioning requirement.
 *
 * Returns:
 *   -Error: a negative error code indicates a problem.
 *   1: Extent count or size has changed.
 *   0: No changes.
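 *
 * Typical post-reset usage (sketch; a return of 1 triggers extent
 * reallocation, as lpfc_sli4_alloc_resource_identifiers() does below):
 *
 *	rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VFI);
 *	if (rc > 0)
 *		dealloc and realloc all extent types
 *	else if (rc < 0)
 *		error path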
6289 **/ 6290 static int 6291 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type) 6292 { 6293 uint16_t curr_ext_cnt, rsrc_ext_cnt; 6294 uint16_t size_diff, rsrc_ext_size; 6295 int rc = 0; 6296 struct lpfc_rsrc_blks *rsrc_entry; 6297 struct list_head *rsrc_blk_list = NULL; 6298 6299 size_diff = 0; 6300 curr_ext_cnt = 0; 6301 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 6302 &rsrc_ext_cnt, 6303 &rsrc_ext_size); 6304 if (unlikely(rc)) 6305 return -EIO; 6306 6307 switch (type) { 6308 case LPFC_RSC_TYPE_FCOE_RPI: 6309 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 6310 break; 6311 case LPFC_RSC_TYPE_FCOE_VPI: 6312 rsrc_blk_list = &phba->lpfc_vpi_blk_list; 6313 break; 6314 case LPFC_RSC_TYPE_FCOE_XRI: 6315 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 6316 break; 6317 case LPFC_RSC_TYPE_FCOE_VFI: 6318 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 6319 break; 6320 default: 6321 break; 6322 } 6323 6324 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) { 6325 curr_ext_cnt++; 6326 if (rsrc_entry->rsrc_size != rsrc_ext_size) 6327 size_diff++; 6328 } 6329 6330 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0) 6331 rc = 1; 6332 6333 return rc; 6334 } 6335 6336 /** 6337 * lpfc_sli4_cfg_post_extnts - 6338 * @phba: Pointer to HBA context object. 6339 * @extnt_cnt: number of available extents. 6340 * @type: the extent type (rpi, xri, vfi, vpi). 6341 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation. 6342 * @mbox: pointer to the caller's allocated mailbox structure. 6343 * 6344 * This function executes the extents allocation request. It also 6345 * takes care of the amount of memory needed to allocate or get the 6346 * allocated extents. It is the caller's responsibility to evaluate 6347 * the response. 6348 * 6349 * Returns: 6350 * -Error: Error value describes the condition found. 6351 * 0: if successful 6352 **/ 6353 static int 6354 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt, 6355 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox) 6356 { 6357 int rc = 0; 6358 uint32_t req_len; 6359 uint32_t emb_len; 6360 uint32_t alloc_len, mbox_tmo; 6361 6362 /* Calculate the total requested length of the dma memory */ 6363 req_len = extnt_cnt * sizeof(uint16_t); 6364 6365 /* 6366 * Calculate the size of an embedded mailbox. The uint32_t 6367 * accounts for extents-specific word. 6368 */ 6369 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 6370 sizeof(uint32_t); 6371 6372 /* 6373 * Presume the allocation and response will fit into an embedded 6374 * mailbox. If not true, reconfigure to a non-embedded mailbox. 
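	 *
	 * Worked example (assumed sizes, purely illustrative): if the
	 * embedded payload leaves room for roughly 200 bytes, then about
	 * 100 extent ids (2 bytes each) still fit embedded; a larger
	 * extnt_cnt takes the LPFC_SLI4_MBX_NEMBED path, with the
	 * cfg_shdr and the extra extents word added back into req_len.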
6375 */ 6376 *emb = LPFC_SLI4_MBX_EMBED; 6377 if (req_len > emb_len) { 6378 req_len = extnt_cnt * sizeof(uint16_t) + 6379 sizeof(union lpfc_sli4_cfg_shdr) + 6380 sizeof(uint32_t); 6381 *emb = LPFC_SLI4_MBX_NEMBED; 6382 } 6383 6384 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6385 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT, 6386 req_len, *emb); 6387 if (alloc_len < req_len) { 6388 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6389 "2982 Allocated DMA memory size (x%x) is " 6390 "less than the requested DMA memory " 6391 "size (x%x)\n", alloc_len, req_len); 6392 return -ENOMEM; 6393 } 6394 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb); 6395 if (unlikely(rc)) 6396 return -EIO; 6397 6398 if (!phba->sli4_hba.intr_enable) 6399 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 6400 else { 6401 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 6402 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 6403 } 6404 6405 if (unlikely(rc)) 6406 rc = -EIO; 6407 return rc; 6408 } 6409 6410 /** 6411 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent. 6412 * @phba: Pointer to HBA context object. 6413 * @type: The resource extent type to allocate. 6414 * 6415 * This function allocates the number of elements for the specified 6416 * resource type. 6417 **/ 6418 static int 6419 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) 6420 { 6421 bool emb = false; 6422 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size; 6423 uint16_t rsrc_id, rsrc_start, j, k; 6424 uint16_t *ids; 6425 int i, rc; 6426 unsigned long longs; 6427 unsigned long *bmask; 6428 struct lpfc_rsrc_blks *rsrc_blks; 6429 LPFC_MBOXQ_t *mbox; 6430 uint32_t length; 6431 struct lpfc_id_range *id_array = NULL; 6432 void *virtaddr = NULL; 6433 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 6434 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 6435 struct list_head *ext_blk_list; 6436 6437 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 6438 &rsrc_cnt, 6439 &rsrc_size); 6440 if (unlikely(rc)) 6441 return -EIO; 6442 6443 if ((rsrc_cnt == 0) || (rsrc_size == 0)) { 6444 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6445 "3009 No available Resource Extents " 6446 "for resource type 0x%x: Count: 0x%x, " 6447 "Size 0x%x\n", type, rsrc_cnt, 6448 rsrc_size); 6449 return -ENOMEM; 6450 } 6451 6452 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI, 6453 "2903 Post resource extents type-0x%x: " 6454 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size); 6455 6456 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6457 if (!mbox) 6458 return -ENOMEM; 6459 6460 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox); 6461 if (unlikely(rc)) { 6462 rc = -EIO; 6463 goto err_exit; 6464 } 6465 6466 /* 6467 * Figure out where the response is located. Then get local pointers 6468 * to the response data. The port does not guarantee to respond to 6469 * all extents counts request so update the local variable with the 6470 * allocated count from the port. 
6471 */ 6472 if (emb == LPFC_SLI4_MBX_EMBED) { 6473 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 6474 id_array = &rsrc_ext->u.rsp.id[0]; 6475 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 6476 } else { 6477 virtaddr = mbox->sge_array->addr[0]; 6478 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 6479 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 6480 id_array = &n_rsrc->id; 6481 } 6482 6483 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG; 6484 rsrc_id_cnt = rsrc_cnt * rsrc_size; 6485 6486 /* 6487 * Based on the resource size and count, correct the base and max 6488 * resource values. 6489 */ 6490 length = sizeof(struct lpfc_rsrc_blks); 6491 switch (type) { 6492 case LPFC_RSC_TYPE_FCOE_RPI: 6493 phba->sli4_hba.rpi_bmask = kcalloc(longs, 6494 sizeof(unsigned long), 6495 GFP_KERNEL); 6496 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 6497 rc = -ENOMEM; 6498 goto err_exit; 6499 } 6500 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt, 6501 sizeof(uint16_t), 6502 GFP_KERNEL); 6503 if (unlikely(!phba->sli4_hba.rpi_ids)) { 6504 kfree(phba->sli4_hba.rpi_bmask); 6505 rc = -ENOMEM; 6506 goto err_exit; 6507 } 6508 6509 /* 6510 * The next_rpi was initialized with the maximum available 6511 * count but the port may allocate a smaller number. Catch 6512 * that case and update the next_rpi. 6513 */ 6514 phba->sli4_hba.next_rpi = rsrc_id_cnt; 6515 6516 /* Initialize local ptrs for common extent processing later. */ 6517 bmask = phba->sli4_hba.rpi_bmask; 6518 ids = phba->sli4_hba.rpi_ids; 6519 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 6520 break; 6521 case LPFC_RSC_TYPE_FCOE_VPI: 6522 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), 6523 GFP_KERNEL); 6524 if (unlikely(!phba->vpi_bmask)) { 6525 rc = -ENOMEM; 6526 goto err_exit; 6527 } 6528 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t), 6529 GFP_KERNEL); 6530 if (unlikely(!phba->vpi_ids)) { 6531 kfree(phba->vpi_bmask); 6532 rc = -ENOMEM; 6533 goto err_exit; 6534 } 6535 6536 /* Initialize local ptrs for common extent processing later. */ 6537 bmask = phba->vpi_bmask; 6538 ids = phba->vpi_ids; 6539 ext_blk_list = &phba->lpfc_vpi_blk_list; 6540 break; 6541 case LPFC_RSC_TYPE_FCOE_XRI: 6542 phba->sli4_hba.xri_bmask = kcalloc(longs, 6543 sizeof(unsigned long), 6544 GFP_KERNEL); 6545 if (unlikely(!phba->sli4_hba.xri_bmask)) { 6546 rc = -ENOMEM; 6547 goto err_exit; 6548 } 6549 phba->sli4_hba.max_cfg_param.xri_used = 0; 6550 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt, 6551 sizeof(uint16_t), 6552 GFP_KERNEL); 6553 if (unlikely(!phba->sli4_hba.xri_ids)) { 6554 kfree(phba->sli4_hba.xri_bmask); 6555 rc = -ENOMEM; 6556 goto err_exit; 6557 } 6558 6559 /* Initialize local ptrs for common extent processing later. */ 6560 bmask = phba->sli4_hba.xri_bmask; 6561 ids = phba->sli4_hba.xri_ids; 6562 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 6563 break; 6564 case LPFC_RSC_TYPE_FCOE_VFI: 6565 phba->sli4_hba.vfi_bmask = kcalloc(longs, 6566 sizeof(unsigned long), 6567 GFP_KERNEL); 6568 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 6569 rc = -ENOMEM; 6570 goto err_exit; 6571 } 6572 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt, 6573 sizeof(uint16_t), 6574 GFP_KERNEL); 6575 if (unlikely(!phba->sli4_hba.vfi_ids)) { 6576 kfree(phba->sli4_hba.vfi_bmask); 6577 rc = -ENOMEM; 6578 goto err_exit; 6579 } 6580 6581 /* Initialize local ptrs for common extent processing later. 
		 */
		bmask = phba->sli4_hba.vfi_bmask;
		ids = phba->sli4_hba.vfi_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	default:
		/* Unsupported Opcode. Fail call. */
		id_array = NULL;
		bmask = NULL;
		ids = NULL;
		ext_blk_list = NULL;
		goto err_exit;
	}

	/*
	 * Complete initializing the extent configuration with the
	 * allocated ids assigned to this function. The bitmask serves
	 * as an index into the array and manages the available ids. The
	 * array just stores the ids communicated to the port via the wqes.
	 */
	for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
		if ((i % 2) == 0)
			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
					 &id_array[k]);
		else
			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
					 &id_array[k]);

		rsrc_blks = kzalloc(length, GFP_KERNEL);
		if (unlikely(!rsrc_blks)) {
			rc = -ENOMEM;
			kfree(bmask);
			kfree(ids);
			goto err_exit;
		}
		rsrc_blks->rsrc_start = rsrc_id;
		rsrc_blks->rsrc_size = rsrc_size;
		list_add_tail(&rsrc_blks->list, ext_blk_list);
		rsrc_start = rsrc_id;
		if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
			phba->sli4_hba.io_xri_start = rsrc_start +
				lpfc_sli4_get_iocb_cnt(phba);
		}

		while (rsrc_id < (rsrc_start + rsrc_size)) {
			ids[j] = rsrc_id;
			rsrc_id++;
			j++;
		}
		/* Entire word processed. Get next word. */
		if ((i % 2) == 1)
			k++;
	}
err_exit:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return rc;
}

/**
 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
 * @phba: Pointer to HBA context object.
 * @type: the extent's type.
 *
 * This function deallocates all extents of a particular resource type.
 * SLI4 does not allow for deallocating a particular extent range. It
 * is the caller's responsibility to release all kernel memory resources.
 **/
static int
lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
{
	int rc;
	uint32_t length, mbox_tmo = 0;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
	struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/*
	 * This function sends an embedded mailbox because it only sends
	 * the resource type. All extents of this type are released by
	 * the port.
	 */
	length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Send an extents count of 0 - the dealloc doesn't use it.
*/ 6675 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 6676 LPFC_SLI4_MBX_EMBED); 6677 if (unlikely(rc)) { 6678 rc = -EIO; 6679 goto out_free_mbox; 6680 } 6681 if (!phba->sli4_hba.intr_enable) 6682 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 6683 else { 6684 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 6685 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 6686 } 6687 if (unlikely(rc)) { 6688 rc = -EIO; 6689 goto out_free_mbox; 6690 } 6691 6692 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents; 6693 if (bf_get(lpfc_mbox_hdr_status, 6694 &dealloc_rsrc->header.cfg_shdr.response)) { 6695 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6696 "2919 Failed to release resource extents " 6697 "for type %d - Status 0x%x Add'l Status 0x%x. " 6698 "Resource memory not released.\n", 6699 type, 6700 bf_get(lpfc_mbox_hdr_status, 6701 &dealloc_rsrc->header.cfg_shdr.response), 6702 bf_get(lpfc_mbox_hdr_add_status, 6703 &dealloc_rsrc->header.cfg_shdr.response)); 6704 rc = -EIO; 6705 goto out_free_mbox; 6706 } 6707 6708 /* Release kernel memory resources for the specific type. */ 6709 switch (type) { 6710 case LPFC_RSC_TYPE_FCOE_VPI: 6711 kfree(phba->vpi_bmask); 6712 kfree(phba->vpi_ids); 6713 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6714 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6715 &phba->lpfc_vpi_blk_list, list) { 6716 list_del_init(&rsrc_blk->list); 6717 kfree(rsrc_blk); 6718 } 6719 phba->sli4_hba.max_cfg_param.vpi_used = 0; 6720 break; 6721 case LPFC_RSC_TYPE_FCOE_XRI: 6722 kfree(phba->sli4_hba.xri_bmask); 6723 kfree(phba->sli4_hba.xri_ids); 6724 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6725 &phba->sli4_hba.lpfc_xri_blk_list, list) { 6726 list_del_init(&rsrc_blk->list); 6727 kfree(rsrc_blk); 6728 } 6729 break; 6730 case LPFC_RSC_TYPE_FCOE_VFI: 6731 kfree(phba->sli4_hba.vfi_bmask); 6732 kfree(phba->sli4_hba.vfi_ids); 6733 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6734 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6735 &phba->sli4_hba.lpfc_vfi_blk_list, list) { 6736 list_del_init(&rsrc_blk->list); 6737 kfree(rsrc_blk); 6738 } 6739 break; 6740 case LPFC_RSC_TYPE_FCOE_RPI: 6741 /* RPI bitmask and physical id array are cleaned up earlier. 
*/ 6742 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6743 &phba->sli4_hba.lpfc_rpi_blk_list, list) { 6744 list_del_init(&rsrc_blk->list); 6745 kfree(rsrc_blk); 6746 } 6747 break; 6748 default: 6749 break; 6750 } 6751 6752 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6753 6754 out_free_mbox: 6755 mempool_free(mbox, phba->mbox_mem_pool); 6756 return rc; 6757 } 6758 6759 static void 6760 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, 6761 uint32_t feature) 6762 { 6763 uint32_t len; 6764 u32 sig_freq = 0; 6765 6766 len = sizeof(struct lpfc_mbx_set_feature) - 6767 sizeof(struct lpfc_sli4_cfg_mhdr); 6768 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6769 LPFC_MBOX_OPCODE_SET_FEATURES, len, 6770 LPFC_SLI4_MBX_EMBED); 6771 6772 switch (feature) { 6773 case LPFC_SET_UE_RECOVERY: 6774 bf_set(lpfc_mbx_set_feature_UER, 6775 &mbox->u.mqe.un.set_feature, 1); 6776 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY; 6777 mbox->u.mqe.un.set_feature.param_len = 8; 6778 break; 6779 case LPFC_SET_MDS_DIAGS: 6780 bf_set(lpfc_mbx_set_feature_mds, 6781 &mbox->u.mqe.un.set_feature, 1); 6782 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk, 6783 &mbox->u.mqe.un.set_feature, 1); 6784 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS; 6785 mbox->u.mqe.un.set_feature.param_len = 8; 6786 break; 6787 case LPFC_SET_CGN_SIGNAL: 6788 if (phba->cmf_active_mode == LPFC_CFG_OFF) 6789 sig_freq = 0; 6790 else 6791 sig_freq = phba->cgn_sig_freq; 6792 6793 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { 6794 bf_set(lpfc_mbx_set_feature_CGN_alarm_freq, 6795 &mbox->u.mqe.un.set_feature, sig_freq); 6796 bf_set(lpfc_mbx_set_feature_CGN_warn_freq, 6797 &mbox->u.mqe.un.set_feature, sig_freq); 6798 } 6799 6800 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY) 6801 bf_set(lpfc_mbx_set_feature_CGN_warn_freq, 6802 &mbox->u.mqe.un.set_feature, sig_freq); 6803 6804 if (phba->cmf_active_mode == LPFC_CFG_OFF || 6805 phba->cgn_reg_signal == EDC_CG_SIG_NOTSUPPORTED) 6806 sig_freq = 0; 6807 else 6808 sig_freq = lpfc_acqe_cgn_frequency; 6809 6810 bf_set(lpfc_mbx_set_feature_CGN_acqe_freq, 6811 &mbox->u.mqe.un.set_feature, sig_freq); 6812 6813 mbox->u.mqe.un.set_feature.feature = LPFC_SET_CGN_SIGNAL; 6814 mbox->u.mqe.un.set_feature.param_len = 12; 6815 break; 6816 case LPFC_SET_DUAL_DUMP: 6817 bf_set(lpfc_mbx_set_feature_dd, 6818 &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP); 6819 bf_set(lpfc_mbx_set_feature_ddquery, 6820 &mbox->u.mqe.un.set_feature, 0); 6821 mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP; 6822 mbox->u.mqe.un.set_feature.param_len = 4; 6823 break; 6824 case LPFC_SET_ENABLE_MI: 6825 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_MI; 6826 mbox->u.mqe.un.set_feature.param_len = 4; 6827 bf_set(lpfc_mbx_set_feature_milunq, &mbox->u.mqe.un.set_feature, 6828 phba->pport->cfg_lun_queue_depth); 6829 bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature, 6830 phba->sli4_hba.pc_sli4_params.mi_ver); 6831 break; 6832 case LPFC_SET_LD_SIGNAL: 6833 mbox->u.mqe.un.set_feature.feature = LPFC_SET_LD_SIGNAL; 6834 mbox->u.mqe.un.set_feature.param_len = 16; 6835 bf_set(lpfc_mbx_set_feature_lds_qry, 6836 &mbox->u.mqe.un.set_feature, LPFC_QUERY_LDS_OP); 6837 break; 6838 case LPFC_SET_ENABLE_CMF: 6839 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF; 6840 mbox->u.mqe.un.set_feature.param_len = 4; 6841 bf_set(lpfc_mbx_set_feature_cmf, 6842 &mbox->u.mqe.un.set_feature, 1); 6843 break; 6844 } 6845 return; 6846 } 6847 6848 /** 6849 * lpfc_ras_stop_fwlog: Disable FW 
logging by the adapter
 * @phba: Pointer to HBA context object.
 *
 * Disable FW logging into host memory on the adapter. This must be
 * done before reading the logs from host memory.
 **/
void
lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;

	spin_lock_irq(&phba->ras_fwlog_lock);
	ras_fwlog->state = INACTIVE;
	spin_unlock_irq(&phba->ras_fwlog_lock);

	/* Disable FW logging to host memory */
	writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
	       phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);

	/* Wait 10ms for firmware to stop using DMA buffer */
	usleep_range(10 * 1000, 20 * 1000);
}

/**
 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
 * @phba: Pointer to HBA context object.
 *
 * This function is called to free memory allocated for RAS FW logging
 * support in the driver.
 **/
void
lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct lpfc_dmabuf *dmabuf, *next;

	if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
		list_for_each_entry_safe(dmabuf, next,
					 &ras_fwlog->fwlog_buff_list,
					 list) {
			list_del(&dmabuf->list);
			dma_free_coherent(&phba->pcidev->dev,
					  LPFC_RAS_MAX_ENTRY_SIZE,
					  dmabuf->virt, dmabuf->phys);
			kfree(dmabuf);
		}
	}

	if (ras_fwlog->lwpd.virt) {
		dma_free_coherent(&phba->pcidev->dev,
				  sizeof(uint32_t) * 2,
				  ras_fwlog->lwpd.virt,
				  ras_fwlog->lwpd.phys);
		ras_fwlog->lwpd.virt = NULL;
	}

	spin_lock_irq(&phba->ras_fwlog_lock);
	ras_fwlog->state = INACTIVE;
	spin_unlock_irq(&phba->ras_fwlog_lock);
}

/**
 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW logging support
 * @phba: Pointer to HBA context object.
 * @fwlog_buff_count: Count of buffers to be created.
 *
 * This routine allocates DMA memory for the Log Write Position Data (LWPD)
 * and for the buffers posted to the adapter for FW log updates.
 * The buffer count is calculated from the module param ras_fwlog_buffsize;
 * each buffer posted to the FW is 64K.
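 *
 * Worked example (illustrative value): with ras_fwlog_buffsize = 4, the
 * caller computes fwlog_buffsize = 4 * LPFC_RAS_MIN_BUFF_POST_SIZE and
 * posts fwlog_buffsize / LPFC_RAS_MAX_ENTRY_SIZE buffers of 64K each,
 * every one tagged with buffer_tag so lpfc_sli4_ras_fwlog_init() can
 * address it in the mailbox request.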
 **/
static int
lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
			uint32_t fwlog_buff_count)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct lpfc_dmabuf *dmabuf;
	int rc = 0, i = 0;

	/* Initialize List */
	INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);

	/* Allocate memory for the LWPD */
	ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
					    sizeof(uint32_t) * 2,
					    &ras_fwlog->lwpd.phys,
					    GFP_KERNEL);
	if (!ras_fwlog->lwpd.virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6185 LWPD Memory Alloc Failed\n");

		return -ENOMEM;
	}

	ras_fwlog->fw_buffcount = fwlog_buff_count;
	for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!dmabuf) {
			rc = -ENOMEM;
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6186 Memory Alloc failed FW logging");
			goto free_mem;
		}

		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
						  LPFC_RAS_MAX_ENTRY_SIZE,
						  &dmabuf->phys, GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			rc = -ENOMEM;
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6187 DMA Alloc Failed FW logging");
			goto free_mem;
		}
		dmabuf->buffer_tag = i;
		list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
	}

free_mem:
	if (rc)
		lpfc_sli4_ras_dma_free(phba);

	return rc;
}

/**
 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Completion handler for driver's RAS MBX command to the device.
 **/
static void
lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;

	mb = &pmb->u.mb;

	shdr = (union lpfc_sli4_cfg_shdr *)
		&pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6188 FW LOG mailbox "
				"completed with status x%x add_status x%x,"
				" mbx status x%x\n",
				shdr_status, shdr_add_status, mb->mbxStatus);

		ras_fwlog->ras_hwsupport = false;
		goto disable_ras;
	}

	spin_lock_irq(&phba->ras_fwlog_lock);
	ras_fwlog->state = ACTIVE;
	spin_unlock_irq(&phba->ras_fwlog_lock);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;

disable_ras:
	/* Free RAS DMA memory */
	lpfc_sli4_ras_dma_free(phba);
	mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
 * @phba: pointer to lpfc hba data structure.
 * @fwlog_level: Logging verbosity level.
 * @fwlog_enable: Enable/Disable logging.
 *
 * Initialize memory and post mailbox command to enable FW logging in host
 * memory.
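 *
 * Usage sketch (mirrors lpfc_sli4_ras_setup() further below):
 *
 *	if (!lpfc_check_fwlog_support(phba))
 *		lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
 *					 LPFC_RAS_ENABLE_LOGGING);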
7029 **/ 7030 int 7031 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, 7032 uint32_t fwlog_level, 7033 uint32_t fwlog_enable) 7034 { 7035 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 7036 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL; 7037 struct lpfc_dmabuf *dmabuf; 7038 LPFC_MBOXQ_t *mbox; 7039 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count; 7040 int rc = 0; 7041 7042 spin_lock_irq(&phba->ras_fwlog_lock); 7043 ras_fwlog->state = INACTIVE; 7044 spin_unlock_irq(&phba->ras_fwlog_lock); 7045 7046 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE * 7047 phba->cfg_ras_fwlog_buffsize); 7048 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE); 7049 7050 /* 7051 * If re-enabling FW logging support use earlier allocated 7052 * DMA buffers while posting MBX command. 7053 **/ 7054 if (!ras_fwlog->lwpd.virt) { 7055 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count); 7056 if (rc) { 7057 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7058 "6189 FW Log Memory Allocation Failed"); 7059 return rc; 7060 } 7061 } 7062 7063 /* Setup Mailbox command */ 7064 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7065 if (!mbox) { 7066 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7067 "6190 RAS MBX Alloc Failed"); 7068 rc = -ENOMEM; 7069 goto mem_free; 7070 } 7071 7072 ras_fwlog->fw_loglevel = fwlog_level; 7073 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) - 7074 sizeof(struct lpfc_sli4_cfg_mhdr)); 7075 7076 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL, 7077 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION, 7078 len, LPFC_SLI4_MBX_EMBED); 7079 7080 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog; 7081 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request, 7082 fwlog_enable); 7083 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request, 7084 ras_fwlog->fw_loglevel); 7085 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request, 7086 ras_fwlog->fw_buffcount); 7087 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request, 7088 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE); 7089 7090 /* Update DMA buffer address */ 7091 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) { 7092 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE); 7093 7094 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo = 7095 putPaddrLow(dmabuf->phys); 7096 7097 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi = 7098 putPaddrHigh(dmabuf->phys); 7099 } 7100 7101 /* Update LPWD address */ 7102 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys); 7103 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys); 7104 7105 spin_lock_irq(&phba->ras_fwlog_lock); 7106 ras_fwlog->state = REG_INPROGRESS; 7107 spin_unlock_irq(&phba->ras_fwlog_lock); 7108 mbox->vport = phba->pport; 7109 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl; 7110 7111 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7112 7113 if (rc == MBX_NOT_FINISHED) { 7114 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7115 "6191 FW-Log Mailbox failed. " 7116 "status %d mbxStatus : x%x", rc, 7117 bf_get(lpfc_mqe_status, &mbox->u.mqe)); 7118 mempool_free(mbox, phba->mbox_mem_pool); 7119 rc = -EIO; 7120 goto mem_free; 7121 } else 7122 rc = 0; 7123 mem_free: 7124 if (rc) 7125 lpfc_sli4_ras_dma_free(phba); 7126 7127 return rc; 7128 } 7129 7130 /** 7131 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter 7132 * @phba: Pointer to HBA context object. 7133 * 7134 * Check if RAS is supported on the adapter and initialize it. 
7135 **/ 7136 void 7137 lpfc_sli4_ras_setup(struct lpfc_hba *phba) 7138 { 7139 /* Check RAS FW Log needs to be enabled or not */ 7140 if (lpfc_check_fwlog_support(phba)) 7141 return; 7142 7143 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level, 7144 LPFC_RAS_ENABLE_LOGGING); 7145 } 7146 7147 /** 7148 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents. 7149 * @phba: Pointer to HBA context object. 7150 * 7151 * This function allocates all SLI4 resource identifiers. 7152 **/ 7153 int 7154 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) 7155 { 7156 int i, rc, error = 0; 7157 uint16_t count, base; 7158 unsigned long longs; 7159 7160 if (!phba->sli4_hba.rpi_hdrs_in_use) 7161 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 7162 if (phba->sli4_hba.extents_in_use) { 7163 /* 7164 * The port supports resource extents. The XRI, VPI, VFI, RPI 7165 * resource extent count must be read and allocated before 7166 * provisioning the resource id arrays. 7167 */ 7168 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 7169 LPFC_IDX_RSRC_RDY) { 7170 /* 7171 * Extent-based resources are set - the driver could 7172 * be in a port reset. Figure out if any corrective 7173 * actions need to be taken. 7174 */ 7175 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 7176 LPFC_RSC_TYPE_FCOE_VFI); 7177 if (rc != 0) 7178 error++; 7179 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 7180 LPFC_RSC_TYPE_FCOE_VPI); 7181 if (rc != 0) 7182 error++; 7183 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 7184 LPFC_RSC_TYPE_FCOE_XRI); 7185 if (rc != 0) 7186 error++; 7187 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 7188 LPFC_RSC_TYPE_FCOE_RPI); 7189 if (rc != 0) 7190 error++; 7191 7192 /* 7193 * It's possible that the number of resources 7194 * provided to this port instance changed between 7195 * resets. Detect this condition and reallocate 7196 * resources. Otherwise, there is no action. 7197 */ 7198 if (error) { 7199 lpfc_printf_log(phba, KERN_INFO, 7200 LOG_MBOX | LOG_INIT, 7201 "2931 Detected extent resource " 7202 "change. Reallocating all " 7203 "extents.\n"); 7204 rc = lpfc_sli4_dealloc_extent(phba, 7205 LPFC_RSC_TYPE_FCOE_VFI); 7206 rc = lpfc_sli4_dealloc_extent(phba, 7207 LPFC_RSC_TYPE_FCOE_VPI); 7208 rc = lpfc_sli4_dealloc_extent(phba, 7209 LPFC_RSC_TYPE_FCOE_XRI); 7210 rc = lpfc_sli4_dealloc_extent(phba, 7211 LPFC_RSC_TYPE_FCOE_RPI); 7212 } else 7213 return 0; 7214 } 7215 7216 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 7217 if (unlikely(rc)) 7218 goto err_exit; 7219 7220 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 7221 if (unlikely(rc)) 7222 goto err_exit; 7223 7224 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 7225 if (unlikely(rc)) 7226 goto err_exit; 7227 7228 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 7229 if (unlikely(rc)) 7230 goto err_exit; 7231 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 7232 LPFC_IDX_RSRC_RDY); 7233 return rc; 7234 } else { 7235 /* 7236 * The port does not support resource extents. The XRI, VPI, 7237 * VFI, RPI resource ids were determined from READ_CONFIG. 7238 * Just allocate the bitmasks and provision the resource id 7239 * arrays. If a port reset is active, the resources don't 7240 * need any action - just exit. 7241 */ 7242 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 7243 LPFC_IDX_RSRC_RDY) { 7244 lpfc_sli4_dealloc_resource_identifiers(phba); 7245 lpfc_sli4_remove_rpis(phba); 7246 } 7247 /* RPIs. 
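	 * Provision the bitmask and id array from the base and count
	 * reported by READ_CONFIG; the same pattern repeats for VPIs,
	 * XRIs and VFIs below.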
*/ 7248 count = phba->sli4_hba.max_cfg_param.max_rpi; 7249 if (count <= 0) { 7250 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7251 "3279 Invalid provisioning of " 7252 "rpi:%d\n", count); 7253 rc = -EINVAL; 7254 goto err_exit; 7255 } 7256 base = phba->sli4_hba.max_cfg_param.rpi_base; 7257 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 7258 phba->sli4_hba.rpi_bmask = kcalloc(longs, 7259 sizeof(unsigned long), 7260 GFP_KERNEL); 7261 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 7262 rc = -ENOMEM; 7263 goto err_exit; 7264 } 7265 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t), 7266 GFP_KERNEL); 7267 if (unlikely(!phba->sli4_hba.rpi_ids)) { 7268 rc = -ENOMEM; 7269 goto free_rpi_bmask; 7270 } 7271 7272 for (i = 0; i < count; i++) 7273 phba->sli4_hba.rpi_ids[i] = base + i; 7274 7275 /* VPIs. */ 7276 count = phba->sli4_hba.max_cfg_param.max_vpi; 7277 if (count <= 0) { 7278 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7279 "3280 Invalid provisioning of " 7280 "vpi:%d\n", count); 7281 rc = -EINVAL; 7282 goto free_rpi_ids; 7283 } 7284 base = phba->sli4_hba.max_cfg_param.vpi_base; 7285 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 7286 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), 7287 GFP_KERNEL); 7288 if (unlikely(!phba->vpi_bmask)) { 7289 rc = -ENOMEM; 7290 goto free_rpi_ids; 7291 } 7292 phba->vpi_ids = kcalloc(count, sizeof(uint16_t), 7293 GFP_KERNEL); 7294 if (unlikely(!phba->vpi_ids)) { 7295 rc = -ENOMEM; 7296 goto free_vpi_bmask; 7297 } 7298 7299 for (i = 0; i < count; i++) 7300 phba->vpi_ids[i] = base + i; 7301 7302 /* XRIs. */ 7303 count = phba->sli4_hba.max_cfg_param.max_xri; 7304 if (count <= 0) { 7305 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7306 "3281 Invalid provisioning of " 7307 "xri:%d\n", count); 7308 rc = -EINVAL; 7309 goto free_vpi_ids; 7310 } 7311 base = phba->sli4_hba.max_cfg_param.xri_base; 7312 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 7313 phba->sli4_hba.xri_bmask = kcalloc(longs, 7314 sizeof(unsigned long), 7315 GFP_KERNEL); 7316 if (unlikely(!phba->sli4_hba.xri_bmask)) { 7317 rc = -ENOMEM; 7318 goto free_vpi_ids; 7319 } 7320 phba->sli4_hba.max_cfg_param.xri_used = 0; 7321 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t), 7322 GFP_KERNEL); 7323 if (unlikely(!phba->sli4_hba.xri_ids)) { 7324 rc = -ENOMEM; 7325 goto free_xri_bmask; 7326 } 7327 7328 for (i = 0; i < count; i++) 7329 phba->sli4_hba.xri_ids[i] = base + i; 7330 7331 /* VFIs. */ 7332 count = phba->sli4_hba.max_cfg_param.max_vfi; 7333 if (count <= 0) { 7334 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7335 "3282 Invalid provisioning of " 7336 "vfi:%d\n", count); 7337 rc = -EINVAL; 7338 goto free_xri_ids; 7339 } 7340 base = phba->sli4_hba.max_cfg_param.vfi_base; 7341 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 7342 phba->sli4_hba.vfi_bmask = kcalloc(longs, 7343 sizeof(unsigned long), 7344 GFP_KERNEL); 7345 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 7346 rc = -ENOMEM; 7347 goto free_xri_ids; 7348 } 7349 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t), 7350 GFP_KERNEL); 7351 if (unlikely(!phba->sli4_hba.vfi_ids)) { 7352 rc = -ENOMEM; 7353 goto free_vfi_bmask; 7354 } 7355 7356 for (i = 0; i < count; i++) 7357 phba->sli4_hba.vfi_ids[i] = base + i; 7358 7359 /* 7360 * Mark all resources ready. An HBA reset doesn't need 7361 * to reset the initialization. 
7362 */ 7363 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 7364 LPFC_IDX_RSRC_RDY); 7365 return 0; 7366 } 7367 7368 free_vfi_bmask: 7369 kfree(phba->sli4_hba.vfi_bmask); 7370 phba->sli4_hba.vfi_bmask = NULL; 7371 free_xri_ids: 7372 kfree(phba->sli4_hba.xri_ids); 7373 phba->sli4_hba.xri_ids = NULL; 7374 free_xri_bmask: 7375 kfree(phba->sli4_hba.xri_bmask); 7376 phba->sli4_hba.xri_bmask = NULL; 7377 free_vpi_ids: 7378 kfree(phba->vpi_ids); 7379 phba->vpi_ids = NULL; 7380 free_vpi_bmask: 7381 kfree(phba->vpi_bmask); 7382 phba->vpi_bmask = NULL; 7383 free_rpi_ids: 7384 kfree(phba->sli4_hba.rpi_ids); 7385 phba->sli4_hba.rpi_ids = NULL; 7386 free_rpi_bmask: 7387 kfree(phba->sli4_hba.rpi_bmask); 7388 phba->sli4_hba.rpi_bmask = NULL; 7389 err_exit: 7390 return rc; 7391 } 7392 7393 /** 7394 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents. 7395 * @phba: Pointer to HBA context object. 7396 * 7397 * This function allocates the number of elements for the specified 7398 * resource type. 7399 **/ 7400 int 7401 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba) 7402 { 7403 if (phba->sli4_hba.extents_in_use) { 7404 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 7405 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 7406 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 7407 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 7408 } else { 7409 kfree(phba->vpi_bmask); 7410 phba->sli4_hba.max_cfg_param.vpi_used = 0; 7411 kfree(phba->vpi_ids); 7412 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 7413 kfree(phba->sli4_hba.xri_bmask); 7414 kfree(phba->sli4_hba.xri_ids); 7415 kfree(phba->sli4_hba.vfi_bmask); 7416 kfree(phba->sli4_hba.vfi_ids); 7417 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 7418 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 7419 } 7420 7421 return 0; 7422 } 7423 7424 /** 7425 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents. 7426 * @phba: Pointer to HBA context object. 7427 * @type: The resource extent type. 7428 * @extnt_cnt: buffer to hold port extent count response 7429 * @extnt_size: buffer to hold port extent size response. 7430 * 7431 * This function calls the port to read the host allocated extents 7432 * for a particular type. 7433 **/ 7434 int 7435 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type, 7436 uint16_t *extnt_cnt, uint16_t *extnt_size) 7437 { 7438 bool emb; 7439 int rc = 0; 7440 uint16_t curr_blks = 0; 7441 uint32_t req_len, emb_len; 7442 uint32_t alloc_len, mbox_tmo; 7443 struct list_head *blk_list_head; 7444 struct lpfc_rsrc_blks *rsrc_blk; 7445 LPFC_MBOXQ_t *mbox; 7446 void *virtaddr = NULL; 7447 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 7448 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 7449 union lpfc_sli4_cfg_shdr *shdr; 7450 7451 switch (type) { 7452 case LPFC_RSC_TYPE_FCOE_VPI: 7453 blk_list_head = &phba->lpfc_vpi_blk_list; 7454 break; 7455 case LPFC_RSC_TYPE_FCOE_XRI: 7456 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list; 7457 break; 7458 case LPFC_RSC_TYPE_FCOE_VFI: 7459 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list; 7460 break; 7461 case LPFC_RSC_TYPE_FCOE_RPI: 7462 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list; 7463 break; 7464 default: 7465 return -EIO; 7466 } 7467 7468 /* Count the number of extents currently allocatd for this type. 
	 */
	list_for_each_entry(rsrc_blk, blk_list_head, list) {
		if (curr_blks == 0) {
			/*
			 * The GET_ALLOCATED mailbox does not return the size,
			 * just the count. The size should be just the size
			 * stored in the current allocated block and all sizes
			 * for an extent type are the same so set the return
			 * value now.
			 */
			*extnt_size = rsrc_blk->rsrc_size;
		}
		curr_blks++;
	}

	/*
	 * Calculate the size of an embedded mailbox. The uint32_t
	 * accounts for the extents-specific word.
	 */
	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
		  sizeof(uint32_t);

	/*
	 * Presume the allocation and response will fit into an embedded
	 * mailbox. If not true, reconfigure to a non-embedded mailbox.
	 */
	emb = LPFC_SLI4_MBX_EMBED;
	req_len = curr_blks * sizeof(uint16_t);
	if (req_len > emb_len) {
		req_len = curr_blks * sizeof(uint16_t) +
			  sizeof(union lpfc_sli4_cfg_shdr) +
			  sizeof(uint32_t);
		emb = LPFC_SLI4_MBX_NEMBED;
	}

	mbox = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	memset(mbox, 0, sizeof(LPFC_MBOXQ_t));

	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
				     LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
				     req_len, emb);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2983 Allocated DMA memory size (x%x) is "
				"less than the requested DMA memory "
				"size (x%x)\n", alloc_len, req_len);
		rc = -ENOMEM;
		goto err_exit;
	}
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}

	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	/*
	 * Figure out where the response is located. Then get local pointers
	 * to the response data. The port does not guarantee to respond to
	 * all extent count requests, so update the local variable with the
	 * allocated count from the port.
	 */
	if (emb == LPFC_SLI4_MBX_EMBED) {
		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
		shdr = &rsrc_ext->header.cfg_shdr;
		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
	} else {
		virtaddr = mbox->sge_array->addr[0];
		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *)virtaddr;
		shdr = &n_rsrc->cfg_shdr;
		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
	}

	if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2984 Failed to read allocated resources "
				"for type %d - Status 0x%x Add'l Status 0x%x.\n",
				type,
				bf_get(lpfc_mbox_hdr_status, &shdr->response),
				bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
		rc = -EIO;
		goto err_exit;
	}
 err_exit:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return rc;
}

/**
 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
 * @phba: pointer to lpfc hba data structure.
 * @sgl_list: linked list of sgl buffers to post
 * @cnt: number of linked list buffers
 *
 * This routine walks the list of buffers that have been allocated and
 * reposts them to the port by using SGL block post.
This is needed after a 7577 * pci_function_reset/warm_start or start. It attempts to construct blocks 7578 * of buffer sgls which contains contiguous xris and uses the non-embedded 7579 * SGL block post mailbox commands to post them to the port. For single 7580 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post 7581 * mailbox command for posting. 7582 * 7583 * Returns: 0 = success, non-zero failure. 7584 **/ 7585 static int 7586 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba, 7587 struct list_head *sgl_list, int cnt) 7588 { 7589 struct lpfc_sglq *sglq_entry = NULL; 7590 struct lpfc_sglq *sglq_entry_next = NULL; 7591 struct lpfc_sglq *sglq_entry_first = NULL; 7592 int status = 0, total_cnt; 7593 int post_cnt = 0, num_posted = 0, block_cnt = 0; 7594 int last_xritag = NO_XRI; 7595 LIST_HEAD(prep_sgl_list); 7596 LIST_HEAD(blck_sgl_list); 7597 LIST_HEAD(allc_sgl_list); 7598 LIST_HEAD(post_sgl_list); 7599 LIST_HEAD(free_sgl_list); 7600 7601 spin_lock_irq(&phba->hbalock); 7602 spin_lock(&phba->sli4_hba.sgl_list_lock); 7603 list_splice_init(sgl_list, &allc_sgl_list); 7604 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7605 spin_unlock_irq(&phba->hbalock); 7606 7607 total_cnt = cnt; 7608 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 7609 &allc_sgl_list, list) { 7610 list_del_init(&sglq_entry->list); 7611 block_cnt++; 7612 if ((last_xritag != NO_XRI) && 7613 (sglq_entry->sli4_xritag != last_xritag + 1)) { 7614 /* a hole in xri block, form a sgl posting block */ 7615 list_splice_init(&prep_sgl_list, &blck_sgl_list); 7616 post_cnt = block_cnt - 1; 7617 /* prepare list for next posting block */ 7618 list_add_tail(&sglq_entry->list, &prep_sgl_list); 7619 block_cnt = 1; 7620 } else { 7621 /* prepare list for next posting block */ 7622 list_add_tail(&sglq_entry->list, &prep_sgl_list); 7623 /* enough sgls for non-embed sgl mbox command */ 7624 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { 7625 list_splice_init(&prep_sgl_list, 7626 &blck_sgl_list); 7627 post_cnt = block_cnt; 7628 block_cnt = 0; 7629 } 7630 } 7631 num_posted++; 7632 7633 /* keep track of last sgl's xritag */ 7634 last_xritag = sglq_entry->sli4_xritag; 7635 7636 /* end of repost sgl list condition for buffers */ 7637 if (num_posted == total_cnt) { 7638 if (post_cnt == 0) { 7639 list_splice_init(&prep_sgl_list, 7640 &blck_sgl_list); 7641 post_cnt = block_cnt; 7642 } else if (block_cnt == 1) { 7643 status = lpfc_sli4_post_sgl(phba, 7644 sglq_entry->phys, 0, 7645 sglq_entry->sli4_xritag); 7646 if (!status) { 7647 /* successful, put sgl to posted list */ 7648 list_add_tail(&sglq_entry->list, 7649 &post_sgl_list); 7650 } else { 7651 /* Failure, put sgl to free list */ 7652 lpfc_printf_log(phba, KERN_WARNING, 7653 LOG_SLI, 7654 "3159 Failed to post " 7655 "sgl, xritag:x%x\n", 7656 sglq_entry->sli4_xritag); 7657 list_add_tail(&sglq_entry->list, 7658 &free_sgl_list); 7659 total_cnt--; 7660 } 7661 } 7662 } 7663 7664 /* continue until a nembed page worth of sgls */ 7665 if (post_cnt == 0) 7666 continue; 7667 7668 /* post the buffer list sgls as a block */ 7669 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list, 7670 post_cnt); 7671 7672 if (!status) { 7673 /* success, put sgl list to posted sgl list */ 7674 list_splice_init(&blck_sgl_list, &post_sgl_list); 7675 } else { 7676 /* Failure, put sgl list to free sgl list */ 7677 sglq_entry_first = list_first_entry(&blck_sgl_list, 7678 struct lpfc_sglq, 7679 list); 7680 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 7681 "3160 Failed to post sgl-list, " 7682 
"xritag:x%x-x%x\n", 7683 sglq_entry_first->sli4_xritag, 7684 (sglq_entry_first->sli4_xritag + 7685 post_cnt - 1)); 7686 list_splice_init(&blck_sgl_list, &free_sgl_list); 7687 total_cnt -= post_cnt; 7688 } 7689 7690 /* don't reset xirtag due to hole in xri block */ 7691 if (block_cnt == 0) 7692 last_xritag = NO_XRI; 7693 7694 /* reset sgl post count for next round of posting */ 7695 post_cnt = 0; 7696 } 7697 7698 /* free the sgls failed to post */ 7699 lpfc_free_sgl_list(phba, &free_sgl_list); 7700 7701 /* push sgls posted to the available list */ 7702 if (!list_empty(&post_sgl_list)) { 7703 spin_lock_irq(&phba->hbalock); 7704 spin_lock(&phba->sli4_hba.sgl_list_lock); 7705 list_splice_init(&post_sgl_list, sgl_list); 7706 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7707 spin_unlock_irq(&phba->hbalock); 7708 } else { 7709 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7710 "3161 Failure to post sgl to port,status %x " 7711 "blkcnt %d totalcnt %d postcnt %d\n", 7712 status, block_cnt, total_cnt, post_cnt); 7713 return -EIO; 7714 } 7715 7716 /* return the number of XRIs actually posted */ 7717 return total_cnt; 7718 } 7719 7720 /** 7721 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls 7722 * @phba: pointer to lpfc hba data structure. 7723 * 7724 * This routine walks the list of nvme buffers that have been allocated and 7725 * repost them to the port by using SGL block post. This is needed after a 7726 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine 7727 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list 7728 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers. 7729 * 7730 * Returns: 0 = success, non-zero failure. 7731 **/ 7732 static int 7733 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba) 7734 { 7735 LIST_HEAD(post_nblist); 7736 int num_posted, rc = 0; 7737 7738 /* get all NVME buffers need to repost to a local list */ 7739 lpfc_io_buf_flush(phba, &post_nblist); 7740 7741 /* post the list of nvme buffer sgls to port if available */ 7742 if (!list_empty(&post_nblist)) { 7743 num_posted = lpfc_sli4_post_io_sgl_list( 7744 phba, &post_nblist, phba->sli4_hba.io_xri_cnt); 7745 /* failed to post any nvme buffer, return error */ 7746 if (num_posted == 0) 7747 rc = -EIO; 7748 } 7749 return rc; 7750 } 7751 7752 static void 7753 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 7754 { 7755 uint32_t len; 7756 7757 len = sizeof(struct lpfc_mbx_set_host_data) - 7758 sizeof(struct lpfc_sli4_cfg_mhdr); 7759 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 7760 LPFC_MBOX_OPCODE_SET_HOST_DATA, len, 7761 LPFC_SLI4_MBX_EMBED); 7762 7763 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION; 7764 mbox->u.mqe.un.set_host_data.param_len = 7765 LPFC_HOST_OS_DRIVER_VERSION_SIZE; 7766 snprintf(mbox->u.mqe.un.set_host_data.un.data, 7767 LPFC_HOST_OS_DRIVER_VERSION_SIZE, 7768 "Linux %s v"LPFC_DRIVER_VERSION, 7769 test_bit(HBA_FCOE_MODE, &phba->hba_flag) ? 
"FCoE" : "FC"); 7770 } 7771 7772 int 7773 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, 7774 struct lpfc_queue *drq, int count, int idx) 7775 { 7776 int rc, i; 7777 struct lpfc_rqe hrqe; 7778 struct lpfc_rqe drqe; 7779 struct lpfc_rqb *rqbp; 7780 unsigned long flags; 7781 struct rqb_dmabuf *rqb_buffer; 7782 LIST_HEAD(rqb_buf_list); 7783 7784 rqbp = hrq->rqbp; 7785 for (i = 0; i < count; i++) { 7786 spin_lock_irqsave(&phba->hbalock, flags); 7787 /* IF RQ is already full, don't bother */ 7788 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) { 7789 spin_unlock_irqrestore(&phba->hbalock, flags); 7790 break; 7791 } 7792 spin_unlock_irqrestore(&phba->hbalock, flags); 7793 7794 rqb_buffer = rqbp->rqb_alloc_buffer(phba); 7795 if (!rqb_buffer) 7796 break; 7797 rqb_buffer->hrq = hrq; 7798 rqb_buffer->drq = drq; 7799 rqb_buffer->idx = idx; 7800 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list); 7801 } 7802 7803 spin_lock_irqsave(&phba->hbalock, flags); 7804 while (!list_empty(&rqb_buf_list)) { 7805 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf, 7806 hbuf.list); 7807 7808 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys); 7809 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys); 7810 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys); 7811 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys); 7812 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); 7813 if (rc < 0) { 7814 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7815 "6421 Cannot post to HRQ %d: %x %x %x " 7816 "DRQ %x %x\n", 7817 hrq->queue_id, 7818 hrq->host_index, 7819 hrq->hba_index, 7820 hrq->entry_count, 7821 drq->host_index, 7822 drq->hba_index); 7823 rqbp->rqb_free_buffer(phba, rqb_buffer); 7824 } else { 7825 list_add_tail(&rqb_buffer->hbuf.list, 7826 &rqbp->rqb_buffer_list); 7827 rqbp->buffer_count++; 7828 } 7829 } 7830 spin_unlock_irqrestore(&phba->hbalock, flags); 7831 return 1; 7832 } 7833 7834 static void 7835 lpfc_mbx_cmpl_read_lds_params(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 7836 { 7837 union lpfc_sli4_cfg_shdr *shdr; 7838 u32 shdr_status, shdr_add_status; 7839 7840 shdr = (union lpfc_sli4_cfg_shdr *) 7841 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 7842 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7843 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7844 if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) { 7845 lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT | LOG_MBOX, 7846 "4622 SET_FEATURE (x%x) mbox failed, " 7847 "status x%x add_status x%x, mbx status x%x\n", 7848 LPFC_SET_LD_SIGNAL, shdr_status, 7849 shdr_add_status, pmb->u.mb.mbxStatus); 7850 phba->degrade_activate_threshold = 0; 7851 phba->degrade_deactivate_threshold = 0; 7852 phba->fec_degrade_interval = 0; 7853 goto out; 7854 } 7855 7856 phba->degrade_activate_threshold = pmb->u.mqe.un.set_feature.word7; 7857 phba->degrade_deactivate_threshold = pmb->u.mqe.un.set_feature.word8; 7858 phba->fec_degrade_interval = pmb->u.mqe.un.set_feature.word10; 7859 7860 lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT, 7861 "4624 Success: da x%x dd x%x interval x%x\n", 7862 phba->degrade_activate_threshold, 7863 phba->degrade_deactivate_threshold, 7864 phba->fec_degrade_interval); 7865 out: 7866 mempool_free(pmb, phba->mbox_mem_pool); 7867 } 7868 7869 int 7870 lpfc_read_lds_params(struct lpfc_hba *phba) 7871 { 7872 LPFC_MBOXQ_t *mboxq; 7873 int rc; 7874 7875 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7876 if (!mboxq) 7877 return -ENOMEM; 7878 7879 
	lpfc_set_features(phba, mboxq, LPFC_SET_LD_SIGNAL);
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_lds_params;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}

static void
lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	union lpfc_sli4_cfg_shdr *shdr;
	u32 shdr_status, shdr_add_status;
	u32 sig, acqe;

	/* Two outcomes. (1) Set features was successful and EDC negotiation
	 * is done. (2) The mailbox failed, so advertise FPIN support only.
	 */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
				"2516 CGN SET_FEATURE mbox failed with "
				"status x%x add_status x%x, mbx status x%x "
				"Reset Congestion to FPINs only\n",
				shdr_status, shdr_add_status,
				pmb->u.mb.mbxStatus);
		/* If there is a mbox error, move on to RDF */
		phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
		phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
		goto out;
	}

	/* Zero out Congestion Signal ACQE counter */
	phba->cgn_acqe_cnt = 0;

	acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq,
		      &pmb->u.mqe.un.set_feature);
	sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq,
		     &pmb->u.mqe.un.set_feature);
	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"4620 SET_FEATURES Success: Freq: %ds %dms "
			"Reg: x%x x%x\n", acqe, sig,
			phba->cgn_reg_signal, phba->cgn_reg_fpin);
out:
	mempool_free(pmb, phba->mbox_mem_pool);

	/* Register for FPIN events from the fabric now that the
	 * EDC common_set_features has completed.
	 */
	lpfc_issue_els_rdf(vport, 0);
}

int
lpfc_config_cgn_signal(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	u32 rc;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		goto out_rdf;

	lpfc_set_features(phba, mboxq, LPFC_SET_CGN_SIGNAL);
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs;

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"4621 SET_FEATURES: FREQ sig x%x acqe x%x: "
			"Reg: x%x x%x\n",
			phba->cgn_sig_freq, lpfc_acqe_cgn_frequency,
			phba->cgn_reg_signal, phba->cgn_reg_fpin);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		goto out;
	return 0;

out:
	mempool_free(mboxq, phba->mbox_mem_pool);
out_rdf:
	/* If there is a mbox error, move on to RDF */
	phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
	phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
	lpfc_issue_els_rdf(phba->pport, 0);
	return -EIO;
}

/**
 * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the per-eq idle_stat to dynamically dictate
 * polling decisions.
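 * EQs whose primary CPU is mostly idle can then be serviced by threaded
 * IRQ polling, while nvmet, active CMF, or non-MSIX configurations stay
 * with queued work.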
 *
 * Return codes:
 *   None
 **/
static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
{
	int i;
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_queue *eq;
	struct lpfc_idle_stat *idle_stat;
	u64 wall;

	for_each_present_cpu(i) {
		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
		eq = hdwq->hba_eq;

		/* Skip if we've already handled this eq's primary CPU */
		if (eq->chann != i)
			continue;

		idle_stat = &phba->sli4_hba.idle_stat[i];

		idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
		idle_stat->prev_wall = wall;

		if (phba->nvmet_support ||
		    phba->cmf_active_mode != LPFC_CFG_OFF ||
		    phba->intr_type != MSIX)
			eq->poll_mode = LPFC_QUEUE_WORK;
		else
			eq->poll_mode = LPFC_THREADED_IRQ;
	}

	if (!phba->nvmet_support && phba->intr_type == MSIX)
		schedule_delayed_work(&phba->idle_stat_delay_work,
				      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}

static void lpfc_sli4_dip(struct lpfc_hba *phba)
{
	uint32_t if_type;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
	    if_type == LPFC_SLI_INTF_IF_TYPE_6) {
		struct lpfc_register reg_data;

		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			       &reg_data.word0))
			return;

		if (bf_get(lpfc_sliport_status_dip, &reg_data))
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2904 Firmware Dump Image Present"
					" on Adapter\n");
	}
}

/**
 * lpfc_rx_monitor_create_ring - Initialize ring buffer for rx_monitor
 * @rx_monitor: Pointer to lpfc_rx_info_monitor object
 * @entries: Number of rx_info_entry objects to allocate in ring
 *
 * Return:
 *   0 - Success
 *   -ENOMEM - Failure to kmalloc
 **/
int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
				u32 entries)
{
	rx_monitor->ring = kmalloc_array(entries, sizeof(struct rx_info_entry),
					 GFP_KERNEL);
	if (!rx_monitor->ring)
		return -ENOMEM;

	rx_monitor->head_idx = 0;
	rx_monitor->tail_idx = 0;
	spin_lock_init(&rx_monitor->lock);
	rx_monitor->entries = entries;

	return 0;
}

/**
 * lpfc_rx_monitor_destroy_ring - Free ring buffer for rx_monitor
 * @rx_monitor: Pointer to lpfc_rx_info_monitor object
 *
 * Called after cancellation of cmf_timer.
 **/
void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor)
{
	kfree(rx_monitor->ring);
	rx_monitor->ring = NULL;
	rx_monitor->entries = 0;
	rx_monitor->head_idx = 0;
	rx_monitor->tail_idx = 0;
}

/**
 * lpfc_rx_monitor_record - Insert an entry into rx_monitor's ring
 * @rx_monitor: Pointer to lpfc_rx_info_monitor object
 * @entry: Pointer to rx_info_entry
 *
 * Used to insert an rx_info_entry into rx_monitor's ring. Note that this is a
 * deep copy of rx_info_entry not a shallow copy of the rx_info_entry ptr.
 *
 * This is called from lpfc_cmf_timer, which is in timer/softirq context.
 *
 * In cases of old data overflow, we do a best effort of FIFO order.
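 * When the ring wraps, the oldest entry is overwritten and head_idx is
 * advanced, so the ring always holds the most recent entries.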
8087 **/ 8088 void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor, 8089 struct rx_info_entry *entry) 8090 { 8091 struct rx_info_entry *ring = rx_monitor->ring; 8092 u32 *head_idx = &rx_monitor->head_idx; 8093 u32 *tail_idx = &rx_monitor->tail_idx; 8094 spinlock_t *ring_lock = &rx_monitor->lock; 8095 u32 ring_size = rx_monitor->entries; 8096 8097 spin_lock(ring_lock); 8098 memcpy(&ring[*tail_idx], entry, sizeof(*entry)); 8099 *tail_idx = (*tail_idx + 1) % ring_size; 8100 8101 /* Best effort of FIFO saved data */ 8102 if (*tail_idx == *head_idx) 8103 *head_idx = (*head_idx + 1) % ring_size; 8104 8105 spin_unlock(ring_lock); 8106 } 8107 8108 /** 8109 * lpfc_rx_monitor_report - Read out rx_monitor's ring 8110 * @phba: Pointer to lpfc_hba object 8111 * @rx_monitor: Pointer to lpfc_rx_info_monitor object 8112 * @buf: Pointer to char buffer that will contain rx monitor info data 8113 * @buf_len: Length buf including null char 8114 * @max_read_entries: Maximum number of entries to read out of ring 8115 * 8116 * Used to dump/read what's in rx_monitor's ring buffer. 8117 * 8118 * If buf is NULL || buf_len == 0, then it is implied that we want to log the 8119 * information to kmsg instead of filling out buf. 8120 * 8121 * Return: 8122 * Number of entries read out of the ring 8123 **/ 8124 u32 lpfc_rx_monitor_report(struct lpfc_hba *phba, 8125 struct lpfc_rx_info_monitor *rx_monitor, char *buf, 8126 u32 buf_len, u32 max_read_entries) 8127 { 8128 struct rx_info_entry *ring = rx_monitor->ring; 8129 struct rx_info_entry *entry; 8130 u32 *head_idx = &rx_monitor->head_idx; 8131 u32 *tail_idx = &rx_monitor->tail_idx; 8132 spinlock_t *ring_lock = &rx_monitor->lock; 8133 u32 ring_size = rx_monitor->entries; 8134 u32 cnt = 0; 8135 char tmp[DBG_LOG_STR_SZ] = {0}; 8136 bool log_to_kmsg = (!buf || !buf_len) ? true : false; 8137 8138 if (!log_to_kmsg) { 8139 /* clear the buffer to be sure */ 8140 memset(buf, 0, buf_len); 8141 8142 scnprintf(buf, buf_len, "\t%-16s%-16s%-16s%-16s%-8s%-8s%-8s" 8143 "%-8s%-8s%-8s%-16s\n", 8144 "MaxBPI", "Tot_Data_CMF", 8145 "Tot_Data_Cmd", "Tot_Data_Cmpl", 8146 "Lat(us)", "Avg_IO", "Max_IO", "Bsy", 8147 "IO_cnt", "Info", "BWutil(ms)"); 8148 } 8149 8150 /* Needs to be _irq because record is called from timer interrupt 8151 * context 8152 */ 8153 spin_lock_irq(ring_lock); 8154 while (*head_idx != *tail_idx) { 8155 entry = &ring[*head_idx]; 8156 8157 /* Read out this entry's data. */ 8158 if (!log_to_kmsg) { 8159 /* If !log_to_kmsg, then store to buf. 
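			 * Formatted entries are appended until the next one
			 * would overflow buf.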
*/ 8160 scnprintf(tmp, sizeof(tmp), 8161 "%03d:\t%-16llu%-16llu%-16llu%-16llu%-8llu" 8162 "%-8llu%-8llu%-8u%-8u%-8u%u(%u)\n", 8163 *head_idx, entry->max_bytes_per_interval, 8164 entry->cmf_bytes, entry->total_bytes, 8165 entry->rcv_bytes, entry->avg_io_latency, 8166 entry->avg_io_size, entry->max_read_cnt, 8167 entry->cmf_busy, entry->io_cnt, 8168 entry->cmf_info, entry->timer_utilization, 8169 entry->timer_interval); 8170 8171 /* Check for buffer overflow */ 8172 if ((strlen(buf) + strlen(tmp)) >= buf_len) 8173 break; 8174 8175 /* Append entry's data to buffer */ 8176 strlcat(buf, tmp, buf_len); 8177 } else { 8178 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 8179 "4410 %02u: MBPI %llu Xmit %llu " 8180 "Cmpl %llu Lat %llu ASz %llu Info %02u " 8181 "BWUtil %u Int %u slot %u\n", 8182 cnt, entry->max_bytes_per_interval, 8183 entry->total_bytes, entry->rcv_bytes, 8184 entry->avg_io_latency, 8185 entry->avg_io_size, entry->cmf_info, 8186 entry->timer_utilization, 8187 entry->timer_interval, *head_idx); 8188 } 8189 8190 *head_idx = (*head_idx + 1) % ring_size; 8191 8192 /* Don't feed more than max_read_entries */ 8193 cnt++; 8194 if (cnt >= max_read_entries) 8195 break; 8196 } 8197 spin_unlock_irq(ring_lock); 8198 8199 return cnt; 8200 } 8201 8202 /** 8203 * lpfc_cmf_setup - Initialize idle_stat tracking 8204 * @phba: Pointer to HBA context object. 8205 * 8206 * This is called from HBA setup during driver load or when the HBA 8207 * comes online. this does all the initialization to support CMF and MI. 8208 **/ 8209 static int 8210 lpfc_cmf_setup(struct lpfc_hba *phba) 8211 { 8212 LPFC_MBOXQ_t *mboxq; 8213 struct lpfc_dmabuf *mp; 8214 struct lpfc_pc_sli4_params *sli4_params; 8215 int rc, cmf, mi_ver; 8216 8217 rc = lpfc_sli4_refresh_params(phba); 8218 if (unlikely(rc)) 8219 return rc; 8220 8221 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8222 if (!mboxq) 8223 return -ENOMEM; 8224 8225 sli4_params = &phba->sli4_hba.pc_sli4_params; 8226 8227 /* Always try to enable MI feature if we can */ 8228 if (sli4_params->mi_ver) { 8229 lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI); 8230 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8231 mi_ver = bf_get(lpfc_mbx_set_feature_mi, 8232 &mboxq->u.mqe.un.set_feature); 8233 8234 if (rc == MBX_SUCCESS) { 8235 if (mi_ver) { 8236 lpfc_printf_log(phba, 8237 KERN_WARNING, LOG_CGN_MGMT, 8238 "6215 MI is enabled\n"); 8239 sli4_params->mi_ver = mi_ver; 8240 } else { 8241 lpfc_printf_log(phba, 8242 KERN_WARNING, LOG_CGN_MGMT, 8243 "6338 MI is disabled\n"); 8244 sli4_params->mi_ver = 0; 8245 } 8246 } else { 8247 /* mi_ver is already set from GET_SLI4_PARAMETERS */ 8248 lpfc_printf_log(phba, KERN_INFO, 8249 LOG_CGN_MGMT | LOG_INIT, 8250 "6245 Enable MI Mailbox x%x (x%x/x%x) " 8251 "failed, rc:x%x mi:x%x\n", 8252 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 8253 lpfc_sli_config_mbox_subsys_get 8254 (phba, mboxq), 8255 lpfc_sli_config_mbox_opcode_get 8256 (phba, mboxq), 8257 rc, sli4_params->mi_ver); 8258 } 8259 } else { 8260 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 8261 "6217 MI is disabled\n"); 8262 } 8263 8264 /* Ensure FDMI is enabled for MI if enable_mi is set */ 8265 if (sli4_params->mi_ver) 8266 phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT; 8267 8268 /* Always try to enable CMF feature if we can */ 8269 if (sli4_params->cmf) { 8270 lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF); 8271 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8272 cmf = bf_get(lpfc_mbx_set_feature_cmf, 8273 &mboxq->u.mqe.un.set_feature); 8274 if (rc == 
MBX_SUCCESS && cmf) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
					"6218 CMF is enabled: mode %d\n",
					phba->cmf_active_mode);
		} else {
			lpfc_printf_log(phba, KERN_WARNING,
					LOG_CGN_MGMT | LOG_INIT,
					"6219 Enable CMF Mailbox x%x (x%x/x%x) "
					"failed, rc:x%x dd:x%x\n",
					bf_get(lpfc_mqe_command, &mboxq->u.mqe),
					lpfc_sli_config_mbox_subsys_get
					(phba, mboxq),
					lpfc_sli_config_mbox_opcode_get
					(phba, mboxq),
					rc, cmf);
			sli4_params->cmf = 0;
			phba->cmf_active_mode = LPFC_CFG_OFF;
			goto no_cmf;
		}

		/* Allocate Congestion Information Buffer */
		if (!phba->cgn_i) {
			mp = kmalloc(sizeof(*mp), GFP_KERNEL);
			if (mp)
				mp->virt = dma_alloc_coherent
						(&phba->pcidev->dev,
						 sizeof(struct lpfc_cgn_info),
						 &mp->phys, GFP_KERNEL);
			if (!mp || !mp->virt) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"2640 Failed to alloc memory "
						"for Congestion Info\n");
				kfree(mp);
				sli4_params->cmf = 0;
				phba->cmf_active_mode = LPFC_CFG_OFF;
				goto no_cmf;
			}
			phba->cgn_i = mp;

			/* initialize congestion buffer info */
			lpfc_init_congestion_buf(phba);
			lpfc_init_congestion_stat(phba);

			/* Zero out Congestion Signal counters */
			atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
			atomic64_set(&phba->cgn_acqe_stat.warn, 0);
		}

		rc = lpfc_sli4_cgn_params_read(phba);
		if (rc < 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
					"6242 Error reading Cgn Params (%d)\n",
					rc);
			/* Ensure CGN Mode is off */
			sli4_params->cmf = 0;
		} else if (!rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
					"6243 CGN Event empty object.\n");
			/* Ensure CGN Mode is off */
			sli4_params->cmf = 0;
		}
	} else {
no_cmf:
		lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
				"6220 CMF is disabled\n");
	}

	/* Only register congestion buffer with firmware if BOTH
	 * CMF and E2E are enabled.
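	 * (sli4_params->mi_ver is used as the E2E indicator in this check.)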
	 */
	if (sli4_params->cmf && sli4_params->mi_ver) {
		rc = lpfc_reg_congestion_buf(phba);
		if (rc) {
			dma_free_coherent(&phba->pcidev->dev,
					  sizeof(struct lpfc_cgn_info),
					  phba->cgn_i->virt, phba->cgn_i->phys);
			kfree(phba->cgn_i);
			phba->cgn_i = NULL;
			/* Ensure CGN Mode is off */
			phba->cmf_active_mode = LPFC_CFG_OFF;
			sli4_params->cmf = 0;
			return 0;
		}
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"6470 Setup MI version %d CMF %d mode %d\n",
			sli4_params->mi_ver, sli4_params->cmf,
			phba->cmf_active_mode);

	mempool_free(mboxq, phba->mbox_mem_pool);

	/* Initialize atomic counters */
	atomic_set(&phba->cgn_fabric_warn_cnt, 0);
	atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
	atomic_set(&phba->cgn_sync_alarm_cnt, 0);
	atomic_set(&phba->cgn_sync_warn_cnt, 0);
	atomic_set(&phba->cgn_driver_evt_cnt, 0);
	atomic_set(&phba->cgn_latency_evt_cnt, 0);
	atomic64_set(&phba->cgn_latency_evt, 0);

	phba->cmf_interval_rate = LPFC_CMF_INTERVAL;

	/* Allocate RX Monitor Buffer */
	if (!phba->rx_monitor) {
		phba->rx_monitor = kzalloc(sizeof(*phba->rx_monitor),
					   GFP_KERNEL);
		if (!phba->rx_monitor) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2644 Failed to alloc memory "
					"for RX Monitor Buffer\n");
			return -ENOMEM;
		}

		/* Instruct the rx_monitor object to instantiate its ring */
		if (lpfc_rx_monitor_create_ring(phba->rx_monitor,
						LPFC_MAX_RXMONITOR_ENTRY)) {
			kfree(phba->rx_monitor);
			phba->rx_monitor = NULL;
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2645 Failed to alloc memory "
					"for RX Monitor's Ring\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static int
lpfc_set_host_tm(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t len, rc;
	struct timespec64 cur_time;
	struct tm broken;
	uint32_t month, day, year;
	uint32_t hour, minute, second;
	struct lpfc_mbx_set_host_date_time *tm;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	len = sizeof(struct lpfc_mbx_set_host_data) -
	      sizeof(struct lpfc_sli4_cfg_mhdr);
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
			 LPFC_SLI4_MBX_EMBED);

	mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME;
	mboxq->u.mqe.un.set_host_data.param_len =
		sizeof(struct lpfc_mbx_set_host_date_time);
	tm = &mboxq->u.mqe.un.set_host_data.un.tm;
	ktime_get_real_ts64(&cur_time);
	time64_to_tm(cur_time.tv_sec, 0, &broken);
	month = broken.tm_mon + 1;
	day = broken.tm_mday;
	year = broken.tm_year - 100;
	hour = broken.tm_hour;
	minute = broken.tm_min;
	second = broken.tm_sec;
	bf_set(lpfc_mbx_set_host_month, tm, month);
	bf_set(lpfc_mbx_set_host_day, tm, day);
	bf_set(lpfc_mbx_set_host_year, tm, year);
	bf_set(lpfc_mbx_set_host_hour, tm, hour);
	bf_set(lpfc_mbx_set_host_min, tm, minute);
	bf_set(lpfc_mbx_set_host_sec, tm, second);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_get_platform_uuid - Attempts to extract a platform uuid
 * @phba: pointer to lpfc hba data structure.
8450 * 8451 * This routine attempts to first read SMBIOS DMI data for the System 8452 * Information structure offset 08h called System UUID. Else, no platform 8453 * UUID will be advertised. 8454 **/ 8455 static void 8456 lpfc_get_platform_uuid(struct lpfc_hba *phba) 8457 { 8458 int rc; 8459 const char *uuid; 8460 char pni[17] = {0}; /* 16 characters + '\0' */ 8461 bool is_ff = true, is_00 = true; 8462 u8 i; 8463 8464 /* First attempt SMBIOS DMI */ 8465 uuid = dmi_get_system_info(DMI_PRODUCT_UUID); 8466 if (uuid) { 8467 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8468 "2088 SMBIOS UUID %s\n", 8469 uuid); 8470 } else { 8471 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8472 "2099 Could not extract UUID\n"); 8473 } 8474 8475 if (uuid && uuid_is_valid(uuid)) { 8476 /* Generate PNI from UUID format. 8477 * 8478 * 1.) Extract lower 64 bits from UUID format. 8479 * 2.) Set 3h for NAA Locally Assigned Name Identifier format. 8480 * 8481 * e.g. xxxxxxxx-xxxx-xxxx-yyyy-yyyyyyyyyyyy 8482 * 8483 * extract the yyyy-yyyyyyyyyyyy portion 8484 * final PNI 3yyyyyyyyyyyyyyy 8485 */ 8486 scnprintf(pni, sizeof(pni), "3%c%c%c%s", 8487 uuid[20], uuid[21], uuid[22], &uuid[24]); 8488 8489 /* Sanitize the converted PNI */ 8490 for (i = 1; i < 16 && (is_ff || is_00); i++) { 8491 if (pni[i] != '0') 8492 is_00 = false; 8493 if (pni[i] != 'f' && pni[i] != 'F') 8494 is_ff = false; 8495 } 8496 8497 /* Convert from char* to unsigned long */ 8498 rc = kstrtoul(pni, 16, &phba->pni); 8499 if (!rc && !is_ff && !is_00) { 8500 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8501 "2100 PNI 0x%016lx\n", phba->pni); 8502 } else { 8503 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8504 "2101 PNI %s generation status %d\n", 8505 pni, rc); 8506 phba->pni = 0; 8507 } 8508 } 8509 } 8510 8511 /** 8512 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function 8513 * @phba: Pointer to HBA context object. 8514 * 8515 * This function is the main SLI4 device initialization PCI function. This 8516 * function is called by the HBA initialization code, HBA reset code and 8517 * HBA error attention handler code. Caller is not required to hold any 8518 * locks. 8519 **/ 8520 int 8521 lpfc_sli4_hba_setup(struct lpfc_hba *phba) 8522 { 8523 int rc, i, cnt, len, dd; 8524 LPFC_MBOXQ_t *mboxq; 8525 struct lpfc_mqe *mqe; 8526 uint8_t *vpd; 8527 uint32_t vpd_size; 8528 uint32_t ftr_rsp = 0; 8529 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); 8530 struct lpfc_vport *vport = phba->pport; 8531 struct lpfc_dmabuf *mp; 8532 struct lpfc_rqb *rqbp; 8533 u32 flg; 8534 8535 /* Perform a PCI function reset to start from clean */ 8536 rc = lpfc_pci_function_reset(phba); 8537 if (unlikely(rc)) 8538 return -ENODEV; 8539 8540 /* Check the HBA Host Status Register for readyness */ 8541 rc = lpfc_sli4_post_status_check(phba); 8542 if (unlikely(rc)) 8543 return -ENODEV; 8544 else { 8545 spin_lock_irq(&phba->hbalock); 8546 phba->sli.sli_flag |= LPFC_SLI_ACTIVE; 8547 flg = phba->sli.sli_flag; 8548 spin_unlock_irq(&phba->hbalock); 8549 /* Allow a little time after setting SLI_ACTIVE for any polled 8550 * MBX commands to complete via BSG. 8551 */ 8552 for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) { 8553 msleep(20); 8554 spin_lock_irq(&phba->hbalock); 8555 flg = phba->sli.sli_flag; 8556 spin_unlock_irq(&phba->hbalock); 8557 } 8558 } 8559 clear_bit(HBA_SETUP, &phba->hba_flag); 8560 8561 lpfc_sli4_dip(phba); 8562 8563 /* 8564 * Allocate a single mailbox container for initializing the 8565 * port. 
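	 * The same mboxq is reused for the sequence of polled configuration
	 * commands issued below.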
8566 */ 8567 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8568 if (!mboxq) 8569 return -ENOMEM; 8570 8571 /* Issue READ_REV to collect vpd and FW information. */ 8572 vpd_size = SLI4_PAGE_SIZE; 8573 vpd = kzalloc(vpd_size, GFP_KERNEL); 8574 if (!vpd) { 8575 rc = -ENOMEM; 8576 goto out_free_mbox; 8577 } 8578 8579 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); 8580 if (unlikely(rc)) { 8581 kfree(vpd); 8582 goto out_free_mbox; 8583 } 8584 8585 mqe = &mboxq->u.mqe; 8586 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 8587 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) { 8588 set_bit(HBA_FCOE_MODE, &phba->hba_flag); 8589 phba->fcp_embed_io = 0; /* SLI4 FC support only */ 8590 } else { 8591 clear_bit(HBA_FCOE_MODE, &phba->hba_flag); 8592 } 8593 8594 /* Obtain platform UUID, only for SLI4 FC adapters */ 8595 if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) 8596 lpfc_get_platform_uuid(phba); 8597 8598 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == 8599 LPFC_DCBX_CEE_MODE) 8600 set_bit(HBA_FIP_SUPPORT, &phba->hba_flag); 8601 else 8602 clear_bit(HBA_FIP_SUPPORT, &phba->hba_flag); 8603 8604 clear_bit(HBA_IOQ_FLUSH, &phba->hba_flag); 8605 8606 if (phba->sli_rev != LPFC_SLI_REV4) { 8607 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8608 "0376 READ_REV Error. SLI Level %d " 8609 "FCoE enabled %d\n", 8610 phba->sli_rev, 8611 test_bit(HBA_FCOE_MODE, &phba->hba_flag) ? 1 : 0); 8612 rc = -EIO; 8613 kfree(vpd); 8614 goto out_free_mbox; 8615 } 8616 8617 rc = lpfc_set_host_tm(phba); 8618 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 8619 "6468 Set host date / time: Status x%x:\n", rc); 8620 8621 /* 8622 * Continue initialization with default values even if driver failed 8623 * to read FCoE param config regions, only read parameters if the 8624 * board is FCoE 8625 */ 8626 if (test_bit(HBA_FCOE_MODE, &phba->hba_flag) && 8627 lpfc_sli4_read_fcoe_params(phba)) 8628 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT, 8629 "2570 Failed to read FCoE parameters\n"); 8630 8631 /* 8632 * Retrieve sli4 device physical port name, failure of doing it 8633 * is considered as non-fatal. 8634 */ 8635 rc = lpfc_sli4_retrieve_pport_name(phba); 8636 if (!rc) 8637 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8638 "3080 Successful retrieving SLI4 device " 8639 "physical port name: %s.\n", phba->Port); 8640 8641 rc = lpfc_sli4_get_ctl_attr(phba); 8642 if (!rc) 8643 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8644 "8351 Successful retrieving SLI4 device " 8645 "CTL ATTR\n"); 8646 8647 /* 8648 * Evaluate the read rev and vpd data. Populate the driver 8649 * state with the results. If this routine fails, the failure 8650 * is not fatal as the driver will use generic values. 8651 */ 8652 rc = lpfc_parse_vpd(phba, vpd, vpd_size); 8653 if (unlikely(!rc)) 8654 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8655 "0377 Error %d parsing vpd. 
" 8656 "Using defaults.\n", rc); 8657 kfree(vpd); 8658 8659 /* Save information as VPD data */ 8660 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; 8661 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; 8662 8663 /* 8664 * This is because first G7 ASIC doesn't support the standard 8665 * 0x5a NVME cmd descriptor type/subtype 8666 */ 8667 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 8668 LPFC_SLI_INTF_IF_TYPE_6) && 8669 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) && 8670 (phba->vpd.rev.smRev == 0) && 8671 (phba->cfg_nvme_embed_cmd == 1)) 8672 phba->cfg_nvme_embed_cmd = 0; 8673 8674 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; 8675 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, 8676 &mqe->un.read_rev); 8677 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, 8678 &mqe->un.read_rev); 8679 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, 8680 &mqe->un.read_rev); 8681 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, 8682 &mqe->un.read_rev); 8683 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; 8684 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); 8685 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; 8686 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); 8687 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; 8688 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); 8689 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8690 "(%d):0380 READ_REV Status x%x " 8691 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", 8692 mboxq->vport ? mboxq->vport->vpi : 0, 8693 bf_get(lpfc_mqe_status, mqe), 8694 phba->vpd.rev.opFwName, 8695 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, 8696 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); 8697 8698 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 8699 LPFC_SLI_INTF_IF_TYPE_0) { 8700 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY); 8701 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8702 if (rc == MBX_SUCCESS) { 8703 set_bit(HBA_RECOVERABLE_UE, &phba->hba_flag); 8704 /* Set 1Sec interval to detect UE */ 8705 phba->eratt_poll_interval = 1; 8706 phba->sli4_hba.ue_to_sr = bf_get( 8707 lpfc_mbx_set_feature_UESR, 8708 &mboxq->u.mqe.un.set_feature); 8709 phba->sli4_hba.ue_to_rp = bf_get( 8710 lpfc_mbx_set_feature_UERP, 8711 &mboxq->u.mqe.un.set_feature); 8712 } 8713 } 8714 8715 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) { 8716 /* Enable MDS Diagnostics only if the SLI Port supports it */ 8717 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS); 8718 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8719 if (rc != MBX_SUCCESS) 8720 phba->mds_diags_support = 0; 8721 } 8722 8723 /* 8724 * Discover the port's supported feature set and match it against the 8725 * hosts requests. 8726 */ 8727 lpfc_request_features(phba, mboxq); 8728 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8729 if (unlikely(rc)) { 8730 rc = -EIO; 8731 goto out_free_mbox; 8732 } 8733 8734 /* Disable VMID if app header is not supported */ 8735 if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr, 8736 &mqe->un.req_ftrs))) { 8737 bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0); 8738 phba->cfg_vmid_app_header = 0; 8739 lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI, 8740 "1242 vmid feature not supported\n"); 8741 } 8742 8743 /* 8744 * The port must support FCP initiator mode as this is the 8745 * only mode running in the host. 
8746 */ 8747 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { 8748 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 8749 "0378 No support for fcpi mode.\n"); 8750 ftr_rsp++; 8751 } 8752 8753 /* Performance Hints are ONLY for FCoE */ 8754 if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { 8755 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) 8756 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; 8757 else 8758 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED; 8759 } 8760 8761 /* 8762 * If the port cannot support the host's requested features 8763 * then turn off the global config parameters to disable the 8764 * feature in the driver. This is not a fatal error. 8765 */ 8766 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 8767 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) { 8768 phba->cfg_enable_bg = 0; 8769 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 8770 ftr_rsp++; 8771 } 8772 } 8773 8774 if (phba->max_vpi && phba->cfg_enable_npiv && 8775 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 8776 ftr_rsp++; 8777 8778 if (ftr_rsp) { 8779 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 8780 "0379 Feature Mismatch Data: x%08x %08x " 8781 "x%x x%x x%x\n", mqe->un.req_ftrs.word2, 8782 mqe->un.req_ftrs.word3, phba->cfg_enable_bg, 8783 phba->cfg_enable_npiv, phba->max_vpi); 8784 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 8785 phba->cfg_enable_bg = 0; 8786 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 8787 phba->cfg_enable_npiv = 0; 8788 } 8789 8790 /* These SLI3 features are assumed in SLI4 */ 8791 spin_lock_irq(&phba->hbalock); 8792 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 8793 spin_unlock_irq(&phba->hbalock); 8794 8795 /* Always try to enable dual dump feature if we can */ 8796 lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP); 8797 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8798 dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature); 8799 if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP)) 8800 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8801 "6448 Dual Dump is enabled\n"); 8802 else 8803 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT, 8804 "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, " 8805 "rc:x%x dd:x%x\n", 8806 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 8807 lpfc_sli_config_mbox_subsys_get( 8808 phba, mboxq), 8809 lpfc_sli_config_mbox_opcode_get( 8810 phba, mboxq), 8811 rc, dd); 8812 8813 /* 8814 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent 8815 * calls depends on these resources to complete port setup. 8816 */ 8817 rc = lpfc_sli4_alloc_resource_identifiers(phba); 8818 if (rc) { 8819 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8820 "2920 Failed to alloc Resource IDs " 8821 "rc = x%x\n", rc); 8822 goto out_free_mbox; 8823 } 8824 8825 lpfc_sli4_node_rpi_restore(phba); 8826 8827 lpfc_set_host_data(phba, mboxq); 8828 8829 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8830 if (rc) { 8831 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 8832 "2134 Failed to set host os driver version %x", 8833 rc); 8834 } 8835 8836 /* Read the port's service parameters. 
*/ 8837 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 8838 if (rc) { 8839 phba->link_state = LPFC_HBA_ERROR; 8840 rc = -ENOMEM; 8841 goto out_free_mbox; 8842 } 8843 8844 mboxq->vport = vport; 8845 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8846 mp = mboxq->ctx_buf; 8847 if (rc == MBX_SUCCESS) { 8848 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); 8849 rc = 0; 8850 } 8851 8852 /* 8853 * This memory was allocated by the lpfc_read_sparam routine but is 8854 * no longer needed. It is released and ctx_buf NULLed to prevent 8855 * unintended pointer access as the mbox is reused. 8856 */ 8857 lpfc_mbuf_free(phba, mp->virt, mp->phys); 8858 kfree(mp); 8859 mboxq->ctx_buf = NULL; 8860 if (unlikely(rc)) { 8861 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8862 "0382 READ_SPARAM command failed " 8863 "status %d, mbxStatus x%x\n", 8864 rc, bf_get(lpfc_mqe_status, mqe)); 8865 phba->link_state = LPFC_HBA_ERROR; 8866 rc = -EIO; 8867 goto out_free_mbox; 8868 } 8869 8870 lpfc_update_vport_wwn(vport); 8871 8872 /* Update the fc_host data structures with new wwn. */ 8873 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 8874 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 8875 8876 /* Create all the SLI4 queues */ 8877 rc = lpfc_sli4_queue_create(phba); 8878 if (rc) { 8879 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8880 "3089 Failed to allocate queues\n"); 8881 rc = -ENODEV; 8882 goto out_free_mbox; 8883 } 8884 /* Set up all the queues to the device */ 8885 rc = lpfc_sli4_queue_setup(phba); 8886 if (unlikely(rc)) { 8887 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8888 "0381 Error %d during queue setup.\n", rc); 8889 goto out_destroy_queue; 8890 } 8891 /* Initialize the driver internal SLI layer lists. */ 8892 lpfc_sli4_setup(phba); 8893 lpfc_sli4_queue_init(phba); 8894 8895 /* update host els xri-sgl sizes and mappings */ 8896 rc = lpfc_sli4_els_sgl_update(phba); 8897 if (unlikely(rc)) { 8898 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8899 "1400 Failed to update xri-sgl size and " 8900 "mapping: %d\n", rc); 8901 goto out_destroy_queue; 8902 } 8903 8904 /* register the els sgl pool to the port */ 8905 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list, 8906 phba->sli4_hba.els_xri_cnt); 8907 if (unlikely(rc < 0)) { 8908 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8909 "0582 Error %d during els sgl post " 8910 "operation\n", rc); 8911 rc = -ENODEV; 8912 goto out_destroy_queue; 8913 } 8914 phba->sli4_hba.els_xri_cnt = rc; 8915 8916 if (phba->nvmet_support) { 8917 /* update host nvmet xri-sgl sizes and mappings */ 8918 rc = lpfc_sli4_nvmet_sgl_update(phba); 8919 if (unlikely(rc)) { 8920 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8921 "6308 Failed to update nvmet-sgl size " 8922 "and mapping: %d\n", rc); 8923 goto out_destroy_queue; 8924 } 8925 8926 /* register the nvmet sgl pool to the port */ 8927 rc = lpfc_sli4_repost_sgl_list( 8928 phba, 8929 &phba->sli4_hba.lpfc_nvmet_sgl_list, 8930 phba->sli4_hba.nvmet_xri_cnt); 8931 if (unlikely(rc < 0)) { 8932 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8933 "3117 Error %d during nvmet " 8934 "sgl post\n", rc); 8935 rc = -ENODEV; 8936 goto out_destroy_queue; 8937 } 8938 phba->sli4_hba.nvmet_xri_cnt = rc; 8939 8940 /* We allocate an iocbq for every receive context SGL. 8941 * The additional allocation is for abort and ls handling. 
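		 * Hence cnt below is nvmet_xri_cnt plus max_xri.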
8942 */ 8943 cnt = phba->sli4_hba.nvmet_xri_cnt + 8944 phba->sli4_hba.max_cfg_param.max_xri; 8945 } else { 8946 /* update host common xri-sgl sizes and mappings */ 8947 rc = lpfc_sli4_io_sgl_update(phba); 8948 if (unlikely(rc)) { 8949 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8950 "6082 Failed to update nvme-sgl size " 8951 "and mapping: %d\n", rc); 8952 goto out_destroy_queue; 8953 } 8954 8955 /* register the allocated common sgl pool to the port */ 8956 rc = lpfc_sli4_repost_io_sgl_list(phba); 8957 if (unlikely(rc)) { 8958 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8959 "6116 Error %d during nvme sgl post " 8960 "operation\n", rc); 8961 /* Some NVME buffers were moved to abort nvme list */ 8962 /* A pci function reset will repost them */ 8963 rc = -ENODEV; 8964 goto out_destroy_queue; 8965 } 8966 /* Each lpfc_io_buf job structure has an iocbq element. 8967 * This cnt provides for abort, els, ct and ls requests. 8968 */ 8969 cnt = phba->sli4_hba.max_cfg_param.max_xri; 8970 } 8971 8972 if (!phba->sli.iocbq_lookup) { 8973 /* Initialize and populate the iocb list per host */ 8974 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8975 "2821 initialize iocb list with %d entries\n", 8976 cnt); 8977 rc = lpfc_init_iocb_list(phba, cnt); 8978 if (rc) { 8979 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8980 "1413 Failed to init iocb list.\n"); 8981 goto out_destroy_queue; 8982 } 8983 } 8984 8985 if (phba->nvmet_support) 8986 lpfc_nvmet_create_targetport(phba); 8987 8988 if (phba->nvmet_support && phba->cfg_nvmet_mrq) { 8989 /* Post initial buffers to all RQs created */ 8990 for (i = 0; i < phba->cfg_nvmet_mrq; i++) { 8991 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp; 8992 INIT_LIST_HEAD(&rqbp->rqb_buffer_list); 8993 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc; 8994 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free; 8995 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT; 8996 rqbp->buffer_count = 0; 8997 8998 lpfc_post_rq_buffer( 8999 phba, phba->sli4_hba.nvmet_mrq_hdr[i], 9000 phba->sli4_hba.nvmet_mrq_data[i], 9001 phba->cfg_nvmet_mrq_post, i); 9002 } 9003 } 9004 9005 /* Post the rpi header region to the device. 
*/ 9006 rc = lpfc_sli4_post_all_rpi_hdrs(phba); 9007 if (unlikely(rc)) { 9008 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9009 "0393 Error %d during rpi post operation\n", 9010 rc); 9011 rc = -ENODEV; 9012 goto out_free_iocblist; 9013 } 9014 9015 if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { 9016 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) { 9017 /* 9018 * The FC Port needs to register FCFI (index 0) 9019 */ 9020 lpfc_reg_fcfi(phba, mboxq); 9021 mboxq->vport = phba->pport; 9022 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 9023 if (rc != MBX_SUCCESS) 9024 goto out_unset_queue; 9025 rc = 0; 9026 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, 9027 &mboxq->u.mqe.un.reg_fcfi); 9028 } else { 9029 /* We are a NVME Target mode with MRQ > 1 */ 9030 9031 /* First register the FCFI */ 9032 lpfc_reg_fcfi_mrq(phba, mboxq, 0); 9033 mboxq->vport = phba->pport; 9034 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 9035 if (rc != MBX_SUCCESS) 9036 goto out_unset_queue; 9037 rc = 0; 9038 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi, 9039 &mboxq->u.mqe.un.reg_fcfi_mrq); 9040 9041 /* Next register the MRQs */ 9042 lpfc_reg_fcfi_mrq(phba, mboxq, 1); 9043 mboxq->vport = phba->pport; 9044 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 9045 if (rc != MBX_SUCCESS) 9046 goto out_unset_queue; 9047 rc = 0; 9048 } 9049 /* Check if the port is configured to be disabled */ 9050 lpfc_sli_read_link_ste(phba); 9051 } 9052 9053 /* Don't post more new bufs if repost already recovered 9054 * the nvme sgls. 9055 */ 9056 if (phba->nvmet_support == 0) { 9057 if (phba->sli4_hba.io_xri_cnt == 0) { 9058 len = lpfc_new_io_buf( 9059 phba, phba->sli4_hba.io_xri_max); 9060 if (len == 0) { 9061 rc = -ENOMEM; 9062 goto out_unset_queue; 9063 } 9064 9065 if (phba->cfg_xri_rebalancing) 9066 lpfc_create_multixri_pools(phba); 9067 } 9068 } else { 9069 phba->cfg_xri_rebalancing = 0; 9070 } 9071 9072 /* Allow asynchronous mailbox command to go through */ 9073 spin_lock_irq(&phba->hbalock); 9074 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 9075 spin_unlock_irq(&phba->hbalock); 9076 9077 /* Post receive buffers to the device */ 9078 lpfc_sli4_rb_setup(phba); 9079 9080 /* Reset HBA FCF states after HBA reset */ 9081 phba->fcf.fcf_flag = 0; 9082 phba->fcf.current_rec.flag = 0; 9083 9084 /* Start the ELS watchdog timer */ 9085 mod_timer(&vport->els_tmofunc, 9086 jiffies + secs_to_jiffies(phba->fc_ratov * 2)); 9087 9088 /* Start heart beat timer */ 9089 mod_timer(&phba->hb_tmofunc, 9090 jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL)); 9091 clear_bit(HBA_HBEAT_INP, &phba->hba_flag); 9092 clear_bit(HBA_HBEAT_TMO, &phba->hba_flag); 9093 phba->last_completion_time = jiffies; 9094 9095 /* start eq_delay heartbeat */ 9096 if (phba->cfg_auto_imax) 9097 queue_delayed_work(phba->wq, &phba->eq_delay_work, 9098 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); 9099 9100 /* start per phba idle_stat_delay heartbeat */ 9101 lpfc_init_idle_stat_hb(phba); 9102 9103 /* Start error attention (ERATT) polling timer */ 9104 mod_timer(&phba->eratt_poll, 9105 jiffies + secs_to_jiffies(phba->eratt_poll_interval)); 9106 9107 /* 9108 * The port is ready, set the host's link state to LINK_DOWN 9109 * in preparation for link interrupts. 
 */
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_DOWN;

	/* Check if physical ports are trunked */
	if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
		phba->trunk_link.link0.state = LPFC_LINK_DOWN;
	if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
		phba->trunk_link.link1.state = LPFC_LINK_DOWN;
	if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
		phba->trunk_link.link2.state = LPFC_LINK_DOWN;
	if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
		phba->trunk_link.link3.state = LPFC_LINK_DOWN;
	spin_unlock_irq(&phba->hbalock);

	/* Arm the CQs and then EQs on device */
	lpfc_sli4_arm_cqeq_intr(phba);

	/* Indicate device interrupt mode */
	phba->sli4_hba.intr_enable = 1;

	/* Setup CMF after HBA is initialized */
	lpfc_cmf_setup(phba);

	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
	    test_bit(LINK_DISABLED, &phba->hba_flag)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3103 Adapter Link is disabled.\n");
		lpfc_down_link(phba, mboxq);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3104 Adapter failed to issue "
					"DOWN_LINK mbox cmd, rc:x%x\n", rc);
			goto out_io_buff_free;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		/* don't perform init_link on SLI4 FC port loopback test */
		if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
			rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
			if (rc)
				goto out_io_buff_free;
		}
	}
	mempool_free(mboxq, phba->mbox_mem_pool);

	/* Enable RAS FW log support */
	lpfc_sli4_ras_setup(phba);

	set_bit(HBA_SETUP, &phba->hba_flag);
	return rc;

out_io_buff_free:
	/* Free allocated IO Buffers */
	lpfc_io_free(phba);
out_unset_queue:
	/* Unset all the queues set up in this routine when erroring out */
	lpfc_sli4_queue_unset(phba);
out_free_iocblist:
	lpfc_free_iocb_list(phba);
out_destroy_queue:
	lpfc_sli4_queue_destroy(phba);
	lpfc_stop_hba_timers(phba);
out_free_mbox:
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_mbox_timeout - Timeout callback function for the mbox timer
 * @t: Context to fetch pointer to hba structure from.
 *
 * This is the callback function for the mailbox timer. The mailbox
 * timer is armed when a new mailbox command is issued and the timer
 * is deleted when the mailbox completes. The function is called by
 * the kernel timer code when a mailbox does not complete within the
 * expected time. This function wakes up the worker thread to
 * process the mailbox timeout and returns. All the processing is
 * done by the worker thread function lpfc_mbox_timeout_handler.
 **/
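/*
 * Illustrative sketch, not part of the driver: how the mailbox timeout
 * timer described above is typically armed when a command is posted.
 * The helper name is hypothetical; secs_to_jiffies() and
 * lpfc_mbox_tmo_val() are the helpers used elsewhere in this file.
 */
static inline void lpfc_example_arm_mbox_timer(struct lpfc_hba *phba,
					       LPFC_MBOXQ_t *mbox)
{
	unsigned long tmo = secs_to_jiffies(lpfc_mbox_tmo_val(phba, mbox));

	/* (re)start the watchdog for the newly issued command */
	mod_timer(&phba->sli.mbox_tmo, jiffies + tmo);
}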
void
lpfc_mbox_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba = timer_container_of(phba, t, sli.mbox_tmo);
	unsigned long iflag;
	uint32_t tmo_posted;

	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
 *					are pending
 * @phba: Pointer to HBA context object.
 *
 * This function checks if any mailbox completions are present on the mailbox
 * completion queue.
 **/
static bool
lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
{
	uint32_t idx;
	struct lpfc_queue *mcq;
	struct lpfc_mcqe *mcqe;
	bool pending_completions = false;
	uint8_t qe_valid;

	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
		return false;

	/* Check for completions on the mailbox completion queue */

	mcq = phba->sli4_hba.mbx_cq;
	idx = mcq->hba_index;
	qe_valid = mcq->qe_valid;
	while (bf_get_le32(lpfc_cqe_valid,
	       (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
		mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
		if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
		    (!bf_get_le32(lpfc_trailer_async, mcqe))) {
			pending_completions = true;
			break;
		}
		idx = (idx + 1) % mcq->entry_count;
		if (mcq->hba_index == idx)
			break;

		/* if the index wrapped around, toggle the valid bit */
		if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
			qe_valid = (qe_valid) ? 0 : 1;
	}
	return pending_completions;
}

/**
 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
 *					       that were missed.
 * @phba: Pointer to HBA context object.
 *
 * For SLI4, it is possible to miss an interrupt. As such, mbox completions
 * may be missed, causing erroneous mailbox timeouts to occur. This function
 * checks to see if mbox completions are on the mailbox completion queue
 * and will process all the completions associated with the eq for the
 * mailbox completion queue.
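 **/

/*
 * Illustrative sketch, not part of the driver: the phase ("valid") bit
 * test that lpfc_sli4_mbox_completions_pending() applies to each entry.
 * On queues where the valid bit auto-toggles at wrap (pc_sli4_params.cqav),
 * the expected phase must be flipped each time the index wraps to zero.
 * The helper name is hypothetical.
 */
static inline bool lpfc_example_cqe_is_valid(struct lpfc_queue *q,
					     uint32_t idx, uint8_t phase)
{
	struct lpfc_cqe *cqe = (struct lpfc_cqe *)lpfc_sli4_qe(q, idx);

	return bf_get_le32(lpfc_cqe_valid, cqe) == phase;
}

/* (end of sketch; the driver routine follows)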
9264 **/ 9265 static bool 9266 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) 9267 { 9268 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; 9269 uint32_t eqidx; 9270 struct lpfc_queue *fpeq = NULL; 9271 struct lpfc_queue *eq; 9272 bool mbox_pending; 9273 9274 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 9275 return false; 9276 9277 /* Find the EQ associated with the mbox CQ */ 9278 if (sli4_hba->hdwq) { 9279 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) { 9280 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq; 9281 if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) { 9282 fpeq = eq; 9283 break; 9284 } 9285 } 9286 } 9287 if (!fpeq) 9288 return false; 9289 9290 /* Turn off interrupts from this EQ */ 9291 9292 sli4_hba->sli4_eq_clr_intr(fpeq); 9293 9294 /* Check to see if a mbox completion is pending */ 9295 9296 mbox_pending = lpfc_sli4_mbox_completions_pending(phba); 9297 9298 /* 9299 * If a mbox completion is pending, process all the events on EQ 9300 * associated with the mbox completion queue (this could include 9301 * mailbox commands, async events, els commands, receive queue data 9302 * and fcp commands) 9303 */ 9304 9305 if (mbox_pending) 9306 /* process and rearm the EQ */ 9307 lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM, 9308 LPFC_QUEUE_WORK); 9309 else 9310 /* Always clear and re-arm the EQ */ 9311 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM); 9312 9313 return mbox_pending; 9314 9315 } 9316 9317 /** 9318 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout 9319 * @phba: Pointer to HBA context object. 9320 * 9321 * This function is called from worker thread when a mailbox command times out. 9322 * The caller is not required to hold any locks. This function will reset the 9323 * HBA and recover all the pending commands. 9324 **/ 9325 void 9326 lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 9327 { 9328 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 9329 MAILBOX_t *mb = NULL; 9330 9331 struct lpfc_sli *psli = &phba->sli; 9332 9333 /* If the mailbox completed, process the completion */ 9334 lpfc_sli4_process_missed_mbox_completions(phba); 9335 9336 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) 9337 return; 9338 9339 if (pmbox != NULL) 9340 mb = &pmbox->u.mb; 9341 /* Check the pmbox pointer first. There is a race condition 9342 * between the mbox timeout handler getting executed in the 9343 * worklist and the mailbox actually completing. When this 9344 * race condition occurs, the mbox_active will be NULL. 9345 */ 9346 spin_lock_irq(&phba->hbalock); 9347 if (pmbox == NULL) { 9348 lpfc_printf_log(phba, KERN_WARNING, 9349 LOG_MBOX | LOG_SLI, 9350 "0353 Active Mailbox cleared - mailbox timeout " 9351 "exiting\n"); 9352 spin_unlock_irq(&phba->hbalock); 9353 return; 9354 } 9355 9356 /* Mbox cmd <mbxCommand> timeout */ 9357 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9358 "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n", 9359 mb->mbxCommand, 9360 phba->pport->port_state, 9361 phba->sli.sli_flag, 9362 phba->sli.mbox_active); 9363 spin_unlock_irq(&phba->hbalock); 9364 9365 /* Setting state unknown so lpfc_sli_abort_iocb_ring 9366 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 9367 * it to fail all outstanding SCSI IO. 
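 */

	/*
	 * Illustrative sketch, not part of the driver: the snapshot-then-
	 * revalidate-under-lock idiom used above.  A pointer read without
	 * hbalock may go stale, so it is re-checked under the lock before
	 * it is dereferenced (hypothetical helper):
	 *
	 *	static bool lpfc_example_mbox_still_active(struct lpfc_hba *phba)
	 *	{
	 *		bool active;
	 *
	 *		spin_lock_irq(&phba->hbalock);
	 *		active = (phba->sli.mbox_active != NULL);
	 *		spin_unlock_irq(&phba->hbalock);
	 *		return active;
	 *	}
	 */

	/* (end of sketch; the timeout handling resumes)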
9368 */ 9369 set_bit(MBX_TMO_ERR, &phba->bit_flags); 9370 spin_lock_irq(&phba->pport->work_port_lock); 9371 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 9372 spin_unlock_irq(&phba->pport->work_port_lock); 9373 spin_lock_irq(&phba->hbalock); 9374 phba->link_state = LPFC_LINK_UNKNOWN; 9375 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 9376 spin_unlock_irq(&phba->hbalock); 9377 9378 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9379 "0345 Resetting board due to mailbox timeout\n"); 9380 9381 /* Reset the HBA device */ 9382 lpfc_reset_hba(phba); 9383 } 9384 9385 /** 9386 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware 9387 * @phba: Pointer to HBA context object. 9388 * @pmbox: Pointer to mailbox object. 9389 * @flag: Flag indicating how the mailbox need to be processed. 9390 * 9391 * This function is called by discovery code and HBA management code 9392 * to submit a mailbox command to firmware with SLI-3 interface spec. This 9393 * function gets the hbalock to protect the data structures. 9394 * The mailbox command can be submitted in polling mode, in which case 9395 * this function will wait in a polling loop for the completion of the 9396 * mailbox. 9397 * If the mailbox is submitted in no_wait mode (not polling) the 9398 * function will submit the command and returns immediately without waiting 9399 * for the mailbox completion. The no_wait is supported only when HBA 9400 * is in SLI2/SLI3 mode - interrupts are enabled. 9401 * The SLI interface allows only one mailbox pending at a time. If the 9402 * mailbox is issued in polling mode and there is already a mailbox 9403 * pending, then the function will return an error. If the mailbox is issued 9404 * in NO_WAIT mode and there is a mailbox pending already, the function 9405 * will return MBX_BUSY after queuing the mailbox into mailbox queue. 9406 * The sli layer owns the mailbox object until the completion of mailbox 9407 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other 9408 * return codes the caller owns the mailbox command after the return of 9409 * the function. 9410 **/ 9411 static int 9412 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, 9413 uint32_t flag) 9414 { 9415 MAILBOX_t *mbx; 9416 struct lpfc_sli *psli = &phba->sli; 9417 uint32_t status, evtctr; 9418 uint32_t ha_copy, hc_copy; 9419 int i; 9420 unsigned long timeout; 9421 unsigned long drvr_flag = 0; 9422 uint32_t word0, ldata; 9423 void __iomem *to_slim; 9424 int processing_queue = 0; 9425 9426 spin_lock_irqsave(&phba->hbalock, drvr_flag); 9427 if (!pmbox) { 9428 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 9429 /* processing mbox queue from intr_handler */ 9430 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 9431 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 9432 return MBX_SUCCESS; 9433 } 9434 processing_queue = 1; 9435 pmbox = lpfc_mbox_get(phba); 9436 if (!pmbox) { 9437 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 9438 return MBX_SUCCESS; 9439 } 9440 } 9441 9442 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && 9443 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { 9444 if(!pmbox->vport) { 9445 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 9446 lpfc_printf_log(phba, KERN_ERR, 9447 LOG_MBOX | LOG_VPORT, 9448 "1806 Mbox x%x failed. No vport\n", 9449 pmbox->u.mb.mbxCommand); 9450 dump_stack(); 9451 goto out_not_finished; 9452 } 9453 } 9454 9455 /* If the PCI channel is in offline state, do not post mbox. 
*/ 9456 if (unlikely(pci_channel_offline(phba->pcidev))) { 9457 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 9458 goto out_not_finished; 9459 } 9460 9461 /* If HBA has a deferred error attention, fail the iocb. */ 9462 if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) { 9463 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 9464 goto out_not_finished; 9465 } 9466 9467 psli = &phba->sli; 9468 9469 mbx = &pmbox->u.mb; 9470 status = MBX_SUCCESS; 9471 9472 if (phba->link_state == LPFC_HBA_ERROR) { 9473 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 9474 9475 /* Mbox command <mbxCommand> cannot issue */ 9476 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9477 "(%d):0311 Mailbox command x%x cannot " 9478 "issue Data: x%x x%x\n", 9479 pmbox->vport ? pmbox->vport->vpi : 0, 9480 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 9481 goto out_not_finished; 9482 } 9483 9484 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { 9485 if (lpfc_readl(phba->HCregaddr, &hc_copy) || 9486 !(hc_copy & HC_MBINT_ENA)) { 9487 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 9488 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9489 "(%d):2528 Mailbox command x%x cannot " 9490 "issue Data: x%x x%x\n", 9491 pmbox->vport ? pmbox->vport->vpi : 0, 9492 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 9493 goto out_not_finished; 9494 } 9495 } 9496 9497 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 9498 /* Polling for a mbox command when another one is already active 9499 * is not allowed in SLI. Also, the driver must have established 9500 * SLI2 mode to queue and process multiple mbox commands. 9501 */ 9502 9503 if (flag & MBX_POLL) { 9504 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 9505 9506 /* Mbox command <mbxCommand> cannot issue */ 9507 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9508 "(%d):2529 Mailbox command x%x " 9509 "cannot issue Data: x%x x%x\n", 9510 pmbox->vport ? pmbox->vport->vpi : 0, 9511 pmbox->u.mb.mbxCommand, 9512 psli->sli_flag, flag); 9513 goto out_not_finished; 9514 } 9515 9516 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { 9517 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 9518 /* Mbox command <mbxCommand> cannot issue */ 9519 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9520 "(%d):2530 Mailbox command x%x " 9521 "cannot issue Data: x%x x%x\n", 9522 pmbox->vport ? pmbox->vport->vpi : 0, 9523 pmbox->u.mb.mbxCommand, 9524 psli->sli_flag, flag); 9525 goto out_not_finished; 9526 } 9527 9528 /* Another mailbox command is still being processed, queue this 9529 * command to be processed later. 9530 */ 9531 lpfc_mbox_put(phba, pmbox); 9532 9533 /* Mbox cmd issue - BUSY */ 9534 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 9535 "(%d):0308 Mbox cmd issue - BUSY Data: " 9536 "x%x x%x x%x x%x\n", 9537 pmbox->vport ? pmbox->vport->vpi : 0xffffff, 9538 mbx->mbxCommand, 9539 phba->pport ? 
phba->pport->port_state : 0xff, 9540 psli->sli_flag, flag); 9541 9542 psli->slistat.mbox_busy++; 9543 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 9544 9545 if (pmbox->vport) { 9546 lpfc_debugfs_disc_trc(pmbox->vport, 9547 LPFC_DISC_TRC_MBOX_VPORT, 9548 "MBOX Bsy vport: cmd:x%x mb:x%x x%x", 9549 (uint32_t)mbx->mbxCommand, 9550 mbx->un.varWords[0], mbx->un.varWords[1]); 9551 } 9552 else { 9553 lpfc_debugfs_disc_trc(phba->pport, 9554 LPFC_DISC_TRC_MBOX, 9555 "MBOX Bsy: cmd:x%x mb:x%x x%x", 9556 (uint32_t)mbx->mbxCommand, 9557 mbx->un.varWords[0], mbx->un.varWords[1]); 9558 } 9559 9560 return MBX_BUSY; 9561 } 9562 9563 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 9564 9565 /* If we are not polling, we MUST be in SLI2 mode */ 9566 if (flag != MBX_POLL) { 9567 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && 9568 (mbx->mbxCommand != MBX_KILL_BOARD)) { 9569 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 9570 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 9571 /* Mbox command <mbxCommand> cannot issue */ 9572 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9573 "(%d):2531 Mailbox command x%x " 9574 "cannot issue Data: x%x x%x\n", 9575 pmbox->vport ? pmbox->vport->vpi : 0, 9576 pmbox->u.mb.mbxCommand, 9577 psli->sli_flag, flag); 9578 goto out_not_finished; 9579 } 9580 /* timeout active mbox command */ 9581 timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox)); 9582 mod_timer(&psli->mbox_tmo, jiffies + timeout); 9583 } 9584 9585 /* Mailbox cmd <cmd> issue */ 9586 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 9587 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " 9588 "x%x\n", 9589 pmbox->vport ? pmbox->vport->vpi : 0, 9590 mbx->mbxCommand, 9591 phba->pport ? phba->pport->port_state : 0xff, 9592 psli->sli_flag, flag); 9593 9594 if (mbx->mbxCommand != MBX_HEARTBEAT) { 9595 if (pmbox->vport) { 9596 lpfc_debugfs_disc_trc(pmbox->vport, 9597 LPFC_DISC_TRC_MBOX_VPORT, 9598 "MBOX Send vport: cmd:x%x mb:x%x x%x", 9599 (uint32_t)mbx->mbxCommand, 9600 mbx->un.varWords[0], mbx->un.varWords[1]); 9601 } 9602 else { 9603 lpfc_debugfs_disc_trc(phba->pport, 9604 LPFC_DISC_TRC_MBOX, 9605 "MBOX Send: cmd:x%x mb:x%x x%x", 9606 (uint32_t)mbx->mbxCommand, 9607 mbx->un.varWords[0], mbx->un.varWords[1]); 9608 } 9609 } 9610 9611 psli->slistat.mbox_cmd++; 9612 evtctr = psli->slistat.mbox_event; 9613 9614 /* next set own bit for the adapter and copy over command word */ 9615 mbx->mbxOwner = OWN_CHIP; 9616 9617 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 9618 /* Populate mbox extension offset word. */ 9619 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { 9620 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 9621 = (uint8_t *)phba->mbox_ext 9622 - (uint8_t *)phba->mbox; 9623 } 9624 9625 /* Copy the mailbox extension data */ 9626 if (pmbox->in_ext_byte_len && pmbox->ext_buf) { 9627 lpfc_sli_pcimem_bcopy(pmbox->ext_buf, 9628 (uint8_t *)phba->mbox_ext, 9629 pmbox->in_ext_byte_len); 9630 } 9631 /* Copy command data to host SLIM area */ 9632 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); 9633 } else { 9634 /* Populate mbox extension offset word. 
*/ 9635 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) 9636 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 9637 = MAILBOX_HBA_EXT_OFFSET; 9638 9639 /* Copy the mailbox extension data */ 9640 if (pmbox->in_ext_byte_len && pmbox->ext_buf) 9641 lpfc_memcpy_to_slim(phba->MBslimaddr + 9642 MAILBOX_HBA_EXT_OFFSET, 9643 pmbox->ext_buf, pmbox->in_ext_byte_len); 9644 9645 if (mbx->mbxCommand == MBX_CONFIG_PORT) 9646 /* copy command data into host mbox for cmpl */ 9647 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, 9648 MAILBOX_CMD_SIZE); 9649 9650 /* First copy mbox command data to HBA SLIM, skip past first 9651 word */ 9652 to_slim = phba->MBslimaddr + sizeof (uint32_t); 9653 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0], 9654 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 9655 9656 /* Next copy over first word, with mbxOwner set */ 9657 ldata = *((uint32_t *)mbx); 9658 to_slim = phba->MBslimaddr; 9659 writel(ldata, to_slim); 9660 readl(to_slim); /* flush */ 9661 9662 if (mbx->mbxCommand == MBX_CONFIG_PORT) 9663 /* switch over to host mailbox */ 9664 psli->sli_flag |= LPFC_SLI_ACTIVE; 9665 } 9666 9667 wmb(); 9668 9669 switch (flag) { 9670 case MBX_NOWAIT: 9671 /* Set up reference to mailbox command */ 9672 psli->mbox_active = pmbox; 9673 /* Interrupt board to do it */ 9674 writel(CA_MBATT, phba->CAregaddr); 9675 readl(phba->CAregaddr); /* flush */ 9676 /* Don't wait for it to finish, just return */ 9677 break; 9678 9679 case MBX_POLL: 9680 /* Set up null reference to mailbox command */ 9681 psli->mbox_active = NULL; 9682 /* Interrupt board to do it */ 9683 writel(CA_MBATT, phba->CAregaddr); 9684 readl(phba->CAregaddr); /* flush */ 9685 9686 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 9687 /* First read mbox status word */ 9688 word0 = *((uint32_t *)phba->mbox); 9689 word0 = le32_to_cpu(word0); 9690 } else { 9691 /* First read mbox status word */ 9692 if (lpfc_readl(phba->MBslimaddr, &word0)) { 9693 spin_unlock_irqrestore(&phba->hbalock, 9694 drvr_flag); 9695 goto out_not_finished; 9696 } 9697 } 9698 9699 /* Read the HBA Host Attention Register */ 9700 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 9701 spin_unlock_irqrestore(&phba->hbalock, 9702 drvr_flag); 9703 goto out_not_finished; 9704 } 9705 timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox)) + jiffies; 9706 i = 0; 9707 /* Wait for command to complete */ 9708 while (((word0 & OWN_CHIP) == OWN_CHIP) || 9709 (!(ha_copy & HA_MBATT) && 9710 (phba->link_state > LPFC_WARM_START))) { 9711 if (time_after(jiffies, timeout)) { 9712 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 9713 spin_unlock_irqrestore(&phba->hbalock, 9714 drvr_flag); 9715 goto out_not_finished; 9716 } 9717 9718 /* Check if we took a mbox interrupt while we were 9719 polling */ 9720 if (((word0 & OWN_CHIP) != OWN_CHIP) 9721 && (evtctr != psli->slistat.mbox_event)) 9722 break; 9723 9724 if (i++ > 10) { 9725 spin_unlock_irqrestore(&phba->hbalock, 9726 drvr_flag); 9727 msleep(1); 9728 spin_lock_irqsave(&phba->hbalock, drvr_flag); 9729 } 9730 9731 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 9732 /* First copy command data */ 9733 word0 = *((uint32_t *)phba->mbox); 9734 word0 = le32_to_cpu(word0); 9735 if (mbx->mbxCommand == MBX_CONFIG_PORT) { 9736 MAILBOX_t *slimmb; 9737 uint32_t slimword0; 9738 /* Check real SLIM for any errors */ 9739 slimword0 = readl(phba->MBslimaddr); 9740 slimmb = (MAILBOX_t *) & slimword0; 9741 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 9742 && slimmb->mbxStatus) { 9743 psli->sli_flag &= 9744 ~LPFC_SLI_ACTIVE; 9745 word0 = slimword0; 9746 } 9747 } 9748 } else { 9749 /* First 
copy command data */ 9750 word0 = readl(phba->MBslimaddr); 9751 } 9752 /* Read the HBA Host Attention Register */ 9753 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 9754 spin_unlock_irqrestore(&phba->hbalock, 9755 drvr_flag); 9756 goto out_not_finished; 9757 } 9758 } 9759 9760 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 9761 /* copy results back to user */ 9762 lpfc_sli_pcimem_bcopy(phba->mbox, mbx, 9763 MAILBOX_CMD_SIZE); 9764 /* Copy the mailbox extension data */ 9765 if (pmbox->out_ext_byte_len && pmbox->ext_buf) { 9766 lpfc_sli_pcimem_bcopy(phba->mbox_ext, 9767 pmbox->ext_buf, 9768 pmbox->out_ext_byte_len); 9769 } 9770 } else { 9771 /* First copy command data */ 9772 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr, 9773 MAILBOX_CMD_SIZE); 9774 /* Copy the mailbox extension data */ 9775 if (pmbox->out_ext_byte_len && pmbox->ext_buf) { 9776 lpfc_memcpy_from_slim( 9777 pmbox->ext_buf, 9778 phba->MBslimaddr + 9779 MAILBOX_HBA_EXT_OFFSET, 9780 pmbox->out_ext_byte_len); 9781 } 9782 } 9783 9784 writel(HA_MBATT, phba->HAregaddr); 9785 readl(phba->HAregaddr); /* flush */ 9786 9787 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 9788 status = mbx->mbxStatus; 9789 } 9790 9791 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 9792 return status; 9793 9794 out_not_finished: 9795 if (processing_queue) { 9796 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; 9797 lpfc_mbox_cmpl_put(phba, pmbox); 9798 } 9799 return MBX_NOT_FINISHED; 9800 } 9801 9802 /** 9803 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command 9804 * @phba: Pointer to HBA context object. 9805 * 9806 * The function blocks the posting of SLI4 asynchronous mailbox commands from 9807 * the driver internal pending mailbox queue. It will then try to wait out the 9808 * possible outstanding mailbox command before return. 9809 * 9810 * Returns: 9811 * 0 - the outstanding mailbox command completed; otherwise, the wait for 9812 * the outstanding mailbox command timed out. 9813 **/ 9814 static int 9815 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) 9816 { 9817 struct lpfc_sli *psli = &phba->sli; 9818 LPFC_MBOXQ_t *mboxq; 9819 int rc = 0; 9820 unsigned long timeout = 0; 9821 u32 sli_flag; 9822 u8 cmd, subsys, opcode; 9823 9824 /* Mark the asynchronous mailbox command posting as blocked */ 9825 spin_lock_irq(&phba->hbalock); 9826 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 9827 /* Determine how long we might wait for the active mailbox 9828 * command to be gracefully completed by firmware. 9829 */ 9830 if (phba->sli.mbox_active) 9831 timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, 9832 phba->sli.mbox_active)) + jiffies; 9833 spin_unlock_irq(&phba->hbalock); 9834 9835 /* Make sure the mailbox is really active */ 9836 if (timeout) 9837 lpfc_sli4_process_missed_mbox_completions(phba); 9838 9839 /* Wait for the outstanding mailbox command to complete */ 9840 while (phba->sli.mbox_active) { 9841 /* Check active mailbox complete status every 2ms */ 9842 msleep(2); 9843 if (time_after(jiffies, timeout)) { 9844 /* Timeout, mark the outstanding cmd not complete */ 9845 9846 /* Sanity check sli.mbox_active has not completed or 9847 * cancelled from another context during last 2ms sleep, 9848 * so take hbalock to be sure before logging. 
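 */

			/*
			 * Illustrative sketch, not part of the driver: the
			 * bounded-wait idiom this loop is built on.  The
			 * deadline is computed once and the condition is
			 * re-checked on a short sleep interval
			 * (condition() and deadline are placeholders):
			 *
			 *	while (!condition(phba)) {
			 *		msleep(2);
			 *		if (time_after(jiffies, deadline))
			 *			return -ETIMEDOUT;
			 *	}
			 */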
			spin_lock_irq(&phba->hbalock);
			if (phba->sli.mbox_active) {
				mboxq = phba->sli.mbox_active;
				cmd = mboxq->u.mb.mbxCommand;
				subsys = lpfc_sli_config_mbox_subsys_get(phba,
									 mboxq);
				opcode = lpfc_sli_config_mbox_opcode_get(phba,
									 mboxq);
				sli_flag = psli->sli_flag;
				spin_unlock_irq(&phba->hbalock);
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"2352 Mailbox command x%x "
						"(x%x/x%x) sli_flag x%x could "
						"not complete\n",
						cmd, subsys, opcode,
						sli_flag);
			} else {
				spin_unlock_irq(&phba->hbalock);
			}

			rc = 1;
			break;
		}
	}

	/* Cannot cleanly block the async mailbox command, so fail it */
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
		spin_unlock_irq(&phba->hbalock);
	}
	return rc;
}

/**
 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox commands
 * @phba: Pointer to HBA context object.
 *
 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
 * commands from the driver internal pending mailbox queue. It makes sure
 * that there is no outstanding mailbox command before resuming posting
 * asynchronous mailbox commands. If, for any reason, there is an outstanding
 * mailbox command, it will try to wait it out before resuming asynchronous
 * mailbox command posting.
 **/
static void
lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		/* Asynchronous mailbox posting is not blocked, do nothing */
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* An outstanding synchronous mailbox command is guaranteed to finish,
	 * either successfully or by timing out; after a timeout the
	 * outstanding command is always removed.  So just unblock and resume
	 * posting of asynchronous mailbox commands.
	 */
	psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* wake up worker thread to post asynchronous mailbox command */
	lpfc_worker_wake_up(phba);
}

/**
 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * The function waits for the bootstrap mailbox register ready bit from
 * the port for twice the regular mailbox command timeout value.
 *
 * Returns:
 * 0 - no timeout on waiting for bootstrap mailbox register ready.
 * MBXERR_ERROR - wait for bootstrap mailbox register timed out or port
 * is in an unrecoverable state.
 **/
static int
lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	uint32_t db_ready;
	unsigned long timeout;
	struct lpfc_register bmbx_reg;
	struct lpfc_register portstat_reg = {-1};

	/* Sanity check - there is no point in waiting if the port is in an
	 * unrecoverable state.
	 */
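	/*
	 * Editorial note: the bootstrap mailbox protocol re-polls this ready
	 * bit between doorbell writes; lpfc_sli4_post_sync_mbox() below
	 * calls this routine once before the high-address write and again
	 * before the low-address write.
	 */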
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			       &portstat_reg.word0) ||
		    lpfc_sli4_unrecoverable_port(&portstat_reg)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3858 Skipping bmbx ready because "
					"Port Status x%x\n",
					portstat_reg.word0);
			return MBXERR_ERROR;
		}
	}

	timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)) + jiffies;

	do {
		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
		if (!db_ready)
			mdelay(2);

		if (time_after(jiffies, timeout))
			return MBXERR_ERROR;
	} while (!db_ready);

	return 0;
}

/**
 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * The function posts a mailbox to the port.  The mailbox is expected
 * to be completely filled in and ready for the port to operate on it.
 * This routine executes a synchronous completion operation on the
 * mailbox by polling for its completion.
 *
 * The caller must not be holding any locks when calling this routine.
 *
 * Returns:
 * MBX_SUCCESS - mailbox posted successfully
 * Any of the MBX error values.
 **/
static int
lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc = MBX_SUCCESS;
	unsigned long iflag;
	uint32_t mcqe_status;
	uint32_t mbx_cmnd;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_mqe *mb = &mboxq->u.mqe;
	struct lpfc_bmbx_create *mbox_rgn;
	struct dma_address *dma_address;

	/*
	 * Only one mailbox can be active to the bootstrap mailbox region
	 * at a time and there is no queueing provided.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"(%d):2532 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_POLL);
		return MBXERR_ERROR;
	}
	/* The server grabs the token and owns it until release */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* wait for the bootstrap mbox register to become ready */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;
	/*
	 * Initialize the bootstrap memory region to avoid stale data areas
	 * in the mailbox post.  Then copy the caller's mailbox contents to
	 * the bmbx mailbox region.
	 */
	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
	lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
			       sizeof(struct lpfc_mqe));

	/* Post the high mailbox dma address to the port and wait for ready.
*/ 10035 dma_address = &phba->sli4_hba.bmbx.dma_address; 10036 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); 10037 10038 /* wait for bootstrap mbox register for hi-address write done */ 10039 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 10040 if (rc) 10041 goto exit; 10042 10043 /* Post the low mailbox dma address to the port. */ 10044 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); 10045 10046 /* wait for bootstrap mbox register for low address write done */ 10047 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 10048 if (rc) 10049 goto exit; 10050 10051 /* 10052 * Read the CQ to ensure the mailbox has completed. 10053 * If so, update the mailbox status so that the upper layers 10054 * can complete the request normally. 10055 */ 10056 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, 10057 sizeof(struct lpfc_mqe)); 10058 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; 10059 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, 10060 sizeof(struct lpfc_mcqe)); 10061 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); 10062 /* 10063 * When the CQE status indicates a failure and the mailbox status 10064 * indicates success then copy the CQE status into the mailbox status 10065 * (and prefix it with x4000). 10066 */ 10067 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 10068 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS) 10069 bf_set(lpfc_mqe_status, mb, 10070 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 10071 rc = MBXERR_ERROR; 10072 } else 10073 lpfc_sli4_swap_str(phba, mboxq); 10074 10075 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 10076 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x " 10077 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" 10078 " x%x x%x CQ: x%x x%x x%x x%x\n", 10079 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 10080 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 10081 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 10082 bf_get(lpfc_mqe_status, mb), 10083 mb->un.mb_words[0], mb->un.mb_words[1], 10084 mb->un.mb_words[2], mb->un.mb_words[3], 10085 mb->un.mb_words[4], mb->un.mb_words[5], 10086 mb->un.mb_words[6], mb->un.mb_words[7], 10087 mb->un.mb_words[8], mb->un.mb_words[9], 10088 mb->un.mb_words[10], mb->un.mb_words[11], 10089 mb->un.mb_words[12], mboxq->mcqe.word0, 10090 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 10091 mboxq->mcqe.trailer); 10092 exit: 10093 /* We are holding the token, no needed for lock when release */ 10094 spin_lock_irqsave(&phba->hbalock, iflag); 10095 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 10096 phba->sli.mbox_active = NULL; 10097 spin_unlock_irqrestore(&phba->hbalock, iflag); 10098 return rc; 10099 } 10100 10101 /** 10102 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware 10103 * @phba: Pointer to HBA context object. 10104 * @mboxq: Pointer to mailbox object. 10105 * @flag: Flag indicating how the mailbox need to be processed. 10106 * 10107 * This function is called by discovery code and HBA management code to submit 10108 * a mailbox command to firmware with SLI-4 interface spec. 10109 * 10110 * Return codes the caller owns the mailbox command after the return of the 10111 * function. 
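 **/

/*
 * Illustrative sketch, not part of the driver: the ownership rule callers
 * of lpfc_sli_issue_mbox() must follow.  On MBX_BUSY (queued) or
 * MBX_SUCCESS with a completion handler, the SLI layer still owns the
 * mailbox; on any other return code the caller must reclaim it.  The
 * helper name is hypothetical.
 */
static void lpfc_example_issue_and_reclaim(struct lpfc_hba *phba,
					   LPFC_MBOXQ_t *mboxq)
{
	int rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);

	if (rc != MBX_BUSY && rc != MBX_SUCCESS)
		mempool_free(mboxq, phba->mbox_mem_pool);
}

/* (end of sketch; the SLI-4 issue routine follows)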
10112 **/ 10113 static int 10114 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 10115 uint32_t flag) 10116 { 10117 struct lpfc_sli *psli = &phba->sli; 10118 unsigned long iflags; 10119 int rc; 10120 10121 /* dump from issue mailbox command if setup */ 10122 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb); 10123 10124 rc = lpfc_mbox_dev_check(phba); 10125 if (unlikely(rc)) { 10126 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10127 "(%d):2544 Mailbox command x%x (x%x/x%x) " 10128 "cannot issue Data: x%x x%x\n", 10129 mboxq->vport ? mboxq->vport->vpi : 0, 10130 mboxq->u.mb.mbxCommand, 10131 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 10132 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 10133 psli->sli_flag, flag); 10134 goto out_not_finished; 10135 } 10136 10137 /* Detect polling mode and jump to a handler */ 10138 if (!phba->sli4_hba.intr_enable) { 10139 if (flag == MBX_POLL) 10140 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 10141 else 10142 rc = -EIO; 10143 if (rc != MBX_SUCCESS) 10144 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 10145 "(%d):2541 Mailbox command x%x " 10146 "(x%x/x%x) failure: " 10147 "mqe_sta: x%x mcqe_sta: x%x/x%x " 10148 "Data: x%x x%x\n", 10149 mboxq->vport ? mboxq->vport->vpi : 0, 10150 mboxq->u.mb.mbxCommand, 10151 lpfc_sli_config_mbox_subsys_get(phba, 10152 mboxq), 10153 lpfc_sli_config_mbox_opcode_get(phba, 10154 mboxq), 10155 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 10156 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 10157 bf_get(lpfc_mcqe_ext_status, 10158 &mboxq->mcqe), 10159 psli->sli_flag, flag); 10160 return rc; 10161 } else if (flag == MBX_POLL) { 10162 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 10163 "(%d):2542 Try to issue mailbox command " 10164 "x%x (x%x/x%x) synchronously ahead of async " 10165 "mailbox command queue: x%x x%x\n", 10166 mboxq->vport ? mboxq->vport->vpi : 0, 10167 mboxq->u.mb.mbxCommand, 10168 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 10169 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 10170 psli->sli_flag, flag); 10171 /* Try to block the asynchronous mailbox posting */ 10172 rc = lpfc_sli4_async_mbox_block(phba); 10173 if (!rc) { 10174 /* Successfully blocked, now issue sync mbox cmd */ 10175 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 10176 if (rc != MBX_SUCCESS) 10177 lpfc_printf_log(phba, KERN_WARNING, 10178 LOG_MBOX | LOG_SLI, 10179 "(%d):2597 Sync Mailbox command " 10180 "x%x (x%x/x%x) failure: " 10181 "mqe_sta: x%x mcqe_sta: x%x/x%x " 10182 "Data: x%x x%x\n", 10183 mboxq->vport ? mboxq->vport->vpi : 0, 10184 mboxq->u.mb.mbxCommand, 10185 lpfc_sli_config_mbox_subsys_get(phba, 10186 mboxq), 10187 lpfc_sli_config_mbox_opcode_get(phba, 10188 mboxq), 10189 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 10190 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 10191 bf_get(lpfc_mcqe_ext_status, 10192 &mboxq->mcqe), 10193 psli->sli_flag, flag); 10194 /* Unblock the async mailbox posting afterward */ 10195 lpfc_sli4_async_mbox_unblock(phba); 10196 } 10197 return rc; 10198 } 10199 10200 /* Now, interrupt mode asynchronous mailbox command */ 10201 rc = lpfc_mbox_cmd_check(phba, mboxq); 10202 if (rc) { 10203 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10204 "(%d):2543 Mailbox command x%x (x%x/x%x) " 10205 "cannot issue Data: x%x x%x\n", 10206 mboxq->vport ? 
mboxq->vport->vpi : 0, 10207 mboxq->u.mb.mbxCommand, 10208 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 10209 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 10210 psli->sli_flag, flag); 10211 goto out_not_finished; 10212 } 10213 10214 /* Put the mailbox command to the driver internal FIFO */ 10215 psli->slistat.mbox_busy++; 10216 spin_lock_irqsave(&phba->hbalock, iflags); 10217 lpfc_mbox_put(phba, mboxq); 10218 spin_unlock_irqrestore(&phba->hbalock, iflags); 10219 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 10220 "(%d):0354 Mbox cmd issue - Enqueue Data: " 10221 "x%x (x%x/x%x) x%x x%x x%x x%x\n", 10222 mboxq->vport ? mboxq->vport->vpi : 0xffffff, 10223 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 10224 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 10225 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 10226 mboxq->u.mb.un.varUnregLogin.rpi, 10227 phba->pport->port_state, 10228 psli->sli_flag, MBX_NOWAIT); 10229 /* Wake up worker thread to transport mailbox command from head */ 10230 lpfc_worker_wake_up(phba); 10231 10232 return MBX_BUSY; 10233 10234 out_not_finished: 10235 return MBX_NOT_FINISHED; 10236 } 10237 10238 /** 10239 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device 10240 * @phba: Pointer to HBA context object. 10241 * 10242 * This function is called by worker thread to send a mailbox command to 10243 * SLI4 HBA firmware. 10244 * 10245 **/ 10246 int 10247 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) 10248 { 10249 struct lpfc_sli *psli = &phba->sli; 10250 LPFC_MBOXQ_t *mboxq; 10251 int rc = MBX_SUCCESS; 10252 unsigned long iflags; 10253 struct lpfc_mqe *mqe; 10254 uint32_t mbx_cmnd; 10255 10256 /* Check interrupt mode before post async mailbox command */ 10257 if (unlikely(!phba->sli4_hba.intr_enable)) 10258 return MBX_NOT_FINISHED; 10259 10260 /* Check for mailbox command service token */ 10261 spin_lock_irqsave(&phba->hbalock, iflags); 10262 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 10263 spin_unlock_irqrestore(&phba->hbalock, iflags); 10264 return MBX_NOT_FINISHED; 10265 } 10266 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 10267 spin_unlock_irqrestore(&phba->hbalock, iflags); 10268 return MBX_NOT_FINISHED; 10269 } 10270 if (unlikely(phba->sli.mbox_active)) { 10271 spin_unlock_irqrestore(&phba->hbalock, iflags); 10272 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10273 "0384 There is pending active mailbox cmd\n"); 10274 return MBX_NOT_FINISHED; 10275 } 10276 /* Take the mailbox command service token */ 10277 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 10278 10279 /* Get the next mailbox command from head of queue */ 10280 mboxq = lpfc_mbox_get(phba); 10281 10282 /* If no more mailbox command waiting for post, we're done */ 10283 if (!mboxq) { 10284 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 10285 spin_unlock_irqrestore(&phba->hbalock, iflags); 10286 return MBX_SUCCESS; 10287 } 10288 phba->sli.mbox_active = mboxq; 10289 spin_unlock_irqrestore(&phba->hbalock, iflags); 10290 10291 /* Check device readiness for posting mailbox command */ 10292 rc = lpfc_mbox_dev_check(phba); 10293 if (unlikely(rc)) 10294 /* Driver clean routine will clean up pending mailbox */ 10295 goto out_not_finished; 10296 10297 /* Prepare the mbox command to be posted */ 10298 mqe = &mboxq->u.mqe; 10299 mbx_cmnd = bf_get(lpfc_mqe_command, mqe); 10300 10301 /* Start timer for the mbox_tmo and log some mailbox post messages */ 10302 mod_timer(&psli->mbox_tmo, (jiffies + 10303 secs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)))); 10304 10305 lpfc_printf_log(phba, KERN_INFO, 
LOG_MBOX | LOG_SLI, 10306 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " 10307 "x%x x%x\n", 10308 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 10309 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 10310 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 10311 phba->pport->port_state, psli->sli_flag); 10312 10313 if (mbx_cmnd != MBX_HEARTBEAT) { 10314 if (mboxq->vport) { 10315 lpfc_debugfs_disc_trc(mboxq->vport, 10316 LPFC_DISC_TRC_MBOX_VPORT, 10317 "MBOX Send vport: cmd:x%x mb:x%x x%x", 10318 mbx_cmnd, mqe->un.mb_words[0], 10319 mqe->un.mb_words[1]); 10320 } else { 10321 lpfc_debugfs_disc_trc(phba->pport, 10322 LPFC_DISC_TRC_MBOX, 10323 "MBOX Send: cmd:x%x mb:x%x x%x", 10324 mbx_cmnd, mqe->un.mb_words[0], 10325 mqe->un.mb_words[1]); 10326 } 10327 } 10328 psli->slistat.mbox_cmd++; 10329 10330 /* Post the mailbox command to the port */ 10331 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); 10332 if (rc != MBX_SUCCESS) { 10333 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10334 "(%d):2533 Mailbox command x%x (x%x/x%x) " 10335 "cannot issue Data: x%x x%x\n", 10336 mboxq->vport ? mboxq->vport->vpi : 0, 10337 mboxq->u.mb.mbxCommand, 10338 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 10339 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 10340 psli->sli_flag, MBX_NOWAIT); 10341 goto out_not_finished; 10342 } 10343 10344 return rc; 10345 10346 out_not_finished: 10347 spin_lock_irqsave(&phba->hbalock, iflags); 10348 if (phba->sli.mbox_active) { 10349 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 10350 __lpfc_mbox_cmpl_put(phba, mboxq); 10351 /* Release the token */ 10352 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 10353 phba->sli.mbox_active = NULL; 10354 } 10355 spin_unlock_irqrestore(&phba->hbalock, iflags); 10356 10357 return MBX_NOT_FINISHED; 10358 } 10359 10360 /** 10361 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command 10362 * @phba: Pointer to HBA context object. 10363 * @pmbox: Pointer to mailbox object. 10364 * @flag: Flag indicating how the mailbox need to be processed. 10365 * 10366 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from 10367 * the API jump table function pointer from the lpfc_hba struct. 10368 * 10369 * Return codes the caller owns the mailbox command after the return of the 10370 * function. 10371 **/ 10372 int 10373 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 10374 { 10375 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); 10376 } 10377 10378 /** 10379 * lpfc_mbox_api_table_setup - Set up mbox api function jump table 10380 * @phba: The hba struct for which this call is being executed. 10381 * @dev_grp: The HBA PCI-Device group number. 10382 * 10383 * This routine sets up the mbox interface API function jump table in @phba 10384 * struct. 10385 * Returns: 0 - success, -ENODEV - failure. 
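 **/

/*
 * Illustrative sketch, not part of the driver: once the jump table is set
 * up for the device group, callers dispatch through the generic wrapper
 * and never select an SLI revision themselves.  The helper name is
 * hypothetical.
 */
static int lpfc_example_mbox_via_jump_table(struct lpfc_hba *phba,
					    LPFC_MBOXQ_t *pmbox)
{
	int rc = lpfc_mbox_api_table_setup(phba, LPFC_PCI_DEV_OC);

	if (rc)
		return rc;
	/* dispatches to lpfc_sli_issue_mbox_s4() for SLI-4 parts */
	return lpfc_sli_issue_mbox(phba, pmbox, MBX_POLL);
}

/* (end of sketch; the driver routine follows)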
10386 **/ 10387 int 10388 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 10389 { 10390 10391 switch (dev_grp) { 10392 case LPFC_PCI_DEV_LP: 10393 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; 10394 phba->lpfc_sli_handle_slow_ring_event = 10395 lpfc_sli_handle_slow_ring_event_s3; 10396 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; 10397 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; 10398 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; 10399 break; 10400 case LPFC_PCI_DEV_OC: 10401 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; 10402 phba->lpfc_sli_handle_slow_ring_event = 10403 lpfc_sli_handle_slow_ring_event_s4; 10404 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; 10405 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; 10406 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; 10407 break; 10408 default: 10409 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10410 "1420 Invalid HBA PCI-device group: 0x%x\n", 10411 dev_grp); 10412 return -ENODEV; 10413 } 10414 return 0; 10415 } 10416 10417 /** 10418 * __lpfc_sli_ringtx_put - Add an iocb to the txq 10419 * @phba: Pointer to HBA context object. 10420 * @pring: Pointer to driver SLI ring object. 10421 * @piocb: Pointer to address of newly added command iocb. 10422 * 10423 * This function is called with hbalock held for SLI3 ports or 10424 * the ring lock held for SLI4 ports to add a command 10425 * iocb to the txq when SLI layer cannot submit the command iocb 10426 * to the ring. 10427 **/ 10428 void 10429 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10430 struct lpfc_iocbq *piocb) 10431 { 10432 if (phba->sli_rev == LPFC_SLI_REV4) 10433 lockdep_assert_held(&pring->ring_lock); 10434 else 10435 lockdep_assert_held(&phba->hbalock); 10436 /* Insert the caller's iocb in the txq tail for later processing. */ 10437 list_add_tail(&piocb->list, &pring->txq); 10438 } 10439 10440 /** 10441 * lpfc_sli_next_iocb - Get the next iocb in the txq 10442 * @phba: Pointer to HBA context object. 10443 * @pring: Pointer to driver SLI ring object. 10444 * @piocb: Pointer to address of newly added command iocb. 10445 * 10446 * This function is called with hbalock held before a new 10447 * iocb is submitted to the firmware. This function checks 10448 * txq to flush the iocbs in txq to Firmware before 10449 * submitting new iocbs to the Firmware. 10450 * If there are iocbs in the txq which need to be submitted 10451 * to firmware, lpfc_sli_next_iocb returns the first element 10452 * of the txq after dequeuing it from txq. 10453 * If there is no iocb in the txq then the function will return 10454 * *piocb and *piocb is set to NULL. Caller needs to check 10455 * *piocb to find if there are more commands in the txq. 10456 **/ 10457 static struct lpfc_iocbq * 10458 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10459 struct lpfc_iocbq **piocb) 10460 { 10461 struct lpfc_iocbq * nextiocb; 10462 10463 lockdep_assert_held(&phba->hbalock); 10464 10465 nextiocb = lpfc_sli_ringtx_get(phba, pring); 10466 if (!nextiocb) { 10467 nextiocb = *piocb; 10468 *piocb = NULL; 10469 } 10470 10471 return nextiocb; 10472 } 10473 10474 /** 10475 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb 10476 * @phba: Pointer to HBA context object. 10477 * @ring_number: SLI ring number to issue iocb on. 10478 * @piocb: Pointer to command iocb. 10479 * @flag: Flag indicating if this command can be put into txq. 
10480 * 10481 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue 10482 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is 10483 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT 10484 * flag is turned on, the function returns IOCB_ERROR. When the link is down, 10485 * this function allows only iocbs for posting buffers. This function finds 10486 * next available slot in the command ring and posts the command to the 10487 * available slot and writes the port attention register to request HBA start 10488 * processing new iocb. If there is no slot available in the ring and 10489 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise 10490 * the function returns IOCB_BUSY. 10491 * 10492 * This function is called with hbalock held. The function will return success 10493 * after it successfully submit the iocb to firmware or after adding to the 10494 * txq. 10495 **/ 10496 static int 10497 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, 10498 struct lpfc_iocbq *piocb, uint32_t flag) 10499 { 10500 struct lpfc_iocbq *nextiocb; 10501 IOCB_t *iocb; 10502 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number]; 10503 10504 lockdep_assert_held(&phba->hbalock); 10505 10506 if (piocb->cmd_cmpl && (!piocb->vport) && 10507 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 10508 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 10509 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10510 "1807 IOCB x%x failed. No vport\n", 10511 piocb->iocb.ulpCommand); 10512 dump_stack(); 10513 return IOCB_ERROR; 10514 } 10515 10516 10517 /* If the PCI channel is in offline state, do not post iocbs. */ 10518 if (unlikely(pci_channel_offline(phba->pcidev))) 10519 return IOCB_ERROR; 10520 10521 /* If HBA has a deferred error attention, fail the iocb. */ 10522 if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) 10523 return IOCB_ERROR; 10524 10525 /* 10526 * We should never get an IOCB if we are in a < LINK_DOWN state 10527 */ 10528 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 10529 return IOCB_ERROR; 10530 10531 /* 10532 * Check to see if we are blocking IOCB processing because of a 10533 * outstanding event. 10534 */ 10535 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) 10536 goto iocb_busy; 10537 10538 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { 10539 /* 10540 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF 10541 * can be issued if the link is not up. 10542 */ 10543 switch (piocb->iocb.ulpCommand) { 10544 case CMD_QUE_RING_BUF_CN: 10545 case CMD_QUE_RING_BUF64_CN: 10546 /* 10547 * For IOCBs, like QUE_RING_BUF, that have no rsp ring 10548 * completion, cmd_cmpl MUST be 0. 10549 */ 10550 if (piocb->cmd_cmpl) 10551 piocb->cmd_cmpl = NULL; 10552 fallthrough; 10553 case CMD_CREATE_XRI_CR: 10554 case CMD_CLOSE_XRI_CN: 10555 case CMD_CLOSE_XRI_CX: 10556 break; 10557 default: 10558 goto iocb_busy; 10559 } 10560 10561 /* 10562 * For FCP commands, we must be in a state where we can process link 10563 * attention events. 
 */
	} else if (unlikely(pring->ringno == LPFC_FCP_RING &&
			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
		goto iocb_busy;
	}

	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

iocb_busy:
	pring->stats.iocb_cmd_delay++;

out_busy:

	if (!(flag & SLI_IOCB_RET_IOCB)) {
		__lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}

/**
 * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue wqe on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_fcp_io_s3 is a wrapper function that invokes the lockless
 * function to send an iocb command to an HBA with SLI-3 interface spec.
 *
 * This function takes the hbalock before invoking the lockless version.
 * The function will return success after it successfully submits the wqe to
 * firmware or after adding it to the txq.
 **/
static int
__lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
			   struct lpfc_iocbq *piocb, uint32_t flag)
{
	unsigned long iflags;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflags);
	rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return rc;
}

/**
 * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue wqe on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue
 * a wqe command to an HBA with SLI-4 interface spec.
 *
 * This function is a lockless version. The function will return success
 * after it successfully submits the wqe to firmware or after adding it to
 * the txq.
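 **/

/*
 * Editorial note (an observation, not a driver comment): the _s4 FCP path
 * can stay lockless because each hardware queue (hdwq) has its own WQ and,
 * as noted in __lpfc_sli_issue_iocb_s4() below, FCP I/Os already have an
 * XRI and a mapped sgl assigned; contrast with the _s3 wrapper above,
 * which must take hbalock around the shared SLI-3 ring.
 */

/* (end of note; the routine follows)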
10638  **/
10639 static int
10640 __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
10641 			   struct lpfc_iocbq *piocb, uint32_t flag)
10642 {
10643 	struct lpfc_io_buf *lpfc_cmd = piocb->io_buf;
10644 
10645 	lpfc_prep_embed_io(phba, lpfc_cmd);
10646 	return lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
10647 }
10648 
10649 void
10650 lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
10651 {
10652 	struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq;
10653 	union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe;
10654 	struct sli4_sge_le *sgl;
10655 	u32 type_size;
10656 
10657 	/* 128 byte wqe support here */
10658 	sgl = (struct sli4_sge_le *)lpfc_cmd->dma_sgl;
10659 
10660 	if (phba->fcp_embed_io) {
10661 		struct fcp_cmnd *fcp_cmnd;
10662 		u32 *ptr;
10663 
10664 		fcp_cmnd = lpfc_cmd->fcp_cmnd;
10665 
10666 		/* Word 0-2 - FCP_CMND */
10667 		type_size = le32_to_cpu(sgl->sge_len);
10668 		type_size |= ULP_BDE64_TYPE_BDE_IMMED;
10669 		wqe->generic.bde.tus.w = type_size;
10670 		wqe->generic.bde.addrHigh = 0;
10671 		wqe->generic.bde.addrLow = 72;	/* Word 18 */
10672 
10673 		bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10674 		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
10675 
10676 		/* Word 18-29 FCP CMND Payload */
10677 		ptr = &wqe->words[18];
10678 		lpfc_sli_pcimem_bcopy(fcp_cmnd, ptr, le32_to_cpu(sgl->sge_len));
10679 	} else {
10680 		/* Word 0-2 - Inline BDE */
10681 		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
10682 		wqe->generic.bde.tus.f.bdeSize = le32_to_cpu(sgl->sge_len);
10683 		wqe->generic.bde.addrHigh = le32_to_cpu(sgl->addr_hi);
10684 		wqe->generic.bde.addrLow = le32_to_cpu(sgl->addr_lo);
10685 
10686 		/* Word 10 */
10687 		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10688 		bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
10689 	}
10690 
10691 	/* add the VMID tags as per switch response */
10692 	if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) {
10693 		if (phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
10694 			bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
10695 			bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
10696 			       (piocb->vmid_tag.cs_ctl_vmid));
10697 		} else if (phba->cfg_vmid_app_header) {
10698 			bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
10699 			bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10700 			wqe->words[31] = piocb->vmid_tag.app_id;
10701 		}
10702 	}
10703 }
10704 
10705 /**
10706  * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
10707  * @phba: Pointer to HBA context object.
10708  * @ring_number: SLI ring number to issue iocb on.
10709  * @piocb: Pointer to command iocb.
10710  * @flag: Flag indicating if this command can be put into txq.
10711  *
10712  * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
10713  * an iocb command to an HBA with SLI-4 interface spec.
10714  *
10715  * This function is called with ringlock held. The function will return success
10716  * after it successfully submits the iocb to firmware or after adding it to the
10717  * txq.
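 *
 * A minimal locking sketch (illustrative only), mirroring what the
 * lpfc_sli_issue_iocb() wrapper below does for SLI-4 ports:
 *
 *	spin_lock_irqsave(&pring->ring_lock, iflags);
 *	rc = __lpfc_sli_issue_iocb_s4(phba, ring_number, piocb, flag);
 *	spin_unlock_irqrestore(&pring->ring_lock, iflags);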
10718  **/
10719 static int
10720 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
10721 			 struct lpfc_iocbq *piocb, uint32_t flag)
10722 {
10723 	struct lpfc_sglq *sglq;
10724 	union lpfc_wqe128 *wqe;
10725 	struct lpfc_queue *wq;
10726 	struct lpfc_sli_ring *pring;
10727 	u32 ulp_command = get_job_cmnd(phba, piocb);
10728 
10729 	/* Get the WQ */
10730 	if ((piocb->cmd_flag & LPFC_IO_FCP) ||
10731 	    (piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
10732 		wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
10733 	} else {
10734 		wq = phba->sli4_hba.els_wq;
10735 	}
10736 
10737 	/* Get corresponding ring */
10738 	pring = wq->pring;
10739 
10740 	/*
10741 	 * The WQE can be either 64 or 128 bytes.
10742 	 */
10743 
10744 	lockdep_assert_held(&pring->ring_lock);
10745 	wqe = &piocb->wqe;
10746 	if (piocb->sli4_xritag == NO_XRI) {
10747 		if (ulp_command == CMD_ABORT_XRI_CX)
10748 			sglq = NULL;
10749 		else {
10750 			sglq = __lpfc_sli_get_els_sglq(phba, piocb);
10751 			if (!sglq) {
10752 				if (!(flag & SLI_IOCB_RET_IOCB)) {
10753 					__lpfc_sli_ringtx_put(phba,
10754 							      pring,
10755 							      piocb);
10756 					return IOCB_SUCCESS;
10757 				} else {
10758 					return IOCB_BUSY;
10759 				}
10760 			}
10761 		}
10762 	} else if (piocb->cmd_flag & LPFC_IO_FCP) {
10763 		/* These IOs already have an XRI and a mapped sgl. */
10764 		sglq = NULL;
10765 	}
10766 	else {
10767 		/*
10768 		 * This is a continuation of a command (CX), so this
10769 		 * sglq is on the active list
10770 		 */
10771 		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
10772 		if (!sglq)
10773 			return IOCB_ERROR;
10774 	}
10775 
10776 	if (sglq) {
10777 		piocb->sli4_lxritag = sglq->sli4_lxritag;
10778 		piocb->sli4_xritag = sglq->sli4_xritag;
10779 
10780 		/* ABTS sent by initiator to CT exchange, the
10781 		 * RX_ID field will be filled with the newly
10782 		 * allocated responder XRI.
10783 		 */
10784 		if (ulp_command == CMD_XMIT_BLS_RSP64_CX &&
10785 		    piocb->abort_bls == LPFC_ABTS_UNSOL_INT)
10786 			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10787 			       piocb->sli4_xritag);
10788 
10789 		bf_set(wqe_xri_tag, &wqe->generic.wqe_com,
10790 		       piocb->sli4_xritag);
10791 
10792 		if (lpfc_wqe_bpl2sgl(phba, piocb, sglq) == NO_XRI)
10793 			return IOCB_ERROR;
10794 	}
10795 
10796 	if (lpfc_sli4_wq_put(wq, wqe))
10797 		return IOCB_ERROR;
10798 
10799 	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
10800 
10801 	return 0;
10802 }
10803 
10804 /*
10805  * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
10806  *
10807  * This routine wraps the actual fcp i/o function pointer from the
10808  * lpfc_hba struct, issuing a WQE for SLI-4 or an IOCB for SLI-3
10809  * devices.
10810  *
10811  * Return codes:
10812  *	IOCB_ERROR - Error
10813  *	IOCB_SUCCESS - Success
10814  *	IOCB_BUSY - Busy
10815  **/
10816 int
10817 lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
10818 		      struct lpfc_iocbq *piocb, uint32_t flag)
10819 {
10820 	return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
10821 }
10822 
10823 /*
10824  * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
10825  *
10826  * This routine wraps the actual lockless version of the IOCB issue function
10827  * pointer from the lpfc_hba struct.
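 *
 * A dispatch sketch (illustrative only): on an SLI-4 port the call
 *
 *	rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
 *
 * resolves to __lpfc_sli_issue_iocb_s4(), as installed by
 * lpfc_sli_api_table_setup().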
10828 * 10829 * Return codes: 10830 * IOCB_ERROR - Error 10831 * IOCB_SUCCESS - Success 10832 * IOCB_BUSY - Busy 10833 **/ 10834 int 10835 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 10836 struct lpfc_iocbq *piocb, uint32_t flag) 10837 { 10838 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 10839 } 10840 10841 static void 10842 __lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq, 10843 struct lpfc_vport *vport, 10844 struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did, 10845 u32 elscmd, u8 tmo, u8 expect_rsp) 10846 { 10847 struct lpfc_hba *phba = vport->phba; 10848 IOCB_t *cmd; 10849 10850 cmd = &cmdiocbq->iocb; 10851 memset(cmd, 0, sizeof(*cmd)); 10852 10853 cmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); 10854 cmd->un.elsreq64.bdl.addrLow = putPaddrLow(bmp->phys); 10855 cmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 10856 10857 if (expect_rsp) { 10858 cmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); 10859 cmd->un.elsreq64.remoteID = did; /* DID */ 10860 cmd->ulpCommand = CMD_ELS_REQUEST64_CR; 10861 cmd->ulpTimeout = tmo; 10862 } else { 10863 cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64); 10864 cmd->un.genreq64.xmit_els_remoteID = did; /* DID */ 10865 cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX; 10866 cmd->ulpPU = PARM_NPIV_DID; 10867 } 10868 cmd->ulpBdeCount = 1; 10869 cmd->ulpLe = 1; 10870 cmd->ulpClass = CLASS3; 10871 10872 /* If we have NPIV enabled, we want to send ELS traffic by VPI. */ 10873 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 10874 if (expect_rsp) { 10875 cmd->un.elsreq64.myID = vport->fc_myDID; 10876 10877 /* For ELS_REQUEST64_CR, use the VPI by default */ 10878 cmd->ulpContext = phba->vpi_ids[vport->vpi]; 10879 } 10880 10881 cmd->ulpCt_h = 0; 10882 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ 10883 if (elscmd == ELS_CMD_ECHO) 10884 cmd->ulpCt_l = 0; /* context = invalid RPI */ 10885 else 10886 cmd->ulpCt_l = 1; /* context = VPI */ 10887 } 10888 } 10889 10890 static void 10891 __lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq, 10892 struct lpfc_vport *vport, 10893 struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did, 10894 u32 elscmd, u8 tmo, u8 expect_rsp) 10895 { 10896 struct lpfc_hba *phba = vport->phba; 10897 union lpfc_wqe128 *wqe; 10898 struct ulp_bde64_le *bde; 10899 u8 els_id; 10900 10901 wqe = &cmdiocbq->wqe; 10902 memset(wqe, 0, sizeof(*wqe)); 10903 10904 /* Word 0 - 2 BDE */ 10905 bde = (struct ulp_bde64_le *)&wqe->generic.bde; 10906 bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys)); 10907 bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys)); 10908 bde->type_size = cpu_to_le32(cmd_size); 10909 bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64); 10910 10911 if (expect_rsp) { 10912 bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_WQE); 10913 10914 /* Transfer length */ 10915 wqe->els_req.payload_len = cmd_size; 10916 wqe->els_req.max_response_payload_len = FCELSSIZE; 10917 10918 /* DID */ 10919 bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did); 10920 10921 /* Word 11 - ELS_ID */ 10922 switch (elscmd) { 10923 case ELS_CMD_PLOGI: 10924 els_id = LPFC_ELS_ID_PLOGI; 10925 break; 10926 case ELS_CMD_FLOGI: 10927 els_id = LPFC_ELS_ID_FLOGI; 10928 break; 10929 case ELS_CMD_LOGO: 10930 els_id = LPFC_ELS_ID_LOGO; 10931 break; 10932 case ELS_CMD_FDISC: 10933 if (!vport->fc_myDID) { 10934 els_id = LPFC_ELS_ID_FDISC; 10935 break; 10936 } 10937 fallthrough; 10938 default: 10939 els_id = LPFC_ELS_ID_DEFAULT; 10940 break; 10941 } 10942 10943 bf_set(wqe_els_id, 
&wqe->els_req.wqe_com, els_id); 10944 } else { 10945 /* DID */ 10946 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, did); 10947 10948 /* Transfer length */ 10949 wqe->xmit_els_rsp.response_payload_len = cmd_size; 10950 10951 bf_set(wqe_cmnd, &wqe->xmit_els_rsp.wqe_com, 10952 CMD_XMIT_ELS_RSP64_WQE); 10953 } 10954 10955 bf_set(wqe_tmo, &wqe->generic.wqe_com, tmo); 10956 bf_set(wqe_reqtag, &wqe->generic.wqe_com, cmdiocbq->iotag); 10957 bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3); 10958 10959 /* If we have NPIV enabled, we want to send ELS traffic by VPI. 10960 * For SLI4, since the driver controls VPIs we also want to include 10961 * all ELS pt2pt protocol traffic as well. 10962 */ 10963 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) || 10964 test_bit(FC_PT2PT, &vport->fc_flag)) { 10965 if (expect_rsp) { 10966 bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID); 10967 10968 /* For ELS_REQUEST64_WQE, use the VPI by default */ 10969 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 10970 phba->vpi_ids[vport->vpi]); 10971 } 10972 10973 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ 10974 if (elscmd == ELS_CMD_ECHO) 10975 bf_set(wqe_ct, &wqe->generic.wqe_com, 0); 10976 else 10977 bf_set(wqe_ct, &wqe->generic.wqe_com, 1); 10978 } 10979 } 10980 10981 void 10982 lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, 10983 struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, 10984 u16 cmd_size, u32 did, u32 elscmd, u8 tmo, 10985 u8 expect_rsp) 10986 { 10987 phba->__lpfc_sli_prep_els_req_rsp(cmdiocbq, vport, bmp, cmd_size, did, 10988 elscmd, tmo, expect_rsp); 10989 } 10990 10991 static void 10992 __lpfc_sli_prep_gen_req_s3(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp, 10993 u16 rpi, u32 num_entry, u8 tmo) 10994 { 10995 IOCB_t *cmd; 10996 10997 cmd = &cmdiocbq->iocb; 10998 memset(cmd, 0, sizeof(*cmd)); 10999 11000 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); 11001 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys); 11002 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 11003 cmd->un.genreq64.bdl.bdeSize = num_entry * sizeof(struct ulp_bde64); 11004 11005 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; 11006 cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT; 11007 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); 11008 11009 cmd->ulpContext = rpi; 11010 cmd->ulpClass = CLASS3; 11011 cmd->ulpCommand = CMD_GEN_REQUEST64_CR; 11012 cmd->ulpBdeCount = 1; 11013 cmd->ulpLe = 1; 11014 cmd->ulpOwner = OWN_CHIP; 11015 cmd->ulpTimeout = tmo; 11016 } 11017 11018 static void 11019 __lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp, 11020 u16 rpi, u32 num_entry, u8 tmo) 11021 { 11022 union lpfc_wqe128 *cmdwqe; 11023 struct ulp_bde64_le *bde, *bpl; 11024 u32 xmit_len = 0, total_len = 0, size, type, i; 11025 11026 cmdwqe = &cmdiocbq->wqe; 11027 memset(cmdwqe, 0, sizeof(*cmdwqe)); 11028 11029 /* Calculate total_len and xmit_len */ 11030 bpl = (struct ulp_bde64_le *)bmp->virt; 11031 for (i = 0; i < num_entry; i++) { 11032 size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK; 11033 total_len += size; 11034 } 11035 for (i = 0; i < num_entry; i++) { 11036 size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK; 11037 type = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_TYPE_MASK; 11038 if (type != ULP_BDE64_TYPE_BDE_64) 11039 break; 11040 xmit_len += size; 11041 } 11042 11043 /* Words 0 - 2 */ 11044 bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde; 11045 bde->addr_low = bpl->addr_low; 11046 bde->addr_high = bpl->addr_high; 11047 
bde->type_size = cpu_to_le32(xmit_len); 11048 bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64); 11049 11050 /* Word 3 */ 11051 cmdwqe->gen_req.request_payload_len = xmit_len; 11052 11053 /* Word 5 */ 11054 bf_set(wqe_type, &cmdwqe->gen_req.wge_ctl, FC_TYPE_CT); 11055 bf_set(wqe_rctl, &cmdwqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL); 11056 bf_set(wqe_si, &cmdwqe->gen_req.wge_ctl, 1); 11057 bf_set(wqe_la, &cmdwqe->gen_req.wge_ctl, 1); 11058 11059 /* Word 6 */ 11060 bf_set(wqe_ctxt_tag, &cmdwqe->gen_req.wqe_com, rpi); 11061 11062 /* Word 7 */ 11063 bf_set(wqe_tmo, &cmdwqe->gen_req.wqe_com, tmo); 11064 bf_set(wqe_class, &cmdwqe->gen_req.wqe_com, CLASS3); 11065 bf_set(wqe_cmnd, &cmdwqe->gen_req.wqe_com, CMD_GEN_REQUEST64_CR); 11066 bf_set(wqe_ct, &cmdwqe->gen_req.wqe_com, SLI4_CT_RPI); 11067 11068 /* Word 12 */ 11069 cmdwqe->gen_req.max_response_payload_len = total_len - xmit_len; 11070 } 11071 11072 void 11073 lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, 11074 struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry, u8 tmo) 11075 { 11076 phba->__lpfc_sli_prep_gen_req(cmdiocbq, bmp, rpi, num_entry, tmo); 11077 } 11078 11079 static void 11080 __lpfc_sli_prep_xmit_seq64_s3(struct lpfc_iocbq *cmdiocbq, 11081 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id, 11082 u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd) 11083 { 11084 IOCB_t *icmd; 11085 11086 icmd = &cmdiocbq->iocb; 11087 memset(icmd, 0, sizeof(*icmd)); 11088 11089 icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys); 11090 icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys); 11091 icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 11092 icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64)); 11093 icmd->un.xseq64.w5.hcsw.Fctl = LA; 11094 if (last_seq) 11095 icmd->un.xseq64.w5.hcsw.Fctl |= LS; 11096 icmd->un.xseq64.w5.hcsw.Dfctl = 0; 11097 icmd->un.xseq64.w5.hcsw.Rctl = rctl; 11098 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT; 11099 11100 icmd->ulpBdeCount = 1; 11101 icmd->ulpLe = 1; 11102 icmd->ulpClass = CLASS3; 11103 11104 switch (cr_cx_cmd) { 11105 case CMD_XMIT_SEQUENCE64_CR: 11106 icmd->ulpContext = rpi; 11107 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR; 11108 break; 11109 case CMD_XMIT_SEQUENCE64_CX: 11110 icmd->ulpContext = ox_id; 11111 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX; 11112 break; 11113 default: 11114 break; 11115 } 11116 } 11117 11118 static void 11119 __lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq, 11120 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id, 11121 u32 full_size, u8 rctl, u8 last_seq, u8 cr_cx_cmd) 11122 { 11123 union lpfc_wqe128 *wqe; 11124 struct ulp_bde64 *bpl; 11125 11126 wqe = &cmdiocbq->wqe; 11127 memset(wqe, 0, sizeof(*wqe)); 11128 11129 /* Words 0 - 2 */ 11130 bpl = (struct ulp_bde64 *)bmp->virt; 11131 wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh; 11132 wqe->xmit_sequence.bde.addrLow = bpl->addrLow; 11133 wqe->xmit_sequence.bde.tus.w = bpl->tus.w; 11134 11135 /* Word 5 */ 11136 bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, last_seq); 11137 bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 1); 11138 bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0); 11139 bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, rctl); 11140 bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_CT); 11141 11142 /* Word 6 */ 11143 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, rpi); 11144 11145 bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com, 11146 CMD_XMIT_SEQUENCE64_WQE); 11147 11148 /* Word 7 */ 11149 bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3); 11150 11151 /* Word 9 */ 11152 
bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id); 11153 11154 if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK)) { 11155 /* Word 10 */ 11156 if (cmdiocbq->cmd_flag & LPFC_IO_VMID) { 11157 bf_set(wqe_appid, &wqe->xmit_sequence.wqe_com, 1); 11158 bf_set(wqe_wqes, &wqe->xmit_sequence.wqe_com, 1); 11159 wqe->words[31] = LOOPBACK_SRC_APPID; 11160 } 11161 11162 /* Word 12 */ 11163 wqe->xmit_sequence.xmit_len = full_size; 11164 } 11165 else 11166 wqe->xmit_sequence.xmit_len = 11167 wqe->xmit_sequence.bde.tus.f.bdeSize; 11168 } 11169 11170 void 11171 lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, 11172 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id, 11173 u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd) 11174 { 11175 phba->__lpfc_sli_prep_xmit_seq64(cmdiocbq, bmp, rpi, ox_id, num_entry, 11176 rctl, last_seq, cr_cx_cmd); 11177 } 11178 11179 static void 11180 __lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context, 11181 u16 iotag, u8 ulp_class, u16 cqid, bool ia, 11182 bool wqec) 11183 { 11184 IOCB_t *icmd = NULL; 11185 11186 icmd = &cmdiocbq->iocb; 11187 memset(icmd, 0, sizeof(*icmd)); 11188 11189 /* Word 5 */ 11190 icmd->un.acxri.abortContextTag = ulp_context; 11191 icmd->un.acxri.abortIoTag = iotag; 11192 11193 if (ia) { 11194 /* Word 7 */ 11195 icmd->ulpCommand = CMD_CLOSE_XRI_CN; 11196 } else { 11197 /* Word 3 */ 11198 icmd->un.acxri.abortType = ABORT_TYPE_ABTS; 11199 11200 /* Word 7 */ 11201 icmd->ulpClass = ulp_class; 11202 icmd->ulpCommand = CMD_ABORT_XRI_CN; 11203 } 11204 11205 /* Word 7 */ 11206 icmd->ulpLe = 1; 11207 } 11208 11209 static void 11210 __lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context, 11211 u16 iotag, u8 ulp_class, u16 cqid, bool ia, 11212 bool wqec) 11213 { 11214 union lpfc_wqe128 *wqe; 11215 11216 wqe = &cmdiocbq->wqe; 11217 memset(wqe, 0, sizeof(*wqe)); 11218 11219 /* Word 3 */ 11220 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 11221 if (ia) 11222 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); 11223 else 11224 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 11225 11226 /* Word 7 */ 11227 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_WQE); 11228 11229 /* Word 8 */ 11230 wqe->abort_cmd.wqe_com.abort_tag = ulp_context; 11231 11232 /* Word 9 */ 11233 bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, iotag); 11234 11235 /* Word 10 */ 11236 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); 11237 11238 /* Word 11 */ 11239 if (wqec) 11240 bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1); 11241 bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid); 11242 bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND); 11243 } 11244 11245 void 11246 lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, 11247 u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid, 11248 bool ia, bool wqec) 11249 { 11250 phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class, 11251 cqid, ia, wqec); 11252 } 11253 11254 /** 11255 * lpfc_sli_api_table_setup - Set up sli api function jump table 11256 * @phba: The hba struct for which this call is being executed. 11257 * @dev_grp: The HBA PCI-Device group number. 11258 * 11259 * This routine sets up the SLI interface API function jump table in @phba 11260 * struct. 11261 * Returns: 0 - success, -ENODEV - failure. 
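 *
 * A minimal usage sketch (illustrative only), as done during attach:
 *
 *	if (lpfc_sli_api_table_setup(phba, LPFC_PCI_DEV_OC))
 *		return -ENODEV;
 *
 * after which the phba->__lpfc_sli_* indirect calls dispatch to the
 * _s4 variants.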
11262  **/
11263 int
11264 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
11265 {
11266 
11267 	switch (dev_grp) {
11268 	case LPFC_PCI_DEV_LP:
11269 		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
11270 		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
11271 		phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
11272 		phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s3;
11273 		phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s3;
11274 		phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s3;
11275 		phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s3;
11276 		break;
11277 	case LPFC_PCI_DEV_OC:
11278 		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
11279 		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
11280 		phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
11281 		phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s4;
11282 		phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s4;
11283 		phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s4;
11284 		phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s4;
11285 		break;
11286 	default:
11287 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11288 				"1419 Invalid HBA PCI-device group: 0x%x\n",
11289 				dev_grp);
11290 		return -ENODEV;
11291 	}
11292 	return 0;
11293 }
11294 
11295 /**
11296  * lpfc_sli4_calc_ring - Calculates which ring to use
11297  * @phba: Pointer to HBA context object.
11298  * @piocb: Pointer to command iocb.
11299  *
11300  * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
11301  * hba_wqidx, thus we need to calculate the corresponding ring.
11302  * Since ABORTS must go on the same WQ as the command they are
11303  * aborting, we use the command's hba_wqidx.
11304  */
11305 struct lpfc_sli_ring *
11306 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
11307 {
11308 	struct lpfc_io_buf *lpfc_cmd;
11309 
11310 	if (piocb->cmd_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
11311 		if (unlikely(!phba->sli4_hba.hdwq))
11312 			return NULL;
11313 		/*
11314 		 * For an abort iocb, hba_wqidx should already
11315 		 * be set up based on what work queue we used.
11316 		 */
11317 		if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
11318 			lpfc_cmd = piocb->io_buf;
11319 			piocb->hba_wqidx = lpfc_cmd->hdwq_no;
11320 		}
11321 		return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
11322 	} else {
11323 		if (unlikely(!phba->sli4_hba.els_wq))
11324 			return NULL;
11325 		piocb->hba_wqidx = 0;
11326 		return phba->sli4_hba.els_wq->pring;
11327 	}
11328 }
11329 
11330 inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq)
11331 {
11332 	struct lpfc_hba *phba = eq->phba;
11333 
11334 	/*
11335 	 * Unlocking an irq is one of the entry points to check
11336 	 * for re-schedule, but we are good for the io submission
11337 	 * path as the midlayer does a get_cpu to glue us in. Flush
11338 	 * out the invalidate queue so we can see the updated
11339 	 * value for the mode flag.
11340 	 */
11341 	smp_rmb();
11342 
11343 	if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
11344 		/* We will not likely get the completion for the caller
11345 		 * during this iteration, but that's fine.
11346 		 * Future IOs coming on this eq should be able to
11347 		 * pick it up. As for the case of single IOs, they
11348 		 * will be handled through a sched from the polling timer
11349 		 * function, which is currently triggered every 1msec.
11350 		 */
11351 		lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM,
11352 				     LPFC_QUEUE_WORK);
11353 }
11354 
11355 /**
11356  * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
11357  * @phba: Pointer to HBA context object.
11358  * @ring_number: Ring number
11359  * @piocb: Pointer to command iocb.
11360  * @flag: Flag indicating if this command can be put into txq.
11361  *
11362  * lpfc_sli_issue_iocb is a wrapper around the __lpfc_sli_issue_iocb
11363  * function. This function takes the appropriate lock (the ring_lock for
11364  * SLI-4, the hbalock for SLI-2/3), calls __lpfc_sli_issue_iocb, and
11365  * returns the error returned by it. This wrapper is used by
11366  * functions which do not hold the lock.
11367  **/
11368 int
11369 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
11370 		    struct lpfc_iocbq *piocb, uint32_t flag)
11371 {
11372 	struct lpfc_sli_ring *pring;
11373 	struct lpfc_queue *eq;
11374 	unsigned long iflags;
11375 	int rc;
11376 
11377 	/* If the PCI channel is in offline state, do not post iocbs. */
11378 	if (unlikely(pci_channel_offline(phba->pcidev)))
11379 		return IOCB_ERROR;
11380 
11381 	if (phba->sli_rev == LPFC_SLI_REV4) {
11382 		lpfc_sli_prep_wqe(phba, piocb);
11383 
11384 		eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
11385 
11386 		pring = lpfc_sli4_calc_ring(phba, piocb);
11387 		if (unlikely(pring == NULL))
11388 			return IOCB_ERROR;
11389 
11390 		spin_lock_irqsave(&pring->ring_lock, iflags);
11391 		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11392 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
11393 
11394 		lpfc_sli4_poll_eq(eq);
11395 	} else {
11396 		/* For now, SLI2/3 will still use hbalock */
11397 		spin_lock_irqsave(&phba->hbalock, iflags);
11398 		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11399 		spin_unlock_irqrestore(&phba->hbalock, iflags);
11400 	}
11401 	return rc;
11402 }
11403 
11404 /**
11405  * lpfc_extra_ring_setup - Extra ring setup function
11406  * @phba: Pointer to HBA context object.
11407  *
11408  * This function is called while the driver attaches to the
11409  * HBA to set up the extra ring. The extra ring is used
11410  * only when the driver needs to support target mode functionality
11411  * or IP over FC functionality.
11412  *
11413  * This function is called with no lock held. SLI3 only.
11414 **/ 11415 static int 11416 lpfc_extra_ring_setup( struct lpfc_hba *phba) 11417 { 11418 struct lpfc_sli *psli; 11419 struct lpfc_sli_ring *pring; 11420 11421 psli = &phba->sli; 11422 11423 /* Adjust cmd/rsp ring iocb entries more evenly */ 11424 11425 /* Take some away from the FCP ring */ 11426 pring = &psli->sli3_ring[LPFC_FCP_RING]; 11427 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 11428 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 11429 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 11430 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 11431 11432 /* and give them to the extra ring */ 11433 pring = &psli->sli3_ring[LPFC_EXTRA_RING]; 11434 11435 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 11436 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 11437 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 11438 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 11439 11440 /* Setup default profile for this ring */ 11441 pring->iotag_max = 4096; 11442 pring->num_mask = 1; 11443 pring->prt[0].profile = 0; /* Mask 0 */ 11444 pring->prt[0].rctl = phba->cfg_multi_ring_rctl; 11445 pring->prt[0].type = phba->cfg_multi_ring_type; 11446 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 11447 return 0; 11448 } 11449 11450 static void 11451 lpfc_sli_post_recovery_event(struct lpfc_hba *phba, 11452 struct lpfc_nodelist *ndlp) 11453 { 11454 unsigned long iflags; 11455 struct lpfc_work_evt *evtp = &ndlp->recovery_evt; 11456 11457 /* Hold a node reference for outstanding queued work */ 11458 if (!lpfc_nlp_get(ndlp)) 11459 return; 11460 11461 spin_lock_irqsave(&phba->hbalock, iflags); 11462 if (!list_empty(&evtp->evt_listp)) { 11463 spin_unlock_irqrestore(&phba->hbalock, iflags); 11464 lpfc_nlp_put(ndlp); 11465 return; 11466 } 11467 11468 evtp->evt_arg1 = ndlp; 11469 evtp->evt = LPFC_EVT_RECOVER_PORT; 11470 list_add_tail(&evtp->evt_listp, &phba->work_list); 11471 spin_unlock_irqrestore(&phba->hbalock, iflags); 11472 11473 lpfc_worker_wake_up(phba); 11474 } 11475 11476 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port. 11477 * @phba: Pointer to HBA context object. 11478 * @iocbq: Pointer to iocb object. 11479 * 11480 * The async_event handler calls this routine when it receives 11481 * an ASYNC_STATUS_CN event from the port. The port generates 11482 * this event when an Abort Sequence request to an rport fails 11483 * twice in succession. The abort could be originated by the 11484 * driver or by the port. The ABTS could have been for an ELS 11485 * or FCP IO. The port only generates this event when an ABTS 11486 * fails to complete after one retry. 11487 */ 11488 static void 11489 lpfc_sli_abts_err_handler(struct lpfc_hba *phba, 11490 struct lpfc_iocbq *iocbq) 11491 { 11492 struct lpfc_nodelist *ndlp = NULL; 11493 uint16_t rpi = 0, vpi = 0; 11494 struct lpfc_vport *vport = NULL; 11495 11496 /* The rpi in the ulpContext is vport-sensitive. 
*/ 11497 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag; 11498 rpi = iocbq->iocb.ulpContext; 11499 11500 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11501 "3092 Port generated ABTS async event " 11502 "on vpi %d rpi %d status 0x%x\n", 11503 vpi, rpi, iocbq->iocb.ulpStatus); 11504 11505 vport = lpfc_find_vport_by_vpid(phba, vpi); 11506 if (!vport) 11507 goto err_exit; 11508 ndlp = lpfc_findnode_rpi(vport, rpi); 11509 if (!ndlp) 11510 goto err_exit; 11511 11512 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) 11513 lpfc_sli_abts_recover_port(vport, ndlp); 11514 return; 11515 11516 err_exit: 11517 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11518 "3095 Event Context not found, no " 11519 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n", 11520 vpi, rpi, iocbq->iocb.ulpStatus, 11521 iocbq->iocb.ulpContext); 11522 } 11523 11524 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port. 11525 * @phba: pointer to HBA context object. 11526 * @ndlp: nodelist pointer for the impacted rport. 11527 * @axri: pointer to the wcqe containing the failed exchange. 11528 * 11529 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the 11530 * port. The port generates this event when an abort exchange request to an 11531 * rport fails twice in succession with no reply. The abort could be originated 11532 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO. 11533 */ 11534 void 11535 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, 11536 struct lpfc_nodelist *ndlp, 11537 struct sli4_wcqe_xri_aborted *axri) 11538 { 11539 uint32_t ext_status = 0; 11540 11541 if (!ndlp) { 11542 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11543 "3115 Node Context not found, driver " 11544 "ignoring abts err event\n"); 11545 return; 11546 } 11547 11548 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11549 "3116 Port generated FCP XRI ABORT event on " 11550 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n", 11551 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], 11552 bf_get(lpfc_wcqe_xa_xri, axri), 11553 bf_get(lpfc_wcqe_xa_status, axri), 11554 axri->parameter); 11555 11556 /* 11557 * Catch the ABTS protocol failure case. Older OCe FW releases returned 11558 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and 11559 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT. 11560 */ 11561 ext_status = axri->parameter & IOERR_PARAM_MASK; 11562 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && 11563 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) 11564 lpfc_sli_post_recovery_event(phba, ndlp); 11565 } 11566 11567 /** 11568 * lpfc_sli_async_event_handler - ASYNC iocb handler function 11569 * @phba: Pointer to HBA context object. 11570 * @pring: Pointer to driver SLI ring object. 11571 * @iocbq: Pointer to iocb object. 11572 * 11573 * This function is called by the slow ring event handler 11574 * function when there is an ASYNC event iocb in the ring. 11575 * This function is called with no lock held. 11576 * Currently this function handles only temperature related 11577 * ASYNC events. The function decodes the temperature sensor 11578 * event message and posts events for the management applications. 
11579  **/
11580 static void
11581 lpfc_sli_async_event_handler(struct lpfc_hba *phba,
11582 			     struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
11583 {
11584 	IOCB_t *icmd;
11585 	uint16_t evt_code;
11586 	struct temp_event temp_event_data;
11587 	struct Scsi_Host *shost;
11588 	uint32_t *iocb_w;
11589 
11590 	icmd = &iocbq->iocb;
11591 	evt_code = icmd->un.asyncstat.evt_code;
11592 
11593 	switch (evt_code) {
11594 	case ASYNC_TEMP_WARN:
11595 	case ASYNC_TEMP_SAFE:
11596 		temp_event_data.data = (uint32_t) icmd->ulpContext;
11597 		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
11598 		if (evt_code == ASYNC_TEMP_WARN) {
11599 			temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
11600 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11601 					"0347 Adapter is very hot, please take "
11602 					"corrective action. temperature : %d Celsius\n",
11603 					(uint32_t) icmd->ulpContext);
11604 		} else {
11605 			temp_event_data.event_code = LPFC_NORMAL_TEMP;
11606 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11607 					"0340 Adapter temperature is OK now. "
11608 					"temperature : %d Celsius\n",
11609 					(uint32_t) icmd->ulpContext);
11610 		}
11611 
11612 		/* Send temperature change event to applications */
11613 		shost = lpfc_shost_from_vport(phba->pport);
11614 		fc_host_post_vendor_event(shost, fc_get_event_number(),
11615 					  sizeof(temp_event_data), (char *) &temp_event_data,
11616 					  LPFC_NL_VENDOR_ID);
11617 		break;
11618 	case ASYNC_STATUS_CN:
11619 		lpfc_sli_abts_err_handler(phba, iocbq);
11620 		break;
11621 	default:
11622 		iocb_w = (uint32_t *) icmd;
11623 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11624 				"0346 Ring %d handler: unexpected ASYNC_STATUS"
11625 				" evt_code 0x%x\n"
11626 				"W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
11627 				"W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
11628 				"W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
11629 				"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
11630 				pring->ringno, icmd->un.asyncstat.evt_code,
11631 				iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
11632 				iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
11633 				iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
11634 				iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
11635 
11636 		break;
11637 	}
11638 }
11639 
11640 
11641 /**
11642  * lpfc_sli4_setup - SLI ring setup function
11643  * @phba: Pointer to HBA context object.
11644  *
11645  * lpfc_sli4_setup sets up the rings of the SLI interface with
11646  * the unsolicited event masks. This function is
11647  * called while the driver attaches to the HBA and before the
11648  * interrupts are enabled. So there is no need for locking.
11649  *
11650  * This function always returns 0.
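 *
 * Conceptually (a sketch only, not the exact driver loop), an unsolicited
 * frame is routed by matching its R_CTL/TYPE against the mask table set up
 * here; fh_r_ctl and fh_type stand for fields of the received frame header
 * and saveq for the queued iocb carrying the frame:
 *
 *	for (i = 0; i < pring->num_mask; i++)
 *		if (pring->prt[i].rctl == fh_r_ctl &&
 *		    pring->prt[i].type == fh_type)
 *			pring->prt[i].lpfc_sli_rcv_unsol_event(phba, pring,
 *							       saveq);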
11651  **/
11652 int
11653 lpfc_sli4_setup(struct lpfc_hba *phba)
11654 {
11655 	struct lpfc_sli_ring *pring;
11656 
11657 	pring = phba->sli4_hba.els_wq->pring;
11658 	pring->num_mask = LPFC_MAX_RING_MASK;
11659 	pring->prt[0].profile = 0;	/* Mask 0 */
11660 	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11661 	pring->prt[0].type = FC_TYPE_ELS;
11662 	pring->prt[0].lpfc_sli_rcv_unsol_event =
11663 	    lpfc_els_unsol_event;
11664 	pring->prt[1].profile = 0;	/* Mask 1 */
11665 	pring->prt[1].rctl = FC_RCTL_ELS_REP;
11666 	pring->prt[1].type = FC_TYPE_ELS;
11667 	pring->prt[1].lpfc_sli_rcv_unsol_event =
11668 	    lpfc_els_unsol_event;
11669 	pring->prt[2].profile = 0;	/* Mask 2 */
11670 	/* NameServer Inquiry */
11671 	pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11672 	/* NameServer */
11673 	pring->prt[2].type = FC_TYPE_CT;
11674 	pring->prt[2].lpfc_sli_rcv_unsol_event =
11675 	    lpfc_ct_unsol_event;
11676 	pring->prt[3].profile = 0;	/* Mask 3 */
11677 	/* NameServer response */
11678 	pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11679 	/* NameServer */
11680 	pring->prt[3].type = FC_TYPE_CT;
11681 	pring->prt[3].lpfc_sli_rcv_unsol_event =
11682 	    lpfc_ct_unsol_event;
11683 	return 0;
11684 }
11685 
11686 /**
11687  * lpfc_sli_setup - SLI ring setup function
11688  * @phba: Pointer to HBA context object.
11689  *
11690  * lpfc_sli_setup sets up the rings of the SLI interface with
11691  * the number of iocbs per ring and iotags. This function is
11692  * called while the driver attaches to the HBA and before the
11693  * interrupts are enabled. So there is no need for locking.
11694  *
11695  * This function always returns 0. SLI3 only.
11696  **/
11697 int
11698 lpfc_sli_setup(struct lpfc_hba *phba)
11699 {
11700 	int i, totiocbsize = 0;
11701 	struct lpfc_sli *psli = &phba->sli;
11702 	struct lpfc_sli_ring *pring;
11703 
11704 	psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
11705 	psli->sli_flag = 0;
11706 
11707 	psli->iocbq_lookup = NULL;
11708 	psli->iocbq_lookup_len = 0;
11709 	psli->last_iotag = 0;
11710 
11711 	for (i = 0; i < psli->num_rings; i++) {
11712 		pring = &psli->sli3_ring[i];
11713 		switch (i) {
11714 		case LPFC_FCP_RING:	/* ring 0 - FCP */
11715 			/* numCiocb and numRiocb are used in config_port */
11716 			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
11717 			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
11718 			pring->sli.sli3.numCiocb +=
11719 				SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11720 			pring->sli.sli3.numRiocb +=
11721 				SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11722 			pring->sli.sli3.numCiocb +=
11723 				SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11724 			pring->sli.sli3.numRiocb +=
11725 				SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11726 			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11727 				SLI3_IOCB_CMD_SIZE :
11728 				SLI2_IOCB_CMD_SIZE;
11729 			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11730 				SLI3_IOCB_RSP_SIZE :
11731 				SLI2_IOCB_RSP_SIZE;
11732 			pring->iotag_ctr = 0;
11733 			pring->iotag_max =
11734 				(phba->cfg_hba_queue_depth * 2);
11735 			pring->fast_iotag = pring->iotag_max;
11736 			pring->num_mask = 0;
11737 			break;
11738 		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
11739 			/* numCiocb and numRiocb are used in config_port */
11740 			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
11741 			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
11742 			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11743 				SLI3_IOCB_CMD_SIZE :
11744 				SLI2_IOCB_CMD_SIZE;
11745 			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11746 				SLI3_IOCB_RSP_SIZE :
11747 				SLI2_IOCB_RSP_SIZE;
11748 			pring->iotag_max = phba->cfg_hba_queue_depth;
11749 			pring->num_mask = 0;
11750 			break;
11751 		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
11752 			/* numCiocb and numRiocb are used in config_port */
11753 			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
11754 			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
11755 			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11756 				SLI3_IOCB_CMD_SIZE :
11757 				SLI2_IOCB_CMD_SIZE;
11758 			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11759 				SLI3_IOCB_RSP_SIZE :
11760 				SLI2_IOCB_RSP_SIZE;
11761 			pring->fast_iotag = 0;
11762 			pring->iotag_ctr = 0;
11763 			pring->iotag_max = 4096;
11764 			pring->lpfc_sli_rcv_async_status =
11765 				lpfc_sli_async_event_handler;
11766 			pring->num_mask = LPFC_MAX_RING_MASK;
11767 			pring->prt[0].profile = 0;	/* Mask 0 */
11768 			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11769 			pring->prt[0].type = FC_TYPE_ELS;
11770 			pring->prt[0].lpfc_sli_rcv_unsol_event =
11771 			    lpfc_els_unsol_event;
11772 			pring->prt[1].profile = 0;	/* Mask 1 */
11773 			pring->prt[1].rctl = FC_RCTL_ELS_REP;
11774 			pring->prt[1].type = FC_TYPE_ELS;
11775 			pring->prt[1].lpfc_sli_rcv_unsol_event =
11776 			    lpfc_els_unsol_event;
11777 			pring->prt[2].profile = 0;	/* Mask 2 */
11778 			/* NameServer Inquiry */
11779 			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11780 			/* NameServer */
11781 			pring->prt[2].type = FC_TYPE_CT;
11782 			pring->prt[2].lpfc_sli_rcv_unsol_event =
11783 			    lpfc_ct_unsol_event;
11784 			pring->prt[3].profile = 0;	/* Mask 3 */
11785 			/* NameServer response */
11786 			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11787 			/* NameServer */
11788 			pring->prt[3].type = FC_TYPE_CT;
11789 			pring->prt[3].lpfc_sli_rcv_unsol_event =
11790 			    lpfc_ct_unsol_event;
11791 			break;
11792 		}
11793 		totiocbsize += (pring->sli.sli3.numCiocb *
11794 				pring->sli.sli3.sizeCiocb) +
11795 			(pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
11796 	}
11797 	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
11798 		/* Too many cmd / rsp ring entries in SLI2 SLIM */
11799 		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
11800 		       "SLI2 SLIM Data: x%x x%lx\n",
11801 		       phba->brd_no, totiocbsize,
11802 		       (unsigned long) MAX_SLIM_IOCB_SIZE);
11803 	}
11804 	if (phba->cfg_multi_ring_support == 2)
11805 		lpfc_extra_ring_setup(phba);
11806 
11807 	return 0;
11808 }
11809 
11810 /**
11811  * lpfc_sli4_queue_init - Queue initialization function
11812  * @phba: Pointer to HBA context object.
11813  *
11814  * lpfc_sli4_queue_init sets up the mailbox queues and the iocb queues for
11815  * each ring. This function also initializes the ring indices of each ring.
11816  * This function is called during the initialization of the SLI
11817  * interface of an HBA.
11818  * This function is called with no lock held and returns
11819  * nothing.
11820  **/
11821 void
11822 lpfc_sli4_queue_init(struct lpfc_hba *phba)
11823 {
11824 	struct lpfc_sli *psli;
11825 	struct lpfc_sli_ring *pring;
11826 	int i;
11827 
11828 	psli = &phba->sli;
11829 	spin_lock_irq(&phba->hbalock);
11830 	INIT_LIST_HEAD(&psli->mboxq);
11831 	INIT_LIST_HEAD(&psli->mboxq_cmpl);
11832 	/* Initialize list headers for txq and txcmplq as double linked lists */
11833 	for (i = 0; i < phba->cfg_hdw_queue; i++) {
11834 		pring = phba->sli4_hba.hdwq[i].io_wq->pring;
11835 		pring->flag = 0;
11836 		pring->ringno = LPFC_FCP_RING;
11837 		pring->txcmplq_cnt = 0;
11838 		INIT_LIST_HEAD(&pring->txq);
11839 		INIT_LIST_HEAD(&pring->txcmplq);
11840 		INIT_LIST_HEAD(&pring->iocb_continueq);
11841 		spin_lock_init(&pring->ring_lock);
11842 	}
11843 	pring = phba->sli4_hba.els_wq->pring;
11844 	pring->flag = 0;
11845 	pring->ringno = LPFC_ELS_RING;
11846 	pring->txcmplq_cnt = 0;
11847 	INIT_LIST_HEAD(&pring->txq);
11848 	INIT_LIST_HEAD(&pring->txcmplq);
11849 	INIT_LIST_HEAD(&pring->iocb_continueq);
11850 	spin_lock_init(&pring->ring_lock);
11851 
11852 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11853 		pring = phba->sli4_hba.nvmels_wq->pring;
11854 		pring->flag = 0;
11855 		pring->ringno = LPFC_ELS_RING;
11856 		pring->txcmplq_cnt = 0;
11857 		INIT_LIST_HEAD(&pring->txq);
11858 		INIT_LIST_HEAD(&pring->txcmplq);
11859 		INIT_LIST_HEAD(&pring->iocb_continueq);
11860 		spin_lock_init(&pring->ring_lock);
11861 	}
11862 
11863 	spin_unlock_irq(&phba->hbalock);
11864 }
11865 
11866 /**
11867  * lpfc_sli_queue_init - Queue initialization function
11868  * @phba: Pointer to HBA context object.
11869  *
11870  * lpfc_sli_queue_init sets up the mailbox queues and the iocb queues for
11871  * each ring. This function also initializes the ring indices of each ring.
11872  * This function is called during the initialization of the SLI
11873  * interface of an HBA.
11874  * This function is called with no lock held and returns
11875  * nothing.
11876  **/
11877 void
11878 lpfc_sli_queue_init(struct lpfc_hba *phba)
11879 {
11880 	struct lpfc_sli *psli;
11881 	struct lpfc_sli_ring *pring;
11882 	int i;
11883 
11884 	psli = &phba->sli;
11885 	spin_lock_irq(&phba->hbalock);
11886 	INIT_LIST_HEAD(&psli->mboxq);
11887 	INIT_LIST_HEAD(&psli->mboxq_cmpl);
11888 	/* Initialize list headers for txq and txcmplq as double linked lists */
11889 	for (i = 0; i < psli->num_rings; i++) {
11890 		pring = &psli->sli3_ring[i];
11891 		pring->ringno = i;
11892 		pring->sli.sli3.next_cmdidx = 0;
11893 		pring->sli.sli3.local_getidx = 0;
11894 		pring->sli.sli3.cmdidx = 0;
11895 		INIT_LIST_HEAD(&pring->iocb_continueq);
11896 		INIT_LIST_HEAD(&pring->iocb_continue_saveq);
11897 		INIT_LIST_HEAD(&pring->postbufq);
11898 		pring->flag = 0;
11899 		INIT_LIST_HEAD(&pring->txq);
11900 		INIT_LIST_HEAD(&pring->txcmplq);
11901 		spin_lock_init(&pring->ring_lock);
11902 	}
11903 	spin_unlock_irq(&phba->hbalock);
11904 }
11905 
11906 /**
11907  * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
11908  * @phba: Pointer to HBA context object.
11909  *
11910  * This routine flushes the mailbox command subsystem. It will unconditionally
11911  * flush all the mailbox commands in the three possible stages in the mailbox
11912  * command sub-system: the pending mailbox command queue; the outstanding
11913  * mailbox command; and the completed mailbox command queue. It is the
11914  * caller's responsibility to make sure that the driver is in the proper
11915  * state to flush the mailbox command sub-system.
Namely, the posting of mailbox commands into the
11916  * pending mailbox command queue from the various clients must be stopped;
11917  * either the HBA is in a state that it will never work on the outstanding
11918  * mailbox command (such as in EEH or ERATT conditions) or the outstanding
11919  * mailbox command has been completed.
11920  **/
11921 static void
11922 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
11923 {
11924 	LIST_HEAD(completions);
11925 	struct lpfc_sli *psli = &phba->sli;
11926 	LPFC_MBOXQ_t *pmb;
11927 	unsigned long iflag;
11928 
11929 	/* Disable softirqs, including timers from obtaining phba->hbalock */
11930 	local_bh_disable();
11931 
11932 	/* Flush all the mailbox commands in the mbox system */
11933 	spin_lock_irqsave(&phba->hbalock, iflag);
11934 
11935 	/* The pending mailbox command queue */
11936 	list_splice_init(&phba->sli.mboxq, &completions);
11937 	/* The outstanding active mailbox command */
11938 	if (psli->mbox_active) {
11939 		list_add_tail(&psli->mbox_active->list, &completions);
11940 		psli->mbox_active = NULL;
11941 		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11942 	}
11943 	/* The completed mailbox command queue */
11944 	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
11945 	spin_unlock_irqrestore(&phba->hbalock, iflag);
11946 
11947 	/* Enable softirqs again, done with phba->hbalock */
11948 	local_bh_enable();
11949 
11950 	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
11951 	while (!list_empty(&completions)) {
11952 		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
11953 		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
11954 		if (pmb->mbox_cmpl)
11955 			pmb->mbox_cmpl(phba, pmb);
11956 	}
11957 }
11958 
11959 /**
11960  * lpfc_sli_host_down - Vport cleanup function
11961  * @vport: Pointer to virtual port object.
11962  *
11963  * lpfc_sli_host_down is called to clean up the resources
11964  * associated with a vport before destroying virtual
11965  * port data structures.
11966  * This function does the following operations:
11967  * - Free discovery resources associated with this virtual
11968  *   port.
11969  * - Free iocbs associated with this virtual port in
11970  *   the txq.
11971  * - Send abort for all iocb commands associated with this
11972  *   vport in txcmplq.
11973  *
11974  * This function is called with no lock held and always returns 1.
11975  **/
11976 int
11977 lpfc_sli_host_down(struct lpfc_vport *vport)
11978 {
11979 	LIST_HEAD(completions);
11980 	struct lpfc_hba *phba = vport->phba;
11981 	struct lpfc_sli *psli = &phba->sli;
11982 	struct lpfc_queue *qp = NULL;
11983 	struct lpfc_sli_ring *pring;
11984 	struct lpfc_iocbq *iocb, *next_iocb;
11985 	int i;
11986 	unsigned long flags = 0;
11987 	uint16_t prev_pring_flag;
11988 
11989 	lpfc_cleanup_discovery_resources(vport);
11990 
11991 	spin_lock_irqsave(&phba->hbalock, flags);
11992 
11993 	/*
11994 	 * Error everything on the txq since these iocbs
11995 	 * have not been given to the FW yet.
11996 * Also issue ABTS for everything on the txcmplq 11997 */ 11998 if (phba->sli_rev != LPFC_SLI_REV4) { 11999 for (i = 0; i < psli->num_rings; i++) { 12000 pring = &psli->sli3_ring[i]; 12001 prev_pring_flag = pring->flag; 12002 /* Only slow rings */ 12003 if (pring->ringno == LPFC_ELS_RING) { 12004 pring->flag |= LPFC_DEFERRED_RING_EVENT; 12005 /* Set the lpfc data pending flag */ 12006 set_bit(LPFC_DATA_READY, &phba->data_flags); 12007 } 12008 list_for_each_entry_safe(iocb, next_iocb, 12009 &pring->txq, list) { 12010 if (iocb->vport != vport) 12011 continue; 12012 list_move_tail(&iocb->list, &completions); 12013 } 12014 list_for_each_entry_safe(iocb, next_iocb, 12015 &pring->txcmplq, list) { 12016 if (iocb->vport != vport) 12017 continue; 12018 lpfc_sli_issue_abort_iotag(phba, pring, iocb, 12019 NULL); 12020 } 12021 pring->flag = prev_pring_flag; 12022 } 12023 } else { 12024 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 12025 pring = qp->pring; 12026 if (!pring) 12027 continue; 12028 if (pring == phba->sli4_hba.els_wq->pring) { 12029 pring->flag |= LPFC_DEFERRED_RING_EVENT; 12030 /* Set the lpfc data pending flag */ 12031 set_bit(LPFC_DATA_READY, &phba->data_flags); 12032 } 12033 prev_pring_flag = pring->flag; 12034 spin_lock(&pring->ring_lock); 12035 list_for_each_entry_safe(iocb, next_iocb, 12036 &pring->txq, list) { 12037 if (iocb->vport != vport) 12038 continue; 12039 list_move_tail(&iocb->list, &completions); 12040 } 12041 spin_unlock(&pring->ring_lock); 12042 list_for_each_entry_safe(iocb, next_iocb, 12043 &pring->txcmplq, list) { 12044 if (iocb->vport != vport) 12045 continue; 12046 lpfc_sli_issue_abort_iotag(phba, pring, iocb, 12047 NULL); 12048 } 12049 pring->flag = prev_pring_flag; 12050 } 12051 } 12052 spin_unlock_irqrestore(&phba->hbalock, flags); 12053 12054 /* Make sure HBA is alive */ 12055 lpfc_issue_hb_tmo(phba); 12056 12057 /* Cancel all the IOCBs from the completions list */ 12058 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 12059 IOERR_SLI_DOWN); 12060 return 1; 12061 } 12062 12063 /** 12064 * lpfc_sli_hba_down - Resource cleanup function for the HBA 12065 * @phba: Pointer to HBA context object. 12066 * 12067 * This function cleans up all iocb, buffers, mailbox commands 12068 * while shutting down the HBA. This function is called with no 12069 * lock held and always returns 1. 12070 * This function does the following to cleanup driver resources: 12071 * - Free discovery resources for each virtual port 12072 * - Cleanup any pending fabric iocbs 12073 * - Iterate through the iocb txq and free each entry 12074 * in the list. 12075 * - Free up any buffer posted to the HBA 12076 * - Free mailbox commands in the mailbox queue. 12077 **/ 12078 int 12079 lpfc_sli_hba_down(struct lpfc_hba *phba) 12080 { 12081 LIST_HEAD(completions); 12082 struct lpfc_sli *psli = &phba->sli; 12083 struct lpfc_queue *qp = NULL; 12084 struct lpfc_sli_ring *pring; 12085 struct lpfc_dmabuf *buf_ptr; 12086 unsigned long flags = 0; 12087 int i; 12088 12089 /* Shutdown the mailbox command sub-system */ 12090 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT); 12091 12092 lpfc_hba_down_prep(phba); 12093 12094 /* Disable softirqs, including timers from obtaining phba->hbalock */ 12095 local_bh_disable(); 12096 12097 lpfc_fabric_abort_hba(phba); 12098 12099 spin_lock_irqsave(&phba->hbalock, flags); 12100 12101 /* 12102 * Error everything on the txq since these iocbs 12103 * have not been given to the FW yet. 
12104 	 */
12105 	if (phba->sli_rev != LPFC_SLI_REV4) {
12106 		for (i = 0; i < psli->num_rings; i++) {
12107 			pring = &psli->sli3_ring[i];
12108 			/* Only slow rings */
12109 			if (pring->ringno == LPFC_ELS_RING) {
12110 				pring->flag |= LPFC_DEFERRED_RING_EVENT;
12111 				/* Set the lpfc data pending flag */
12112 				set_bit(LPFC_DATA_READY, &phba->data_flags);
12113 			}
12114 			list_splice_init(&pring->txq, &completions);
12115 		}
12116 	} else {
12117 		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
12118 			pring = qp->pring;
12119 			if (!pring)
12120 				continue;
12121 			spin_lock(&pring->ring_lock);
12122 			list_splice_init(&pring->txq, &completions);
12123 			spin_unlock(&pring->ring_lock);
12124 			if (pring == phba->sli4_hba.els_wq->pring) {
12125 				pring->flag |= LPFC_DEFERRED_RING_EVENT;
12126 				/* Set the lpfc data pending flag */
12127 				set_bit(LPFC_DATA_READY, &phba->data_flags);
12128 			}
12129 		}
12130 	}
12131 	spin_unlock_irqrestore(&phba->hbalock, flags);
12132 
12133 	/* Cancel all the IOCBs from the completions list */
12134 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
12135 			      IOERR_SLI_DOWN);
12136 
12137 	spin_lock_irqsave(&phba->hbalock, flags);
12138 	list_splice_init(&phba->elsbuf, &completions);
12139 	phba->elsbuf_cnt = 0;
12140 	phba->elsbuf_prev_cnt = 0;
12141 	spin_unlock_irqrestore(&phba->hbalock, flags);
12142 
12143 	while (!list_empty(&completions)) {
12144 		list_remove_head(&completions, buf_ptr,
12145 				 struct lpfc_dmabuf, list);
12146 		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
12147 		kfree(buf_ptr);
12148 	}
12149 
12150 	/* Enable softirqs again, done with phba->hbalock */
12151 	local_bh_enable();
12152 
12153 	/* Return any active mbox cmds */
12154 	timer_delete_sync(&psli->mbox_tmo);
12155 
12156 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
12157 	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
12158 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
12159 
12160 	return 1;
12161 }
12162 
12163 /**
12164  * lpfc_sli_pcimem_bcopy - SLI memory copy function
12165  * @srcp: Source memory pointer.
12166  * @destp: Destination memory pointer.
12167  * @cnt: Number of bytes to copy (must be a multiple of the 32-bit word size).
12168  *
12169  * This function is used for copying data between driver memory
12170  * and the SLI memory. This function also changes the endianness
12171  * of each word if native endianness is different from SLI
12172  * endianness. This function can be called with or without
12173  * lock.
12174  **/
12175 void
12176 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
12177 {
12178 	uint32_t *src = srcp;
12179 	uint32_t *dest = destp;
12180 	uint32_t ldata;
12181 	int i;
12182 
12183 	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
12184 		ldata = *src;
12185 		ldata = le32_to_cpu(ldata);
12186 		*dest = ldata;
12187 		src++;
12188 		dest++;
12189 	}
12190 }
12191 
12192 
12193 /**
12194  * lpfc_sli_bemem_bcopy - SLI memory copy function
12195  * @srcp: Source memory pointer.
12196  * @destp: Destination memory pointer.
12197  * @cnt: Number of bytes to copy (must be a multiple of the 32-bit word size).
12198  *
12199  * This function is used for copying data from a data structure
12200  * with big endian representation to local endianness.
12201  * This function can be called with or without lock.
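 *
 * A worked sketch (illustrative only): @cnt is a byte count, so copying
 * two big-endian words swaps each 32-bit word into host order:
 *
 *	__be32 src[2] = { cpu_to_be32(0x11223344), cpu_to_be32(0x55667788) };
 *	u32 dst[2];
 *
 *	lpfc_sli_bemem_bcopy(src, dst, sizeof(src));
 *
 * after which dst[0] == 0x11223344 and dst[1] == 0x55667788 on any host.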
12202  **/
12203 void
12204 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
12205 {
12206 	uint32_t *src = srcp;
12207 	uint32_t *dest = destp;
12208 	uint32_t ldata;
12209 	int i;
12210 
12211 	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
12212 		ldata = *src;
12213 		ldata = be32_to_cpu(ldata);
12214 		*dest = ldata;
12215 		src++;
12216 		dest++;
12217 	}
12218 }
12219 
12220 /**
12221  * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
12222  * @phba: Pointer to HBA context object.
12223  * @pring: Pointer to driver SLI ring object.
12224  * @mp: Pointer to driver buffer object.
12225  *
12226  * This function is called with no lock held.
12227  * It always returns zero after adding the buffer to the postbufq
12228  * buffer list.
12229  **/
12230 int
12231 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12232 			 struct lpfc_dmabuf *mp)
12233 {
12234 	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
12235 	   later */
12236 	spin_lock_irq(&phba->hbalock);
12237 	list_add_tail(&mp->list, &pring->postbufq);
12238 	pring->postbufq_cnt++;
12239 	spin_unlock_irq(&phba->hbalock);
12240 	return 0;
12241 }
12242 
12243 /**
12244  * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
12245  * @phba: Pointer to HBA context object.
12246  *
12247  * When HBQ is enabled, buffers are searched based on tags. This function
12248  * allocates a tag for a buffer posted using the CMD_QUE_XRI64_CX iocb. The
12249  * tag is bit-wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
12250  * does not conflict with tags of buffers posted for unsolicited events.
12251  * The function returns the allocated tag. The function is called with
12252  * no locks held.
12253  **/
12254 uint32_t
12255 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
12256 {
12257 	spin_lock_irq(&phba->hbalock);
12258 	phba->buffer_tag_count++;
12259 	/*
12260 	 * Always set the QUE_BUFTAG_BIT to distinguish this tag
12261 	 * from a tag assigned by HBQ.
12262 	 */
12263 	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
12264 	spin_unlock_irq(&phba->hbalock);
12265 	return phba->buffer_tag_count;
12266 }
12267 
12268 /**
12269  * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
12270  * @phba: Pointer to HBA context object.
12271  * @pring: Pointer to driver SLI ring object.
12272  * @tag: Buffer tag.
12273  *
12274  * Buffers posted using the CMD_QUE_XRI64_CX iocb are in the pring->postbufq
12275  * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
12276  * iocb is posted to the response ring with the tag of the buffer.
12277  * This function searches the pring->postbufq list using the tag
12278  * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
12279  * iocb. If the buffer is found, the lpfc_dmabuf object of the
12280  * buffer is returned to the caller; else NULL is returned.
12281  * This function is called with no lock held.
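 *
 * A minimal pairing sketch (illustrative only; "mp" is a hypothetical
 * lpfc_dmabuf being posted and "tag_from_iocb" stands for the tag carried
 * back by the CMD_IOCB_RET_XRI64_CX iocb):
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag_from_iocb);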
12282 **/ 12283 struct lpfc_dmabuf * 12284 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 12285 uint32_t tag) 12286 { 12287 struct lpfc_dmabuf *mp, *next_mp; 12288 struct list_head *slp = &pring->postbufq; 12289 12290 /* Search postbufq, from the beginning, looking for a match on tag */ 12291 spin_lock_irq(&phba->hbalock); 12292 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 12293 if (mp->buffer_tag == tag) { 12294 list_del_init(&mp->list); 12295 pring->postbufq_cnt--; 12296 spin_unlock_irq(&phba->hbalock); 12297 return mp; 12298 } 12299 } 12300 12301 spin_unlock_irq(&phba->hbalock); 12302 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12303 "0402 Cannot find virtual addr for buffer tag on " 12304 "ring %d Data x%lx x%px x%px x%x\n", 12305 pring->ringno, (unsigned long) tag, 12306 slp->next, slp->prev, pring->postbufq_cnt); 12307 12308 return NULL; 12309 } 12310 12311 /** 12312 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events 12313 * @phba: Pointer to HBA context object. 12314 * @pring: Pointer to driver SLI ring object. 12315 * @phys: DMA address of the buffer. 12316 * 12317 * This function searches the buffer list using the dma_address 12318 * of unsolicited event to find the driver's lpfc_dmabuf object 12319 * corresponding to the dma_address. The function returns the 12320 * lpfc_dmabuf object if a buffer is found else it returns NULL. 12321 * This function is called by the ct and els unsolicited event 12322 * handlers to get the buffer associated with the unsolicited 12323 * event. 12324 * 12325 * This function is called with no lock held. 12326 **/ 12327 struct lpfc_dmabuf * 12328 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 12329 dma_addr_t phys) 12330 { 12331 struct lpfc_dmabuf *mp, *next_mp; 12332 struct list_head *slp = &pring->postbufq; 12333 12334 /* Search postbufq, from the beginning, looking for a match on phys */ 12335 spin_lock_irq(&phba->hbalock); 12336 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 12337 if (mp->phys == phys) { 12338 list_del_init(&mp->list); 12339 pring->postbufq_cnt--; 12340 spin_unlock_irq(&phba->hbalock); 12341 return mp; 12342 } 12343 } 12344 12345 spin_unlock_irq(&phba->hbalock); 12346 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12347 "0410 Cannot find virtual addr for mapped buf on " 12348 "ring %d Data x%llx x%px x%px x%x\n", 12349 pring->ringno, (unsigned long long)phys, 12350 slp->next, slp->prev, pring->postbufq_cnt); 12351 return NULL; 12352 } 12353 12354 /** 12355 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs 12356 * @phba: Pointer to HBA context object. 12357 * @cmdiocb: Pointer to driver command iocb object. 12358 * @rspiocb: Pointer to driver response iocb object. 12359 * 12360 * This function is the completion handler for the abort iocbs for 12361 * ELS commands. This function is called from the ELS ring event 12362 * handler with no lock held. This function frees memory resources 12363 * associated with the abort iocb. 12364 **/ 12365 static void 12366 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 12367 struct lpfc_iocbq *rspiocb) 12368 { 12369 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 12370 u32 ulp_word4 = get_job_word4(phba, rspiocb); 12371 u8 cmnd = get_job_cmnd(phba, cmdiocb); 12372 12373 if (ulp_status) { 12374 /* 12375 * Assume that the port already completed and returned, or 12376 * will return the iocb. Just Log the message. 
12377 */ 12378 if (phba->sli_rev < LPFC_SLI_REV4) { 12379 if (cmnd == CMD_ABORT_XRI_CX && 12380 ulp_status == IOSTAT_LOCAL_REJECT && 12381 ulp_word4 == IOERR_ABORT_REQUESTED) { 12382 goto release_iocb; 12383 } 12384 } 12385 } 12386 12387 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI, 12388 "0327 Abort els iocb complete x%px with io cmd xri %x " 12389 "abort tag x%x abort status %x abort code %x\n", 12390 cmdiocb, get_job_abtsiotag(phba, cmdiocb), 12391 (phba->sli_rev == LPFC_SLI_REV4) ? 12392 get_wqe_reqtag(cmdiocb) : 12393 cmdiocb->iocb.ulpIoTag, 12394 ulp_status, ulp_word4); 12395 release_iocb: 12396 lpfc_sli_release_iocbq(phba, cmdiocb); 12397 return; 12398 } 12399 12400 /** 12401 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command 12402 * @phba: Pointer to HBA context object. 12403 * @cmdiocb: Pointer to driver command iocb object. 12404 * @rspiocb: Pointer to driver response iocb object. 12405 * 12406 * The function is called from SLI ring event handler with no 12407 * lock held. This function is the completion handler for ELS commands 12408 * which are aborted. The function frees memory resources used for 12409 * the aborted ELS commands. 12410 **/ 12411 void 12412 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 12413 struct lpfc_iocbq *rspiocb) 12414 { 12415 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 12416 IOCB_t *irsp; 12417 LPFC_MBOXQ_t *mbox; 12418 u32 ulp_command, ulp_status, ulp_word4, iotag; 12419 12420 ulp_command = get_job_cmnd(phba, cmdiocb); 12421 ulp_status = get_job_ulpstatus(phba, rspiocb); 12422 ulp_word4 = get_job_word4(phba, rspiocb); 12423 12424 if (phba->sli_rev == LPFC_SLI_REV4) { 12425 iotag = get_wqe_reqtag(cmdiocb); 12426 } else { 12427 irsp = &rspiocb->iocb; 12428 iotag = irsp->ulpIoTag; 12429 12430 /* It is possible a PLOGI_RJT for NPIV ports to get aborted. 12431 * The MBX_REG_LOGIN64 mbox command is freed back to the 12432 * mbox_mem_pool here. 12433 */ 12434 if (cmdiocb->context_un.mbox) { 12435 mbox = cmdiocb->context_un.mbox; 12436 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 12437 cmdiocb->context_un.mbox = NULL; 12438 } 12439 } 12440 12441 /* ELS cmd tag <ulpIoTag> completes */ 12442 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 12443 "0139 Ignoring ELS cmd code x%x ref cnt x%x Data: " 12444 "x%x x%x x%x x%px\n", 12445 ulp_command, kref_read(&cmdiocb->ndlp->kref), 12446 ulp_status, ulp_word4, iotag, cmdiocb->ndlp); 12447 /* 12448 * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp 12449 * if exchange is busy. 12450 */ 12451 if (ulp_command == CMD_GEN_REQUEST64_CR) 12452 lpfc_ct_free_iocb(phba, cmdiocb); 12453 else 12454 lpfc_els_free_iocb(phba, cmdiocb); 12455 12456 lpfc_nlp_put(ndlp); 12457 } 12458 12459 /** 12460 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb 12461 * @phba: Pointer to HBA context object. 12462 * @pring: Pointer to driver SLI ring object. 12463 * @cmdiocb: Pointer to driver command iocb object. 12464 * @cmpl: completion function. 12465 * 12466 * This function issues an abort iocb for the provided command iocb. In case 12467 * of unloading, the abort iocb will not be issued to commands on the ELS 12468 * ring. Instead, the callback function shall be changed to those commands 12469 * so that nothing happens when them finishes. This function is called with 12470 * hbalock held andno ring_lock held (SLI4). The function returns IOCB_SUCCESS 12471 * when the command iocb is an abort request. 
12472 * 12473 **/ 12474 int 12475 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 12476 struct lpfc_iocbq *cmdiocb, void *cmpl) 12477 { 12478 struct lpfc_vport *vport = cmdiocb->vport; 12479 struct lpfc_iocbq *abtsiocbp; 12480 int retval = IOCB_ERROR; 12481 unsigned long iflags; 12482 struct lpfc_nodelist *ndlp = NULL; 12483 u32 ulp_command = get_job_cmnd(phba, cmdiocb); 12484 u16 ulp_context, iotag; 12485 bool ia; 12486 12487 /* 12488 * There are certain command types we don't want to abort. And we 12489 * don't want to abort commands that are already in the process of 12490 * being aborted. 12491 */ 12492 if (ulp_command == CMD_ABORT_XRI_WQE || 12493 ulp_command == CMD_ABORT_XRI_CN || 12494 ulp_command == CMD_CLOSE_XRI_CN || 12495 cmdiocb->cmd_flag & LPFC_DRIVER_ABORTED) 12496 return IOCB_ABORTING; 12497 12498 if (!pring) { 12499 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) 12500 cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl; 12501 else 12502 cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl; 12503 return retval; 12504 } 12505 12506 /* 12507 * Always abort the outstanding WQE and set the IA bit correctly 12508 * for the context. This is necessary for correctly removing 12509 * outstanding ndlp reference counts when the CQE completes with 12510 * the XB bit set. 12511 */ 12512 abtsiocbp = __lpfc_sli_get_iocbq(phba); 12513 if (abtsiocbp == NULL) 12514 return IOCB_NORESOURCE; 12515 12516 /* This signals the response to set the correct status 12517 * before calling the completion handler 12518 */ 12519 cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED; 12520 12521 if (phba->sli_rev == LPFC_SLI_REV4) { 12522 ulp_context = cmdiocb->sli4_xritag; 12523 iotag = abtsiocbp->iotag; 12524 } else { 12525 iotag = cmdiocb->iocb.ulpIoTag; 12526 if (pring->ringno == LPFC_ELS_RING) { 12527 ndlp = cmdiocb->ndlp; 12528 ulp_context = ndlp->nlp_rpi; 12529 } else { 12530 ulp_context = cmdiocb->iocb.ulpContext; 12531 } 12532 } 12533 12534 /* Just close the exchange under certain conditions. 
*/ 12535 if (test_bit(FC_UNLOADING, &vport->load_flag) || 12536 phba->link_state < LPFC_LINK_UP || 12537 (phba->sli_rev == LPFC_SLI_REV4 && 12538 phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN) || 12539 (phba->link_flag & LS_EXTERNAL_LOOPBACK)) 12540 ia = true; 12541 else 12542 ia = false; 12543 12544 lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag, 12545 cmdiocb->iocb.ulpClass, 12546 LPFC_WQE_CQ_ID_DEFAULT, ia, false); 12547 12548 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 12549 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx; 12550 if (cmdiocb->cmd_flag & LPFC_IO_FCP) 12551 abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX); 12552 12553 if (cmdiocb->cmd_flag & LPFC_IO_FOF) 12554 abtsiocbp->cmd_flag |= LPFC_IO_FOF; 12555 12556 if (cmpl) 12557 abtsiocbp->cmd_cmpl = cmpl; 12558 else 12559 abtsiocbp->cmd_cmpl = lpfc_sli_abort_els_cmpl; 12560 abtsiocbp->vport = vport; 12561 12562 if (phba->sli_rev == LPFC_SLI_REV4) { 12563 pring = lpfc_sli4_calc_ring(phba, abtsiocbp); 12564 if (unlikely(pring == NULL)) 12565 goto abort_iotag_exit; 12566 /* Note: both hbalock and ring_lock need to be held here */ 12567 spin_lock_irqsave(&pring->ring_lock, iflags); 12568 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, 12569 abtsiocbp, 0); 12570 spin_unlock_irqrestore(&pring->ring_lock, iflags); 12571 } else { 12572 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, 12573 abtsiocbp, 0); 12574 } 12575 12576 abort_iotag_exit: 12577 12578 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 12579 "0339 Abort IO XRI x%x, Original iotag x%x, " 12580 "abort tag x%x Cmdjob : x%px Abortjob : x%px " 12581 "retval x%x : IA %d cmd_cmpl %ps\n", 12582 ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ? 12583 cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp, 12584 retval, ia, abtsiocbp->cmd_cmpl); 12585 if (retval) { 12586 cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED; 12587 __lpfc_sli_release_iocbq(phba, abtsiocbp); 12588 } 12589 12590 /* 12591 * The caller of this routine should check for IOCB_ERROR 12592 * and handle it properly. This routine no longer removes the 12593 * iocb off the txcmplq nor calls compl in case of IOCB_ERROR. 12594 */ 12595 return retval; 12596 } 12597 12598 /** 12599 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 12600 * @phba: pointer to lpfc HBA data structure. 12601 * 12602 * This routine will abort all pending and outstanding iocbs to an HBA. 12603 **/ 12604 void 12605 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) 12606 { 12607 struct lpfc_sli *psli = &phba->sli; 12608 struct lpfc_sli_ring *pring; 12609 struct lpfc_queue *qp = NULL; 12610 int i; 12611 12612 if (phba->sli_rev != LPFC_SLI_REV4) { 12613 for (i = 0; i < psli->num_rings; i++) { 12614 pring = &psli->sli3_ring[i]; 12615 lpfc_sli_abort_iocb_ring(phba, pring); 12616 } 12617 return; 12618 } 12619 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 12620 pring = qp->pring; 12621 if (!pring) 12622 continue; 12623 lpfc_sli_abort_iocb_ring(phba, pring); 12624 } 12625 } 12626 12627 /** 12628 * lpfc_sli_validate_fcp_iocb_for_abort - filter iocbs appropriate for FCP aborts 12629 * @iocbq: Pointer to iocb object. 12630 * @vport: Pointer to driver virtual port object. 12631 * 12632 * This function acts as an iocb filter for functions which abort FCP iocbs. 12633
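 *
 * Typical use is as the first filter in a scan of the iotag lookup table,
 * e.g. (sketch mirroring the callers later in this file):
 *
 *	for (i = 1; i <= phba->sli.last_iotag; i++) {
 *		iocbq = phba->sli.iocbq_lookup[i];
 *		if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
 *			continue;
 *		(iocbq passed the abort filter)
 *	}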
12633 * 12634 * Return values 12635 * -ENODEV, if a null iocb or vport ptr is encountered 12636 * -EINVAL, if the iocb is not an FCP I/O, not on the TX cmpl queue, premarked as 12637 * driver already started the abort process, or is an abort iocb itself 12638 * 0, passes criteria for aborting the FCP I/O iocb 12639 **/ 12640 static int 12641 lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq, 12642 struct lpfc_vport *vport) 12643 { 12644 u8 ulp_command; 12645 12646 /* No null ptr vports */ 12647 if (!iocbq || iocbq->vport != vport) 12648 return -ENODEV; 12649 12650 /* iocb must be for FCP IO, already exists on the TX cmpl queue, 12651 * can't be premarked as driver aborted, nor be an ABORT iocb itself 12652 */ 12653 ulp_command = get_job_cmnd(vport->phba, iocbq); 12654 if (!(iocbq->cmd_flag & LPFC_IO_FCP) || 12655 !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ) || 12656 (iocbq->cmd_flag & LPFC_DRIVER_ABORTED) || 12657 (ulp_command == CMD_ABORT_XRI_CN || 12658 ulp_command == CMD_CLOSE_XRI_CN || 12659 ulp_command == CMD_ABORT_XRI_WQE)) 12660 return -EINVAL; 12661 12662 return 0; 12663 } 12664 12665 /** 12666 * lpfc_sli_validate_fcp_iocb - validate commands associated with a SCSI target 12667 * @iocbq: Pointer to driver iocb object. 12668 * @vport: Pointer to driver virtual port object. 12669 * @tgt_id: SCSI ID of the target. 12670 * @lun_id: LUN ID of the scsi device. 12671 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST 12672 * 12673 * This function acts as an iocb filter for validating a lun/SCSI target/SCSI 12674 * host. 12675 * 12676 * It will return 12677 * 0 if the filtering criteria is met for the given iocb and will return 12678 * 1 if the filtering criteria is not met. 12679 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the 12680 * given iocb is for the SCSI device specified by vport, tgt_id and 12681 * lun_id parameter. 12682 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the 12683 * given iocb is for the SCSI target specified by vport and tgt_id 12684 * parameters. 12685 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the 12686 * given iocb is for the SCSI host associated with the given vport. 12687 * This function is called with no locks held. 12688 **/ 12689 static int 12690 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, 12691 uint16_t tgt_id, uint64_t lun_id, 12692 lpfc_ctx_cmd ctx_cmd) 12693 { 12694 struct lpfc_io_buf *lpfc_cmd; 12695 int rc = 1; 12696 12697 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); 12698 12699 if (lpfc_cmd->pCmd == NULL) 12700 return rc; 12701 12702 switch (ctx_cmd) { 12703 case LPFC_CTX_LUN: 12704 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && 12705 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && 12706 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) 12707 rc = 0; 12708 break; 12709 case LPFC_CTX_TGT: 12710 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && 12711 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) 12712 rc = 0; 12713 break; 12714 case LPFC_CTX_HOST: 12715 rc = 0; 12716 break; 12717 default: 12718 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 12719 __func__, ctx_cmd); 12720 break; 12721 } 12722 12723 return rc; 12724 } 12725 12726 /** 12727 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending 12728 * @vport: Pointer to virtual port. 12729 * @tgt_id: SCSI ID of the target. 12730 * @lun_id: LUN ID of the scsi device. 12731 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 
12732 * 12733 * This function returns number of FCP commands pending for the vport. 12734 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP 12735 * commands pending on the vport associated with SCSI device specified 12736 * by tgt_id and lun_id parameters. 12737 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP 12738 * commands pending on the vport associated with SCSI target specified 12739 * by tgt_id parameter. 12740 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP 12741 * commands pending on the vport. 12742 * This function returns the number of iocbs which satisfy the filter. 12743 * This function is called without any lock held. 12744 **/ 12745 int 12746 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 12747 lpfc_ctx_cmd ctx_cmd) 12748 { 12749 struct lpfc_hba *phba = vport->phba; 12750 struct lpfc_iocbq *iocbq; 12751 int sum, i; 12752 unsigned long iflags; 12753 u8 ulp_command; 12754 12755 spin_lock_irqsave(&phba->hbalock, iflags); 12756 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 12757 iocbq = phba->sli.iocbq_lookup[i]; 12758 12759 if (!iocbq || iocbq->vport != vport) 12760 continue; 12761 if (!(iocbq->cmd_flag & LPFC_IO_FCP) || 12762 !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) 12763 continue; 12764 12765 /* Include counting outstanding aborts */ 12766 ulp_command = get_job_cmnd(phba, iocbq); 12767 if (ulp_command == CMD_ABORT_XRI_CN || 12768 ulp_command == CMD_CLOSE_XRI_CN || 12769 ulp_command == CMD_ABORT_XRI_WQE) { 12770 sum++; 12771 continue; 12772 } 12773 12774 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 12775 ctx_cmd) == 0) 12776 sum++; 12777 } 12778 spin_unlock_irqrestore(&phba->hbalock, iflags); 12779 12780 return sum; 12781 } 12782 12783 /** 12784 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs 12785 * @phba: Pointer to HBA context object 12786 * @cmdiocb: Pointer to command iocb object. 12787 * @rspiocb: Pointer to response iocb object. 12788 * 12789 * This function is called when an aborted FCP iocb completes. This 12790 * function is called by the ring event handler with no lock held. 12791 * This function frees the iocb. 12792 **/ 12793 void 12794 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 12795 struct lpfc_iocbq *rspiocb) 12796 { 12797 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 12798 "3096 ABORT_XRI_CX completing on rpi x%x " 12799 "original iotag x%x, abort cmd iotag x%x " 12800 "status 0x%x, reason 0x%x\n", 12801 (phba->sli_rev == LPFC_SLI_REV4) ? 12802 cmdiocb->sli4_xritag : 12803 cmdiocb->iocb.un.acxri.abortContextTag, 12804 get_job_abtsiotag(phba, cmdiocb), 12805 cmdiocb->iotag, get_job_ulpstatus(phba, rspiocb), 12806 get_job_word4(phba, rspiocb)); 12807 lpfc_sli_release_iocbq(phba, cmdiocb); 12808 return; 12809 } 12810 12811 /** 12812 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN 12813 * @vport: Pointer to virtual port. 12814 * @tgt_id: SCSI ID of the target. 12815 * @lun_id: LUN ID of the scsi device. 12816 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 12817 * 12818 * This function sends an abort command for every SCSI command 12819 * associated with the given virtual port pending on the ring 12820 * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then 12821 * lpfc_sli_validate_fcp_iocb function. The ordering for validation before 12822 * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort 12823 * followed by lpfc_sli_validate_fcp_iocb. 
12824 * 12825 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the 12826 * FCP iocbs associated with lun specified by tgt_id and lun_id 12827 * parameters 12828 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the 12829 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 12830 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all 12831 * FCP iocbs associated with virtual port. 12832 * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING], for SLI4 12833 * lpfc_sli4_calc_ring is used. 12834 * This function returns number of iocbs it failed to abort. 12835 * This function is called with no locks held. 12836 **/ 12837 int 12838 lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id, 12839 lpfc_ctx_cmd abort_cmd) 12840 { 12841 struct lpfc_hba *phba = vport->phba; 12842 struct lpfc_sli_ring *pring = NULL; 12843 struct lpfc_iocbq *iocbq; 12844 int errcnt = 0, ret_val = 0; 12845 unsigned long iflags; 12846 int i; 12847 12848 /* all I/Os are in process of being flushed */ 12849 if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) 12850 return errcnt; 12851 12852 for (i = 1; i <= phba->sli.last_iotag; i++) { 12853 iocbq = phba->sli.iocbq_lookup[i]; 12854 12855 if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport)) 12856 continue; 12857 12858 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 12859 abort_cmd) != 0) 12860 continue; 12861 12862 spin_lock_irqsave(&phba->hbalock, iflags); 12863 if (phba->sli_rev == LPFC_SLI_REV3) { 12864 pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; 12865 } else if (phba->sli_rev == LPFC_SLI_REV4) { 12866 pring = lpfc_sli4_calc_ring(phba, iocbq); 12867 } 12868 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq, 12869 lpfc_sli_abort_fcp_cmpl); 12870 spin_unlock_irqrestore(&phba->hbalock, iflags); 12871 if (ret_val != IOCB_SUCCESS) 12872 errcnt++; 12873 } 12874 12875 return errcnt; 12876 } 12877 12878 /** 12879 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN 12880 * @vport: Pointer to virtual port. 12881 * @pring: Pointer to driver SLI ring object. 12882 * @tgt_id: SCSI ID of the target. 12883 * @lun_id: LUN ID of the scsi device. 12884 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 12885 * 12886 * This function sends an abort command for every SCSI command 12887 * associated with the given virtual port pending on the ring 12888 * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then 12889 * lpfc_sli_validate_fcp_iocb function. The ordering for validation before 12890 * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort 12891 * followed by lpfc_sli_validate_fcp_iocb. 12892 * 12893 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the 12894 * FCP iocbs associated with lun specified by tgt_id and lun_id 12895 * parameters 12896 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the 12897 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 12898 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all 12899 * FCP iocbs associated with virtual port. 12900 * This function returns number of iocbs it aborted . 12901 * This function is called with no locks held right after a taskmgmt 12902 * command is sent. 
12903 **/ 12904 int 12905 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 12906 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd) 12907 { 12908 struct lpfc_hba *phba = vport->phba; 12909 struct lpfc_io_buf *lpfc_cmd; 12910 struct lpfc_iocbq *abtsiocbq; 12911 struct lpfc_nodelist *ndlp = NULL; 12912 struct lpfc_iocbq *iocbq; 12913 int sum, i, ret_val; 12914 unsigned long iflags; 12915 struct lpfc_sli_ring *pring_s4 = NULL; 12916 u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT; 12917 bool ia; 12918 12919 /* all I/Os are in process of being flushed */ 12920 if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) 12921 return 0; 12922 12923 sum = 0; 12924 12925 spin_lock_irqsave(&phba->hbalock, iflags); 12926 for (i = 1; i <= phba->sli.last_iotag; i++) { 12927 iocbq = phba->sli.iocbq_lookup[i]; 12928 12929 if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport)) 12930 continue; 12931 12932 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 12933 cmd) != 0) 12934 continue; 12935 12936 /* Guard against IO completion being called at same time */ 12937 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); 12938 spin_lock(&lpfc_cmd->buf_lock); 12939 12940 if (!lpfc_cmd->pCmd) { 12941 spin_unlock(&lpfc_cmd->buf_lock); 12942 continue; 12943 } 12944 12945 if (phba->sli_rev == LPFC_SLI_REV4) { 12946 pring_s4 = 12947 phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring; 12948 if (!pring_s4) { 12949 spin_unlock(&lpfc_cmd->buf_lock); 12950 continue; 12951 } 12952 /* Note: both hbalock and ring_lock must be set here */ 12953 spin_lock(&pring_s4->ring_lock); 12954 } 12955 12956 /* 12957 * If the iocbq is already being aborted, don't take a second 12958 * action, but do count it. 12959 */ 12960 if ((iocbq->cmd_flag & LPFC_DRIVER_ABORTED) || 12961 !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) { 12962 if (phba->sli_rev == LPFC_SLI_REV4) 12963 spin_unlock(&pring_s4->ring_lock); 12964 spin_unlock(&lpfc_cmd->buf_lock); 12965 continue; 12966 } 12967 12968 /* issue ABTS for this IOCB based on iotag */ 12969 abtsiocbq = __lpfc_sli_get_iocbq(phba); 12970 if (!abtsiocbq) { 12971 if (phba->sli_rev == LPFC_SLI_REV4) 12972 spin_unlock(&pring_s4->ring_lock); 12973 spin_unlock(&lpfc_cmd->buf_lock); 12974 continue; 12975 } 12976 12977 if (phba->sli_rev == LPFC_SLI_REV4) { 12978 iotag = abtsiocbq->iotag; 12979 ulp_context = iocbq->sli4_xritag; 12980 cqid = lpfc_cmd->hdwq->io_cq_map; 12981 } else { 12982 iotag = iocbq->iocb.ulpIoTag; 12983 if (pring->ringno == LPFC_ELS_RING) { 12984 ndlp = iocbq->ndlp; 12985 ulp_context = ndlp->nlp_rpi; 12986 } else { 12987 ulp_context = iocbq->iocb.ulpContext; 12988 } 12989 } 12990 12991 ndlp = lpfc_cmd->rdata->pnode; 12992 12993 if (lpfc_is_link_up(phba) && 12994 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE) && 12995 !(phba->link_flag & LS_EXTERNAL_LOOPBACK)) 12996 ia = false; 12997 else 12998 ia = true; 12999 13000 lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag, 13001 iocbq->iocb.ulpClass, cqid, 13002 ia, false); 13003 13004 abtsiocbq->vport = vport; 13005 13006 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 13007 abtsiocbq->hba_wqidx = iocbq->hba_wqidx; 13008 if (iocbq->cmd_flag & LPFC_IO_FCP) 13009 abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX; 13010 if (iocbq->cmd_flag & LPFC_IO_FOF) 13011 abtsiocbq->cmd_flag |= LPFC_IO_FOF; 13012 13013 /* Setup callback routine and issue the command. 
*/ 13014 abtsiocbq->cmd_cmpl = lpfc_sli_abort_fcp_cmpl; 13015 13016 /* 13017 * Indicate the IO is being aborted by the driver and set 13018 * the caller's flag into the aborted IO. 13019 */ 13020 iocbq->cmd_flag |= LPFC_DRIVER_ABORTED; 13021 13022 if (phba->sli_rev == LPFC_SLI_REV4) { 13023 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, 13024 abtsiocbq, 0); 13025 spin_unlock(&pring_s4->ring_lock); 13026 } else { 13027 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno, 13028 abtsiocbq, 0); 13029 } 13030 13031 spin_unlock(&lpfc_cmd->buf_lock); 13032 13033 if (ret_val == IOCB_ERROR) 13034 __lpfc_sli_release_iocbq(phba, abtsiocbq); 13035 else 13036 sum++; 13037 } 13038 spin_unlock_irqrestore(&phba->hbalock, iflags); 13039 return sum; 13040 } 13041 13042 /** 13043 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler 13044 * @phba: Pointer to HBA context object. 13045 * @cmdiocbq: Pointer to command iocb. 13046 * @rspiocbq: Pointer to response iocb. 13047 * 13048 * This function is the completion handler for iocbs issued using 13049 * the lpfc_sli_issue_iocb_wait function. This function is called by the 13050 * ring event handler function without any lock held. This function 13051 * can be called from both worker thread context and interrupt 13052 * context. This function also can be called from other threads which 13053 * clean up the SLI layer objects. 13054 * This function copies the contents of the response iocb to the 13055 * response iocb memory object provided by the caller of 13056 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 13057 * sleeps for the iocb completion. 13058 **/ 13059 static void 13060 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, 13061 struct lpfc_iocbq *cmdiocbq, 13062 struct lpfc_iocbq *rspiocbq) 13063 { 13064 wait_queue_head_t *pdone_q; 13065 unsigned long iflags; 13066 struct lpfc_io_buf *lpfc_cmd; 13067 size_t offset = offsetof(struct lpfc_iocbq, wqe); 13068 13069 spin_lock_irqsave(&phba->hbalock, iflags); 13070 if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) { 13071 13072 /* 13073 * A time out has occurred for the iocb. If a time out 13074 * completion handler has been supplied, call it. Otherwise, 13075 * just free the iocbq. 13076 */ 13077 13078 spin_unlock_irqrestore(&phba->hbalock, iflags); 13079 cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl; 13080 cmdiocbq->wait_cmd_cmpl = NULL; 13081 if (cmdiocbq->cmd_cmpl) 13082 cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL); 13083 else 13084 lpfc_sli_release_iocbq(phba, cmdiocbq); 13085 return; 13086 } 13087 13088 /* Copy the contents of the local rspiocb into the caller's buffer. */ 13089 cmdiocbq->cmd_flag |= LPFC_IO_WAKE; 13090 if (cmdiocbq->rsp_iocb && rspiocbq) 13091 memcpy((char *)cmdiocbq->rsp_iocb + offset, 13092 (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset); 13093 13094 /* Set the exchange busy flag for task management commands */ 13095 if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) && 13096 !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) { 13097 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf, 13098 cur_iocbq); 13099 if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY)) 13100 lpfc_cmd->flags |= LPFC_SBUF_XBUSY; 13101 else 13102 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; 13103 } 13104 13105 pdone_q = cmdiocbq->context_un.wait_queue; 13106 if (pdone_q) 13107 wake_up(pdone_q); 13108 spin_unlock_irqrestore(&phba->hbalock, iflags); 13109 return; 13110 } 13111 13112 /** 13113 * lpfc_chk_iocb_flg - Test IOCB flag with lock held. 13114 * @phba: Pointer to HBA context object. 13115 *
@piocbq: Pointer to command iocb. 13116 * @flag: Flag to test. 13117 * 13118 * This routine grabs the hbalock and then tests the cmd_flag to 13119 * see if the passed in flag is set. 13120 * Returns: 13121 * 1 if flag is set. 13122 * 0 if flag is not set. 13123 **/ 13124 static int 13125 lpfc_chk_iocb_flg(struct lpfc_hba *phba, 13126 struct lpfc_iocbq *piocbq, uint32_t flag) 13127 { 13128 unsigned long iflags; 13129 int ret; 13130 13131 spin_lock_irqsave(&phba->hbalock, iflags); 13132 ret = piocbq->cmd_flag & flag; 13133 spin_unlock_irqrestore(&phba->hbalock, iflags); 13134 return ret; 13135 13136 } 13137 13138 /** 13139 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands 13140 * @phba: Pointer to HBA context object. 13141 * @ring_number: Ring number 13142 * @piocb: Pointer to command iocb. 13143 * @prspiocbq: Pointer to response iocb. 13144 * @timeout: Timeout in number of seconds. 13145 * 13146 * This function issues the iocb to firmware and waits for the 13147 * iocb to complete. The cmd_cmpl field of the iocb shall be used 13148 * to handle iocbs which time out. If the field is NULL, the 13149 * function shall free the iocbq structure. If more clean up is 13150 * needed, the caller is expected to provide a completion function 13151 * that will provide the needed clean up. If the iocb command is 13152 * not completed within timeout seconds, the function will either 13153 * free the iocbq structure (if cmd_cmpl == NULL) or execute the 13154 * completion function set in the cmd_cmpl field and then return 13155 * a status of IOCB_TIMEDOUT. The caller should not free the iocb 13156 * resources if this function returns IOCB_TIMEDOUT. 13157 * The function waits for the iocb completion using a 13158 * non-interruptible wait. 13159 * This function will sleep while waiting for iocb completion. 13160 * So, this function should not be called from any context which 13161 * does not allow sleeping. For the same reason, this function 13162 * cannot be called with interrupts disabled. 13163 * This function assumes that the iocb completions occur while 13164 * this function sleeps. So, this function cannot be called from 13165 * the thread which processes iocb completions for this ring. 13166 * This function clears the cmd_flag of the iocb object before 13167 * issuing the iocb and the iocb completion handler sets this 13168 * flag and wakes this thread when the iocb completes. 13169 * The contents of the response iocb will be copied to prspiocbq 13170 * by the completion handler when the command completes. 13171 * This function returns IOCB_SUCCESS when successful. 13172 * This function is called with no lock held. 13173 **/ 13174 int 13175 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, 13176 uint32_t ring_number, 13177 struct lpfc_iocbq *piocb, 13178 struct lpfc_iocbq *prspiocbq, 13179 uint32_t timeout) 13180 { 13181 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 13182 long timeleft, timeout_req = 0; 13183 int retval = IOCB_SUCCESS; 13184 uint32_t creg_val; 13185 struct lpfc_iocbq *iocb; 13186 int txq_cnt = 0; 13187 int txcmplq_cnt = 0; 13188 struct lpfc_sli_ring *pring; 13189 unsigned long iflags; 13190 bool iocb_completed = true; 13191 13192 if (phba->sli_rev >= LPFC_SLI_REV4) { 13193 lpfc_sli_prep_wqe(phba, piocb); 13194 13195 pring = lpfc_sli4_calc_ring(phba, piocb); 13196 } else 13197 pring = &phba->sli.sli3_ring[ring_number]; 13198 /* 13199 * If the caller has provided a response iocbq buffer, then the rsp_iocb 13200 * field must be NULL, or it is an error. 13201
13201 */ 13202 if (prspiocbq) { 13203 if (piocb->rsp_iocb) 13204 return IOCB_ERROR; 13205 piocb->rsp_iocb = prspiocbq; 13206 } 13207 13208 piocb->wait_cmd_cmpl = piocb->cmd_cmpl; 13209 piocb->cmd_cmpl = lpfc_sli_wake_iocb_wait; 13210 piocb->context_un.wait_queue = &done_q; 13211 piocb->cmd_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO); 13212 13213 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 13214 if (lpfc_readl(phba->HCregaddr, &creg_val)) 13215 return IOCB_ERROR; 13216 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 13217 writel(creg_val, phba->HCregaddr); 13218 readl(phba->HCregaddr); /* flush */ 13219 } 13220 13221 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 13222 SLI_IOCB_RET_IOCB); 13223 if (retval == IOCB_SUCCESS) { 13224 timeout_req = secs_to_jiffies(timeout); 13225 timeleft = wait_event_timeout(done_q, 13226 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), 13227 timeout_req); 13228 spin_lock_irqsave(&phba->hbalock, iflags); 13229 if (!(piocb->cmd_flag & LPFC_IO_WAKE)) { 13230 13231 /* 13232 * IOCB timed out. Inform the wake iocb wait 13233 * completion function and set local status 13234 */ 13235 13236 iocb_completed = false; 13237 piocb->cmd_flag |= LPFC_IO_WAKE_TMO; 13238 } 13239 spin_unlock_irqrestore(&phba->hbalock, iflags); 13240 if (iocb_completed) { 13241 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13242 "0331 IOCB wake signaled\n"); 13243 /* Note: we are not indicating if the IOCB has a success 13244 * status or not - that's for the caller to check. 13245 * IOCB_SUCCESS means just that the command was sent and 13246 * completed. Not that it completed successfully. 13247 * */ 13248 } else if (timeleft == 0) { 13249 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13250 "0338 IOCB wait timeout error - no " 13251 "wake response Data x%x\n", timeout); 13252 retval = IOCB_TIMEDOUT; 13253 } else { 13254 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13255 "0330 IOCB wake NOT set, " 13256 "Data x%x x%lx\n", 13257 timeout, (timeleft / jiffies)); 13258 retval = IOCB_TIMEDOUT; 13259 } 13260 } else if (retval == IOCB_BUSY) { 13261 if (phba->cfg_log_verbose & LOG_SLI) { 13262 list_for_each_entry(iocb, &pring->txq, list) { 13263 txq_cnt++; 13264 } 13265 list_for_each_entry(iocb, &pring->txcmplq, list) { 13266 txcmplq_cnt++; 13267 } 13268 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13269 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", 13270 phba->iocb_cnt, txq_cnt, txcmplq_cnt); 13271 } 13272 return retval; 13273 } else { 13274 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13275 "0332 IOCB wait issue failed, Data x%x\n", 13276 retval); 13277 retval = IOCB_ERROR; 13278 } 13279 13280 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 13281 if (lpfc_readl(phba->HCregaddr, &creg_val)) 13282 return IOCB_ERROR; 13283 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 13284 writel(creg_val, phba->HCregaddr); 13285 readl(phba->HCregaddr); /* flush */ 13286 } 13287 13288 if (prspiocbq) 13289 piocb->rsp_iocb = NULL; 13290 13291 piocb->context_un.wait_queue = NULL; 13292 piocb->cmd_cmpl = NULL; 13293 return retval; 13294 } 13295 13296 /** 13297 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox 13298 * @phba: Pointer to HBA context object. 13299 * @pmboxq: Pointer to driver mailbox object. 13300 * @timeout: Timeout in number of seconds. 13301 * 13302 * This function issues the mailbox to firmware and waits for the 13303 * mailbox command to complete. If the mailbox command is not 13304 * completed within timeout seconds, it returns MBX_TIMEOUT. 
The function waits for the mailbox completion using an 13306 * interruptible wait. If the thread is woken up due to a 13307 * signal, MBX_TIMEOUT error is returned to the caller. The caller 13308 * should not free the mailbox resources if this function returns 13309 * MBX_TIMEOUT. 13310 * This function will sleep while waiting for mailbox completion. 13311 * So, this function should not be called from any context which 13312 * does not allow sleeping. For the same reason, this function 13313 * cannot be called with interrupts disabled. 13314 * This function assumes that the mailbox completion occurs while 13315 * this function sleeps. So, this function cannot be called from 13316 * the worker thread which processes mailbox completion. 13317 * This function is called in the context of HBA management 13318 * applications. 13319 * This function returns MBX_SUCCESS when successful. 13320 * This function is called with no lock held. 13321 **/ 13322 int 13323 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, 13324 uint32_t timeout) 13325 { 13326 struct completion mbox_done; 13327 int retval; 13328 unsigned long flag; 13329 13330 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE; 13331 /* setup wake call as IOCB callback */ 13332 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; 13333 13334 /* setup ctx_u field to pass wait_queue pointer to wake function */ 13335 init_completion(&mbox_done); 13336 pmboxq->ctx_u.mbox_wait = &mbox_done; 13337 /* now issue the command */ 13338 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 13339 if (retval == MBX_BUSY || retval == MBX_SUCCESS) { 13340 wait_for_completion_timeout(&mbox_done, secs_to_jiffies(timeout)); 13341 13342 spin_lock_irqsave(&phba->hbalock, flag); 13343 pmboxq->ctx_u.mbox_wait = NULL; 13344 /* 13345 * if LPFC_MBX_WAKE flag is set the mailbox is completed 13346 * else do not free the resources. 13347 */ 13348 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) { 13349 retval = MBX_SUCCESS; 13350 } else { 13351 retval = MBX_TIMEOUT; 13352 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 13353 } 13354 spin_unlock_irqrestore(&phba->hbalock, flag); 13355 } 13356 return retval; 13357 } 13358 13359 /** 13360 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system 13361 * @phba: Pointer to HBA context. 13362 * @mbx_action: Mailbox shutdown options. 13363 * 13364 * This function is called to shutdown the driver's mailbox sub-system. 13365 * It first marks the mailbox sub-system as blocked to prevent 13366 * asynchronous mailbox commands from being issued off the pending mailbox 13367 * command queue. If the mailbox command sub-system shutdown is due to 13368 * HBA error conditions such as EEH or ERATT, this routine shall invoke 13369 * the mailbox sub-system flush routine to forcefully bring down the 13370 * mailbox sub-system. Otherwise, if it is due to a normal condition (such 13371 * as with offline or HBA function reset), this routine will wait for the 13372 * outstanding mailbox command to complete before invoking the mailbox 13373 * sub-system flush routine to gracefully bring down the mailbox sub-system. 13374
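 *
 * Invocation sketch (illustrative): a graceful bring-down waits for the
 * active mailbox command, while an EEH/ERATT error path does not:
 *
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_NO_WAIT);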
13374 **/ 13375 void 13376 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action) 13377 { 13378 struct lpfc_sli *psli = &phba->sli; 13379 unsigned long timeout; 13380 13381 if (mbx_action == LPFC_MBX_NO_WAIT) { 13382 /* delay 100ms for port state */ 13383 msleep(100); 13384 lpfc_sli_mbox_sys_flush(phba); 13385 return; 13386 } 13387 timeout = secs_to_jiffies(LPFC_MBOX_TMO) + jiffies; 13388 13389 /* Disable softirqs, including timers from obtaining phba->hbalock */ 13390 local_bh_disable(); 13391 13392 spin_lock_irq(&phba->hbalock); 13393 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 13394 13395 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 13396 /* Determine how long we might wait for the active mailbox 13397 * command to be gracefully completed by firmware. 13398 */ 13399 if (phba->sli.mbox_active) 13400 timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, 13401 phba->sli.mbox_active)) + jiffies; 13402 spin_unlock_irq(&phba->hbalock); 13403 13404 /* Enable softirqs again, done with phba->hbalock */ 13405 local_bh_enable(); 13406 13407 while (phba->sli.mbox_active) { 13408 /* Check active mailbox complete status every 2ms */ 13409 msleep(2); 13410 if (time_after(jiffies, timeout)) 13411 /* Timeout, let the mailbox flush routine to 13412 * forcefully release active mailbox command 13413 */ 13414 break; 13415 } 13416 } else { 13417 spin_unlock_irq(&phba->hbalock); 13418 13419 /* Enable softirqs again, done with phba->hbalock */ 13420 local_bh_enable(); 13421 } 13422 13423 lpfc_sli_mbox_sys_flush(phba); 13424 } 13425 13426 /** 13427 * lpfc_sli_eratt_read - read sli-3 error attention events 13428 * @phba: Pointer to HBA context. 13429 * 13430 * This function is called to read the SLI3 device error attention registers 13431 * for possible error attention events. The caller must hold the hostlock 13432 * with spin_lock_irq(). 13433 * 13434 * This function returns 1 when there is Error Attention in the Host Attention 13435 * Register and returns 0 otherwise. 13436 **/ 13437 static int 13438 lpfc_sli_eratt_read(struct lpfc_hba *phba) 13439 { 13440 uint32_t ha_copy; 13441 13442 /* Read chip Host Attention (HA) register */ 13443 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 13444 goto unplug_err; 13445 13446 if (ha_copy & HA_ERATT) { 13447 /* Read host status register to retrieve error event */ 13448 if (lpfc_sli_read_hs(phba)) 13449 goto unplug_err; 13450 13451 /* Check if there is a deferred error condition is active */ 13452 if ((HS_FFER1 & phba->work_hs) && 13453 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 13454 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { 13455 set_bit(DEFER_ERATT, &phba->hba_flag); 13456 /* Clear all interrupt enable conditions */ 13457 writel(0, phba->HCregaddr); 13458 readl(phba->HCregaddr); 13459 } 13460 13461 /* Set the driver HA work bitmap */ 13462 phba->work_ha |= HA_ERATT; 13463 /* Indicate polling handles this ERATT */ 13464 set_bit(HBA_ERATT_HANDLED, &phba->hba_flag); 13465 return 1; 13466 } 13467 return 0; 13468 13469 unplug_err: 13470 /* Set the driver HS work bitmap */ 13471 phba->work_hs |= UNPLUG_ERR; 13472 /* Set the driver HA work bitmap */ 13473 phba->work_ha |= HA_ERATT; 13474 /* Indicate polling handles this ERATT */ 13475 set_bit(HBA_ERATT_HANDLED, &phba->hba_flag); 13476 return 1; 13477 } 13478 13479 /** 13480 * lpfc_sli4_eratt_read - read sli-4 error attention events 13481 * @phba: Pointer to HBA context. 13482 * 13483 * This function is called to read the SLI4 device error attention registers 13484 * for possible error attention events. 
The caller must hold the hostlock 13485 * with spin_lock_irq(). 13486 * 13487 * This function returns 1 when there is Error Attention in the Host Attention 13488 * Register and returns 0 otherwise. 13489 **/ 13490 static int 13491 lpfc_sli4_eratt_read(struct lpfc_hba *phba) 13492 { 13493 uint32_t uerr_sta_hi, uerr_sta_lo; 13494 uint32_t if_type, portsmphr; 13495 struct lpfc_register portstat_reg; 13496 u32 logmask; 13497 13498 /* 13499 * For now, use the SLI4 device internal unrecoverable error 13500 * registers for error attention. This can be changed later. 13501 */ 13502 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 13503 switch (if_type) { 13504 case LPFC_SLI_INTF_IF_TYPE_0: 13505 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, 13506 &uerr_sta_lo) || 13507 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, 13508 &uerr_sta_hi)) { 13509 phba->work_hs |= UNPLUG_ERR; 13510 phba->work_ha |= HA_ERATT; 13511 set_bit(HBA_ERATT_HANDLED, &phba->hba_flag); 13512 return 1; 13513 } 13514 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 13515 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 13516 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13517 "1423 HBA Unrecoverable error: " 13518 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 13519 "ue_mask_lo_reg=0x%x, " 13520 "ue_mask_hi_reg=0x%x\n", 13521 uerr_sta_lo, uerr_sta_hi, 13522 phba->sli4_hba.ue_mask_lo, 13523 phba->sli4_hba.ue_mask_hi); 13524 phba->work_status[0] = uerr_sta_lo; 13525 phba->work_status[1] = uerr_sta_hi; 13526 phba->work_ha |= HA_ERATT; 13527 set_bit(HBA_ERATT_HANDLED, &phba->hba_flag); 13528 return 1; 13529 } 13530 break; 13531 case LPFC_SLI_INTF_IF_TYPE_2: 13532 case LPFC_SLI_INTF_IF_TYPE_6: 13533 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 13534 &portstat_reg.word0) || 13535 lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 13536 &portsmphr)){ 13537 phba->work_hs |= UNPLUG_ERR; 13538 phba->work_ha |= HA_ERATT; 13539 set_bit(HBA_ERATT_HANDLED, &phba->hba_flag); 13540 return 1; 13541 } 13542 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { 13543 phba->work_status[0] = 13544 readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 13545 phba->work_status[1] = 13546 readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 13547 logmask = LOG_TRACE_EVENT; 13548 if (phba->work_status[0] == 13549 SLIPORT_ERR1_REG_ERR_CODE_2 && 13550 phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART) 13551 logmask = LOG_SLI; 13552 lpfc_printf_log(phba, KERN_ERR, logmask, 13553 "2885 Port Status Event: " 13554 "port status reg 0x%x, " 13555 "port smphr reg 0x%x, " 13556 "error 1=0x%x, error 2=0x%x\n", 13557 portstat_reg.word0, 13558 portsmphr, 13559 phba->work_status[0], 13560 phba->work_status[1]); 13561 phba->work_ha |= HA_ERATT; 13562 set_bit(HBA_ERATT_HANDLED, &phba->hba_flag); 13563 return 1; 13564 } 13565 break; 13566 case LPFC_SLI_INTF_IF_TYPE_1: 13567 default: 13568 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13569 "2886 HBA Error Attention on unsupported " 13570 "if type %d.", if_type); 13571 return 1; 13572 } 13573 13574 return 0; 13575 } 13576 13577 /** 13578 * lpfc_sli_check_eratt - check error attention events 13579 * @phba: Pointer to HBA context. 13580 * 13581 * This function is called from timer soft interrupt context to check HBA's 13582 * error attention register bit for error attention events. 13583 * 13584 * This function returns 1 when there is Error Attention in the Host Attention 13585 * Register and returns 0 otherwise. 
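 *
 * Polling sketch (illustrative) of how a timer-context caller might use
 * the result:
 *
 *	if (lpfc_sli_check_eratt(phba))
 *		lpfc_worker_wake_up(phba);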
13586 **/ 13587 int 13588 lpfc_sli_check_eratt(struct lpfc_hba *phba) 13589 { 13590 uint32_t ha_copy; 13591 13592 /* If somebody is waiting to handle an eratt, don't process it 13593 * here. The brdkill function will do this. 13594 */ 13595 if (phba->link_flag & LS_IGNORE_ERATT) 13596 return 0; 13597 13598 /* Check if interrupt handler handles this ERATT */ 13599 if (test_bit(HBA_ERATT_HANDLED, &phba->hba_flag)) 13600 /* Interrupt handler has handled ERATT */ 13601 return 0; 13602 13603 /* 13604 * If there is deferred error attention, do not check for error 13605 * attention 13606 */ 13607 if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) 13608 return 0; 13609 13610 spin_lock_irq(&phba->hbalock); 13611 /* If PCI channel is offline, don't process it */ 13612 if (unlikely(pci_channel_offline(phba->pcidev))) { 13613 spin_unlock_irq(&phba->hbalock); 13614 return 0; 13615 } 13616 13617 switch (phba->sli_rev) { 13618 case LPFC_SLI_REV2: 13619 case LPFC_SLI_REV3: 13620 /* Read chip Host Attention (HA) register */ 13621 ha_copy = lpfc_sli_eratt_read(phba); 13622 break; 13623 case LPFC_SLI_REV4: 13624 /* Read device Uncoverable Error (UERR) registers */ 13625 ha_copy = lpfc_sli4_eratt_read(phba); 13626 break; 13627 default: 13628 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13629 "0299 Invalid SLI revision (%d)\n", 13630 phba->sli_rev); 13631 ha_copy = 0; 13632 break; 13633 } 13634 spin_unlock_irq(&phba->hbalock); 13635 13636 return ha_copy; 13637 } 13638 13639 /** 13640 * lpfc_intr_state_check - Check device state for interrupt handling 13641 * @phba: Pointer to HBA context. 13642 * 13643 * This inline routine checks whether a device or its PCI slot is in a state 13644 * that the interrupt should be handled. 13645 * 13646 * This function returns 0 if the device or the PCI slot is in a state that 13647 * interrupt should be handled, otherwise -EIO. 13648 */ 13649 static inline int 13650 lpfc_intr_state_check(struct lpfc_hba *phba) 13651 { 13652 /* If the pci channel is offline, ignore all the interrupts */ 13653 if (unlikely(pci_channel_offline(phba->pcidev))) 13654 return -EIO; 13655 13656 /* Update device level interrupt statistics */ 13657 phba->sli.slistat.sli_intr++; 13658 13659 /* Ignore all interrupts during initialization. */ 13660 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 13661 return -EIO; 13662 13663 return 0; 13664 } 13665 13666 /** 13667 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device 13668 * @irq: Interrupt number. 13669 * @dev_id: The device context pointer. 13670 * 13671 * This function is directly called from the PCI layer as an interrupt 13672 * service routine when device with SLI-3 interface spec is enabled with 13673 * MSI-X multi-message interrupt mode and there are slow-path events in 13674 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 13675 * interrupt mode, this function is called as part of the device-level 13676 * interrupt handler. When the PCI slot is in error recovery or the HBA 13677 * is undergoing initialization, the interrupt handler will not process 13678 * the interrupt. The link attention and ELS ring attention events are 13679 * handled by the worker thread. The interrupt handler signals the worker 13680 * thread and returns for these events. This function is called without 13681 * any lock held. It gets the hbalock to access and update SLI data 13682 * structures. 13683 * 13684 * This function returns IRQ_HANDLED when interrupt is handled else it 13685 * returns IRQ_NONE. 
13686 **/ 13687 irqreturn_t 13688 lpfc_sli_sp_intr_handler(int irq, void *dev_id) 13689 { 13690 struct lpfc_hba *phba; 13691 uint32_t ha_copy, hc_copy; 13692 uint32_t work_ha_copy; 13693 unsigned long status; 13694 unsigned long iflag; 13695 uint32_t control; 13696 13697 MAILBOX_t *mbox, *pmbox; 13698 struct lpfc_vport *vport; 13699 struct lpfc_nodelist *ndlp; 13700 struct lpfc_dmabuf *mp; 13701 LPFC_MBOXQ_t *pmb; 13702 int rc; 13703 13704 /* 13705 * Get the driver's phba structure from the dev_id and 13706 * assume the HBA is not interrupting. 13707 */ 13708 phba = (struct lpfc_hba *)dev_id; 13709 13710 if (unlikely(!phba)) 13711 return IRQ_NONE; 13712 13713 /* 13714 * Stuff needs to be attented to when this function is invoked as an 13715 * individual interrupt handler in MSI-X multi-message interrupt mode 13716 */ 13717 if (phba->intr_type == MSIX) { 13718 /* Check device state for handling interrupt */ 13719 if (lpfc_intr_state_check(phba)) 13720 return IRQ_NONE; 13721 /* Need to read HA REG for slow-path events */ 13722 spin_lock_irqsave(&phba->hbalock, iflag); 13723 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 13724 goto unplug_error; 13725 /* If somebody is waiting to handle an eratt don't process it 13726 * here. The brdkill function will do this. 13727 */ 13728 if (phba->link_flag & LS_IGNORE_ERATT) 13729 ha_copy &= ~HA_ERATT; 13730 /* Check the need for handling ERATT in interrupt handler */ 13731 if (ha_copy & HA_ERATT) { 13732 if (test_and_set_bit(HBA_ERATT_HANDLED, 13733 &phba->hba_flag)) 13734 /* ERATT polling has handled ERATT */ 13735 ha_copy &= ~HA_ERATT; 13736 } 13737 13738 /* 13739 * If there is deferred error attention, do not check for any 13740 * interrupt. 13741 */ 13742 if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) { 13743 spin_unlock_irqrestore(&phba->hbalock, iflag); 13744 return IRQ_NONE; 13745 } 13746 13747 /* Clear up only attention source related to slow-path */ 13748 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 13749 goto unplug_error; 13750 13751 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | 13752 HC_LAINT_ENA | HC_ERINT_ENA), 13753 phba->HCregaddr); 13754 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 13755 phba->HAregaddr); 13756 writel(hc_copy, phba->HCregaddr); 13757 readl(phba->HAregaddr); /* flush */ 13758 spin_unlock_irqrestore(&phba->hbalock, iflag); 13759 } else 13760 ha_copy = phba->ha_copy; 13761 13762 work_ha_copy = ha_copy & phba->work_ha_mask; 13763 13764 if (work_ha_copy) { 13765 if (work_ha_copy & HA_LATT) { 13766 if (phba->sli.sli_flag & LPFC_PROCESS_LA) { 13767 /* 13768 * Turn off Link Attention interrupts 13769 * until CLEAR_LA done 13770 */ 13771 spin_lock_irqsave(&phba->hbalock, iflag); 13772 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 13773 if (lpfc_readl(phba->HCregaddr, &control)) 13774 goto unplug_error; 13775 control &= ~HC_LAINT_ENA; 13776 writel(control, phba->HCregaddr); 13777 readl(phba->HCregaddr); /* flush */ 13778 spin_unlock_irqrestore(&phba->hbalock, iflag); 13779 } 13780 else 13781 work_ha_copy &= ~HA_LATT; 13782 } 13783 13784 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) { 13785 /* 13786 * Turn off Slow Rings interrupts, LPFC_ELS_RING is 13787 * the only slow ring. 
13788 */ 13789 status = (work_ha_copy & 13790 (HA_RXMASK << (4*LPFC_ELS_RING))); 13791 status >>= (4*LPFC_ELS_RING); 13792 if (status & HA_RXMASK) { 13793 spin_lock_irqsave(&phba->hbalock, iflag); 13794 if (lpfc_readl(phba->HCregaddr, &control)) 13795 goto unplug_error; 13796 13797 lpfc_debugfs_slow_ring_trc(phba, 13798 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", 13799 control, status, 13800 (uint32_t)phba->sli.slistat.sli_intr); 13801 13802 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { 13803 lpfc_debugfs_slow_ring_trc(phba, 13804 "ISR Disable ring:" 13805 "pwork:x%x hawork:x%x wait:x%x", 13806 phba->work_ha, work_ha_copy, 13807 (uint32_t)((unsigned long) 13808 &phba->work_waitq)); 13809 13810 control &= 13811 ~(HC_R0INT_ENA << LPFC_ELS_RING); 13812 writel(control, phba->HCregaddr); 13813 readl(phba->HCregaddr); /* flush */ 13814 } 13815 else { 13816 lpfc_debugfs_slow_ring_trc(phba, 13817 "ISR slow ring: pwork:" 13818 "x%x hawork:x%x wait:x%x", 13819 phba->work_ha, work_ha_copy, 13820 (uint32_t)((unsigned long) 13821 &phba->work_waitq)); 13822 } 13823 spin_unlock_irqrestore(&phba->hbalock, iflag); 13824 } 13825 } 13826 spin_lock_irqsave(&phba->hbalock, iflag); 13827 if (work_ha_copy & HA_ERATT) { 13828 if (lpfc_sli_read_hs(phba)) 13829 goto unplug_error; 13830 /* 13831 * Check if there is a deferred error condition 13832 * is active 13833 */ 13834 if ((HS_FFER1 & phba->work_hs) && 13835 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 13836 HS_FFER6 | HS_FFER7 | HS_FFER8) & 13837 phba->work_hs)) { 13838 set_bit(DEFER_ERATT, &phba->hba_flag); 13839 /* Clear all interrupt enable conditions */ 13840 writel(0, phba->HCregaddr); 13841 readl(phba->HCregaddr); 13842 } 13843 } 13844 13845 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 13846 pmb = phba->sli.mbox_active; 13847 pmbox = &pmb->u.mb; 13848 mbox = phba->mbox; 13849 vport = pmb->vport; 13850 13851 /* First check out the status word */ 13852 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); 13853 if (pmbox->mbxOwner != OWN_HOST) { 13854 spin_unlock_irqrestore(&phba->hbalock, iflag); 13855 /* 13856 * Stray Mailbox Interrupt, mbxCommand <cmd> 13857 * mbxStatus <status> 13858 */ 13859 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13860 "(%d):0304 Stray Mailbox " 13861 "Interrupt mbxCommand x%x " 13862 "mbxStatus x%x\n", 13863 (vport ? vport->vpi : 0), 13864 pmbox->mbxCommand, 13865 pmbox->mbxStatus); 13866 /* clear mailbox attention bit */ 13867 work_ha_copy &= ~HA_MBATT; 13868 } else { 13869 phba->sli.mbox_active = NULL; 13870 spin_unlock_irqrestore(&phba->hbalock, iflag); 13871 phba->last_completion_time = jiffies; 13872 timer_delete(&phba->sli.mbox_tmo); 13873 if (pmb->mbox_cmpl) { 13874 lpfc_sli_pcimem_bcopy(mbox, pmbox, 13875 MAILBOX_CMD_SIZE); 13876 if (pmb->out_ext_byte_len && 13877 pmb->ext_buf) 13878 lpfc_sli_pcimem_bcopy( 13879 phba->mbox_ext, 13880 pmb->ext_buf, 13881 pmb->out_ext_byte_len); 13882 } 13883 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 13884 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 13885 13886 lpfc_debugfs_disc_trc(vport, 13887 LPFC_DISC_TRC_MBOX_VPORT, 13888 "MBOX dflt rpi: : " 13889 "status:x%x rpi:x%x", 13890 (uint32_t)pmbox->mbxStatus, 13891 pmbox->un.varWords[0], 0); 13892 13893 if (!pmbox->mbxStatus) { 13894 mp = pmb->ctx_buf; 13895 ndlp = pmb->ctx_ndlp; 13896 13897 /* Reg_LOGIN of dflt RPI was 13898 * successful. new lets get 13899 * rid of the RPI using the 13900 * same mbox buffer. 
13901 */ 13902 lpfc_unreg_login(phba, 13903 vport->vpi, 13904 pmbox->un.varWords[0], 13905 pmb); 13906 pmb->mbox_cmpl = 13907 lpfc_mbx_cmpl_dflt_rpi; 13908 pmb->ctx_buf = mp; 13909 pmb->ctx_ndlp = ndlp; 13910 pmb->vport = vport; 13911 rc = lpfc_sli_issue_mbox(phba, 13912 pmb, 13913 MBX_NOWAIT); 13914 if (rc != MBX_BUSY) 13915 lpfc_printf_log(phba, 13916 KERN_ERR, 13917 LOG_TRACE_EVENT, 13918 "0350 rc should have" 13919 "been MBX_BUSY\n"); 13920 if (rc != MBX_NOT_FINISHED) 13921 goto send_current_mbox; 13922 } 13923 } 13924 spin_lock_irqsave( 13925 &phba->pport->work_port_lock, 13926 iflag); 13927 phba->pport->work_port_events &= 13928 ~WORKER_MBOX_TMO; 13929 spin_unlock_irqrestore( 13930 &phba->pport->work_port_lock, 13931 iflag); 13932 13933 /* Do NOT queue MBX_HEARTBEAT to the worker 13934 * thread for processing. 13935 */ 13936 if (pmbox->mbxCommand == MBX_HEARTBEAT) { 13937 /* Process mbox now */ 13938 phba->sli.mbox_active = NULL; 13939 phba->sli.sli_flag &= 13940 ~LPFC_SLI_MBOX_ACTIVE; 13941 if (pmb->mbox_cmpl) 13942 pmb->mbox_cmpl(phba, pmb); 13943 } else { 13944 /* Queue to worker thread to process */ 13945 lpfc_mbox_cmpl_put(phba, pmb); 13946 } 13947 } 13948 } else 13949 spin_unlock_irqrestore(&phba->hbalock, iflag); 13950 13951 if ((work_ha_copy & HA_MBATT) && 13952 (phba->sli.mbox_active == NULL)) { 13953 send_current_mbox: 13954 /* Process next mailbox command if there is one */ 13955 do { 13956 rc = lpfc_sli_issue_mbox(phba, NULL, 13957 MBX_NOWAIT); 13958 } while (rc == MBX_NOT_FINISHED); 13959 if (rc != MBX_SUCCESS) 13960 lpfc_printf_log(phba, KERN_ERR, 13961 LOG_TRACE_EVENT, 13962 "0349 rc should be " 13963 "MBX_SUCCESS\n"); 13964 } 13965 13966 spin_lock_irqsave(&phba->hbalock, iflag); 13967 phba->work_ha |= work_ha_copy; 13968 spin_unlock_irqrestore(&phba->hbalock, iflag); 13969 lpfc_worker_wake_up(phba); 13970 } 13971 return IRQ_HANDLED; 13972 unplug_error: 13973 spin_unlock_irqrestore(&phba->hbalock, iflag); 13974 return IRQ_HANDLED; 13975 13976 } /* lpfc_sli_sp_intr_handler */ 13977 13978 /** 13979 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device. 13980 * @irq: Interrupt number. 13981 * @dev_id: The device context pointer. 13982 * 13983 * This function is directly called from the PCI layer as an interrupt 13984 * service routine when device with SLI-3 interface spec is enabled with 13985 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 13986 * ring event in the HBA. However, when the device is enabled with either 13987 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 13988 * device-level interrupt handler. When the PCI slot is in error recovery 13989 * or the HBA is undergoing initialization, the interrupt handler will not 13990 * process the interrupt. The SCSI FCP fast-path ring event are handled in 13991 * the intrrupt context. This function is called without any lock held. 13992 * It gets the hbalock to access and update SLI data structures. 13993 * 13994 * This function returns IRQ_HANDLED when interrupt is handled else it 13995 * returns IRQ_NONE. 13996 **/ 13997 irqreturn_t 13998 lpfc_sli_fp_intr_handler(int irq, void *dev_id) 13999 { 14000 struct lpfc_hba *phba; 14001 uint32_t ha_copy; 14002 unsigned long status; 14003 unsigned long iflag; 14004 struct lpfc_sli_ring *pring; 14005 14006 /* Get the driver's phba structure from the dev_id and 14007 * assume the HBA is not interrupting. 
14008 */ 14009 phba = (struct lpfc_hba *) dev_id; 14010 14011 if (unlikely(!phba)) 14012 return IRQ_NONE; 14013 14014 /* 14015 * Stuff needs to be attented to when this function is invoked as an 14016 * individual interrupt handler in MSI-X multi-message interrupt mode 14017 */ 14018 if (phba->intr_type == MSIX) { 14019 /* Check device state for handling interrupt */ 14020 if (lpfc_intr_state_check(phba)) 14021 return IRQ_NONE; 14022 /* Need to read HA REG for FCP ring and other ring events */ 14023 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 14024 return IRQ_HANDLED; 14025 14026 /* 14027 * If there is deferred error attention, do not check for 14028 * any interrupt. 14029 */ 14030 if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) 14031 return IRQ_NONE; 14032 14033 /* Clear up only attention source related to fast-path */ 14034 spin_lock_irqsave(&phba->hbalock, iflag); 14035 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 14036 phba->HAregaddr); 14037 readl(phba->HAregaddr); /* flush */ 14038 spin_unlock_irqrestore(&phba->hbalock, iflag); 14039 } else 14040 ha_copy = phba->ha_copy; 14041 14042 /* 14043 * Process all events on FCP ring. Take the optimized path for FCP IO. 14044 */ 14045 ha_copy &= ~(phba->work_ha_mask); 14046 14047 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 14048 status >>= (4*LPFC_FCP_RING); 14049 pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; 14050 if (status & HA_RXMASK) 14051 lpfc_sli_handle_fast_ring_event(phba, pring, status); 14052 14053 if (phba->cfg_multi_ring_support == 2) { 14054 /* 14055 * Process all events on extra ring. Take the optimized path 14056 * for extra ring IO. 14057 */ 14058 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 14059 status >>= (4*LPFC_EXTRA_RING); 14060 if (status & HA_RXMASK) { 14061 lpfc_sli_handle_fast_ring_event(phba, 14062 &phba->sli.sli3_ring[LPFC_EXTRA_RING], 14063 status); 14064 } 14065 } 14066 return IRQ_HANDLED; 14067 } /* lpfc_sli_fp_intr_handler */ 14068 14069 /** 14070 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device 14071 * @irq: Interrupt number. 14072 * @dev_id: The device context pointer. 14073 * 14074 * This function is the HBA device-level interrupt handler to device with 14075 * SLI-3 interface spec, called from the PCI layer when either MSI or 14076 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which 14077 * requires driver attention. This function invokes the slow-path interrupt 14078 * attention handling function and fast-path interrupt attention handling 14079 * function in turn to process the relevant HBA attention events. This 14080 * function is called without any lock held. It gets the hbalock to access 14081 * and update SLI data structures. 14082 * 14083 * This function returns IRQ_HANDLED when interrupt is handled, else it 14084 * returns IRQ_NONE. 14085 **/ 14086 irqreturn_t 14087 lpfc_sli_intr_handler(int irq, void *dev_id) 14088 { 14089 struct lpfc_hba *phba; 14090 irqreturn_t sp_irq_rc, fp_irq_rc; 14091 unsigned long status1, status2; 14092 uint32_t hc_copy; 14093 14094 /* 14095 * Get the driver's phba structure from the dev_id and 14096 * assume the HBA is not interrupting. 
14097 */ 14098 phba = (struct lpfc_hba *) dev_id; 14099 14100 if (unlikely(!phba)) 14101 return IRQ_NONE; 14102 14103 /* Check device state for handling interrupt */ 14104 if (lpfc_intr_state_check(phba)) 14105 return IRQ_NONE; 14106 14107 spin_lock(&phba->hbalock); 14108 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { 14109 spin_unlock(&phba->hbalock); 14110 return IRQ_HANDLED; 14111 } 14112 14113 if (unlikely(!phba->ha_copy)) { 14114 spin_unlock(&phba->hbalock); 14115 return IRQ_NONE; 14116 } else if (phba->ha_copy & HA_ERATT) { 14117 if (test_and_set_bit(HBA_ERATT_HANDLED, &phba->hba_flag)) 14118 /* ERATT polling has handled ERATT */ 14119 phba->ha_copy &= ~HA_ERATT; 14120 } 14121 14122 /* 14123 * If there is deferred error attention, do not check for any interrupt. 14124 */ 14125 if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) { 14126 spin_unlock(&phba->hbalock); 14127 return IRQ_NONE; 14128 } 14129 14130 /* Clear attention sources except link and error attentions */ 14131 if (lpfc_readl(phba->HCregaddr, &hc_copy)) { 14132 spin_unlock(&phba->hbalock); 14133 return IRQ_HANDLED; 14134 } 14135 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA 14136 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), 14137 phba->HCregaddr); 14138 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 14139 writel(hc_copy, phba->HCregaddr); 14140 readl(phba->HAregaddr); /* flush */ 14141 spin_unlock(&phba->hbalock); 14142 14143 /* 14144 * Invokes slow-path host attention interrupt handling as appropriate. 14145 */ 14146 14147 /* status of events with mailbox and link attention */ 14148 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); 14149 14150 /* status of events with ELS ring */ 14151 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 14152 status2 >>= (4*LPFC_ELS_RING); 14153 14154 if (status1 || (status2 & HA_RXMASK)) 14155 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); 14156 else 14157 sp_irq_rc = IRQ_NONE; 14158 14159 /* 14160 * Invoke fast-path host attention interrupt handling as appropriate. 14161 */ 14162 14163 /* status of events with FCP ring */ 14164 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 14165 status1 >>= (4*LPFC_FCP_RING); 14166 14167 /* status of events with extra ring */ 14168 if (phba->cfg_multi_ring_support == 2) { 14169 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 14170 status2 >>= (4*LPFC_EXTRA_RING); 14171 } else 14172 status2 = 0; 14173 14174 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 14175 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); 14176 else 14177 fp_irq_rc = IRQ_NONE; 14178 14179 /* Return device-level interrupt handling status */ 14180 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 14181 } /* lpfc_sli_intr_handler */ 14182 14183 /** 14184 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event 14185 * @phba: pointer to lpfc hba data structure. 14186 * 14187 * This routine is invoked by the worker thread to process all the pending 14188 * SLI4 els abort xri events. 
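 *
 * The pending list is drained with a remove-head pattern so the list
 * lock is not held across the handler call; a minimal sketch of the
 * loop below (illustrative only, not additional driver code):
 *
 *   spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
 *   while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
 *           list_remove_head(..., cq_event, struct lpfc_cq_event, list);
 *           spin_unlock_irqrestore(..., iflags);
 *           lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
 *           lpfc_sli4_cq_event_release(phba, cq_event);
 *           spin_lock_irqsave(..., iflags);
 *   }
 *   spin_unlock_irqrestore(..., iflags);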
14189 **/ 14190 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) 14191 { 14192 struct lpfc_cq_event *cq_event; 14193 unsigned long iflags; 14194 14195 /* First, declare the els xri abort event has been handled */ 14196 clear_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag); 14197 14198 /* Now, handle all the els xri abort events */ 14199 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); 14200 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { 14201 /* Get the first event from the head of the event queue */ 14202 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 14203 cq_event, struct lpfc_cq_event, list); 14204 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, 14205 iflags); 14206 /* Notify aborted XRI for ELS work queue */ 14207 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 14208 14209 /* Free the event processed back to the free pool */ 14210 lpfc_sli4_cq_event_release(phba, cq_event); 14211 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, 14212 iflags); 14213 } 14214 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); 14215 } 14216 14217 /** 14218 * lpfc_sli4_els_preprocess_rspiocbq - Get response iocbq from els wcqe 14219 * @phba: Pointer to HBA context object. 14220 * @irspiocbq: Pointer to work-queue completion queue entry. 14221 * 14222 * This routine handles an ELS work-queue completion event and construct 14223 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common 14224 * discovery engine to handle. 14225 * 14226 * Return: Pointer to the receive IOCBQ, NULL otherwise. 14227 **/ 14228 static struct lpfc_iocbq * 14229 lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba, 14230 struct lpfc_iocbq *irspiocbq) 14231 { 14232 struct lpfc_sli_ring *pring; 14233 struct lpfc_iocbq *cmdiocbq; 14234 struct lpfc_wcqe_complete *wcqe; 14235 unsigned long iflags; 14236 14237 pring = lpfc_phba_elsring(phba); 14238 if (unlikely(!pring)) 14239 return NULL; 14240 14241 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; 14242 spin_lock_irqsave(&pring->ring_lock, iflags); 14243 pring->stats.iocb_event++; 14244 /* Look up the ELS command IOCB and create pseudo response IOCB */ 14245 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 14246 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 14247 if (unlikely(!cmdiocbq)) { 14248 spin_unlock_irqrestore(&pring->ring_lock, iflags); 14249 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 14250 "0386 ELS complete with no corresponding " 14251 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n", 14252 wcqe->word0, wcqe->total_data_placed, 14253 wcqe->parameter, wcqe->word3); 14254 lpfc_sli_release_iocbq(phba, irspiocbq); 14255 return NULL; 14256 } 14257 14258 memcpy(&irspiocbq->wqe, &cmdiocbq->wqe, sizeof(union lpfc_wqe128)); 14259 memcpy(&irspiocbq->wcqe_cmpl, wcqe, sizeof(*wcqe)); 14260 14261 /* Put the iocb back on the txcmplq */ 14262 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq); 14263 spin_unlock_irqrestore(&pring->ring_lock, iflags); 14264 14265 if (bf_get(lpfc_wcqe_c_xb, wcqe)) { 14266 spin_lock_irqsave(&phba->hbalock, iflags); 14267 irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY; 14268 spin_unlock_irqrestore(&phba->hbalock, iflags); 14269 } 14270 14271 return irspiocbq; 14272 } 14273 14274 inline struct lpfc_cq_event * 14275 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size) 14276 { 14277 struct lpfc_cq_event *cq_event; 14278 14279 /* Allocate a new internal CQ_EVENT entry */ 14280 cq_event = lpfc_sli4_cq_event_alloc(phba); 14281 if (!cq_event) { 14282 
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14283 "0602 Failed to alloc CQ_EVENT entry\n"); 14284 return NULL; 14285 } 14286 14287 /* Move the CQE into the event */ 14288 memcpy(&cq_event->cqe, entry, size); 14289 return cq_event; 14290 } 14291 14292 /** 14293 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event 14294 * @phba: Pointer to HBA context object. 14295 * @mcqe: Pointer to mailbox completion queue entry. 14296 * 14297 * This routine process a mailbox completion queue entry with asynchronous 14298 * event. 14299 * 14300 * Return: true if work posted to worker thread, otherwise false. 14301 **/ 14302 static bool 14303 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 14304 { 14305 struct lpfc_cq_event *cq_event; 14306 unsigned long iflags; 14307 14308 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 14309 "0392 Async Event: word0:x%x, word1:x%x, " 14310 "word2:x%x, word3:x%x\n", mcqe->word0, 14311 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); 14312 14313 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe)); 14314 if (!cq_event) 14315 return false; 14316 14317 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); 14318 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); 14319 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); 14320 14321 /* Set the async event flag */ 14322 set_bit(ASYNC_EVENT, &phba->hba_flag); 14323 14324 return true; 14325 } 14326 14327 /** 14328 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event 14329 * @phba: Pointer to HBA context object. 14330 * @mcqe: Pointer to mailbox completion queue entry. 14331 * 14332 * This routine process a mailbox completion queue entry with mailbox 14333 * completion event. 14334 * 14335 * Return: true if work posted to worker thread, otherwise false. 14336 **/ 14337 static bool 14338 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 14339 { 14340 uint32_t mcqe_status; 14341 MAILBOX_t *mbox, *pmbox; 14342 struct lpfc_mqe *mqe; 14343 struct lpfc_vport *vport; 14344 struct lpfc_nodelist *ndlp; 14345 struct lpfc_dmabuf *mp; 14346 unsigned long iflags; 14347 LPFC_MBOXQ_t *pmb; 14348 bool workposted = false; 14349 int rc; 14350 14351 /* If not a mailbox complete MCQE, out by checking mailbox consume */ 14352 if (!bf_get(lpfc_trailer_completed, mcqe)) 14353 goto out_no_mqe_complete; 14354 14355 /* Get the reference to the active mbox command */ 14356 spin_lock_irqsave(&phba->hbalock, iflags); 14357 pmb = phba->sli.mbox_active; 14358 if (unlikely(!pmb)) { 14359 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14360 "1832 No pending MBOX command to handle\n"); 14361 spin_unlock_irqrestore(&phba->hbalock, iflags); 14362 goto out_no_mqe_complete; 14363 } 14364 spin_unlock_irqrestore(&phba->hbalock, iflags); 14365 mqe = &pmb->u.mqe; 14366 pmbox = (MAILBOX_t *)&pmb->u.mqe; 14367 mbox = phba->mbox; 14368 vport = pmb->vport; 14369 14370 /* Reset heartbeat timer */ 14371 phba->last_completion_time = jiffies; 14372 timer_delete(&phba->sli.mbox_tmo); 14373 14374 /* Move mbox data to caller's mailbox region, do endian swapping */ 14375 if (pmb->mbox_cmpl && mbox) 14376 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe)); 14377 14378 /* 14379 * For mcqe errors, conditionally move a modified error code to 14380 * the mbox so that the error will not be missed. 
14381 */ 14382 mcqe_status = bf_get(lpfc_mcqe_status, mcqe); 14383 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 14384 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS) 14385 bf_set(lpfc_mqe_status, mqe, 14386 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 14387 } 14388 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 14389 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 14390 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT, 14391 "MBOX dflt rpi: status:x%x rpi:x%x", 14392 mcqe_status, 14393 pmbox->un.varWords[0], 0); 14394 if (mcqe_status == MB_CQE_STATUS_SUCCESS) { 14395 mp = pmb->ctx_buf; 14396 ndlp = pmb->ctx_ndlp; 14397 14398 /* Reg_LOGIN of dflt RPI was successful. Mark the 14399 * node as having an UNREG_LOGIN in progress to stop 14400 * an unsolicited PLOGI from the same NPortId from 14401 * starting another mailbox transaction. 14402 */ 14403 set_bit(NLP_UNREG_INP, &ndlp->nlp_flag); 14404 lpfc_unreg_login(phba, vport->vpi, 14405 pmbox->un.varWords[0], pmb); 14406 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 14407 pmb->ctx_buf = mp; 14408 14409 /* No reference taken here. This is a default 14410 * RPI reg/immediate unreg cycle. The reference was 14411 * taken in the reg rpi path and is released when 14412 * this mailbox completes. 14413 */ 14414 pmb->ctx_ndlp = ndlp; 14415 pmb->vport = vport; 14416 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 14417 if (rc != MBX_BUSY) 14418 lpfc_printf_log(phba, KERN_ERR, 14419 LOG_TRACE_EVENT, 14420 "0385 rc should " 14421 "have been MBX_BUSY\n"); 14422 if (rc != MBX_NOT_FINISHED) 14423 goto send_current_mbox; 14424 } 14425 } 14426 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 14427 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 14428 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 14429 14430 /* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. 
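 * Handling the completion inline, presumably, keeps the heartbeat from
 * queueing behind other work in a busy worker thread.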
*/ 14431 if (pmbox->mbxCommand == MBX_HEARTBEAT) { 14432 spin_lock_irqsave(&phba->hbalock, iflags); 14433 /* Release the mailbox command posting token */ 14434 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 14435 phba->sli.mbox_active = NULL; 14436 if (bf_get(lpfc_trailer_consumed, mcqe)) 14437 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); 14438 spin_unlock_irqrestore(&phba->hbalock, iflags); 14439 14440 /* Post the next mbox command, if there is one */ 14441 lpfc_sli4_post_async_mbox(phba); 14442 14443 /* Process cmpl now */ 14444 if (pmb->mbox_cmpl) 14445 pmb->mbox_cmpl(phba, pmb); 14446 return false; 14447 } 14448 14449 /* There is mailbox completion work to queue to the worker thread */ 14450 spin_lock_irqsave(&phba->hbalock, iflags); 14451 __lpfc_mbox_cmpl_put(phba, pmb); 14452 phba->work_ha |= HA_MBATT; 14453 spin_unlock_irqrestore(&phba->hbalock, iflags); 14454 workposted = true; 14455 14456 send_current_mbox: 14457 spin_lock_irqsave(&phba->hbalock, iflags); 14458 /* Release the mailbox command posting token */ 14459 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 14460 /* Setting active mailbox pointer need to be in sync to flag clear */ 14461 phba->sli.mbox_active = NULL; 14462 if (bf_get(lpfc_trailer_consumed, mcqe)) 14463 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); 14464 spin_unlock_irqrestore(&phba->hbalock, iflags); 14465 /* Wake up worker thread to post the next pending mailbox command */ 14466 lpfc_worker_wake_up(phba); 14467 return workposted; 14468 14469 out_no_mqe_complete: 14470 spin_lock_irqsave(&phba->hbalock, iflags); 14471 if (bf_get(lpfc_trailer_consumed, mcqe)) 14472 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); 14473 spin_unlock_irqrestore(&phba->hbalock, iflags); 14474 return false; 14475 } 14476 14477 /** 14478 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry 14479 * @phba: Pointer to HBA context object. 14480 * @cq: Pointer to associated CQ 14481 * @cqe: Pointer to mailbox completion queue entry. 14482 * 14483 * This routine process a mailbox completion queue entry, it invokes the 14484 * proper mailbox complete handling or asynchronous event handling routine 14485 * according to the MCQE's async bit. 14486 * 14487 * Return: true if work posted to worker thread, otherwise false. 14488 **/ 14489 static bool 14490 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 14491 struct lpfc_cqe *cqe) 14492 { 14493 struct lpfc_mcqe mcqe; 14494 bool workposted; 14495 14496 cq->CQ_mbox++; 14497 14498 /* Copy the mailbox MCQE and convert endian order as needed */ 14499 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); 14500 14501 /* Invoke the proper event handling routine */ 14502 if (!bf_get(lpfc_trailer_async, &mcqe)) 14503 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe); 14504 else 14505 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe); 14506 return workposted; 14507 } 14508 14509 /** 14510 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event 14511 * @phba: Pointer to HBA context object. 14512 * @cq: Pointer to associated CQ 14513 * @wcqe: Pointer to work-queue completion queue entry. 14514 * 14515 * This routine handles an ELS work-queue completion event. 14516 * 14517 * Return: true if work posted to worker thread, otherwise false. 
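 *
 * The queued event is consumed later by the worker thread, which pops
 * it from sp_queue_event and rebuilds the response, roughly as follows
 * (illustrative sketch of the consumer side, not additional code):
 *
 *   irspiocbq = container_of(cq_event, struct lpfc_iocbq, cq_event);
 *   irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba, irspiocbq);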
14518 **/
14519 static bool
14520 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14521 			     struct lpfc_wcqe_complete *wcqe)
14522 {
14523 	struct lpfc_iocbq *irspiocbq;
14524 	unsigned long iflags;
14525 	struct lpfc_sli_ring *pring = cq->pring;
14526 	int txq_cnt = 0;
14527 	int txcmplq_cnt = 0;
14528 
14529 	/* Check for response status */
14530 	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14531 		/* Log the error status */
14532 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14533 				"0357 ELS CQE error: status=x%x: "
14534 				"CQE: %08x %08x %08x %08x\n",
14535 				bf_get(lpfc_wcqe_c_status, wcqe),
14536 				wcqe->word0, wcqe->total_data_placed,
14537 				wcqe->parameter, wcqe->word3);
14538 	}
14539 
14540 	/* Get an irspiocbq for later ELS response processing use */
14541 	irspiocbq = lpfc_sli_get_iocbq(phba);
14542 	if (!irspiocbq) {
14543 		if (!list_empty(&pring->txq))
14544 			txq_cnt++;
14545 		if (!list_empty(&pring->txcmplq))
14546 			txcmplq_cnt++;
14547 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14548 				"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
14549 				"els_txcmplq_cnt=%d\n",
14550 				txq_cnt, phba->iocb_cnt,
14551 				txcmplq_cnt);
14552 		return false;
14553 	}
14554 
14555 	/* Save off the slow-path queue event for work thread to process */
14556 	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
14557 	spin_lock_irqsave(&phba->hbalock, iflags);
14558 	list_add_tail(&irspiocbq->cq_event.list,
14559 		      &phba->sli4_hba.sp_queue_event);
14560 	spin_unlock_irqrestore(&phba->hbalock, iflags);
14561 	set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
14562 
14563 	return true;
14564 }
14565 
14566 /**
14567  * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
14568  * @phba: Pointer to HBA context object.
14569  * @wcqe: Pointer to work-queue completion queue entry.
14570  *
14571  * This routine handles a slow-path WQ entry consumed event by invoking the
14572  * proper WQ release routine for the slow-path WQ.
14573  **/
14574 static void
14575 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
14576 			     struct lpfc_wcqe_release *wcqe)
14577 {
14578 	/* sanity check on queue memory */
14579 	if (unlikely(!phba->sli4_hba.els_wq))
14580 		return;
14581 	/* Check for the slow-path ELS work queue */
14582 	if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
14583 		lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
14584 				     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14585 	else
14586 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14587 				"2579 Slow-path wqe consume event carries "
14588 				"mismatched qid: wcqe-qid=x%x, sp-qid=x%x\n",
14589 				bf_get(lpfc_wcqe_r_wq_id, wcqe),
14590 				phba->sli4_hba.els_wq->queue_id);
14591 }
14592 
14593 /**
14594  * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an xri abort event
14595  * @phba: Pointer to HBA context object.
14596  * @cq: Pointer to a WQ completion queue.
14597  * @wcqe: Pointer to work-queue completion queue entry.
14598  *
14599  * This routine handles an XRI abort event.
14600  *
14601  * Return: true if work posted to worker thread, otherwise false.
14602 **/ 14603 static bool 14604 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, 14605 struct lpfc_queue *cq, 14606 struct sli4_wcqe_xri_aborted *wcqe) 14607 { 14608 bool workposted = false; 14609 struct lpfc_cq_event *cq_event; 14610 unsigned long iflags; 14611 14612 switch (cq->subtype) { 14613 case LPFC_IO: 14614 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq); 14615 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 14616 /* Notify aborted XRI for NVME work queue */ 14617 if (phba->nvmet_support) 14618 lpfc_sli4_nvmet_xri_aborted(phba, wcqe); 14619 } 14620 workposted = false; 14621 break; 14622 case LPFC_NVME_LS: /* NVME LS uses ELS resources */ 14623 case LPFC_ELS: 14624 cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe)); 14625 if (!cq_event) { 14626 workposted = false; 14627 break; 14628 } 14629 cq_event->hdwq = cq->hdwq; 14630 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, 14631 iflags); 14632 list_add_tail(&cq_event->list, 14633 &phba->sli4_hba.sp_els_xri_aborted_work_queue); 14634 /* Set the els xri abort event flag */ 14635 set_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag); 14636 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, 14637 iflags); 14638 workposted = true; 14639 break; 14640 default: 14641 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14642 "0603 Invalid CQ subtype %d: " 14643 "%08x %08x %08x %08x\n", 14644 cq->subtype, wcqe->word0, wcqe->parameter, 14645 wcqe->word2, wcqe->word3); 14646 workposted = false; 14647 break; 14648 } 14649 return workposted; 14650 } 14651 14652 #define FC_RCTL_MDS_DIAGS 0xF4 14653 14654 /** 14655 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry 14656 * @phba: Pointer to HBA context object. 14657 * @rcqe: Pointer to receive-queue completion queue entry. 14658 * 14659 * This routine process a receive-queue completion queue entry. 14660 * 14661 * Return: true if work posted to worker thread, otherwise false. 
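 *
 * Status handling summary (see the switch below):
 *
 *   FC_STATUS_RQ_BUF_LEN_EXCEEDED  log, then treated as a success
 *   FC_STATUS_RQ_SUCCESS           queue the frame for the worker
 *                                  thread (MDS diagnostic frames are
 *                                  looped back inline)
 *   FC_STATUS_INSUFF_BUF_*         flag the worker thread to post
 *                                  more receive buffers
 *   FC_STATUS_RQ_DMA_FAILURE       log and, if recoverable, recycle
 *                                  the consumed RQE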
14662 **/ 14663 static bool 14664 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) 14665 { 14666 bool workposted = false; 14667 struct fc_frame_header *fc_hdr; 14668 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 14669 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 14670 struct lpfc_nvmet_tgtport *tgtp; 14671 struct hbq_dmabuf *dma_buf; 14672 uint32_t status, rq_id; 14673 unsigned long iflags; 14674 14675 /* sanity check on queue memory */ 14676 if (unlikely(!hrq) || unlikely(!drq)) 14677 return workposted; 14678 14679 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 14680 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 14681 else 14682 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); 14683 if (rq_id != hrq->queue_id) 14684 goto out; 14685 14686 status = bf_get(lpfc_rcqe_status, rcqe); 14687 switch (status) { 14688 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 14689 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14690 "2537 Receive Frame Truncated!!\n"); 14691 fallthrough; 14692 case FC_STATUS_RQ_SUCCESS: 14693 spin_lock_irqsave(&phba->hbalock, iflags); 14694 lpfc_sli4_rq_release(hrq, drq); 14695 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 14696 if (!dma_buf) { 14697 hrq->RQ_no_buf_found++; 14698 spin_unlock_irqrestore(&phba->hbalock, iflags); 14699 goto out; 14700 } 14701 hrq->RQ_rcv_buf++; 14702 hrq->RQ_buf_posted--; 14703 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); 14704 14705 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; 14706 14707 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS || 14708 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { 14709 spin_unlock_irqrestore(&phba->hbalock, iflags); 14710 /* Handle MDS Loopback frames */ 14711 if (!test_bit(FC_UNLOADING, &phba->pport->load_flag)) 14712 lpfc_sli4_handle_mds_loopback(phba->pport, 14713 dma_buf); 14714 else 14715 lpfc_in_buf_free(phba, &dma_buf->dbuf); 14716 break; 14717 } 14718 14719 /* save off the frame for the work thread to process */ 14720 list_add_tail(&dma_buf->cq_event.list, 14721 &phba->sli4_hba.sp_queue_event); 14722 spin_unlock_irqrestore(&phba->hbalock, iflags); 14723 /* Frame received */ 14724 set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag); 14725 workposted = true; 14726 break; 14727 case FC_STATUS_INSUFF_BUF_FRM_DISC: 14728 if (phba->nvmet_support) { 14729 tgtp = phba->targetport->private; 14730 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14731 "6402 RQE Error x%x, posted %d err_cnt " 14732 "%d: %x %x %x\n", 14733 status, hrq->RQ_buf_posted, 14734 hrq->RQ_no_posted_buf, 14735 atomic_read(&tgtp->rcv_fcp_cmd_in), 14736 atomic_read(&tgtp->rcv_fcp_cmd_out), 14737 atomic_read(&tgtp->xmt_fcp_release)); 14738 } 14739 fallthrough; 14740 14741 case FC_STATUS_INSUFF_BUF_NEED_BUF: 14742 hrq->RQ_no_posted_buf++; 14743 /* Post more buffers if possible */ 14744 set_bit(HBA_POST_RECEIVE_BUFFER, &phba->hba_flag); 14745 workposted = true; 14746 break; 14747 case FC_STATUS_RQ_DMA_FAILURE: 14748 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14749 "2564 RQE DMA Error x%x, x%08x x%08x x%08x " 14750 "x%08x\n", 14751 status, rcqe->word0, rcqe->word1, 14752 rcqe->word2, rcqe->word3); 14753 14754 /* If IV set, no further recovery */ 14755 if (bf_get(lpfc_rcqe_iv, rcqe)) 14756 break; 14757 14758 /* recycle consumed resource */ 14759 spin_lock_irqsave(&phba->hbalock, iflags); 14760 lpfc_sli4_rq_release(hrq, drq); 14761 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 14762 if (!dma_buf) { 14763 hrq->RQ_no_buf_found++; 14764 spin_unlock_irqrestore(&phba->hbalock, iflags); 14765 
break; 14766 } 14767 hrq->RQ_rcv_buf++; 14768 hrq->RQ_buf_posted--; 14769 spin_unlock_irqrestore(&phba->hbalock, iflags); 14770 lpfc_in_buf_free(phba, &dma_buf->dbuf); 14771 break; 14772 default: 14773 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14774 "2565 Unexpected RQE Status x%x, w0-3 x%08x " 14775 "x%08x x%08x x%08x\n", 14776 status, rcqe->word0, rcqe->word1, 14777 rcqe->word2, rcqe->word3); 14778 break; 14779 } 14780 out: 14781 return workposted; 14782 } 14783 14784 /** 14785 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry 14786 * @phba: Pointer to HBA context object. 14787 * @cq: Pointer to the completion queue. 14788 * @cqe: Pointer to a completion queue entry. 14789 * 14790 * This routine process a slow-path work-queue or receive queue completion queue 14791 * entry. 14792 * 14793 * Return: true if work posted to worker thread, otherwise false. 14794 **/ 14795 static bool 14796 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 14797 struct lpfc_cqe *cqe) 14798 { 14799 struct lpfc_cqe cqevt; 14800 bool workposted = false; 14801 14802 /* Copy the work queue CQE and convert endian order if needed */ 14803 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe)); 14804 14805 /* Check and process for different type of WCQE and dispatch */ 14806 switch (bf_get(lpfc_cqe_code, &cqevt)) { 14807 case CQE_CODE_COMPL_WQE: 14808 /* Process the WQ/RQ complete event */ 14809 phba->last_completion_time = jiffies; 14810 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq, 14811 (struct lpfc_wcqe_complete *)&cqevt); 14812 break; 14813 case CQE_CODE_RELEASE_WQE: 14814 /* Process the WQ release event */ 14815 lpfc_sli4_sp_handle_rel_wcqe(phba, 14816 (struct lpfc_wcqe_release *)&cqevt); 14817 break; 14818 case CQE_CODE_XRI_ABORTED: 14819 /* Process the WQ XRI abort event */ 14820 phba->last_completion_time = jiffies; 14821 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 14822 (struct sli4_wcqe_xri_aborted *)&cqevt); 14823 break; 14824 case CQE_CODE_RECEIVE: 14825 case CQE_CODE_RECEIVE_V1: 14826 /* Process the RQ event */ 14827 phba->last_completion_time = jiffies; 14828 workposted = lpfc_sli4_sp_handle_rcqe(phba, 14829 (struct lpfc_rcqe *)&cqevt); 14830 break; 14831 default: 14832 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14833 "0388 Not a valid WCQE code: x%x\n", 14834 bf_get(lpfc_cqe_code, &cqevt)); 14835 break; 14836 } 14837 return workposted; 14838 } 14839 14840 /** 14841 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry 14842 * @phba: Pointer to HBA context object. 14843 * @eqe: Pointer to fast-path event queue entry. 14844 * @speq: Pointer to slow-path event queue. 14845 * 14846 * This routine process a event queue entry from the slow-path event queue. 14847 * It will check the MajorCode and MinorCode to determine this is for a 14848 * completion event on a completion queue, if not, an error shall be logged 14849 * and just return. Otherwise, it will get to the corresponding completion 14850 * queue and process all the entries on that completion queue, rearm the 14851 * completion queue, and then return. 
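 *
 * Actual CQE processing is deferred to the CQ's bound CPU via
 * queue_work_on(), except under kdump where queue_work() is used
 * (sketch of the dispatch below):
 *
 *   if (is_kdump_kernel())
 *           ret = queue_work(phba->wq, &cq->spwork);
 *   else
 *           ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);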
14852 * 14853 **/ 14854 static void 14855 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 14856 struct lpfc_queue *speq) 14857 { 14858 struct lpfc_queue *cq = NULL, *childq; 14859 uint16_t cqid; 14860 int ret = 0; 14861 14862 /* Get the reference to the corresponding CQ */ 14863 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 14864 14865 list_for_each_entry(childq, &speq->child_list, list) { 14866 if (childq->queue_id == cqid) { 14867 cq = childq; 14868 break; 14869 } 14870 } 14871 if (unlikely(!cq)) { 14872 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 14873 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14874 "0365 Slow-path CQ identifier " 14875 "(%d) does not exist\n", cqid); 14876 return; 14877 } 14878 14879 /* Save EQ associated with this CQ */ 14880 cq->assoc_qp = speq; 14881 14882 if (is_kdump_kernel()) 14883 ret = queue_work(phba->wq, &cq->spwork); 14884 else 14885 ret = queue_work_on(cq->chann, phba->wq, &cq->spwork); 14886 14887 if (!ret) 14888 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14889 "0390 Cannot schedule queue work " 14890 "for CQ eqcqid=%d, cqid=%d on CPU %d\n", 14891 cqid, cq->queue_id, raw_smp_processor_id()); 14892 } 14893 14894 /** 14895 * __lpfc_sli4_process_cq - Process elements of a CQ 14896 * @phba: Pointer to HBA context object. 14897 * @cq: Pointer to CQ to be processed 14898 * @handler: Routine to process each cqe 14899 * @delay: Pointer to usdelay to set in case of rescheduling of the handler 14900 * 14901 * This routine processes completion queue entries in a CQ. While a valid 14902 * queue element is found, the handler is called. During processing checks 14903 * are made for periodic doorbell writes to let the hardware know of 14904 * element consumption. 14905 * 14906 * If the max limit on cqes to process is hit, or there are no more valid 14907 * entries, the loop stops. If we processed a sufficient number of elements, 14908 * meaning there is sufficient load, rather than rearming and generating 14909 * another interrupt, a cq rescheduling delay will be set. A delay of 0 14910 * indicates no rescheduling. 14911 * 14912 * Returns True if work scheduled, False otherwise. 
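 *
 * The consume/notify batching reduces doorbell writes; it amounts to
 * the following (illustrative sketch, simplified from the loop below):
 *
 *   while ((cqe = lpfc_sli4_cq_get(cq))) {
 *           workposted |= handler(phba, cq, cqe);
 *           __lpfc_sli4_consume_cqe(phba, cq, cqe);
 *           consumed++;
 *           if (!(++count % cq->notify_interval)) {
 *                   phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
 *                                                   LPFC_QUEUE_NOARM);
 *                   consumed = 0;
 *           }
 *   }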
14913 **/ 14914 static bool 14915 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq, 14916 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *, 14917 struct lpfc_cqe *), unsigned long *delay) 14918 { 14919 struct lpfc_cqe *cqe; 14920 bool workposted = false; 14921 int count = 0, consumed = 0; 14922 bool arm = true; 14923 14924 /* default - no reschedule */ 14925 *delay = 0; 14926 14927 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0) 14928 goto rearm_and_exit; 14929 14930 /* Process all the entries to the CQ */ 14931 cq->q_flag = 0; 14932 cqe = lpfc_sli4_cq_get(cq); 14933 while (cqe) { 14934 workposted |= handler(phba, cq, cqe); 14935 __lpfc_sli4_consume_cqe(phba, cq, cqe); 14936 14937 consumed++; 14938 if (!(++count % cq->max_proc_limit)) 14939 break; 14940 14941 if (!(count % cq->notify_interval)) { 14942 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, 14943 LPFC_QUEUE_NOARM); 14944 consumed = 0; 14945 cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK; 14946 } 14947 14948 if (count == LPFC_NVMET_CQ_NOTIFY) 14949 cq->q_flag |= HBA_NVMET_CQ_NOTIFY; 14950 14951 cqe = lpfc_sli4_cq_get(cq); 14952 } 14953 if (count >= phba->cfg_cq_poll_threshold) { 14954 *delay = 1; 14955 arm = false; 14956 } 14957 14958 /* Track the max number of CQEs processed in 1 EQ */ 14959 if (count > cq->CQ_max_cqe) 14960 cq->CQ_max_cqe = count; 14961 14962 cq->assoc_qp->EQ_cqe_cnt += count; 14963 14964 /* Catch the no cq entry condition */ 14965 if (unlikely(count == 0)) 14966 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 14967 "0369 No entry from completion queue " 14968 "qid=%d\n", cq->queue_id); 14969 14970 xchg(&cq->queue_claimed, 0); 14971 14972 rearm_and_exit: 14973 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, 14974 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM); 14975 14976 return workposted; 14977 } 14978 14979 /** 14980 * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry 14981 * @cq: pointer to CQ to process 14982 * 14983 * This routine calls the cq processing routine with a handler specific 14984 * to the type of queue bound to it. 14985 * 14986 * The CQ routine returns two values: the first is the calling status, 14987 * which indicates whether work was queued to the background discovery 14988 * thread. If true, the routine should wakeup the discovery thread; 14989 * the second is the delay parameter. If non-zero, rather than rearming 14990 * the CQ and yet another interrupt, the CQ handler should be queued so 14991 * that it is processed in a subsequent polling action. The value of 14992 * the delay indicates when to reschedule it. 
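 *
 * The expected caller pattern is therefore (sketch of the body below):
 *
 *   workposted = __lpfc_sli4_process_cq(phba, cq, handler, &delay);
 *   if (delay)
 *           queue_delayed_work_on(cq->chann, phba->wq,
 *                                 &cq->sched_spwork, delay);
 *   if (workposted)
 *           lpfc_worker_wake_up(phba);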
14993 **/ 14994 static void 14995 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq) 14996 { 14997 struct lpfc_hba *phba = cq->phba; 14998 unsigned long delay; 14999 bool workposted = false; 15000 int ret = 0; 15001 15002 /* Process and rearm the CQ */ 15003 switch (cq->type) { 15004 case LPFC_MCQ: 15005 workposted |= __lpfc_sli4_process_cq(phba, cq, 15006 lpfc_sli4_sp_handle_mcqe, 15007 &delay); 15008 break; 15009 case LPFC_WCQ: 15010 if (cq->subtype == LPFC_IO) 15011 workposted |= __lpfc_sli4_process_cq(phba, cq, 15012 lpfc_sli4_fp_handle_cqe, 15013 &delay); 15014 else 15015 workposted |= __lpfc_sli4_process_cq(phba, cq, 15016 lpfc_sli4_sp_handle_cqe, 15017 &delay); 15018 break; 15019 default: 15020 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15021 "0370 Invalid completion queue type (%d)\n", 15022 cq->type); 15023 return; 15024 } 15025 15026 if (delay) { 15027 if (is_kdump_kernel()) 15028 ret = queue_delayed_work(phba->wq, &cq->sched_spwork, 15029 delay); 15030 else 15031 ret = queue_delayed_work_on(cq->chann, phba->wq, 15032 &cq->sched_spwork, delay); 15033 if (!ret) 15034 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15035 "0394 Cannot schedule queue work " 15036 "for cqid=%d on CPU %d\n", 15037 cq->queue_id, cq->chann); 15038 } 15039 15040 /* wake up worker thread if there are works to be done */ 15041 if (workposted) 15042 lpfc_worker_wake_up(phba); 15043 } 15044 15045 /** 15046 * lpfc_sli4_sp_process_cq - slow-path work handler when started by 15047 * interrupt 15048 * @work: pointer to work element 15049 * 15050 * translates from the work handler and calls the slow-path handler. 15051 **/ 15052 static void 15053 lpfc_sli4_sp_process_cq(struct work_struct *work) 15054 { 15055 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork); 15056 15057 __lpfc_sli4_sp_process_cq(cq); 15058 } 15059 15060 /** 15061 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer 15062 * @work: pointer to work element 15063 * 15064 * translates from the work handler and calls the slow-path handler. 15065 **/ 15066 static void 15067 lpfc_sli4_dly_sp_process_cq(struct work_struct *work) 15068 { 15069 struct lpfc_queue *cq = container_of(to_delayed_work(work), 15070 struct lpfc_queue, sched_spwork); 15071 15072 __lpfc_sli4_sp_process_cq(cq); 15073 } 15074 15075 /** 15076 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry 15077 * @phba: Pointer to HBA context object. 15078 * @cq: Pointer to associated CQ 15079 * @wcqe: Pointer to work-queue completion queue entry. 15080 * 15081 * This routine process a fast-path work queue completion entry from fast-path 15082 * event queue for FCP command response completion. 15083 **/ 15084 static void 15085 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 15086 struct lpfc_wcqe_complete *wcqe) 15087 { 15088 struct lpfc_sli_ring *pring = cq->pring; 15089 struct lpfc_iocbq *cmdiocbq; 15090 unsigned long iflags; 15091 15092 /* Check for response status */ 15093 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 15094 /* If resource errors reported from HBA, reduce queue 15095 * depth of the SCSI device. 
15096 	 */
15097 		if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
15098 		      IOSTAT_LOCAL_REJECT)) &&
15099 		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
15100 		     IOERR_NO_RESOURCES))
15101 			phba->lpfc_rampdown_queue_depth(phba);
15102 
15103 		/* Log the cmpl status */
15104 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
15105 				"0373 FCP CQE cmpl: status=x%x: "
15106 				"CQE: %08x %08x %08x %08x\n",
15107 				bf_get(lpfc_wcqe_c_status, wcqe),
15108 				wcqe->word0, wcqe->total_data_placed,
15109 				wcqe->parameter, wcqe->word3);
15110 	}
15111 
15112 	/* Look up the FCP command IOCB and create pseudo response IOCB */
15113 	spin_lock_irqsave(&pring->ring_lock, iflags);
15114 	pring->stats.iocb_event++;
15115 	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
15116 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
15117 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
15118 	if (unlikely(!cmdiocbq)) {
15119 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15120 				"0374 FCP complete with no corresponding "
15121 				"cmdiocb: iotag (%d)\n",
15122 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
15123 		return;
15124 	}
15125 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
15126 	cmdiocbq->isr_timestamp = cq->isr_timestamp;
15127 #endif
15128 	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
15129 		spin_lock_irqsave(&phba->hbalock, iflags);
15130 		cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
15131 		spin_unlock_irqrestore(&phba->hbalock, iflags);
15132 	}
15133 
15134 	if (cmdiocbq->cmd_cmpl) {
15135 		/* For FCP the flag is cleared in cmd_cmpl */
15136 		if (!(cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
15137 		    cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) {
15138 			spin_lock_irqsave(&phba->hbalock, iflags);
15139 			cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
15140 			spin_unlock_irqrestore(&phba->hbalock, iflags);
15141 		}
15142 
15143 		/* Pass the cmd_iocb and the wcqe to the upper layer */
15144 		memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
15145 		       sizeof(struct lpfc_wcqe_complete));
15146 		cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq);
15147 	} else {
15148 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15149 				"0375 FCP cmdiocb has no callback function, "
15150 				"iotag: (%d)\n",
15151 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
15152 	}
15153 }
15154 
15155 /**
15156  * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
15157  * @phba: Pointer to HBA context object.
15158  * @cq: Pointer to completion queue.
15159  * @wcqe: Pointer to work-queue completion queue entry.
15160  *
15161  * This routine handles a fast-path WQ entry consumed event by invoking the
15162  * proper WQ release routine for the fast-path WQ.
15163 **/
15164 static void
15165 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15166 			     struct lpfc_wcqe_release *wcqe)
15167 {
15168 	struct lpfc_queue *childwq;
15169 	bool wqid_matched = false;
15170 	uint16_t hba_wqid;
15171 
15172 	/* Check for fast-path FCP work queue release */
15173 	hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
15174 	list_for_each_entry(childwq, &cq->child_list, list) {
15175 		if (childwq->queue_id == hba_wqid) {
15176 			lpfc_sli4_wq_release(childwq,
15177 					bf_get(lpfc_wcqe_r_wqe_index, wcqe));
15178 			if (childwq->q_flag & HBA_NVMET_WQFULL)
15179 				lpfc_nvmet_wqfull_process(phba, childwq);
15180 			wqid_matched = true;
15181 			break;
15182 		}
15183 	}
15184 	/* Report warning log message if no match found */
15185 	if (!wqid_matched)
15186 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15187 				"2580 Fast-path wqe consume event carries "
15188 				"mismatched qid: wcqe-qid=x%x\n", hba_wqid);
15189 }
15190 
15191 /**
15192  * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
15193  * @phba: Pointer to HBA context object.
15194  * @cq: Pointer to completion queue.
15195  * @rcqe: Pointer to receive-queue completion queue entry.
15196  *
15197  * This routine processes a receive-queue completion queue entry.
15198  *
15199  * Return: true if work posted to worker thread, otherwise false.
15200  **/
15201 static bool
15202 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15203 			    struct lpfc_rcqe *rcqe)
15204 {
15205 	bool workposted = false;
15206 	struct lpfc_queue *hrq;
15207 	struct lpfc_queue *drq;
15208 	struct rqb_dmabuf *dma_buf;
15209 	struct fc_frame_header *fc_hdr;
15210 	struct lpfc_nvmet_tgtport *tgtp;
15211 	uint32_t status, rq_id;
15212 	unsigned long iflags;
15213 	uint32_t fctl, idx;
15214 
15215 	if ((phba->nvmet_support == 0) ||
15216 	    (phba->sli4_hba.nvmet_cqset == NULL))
15217 		return workposted;
15218 
15219 	idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
15220 	hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
15221 	drq = phba->sli4_hba.nvmet_mrq_data[idx];
15222 
15223 	/* sanity check on queue memory */
15224 	if (unlikely(!hrq) || unlikely(!drq))
15225 		return workposted;
15226 
15227 	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
15228 		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
15229 	else
15230 		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
15231 
15232 	/* nvmet_support was already verified above; only match the RQ id */
15233 	if (rq_id != hrq->queue_id)
15234 		return workposted;
15235 
15236 	status = bf_get(lpfc_rcqe_status, rcqe);
15237 	switch (status) {
15238 	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
15239 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15240 				"6126 Receive Frame Truncated!!\n");
15241 		fallthrough;
15242 	case FC_STATUS_RQ_SUCCESS:
15243 		spin_lock_irqsave(&phba->hbalock, iflags);
15244 		lpfc_sli4_rq_release(hrq, drq);
15245 		dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
15246 		if (!dma_buf) {
15247 			hrq->RQ_no_buf_found++;
15248 			spin_unlock_irqrestore(&phba->hbalock, iflags);
15249 			goto out;
15250 		}
15251 		spin_unlock_irqrestore(&phba->hbalock, iflags);
15252 		hrq->RQ_rcv_buf++;
15253 		hrq->RQ_buf_posted--;
15254 		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
15255 
15256 		/* Just some basic sanity checks on FCP Command frame */
15257 		fctl = (fc_hdr->fh_f_ctl[0] << 16 |
15258 			fc_hdr->fh_f_ctl[1] << 8 |
15259 			fc_hdr->fh_f_ctl[2]);
15260 		if (((fctl &
15261 		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
15262 		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
15263 		    (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
15264 			goto drop;
15265 15266 if (fc_hdr->fh_type == FC_TYPE_FCP) { 15267 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe); 15268 lpfc_nvmet_unsol_fcp_event( 15269 phba, idx, dma_buf, cq->isr_timestamp, 15270 cq->q_flag & HBA_NVMET_CQ_NOTIFY); 15271 return false; 15272 } 15273 drop: 15274 lpfc_rq_buf_free(phba, &dma_buf->hbuf); 15275 break; 15276 case FC_STATUS_INSUFF_BUF_FRM_DISC: 15277 if (phba->nvmet_support) { 15278 tgtp = phba->targetport->private; 15279 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15280 "6401 RQE Error x%x, posted %d err_cnt " 15281 "%d: %x %x %x\n", 15282 status, hrq->RQ_buf_posted, 15283 hrq->RQ_no_posted_buf, 15284 atomic_read(&tgtp->rcv_fcp_cmd_in), 15285 atomic_read(&tgtp->rcv_fcp_cmd_out), 15286 atomic_read(&tgtp->xmt_fcp_release)); 15287 } 15288 fallthrough; 15289 15290 case FC_STATUS_INSUFF_BUF_NEED_BUF: 15291 hrq->RQ_no_posted_buf++; 15292 /* Post more buffers if possible */ 15293 break; 15294 case FC_STATUS_RQ_DMA_FAILURE: 15295 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15296 "2575 RQE DMA Error x%x, x%08x x%08x x%08x " 15297 "x%08x\n", 15298 status, rcqe->word0, rcqe->word1, 15299 rcqe->word2, rcqe->word3); 15300 15301 /* If IV set, no further recovery */ 15302 if (bf_get(lpfc_rcqe_iv, rcqe)) 15303 break; 15304 15305 /* recycle consumed resource */ 15306 spin_lock_irqsave(&phba->hbalock, iflags); 15307 lpfc_sli4_rq_release(hrq, drq); 15308 dma_buf = lpfc_sli_rqbuf_get(phba, hrq); 15309 if (!dma_buf) { 15310 hrq->RQ_no_buf_found++; 15311 spin_unlock_irqrestore(&phba->hbalock, iflags); 15312 break; 15313 } 15314 hrq->RQ_rcv_buf++; 15315 hrq->RQ_buf_posted--; 15316 spin_unlock_irqrestore(&phba->hbalock, iflags); 15317 lpfc_rq_buf_free(phba, &dma_buf->hbuf); 15318 break; 15319 default: 15320 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15321 "2576 Unexpected RQE Status x%x, w0-3 x%08x " 15322 "x%08x x%08x x%08x\n", 15323 status, rcqe->word0, rcqe->word1, 15324 rcqe->word2, rcqe->word3); 15325 break; 15326 } 15327 out: 15328 return workposted; 15329 } 15330 15331 /** 15332 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry 15333 * @phba: adapter with cq 15334 * @cq: Pointer to the completion queue. 15335 * @cqe: Pointer to fast-path completion queue entry. 15336 * 15337 * This routine process a fast-path work queue completion entry from fast-path 15338 * event queue for FCP command response completion. 15339 * 15340 * Return: true if work posted to worker thread, otherwise false. 
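 *
 * CQE code to action mapping (summary of the switch below):
 *
 *   CQE_CODE_COMPL_WQE / CQE_CODE_NVME_ERSP   command completion
 *   CQE_CODE_RELEASE_WQE                      WQ release accounting
 *   CQE_CODE_XRI_ABORTED                      XRI abort processing
 *   CQE_CODE_RECEIVE / CQE_CODE_RECEIVE_V1    NVMET unsolicited receive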
15341 **/ 15342 static bool 15343 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 15344 struct lpfc_cqe *cqe) 15345 { 15346 struct lpfc_wcqe_release wcqe; 15347 bool workposted = false; 15348 15349 /* Copy the work queue CQE and convert endian order if needed */ 15350 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); 15351 15352 /* Check and process for different type of WCQE and dispatch */ 15353 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { 15354 case CQE_CODE_COMPL_WQE: 15355 case CQE_CODE_NVME_ERSP: 15356 cq->CQ_wq++; 15357 /* Process the WQ complete event */ 15358 phba->last_completion_time = jiffies; 15359 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS) 15360 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, 15361 (struct lpfc_wcqe_complete *)&wcqe); 15362 break; 15363 case CQE_CODE_RELEASE_WQE: 15364 cq->CQ_release_wqe++; 15365 /* Process the WQ release event */ 15366 lpfc_sli4_fp_handle_rel_wcqe(phba, cq, 15367 (struct lpfc_wcqe_release *)&wcqe); 15368 break; 15369 case CQE_CODE_XRI_ABORTED: 15370 cq->CQ_xri_aborted++; 15371 /* Process the WQ XRI abort event */ 15372 phba->last_completion_time = jiffies; 15373 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 15374 (struct sli4_wcqe_xri_aborted *)&wcqe); 15375 break; 15376 case CQE_CODE_RECEIVE_V1: 15377 case CQE_CODE_RECEIVE: 15378 phba->last_completion_time = jiffies; 15379 if (cq->subtype == LPFC_NVMET) { 15380 workposted = lpfc_sli4_nvmet_handle_rcqe( 15381 phba, cq, (struct lpfc_rcqe *)&wcqe); 15382 } 15383 break; 15384 default: 15385 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15386 "0144 Not a valid CQE code: x%x\n", 15387 bf_get(lpfc_wcqe_c_code, &wcqe)); 15388 break; 15389 } 15390 return workposted; 15391 } 15392 15393 /** 15394 * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry 15395 * @cq: Pointer to CQ to be processed 15396 * 15397 * This routine calls the cq processing routine with the handler for 15398 * fast path CQEs. 15399 * 15400 * The CQ routine returns two values: the first is the calling status, 15401 * which indicates whether work was queued to the background discovery 15402 * thread. If true, the routine should wakeup the discovery thread; 15403 * the second is the delay parameter. If non-zero, rather than rearming 15404 * the CQ and yet another interrupt, the CQ handler should be queued so 15405 * that it is processed in a subsequent polling action. The value of 15406 * the delay indicates when to reschedule it. 
15407 **/
15408 static void
15409 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
15410 {
15411 	struct lpfc_hba *phba = cq->phba;
15412 	unsigned long delay;
15413 	bool workposted = false;
15414 	int ret;
15415 
15416 	/* process and rearm the CQ */
15417 	workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
15418 					     &delay);
15419 
15420 	if (delay) {
15421 		if (is_kdump_kernel())
15422 			ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
15423 						 delay);
15424 		else
15425 			ret = queue_delayed_work_on(cq->chann, phba->wq,
15426 						    &cq->sched_irqwork, delay);
15427 		if (!ret)
15428 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15429 					"0367 Cannot schedule queue work "
15430 					"for cqid=%d on CPU %d\n",
15431 					cq->queue_id, cq->chann);
15432 	}
15433 
15434 	/* wake up worker thread if there is work to be done */
15435 	if (workposted)
15436 		lpfc_worker_wake_up(phba);
15437 }
15438 
15439 /**
15440  * lpfc_sli4_hba_process_cq - fast-path work handler when started by
15441  * interrupt
15442  * @work: pointer to work element
15443  *
15444  * Translates from the work handler and calls the fast-path handler.
15445  **/
15446 static void
15447 lpfc_sli4_hba_process_cq(struct work_struct *work)
15448 {
15449 	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
15450 
15451 	__lpfc_sli4_hba_process_cq(cq);
15452 }
15453 
15454 /**
15455  * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
15456  * @phba: Pointer to HBA context object.
15457  * @eq: Pointer to the queue structure.
15458  * @eqe: Pointer to fast-path event queue entry.
15459  * @poll_mode: poll mode to use when processing the cq.
15460  *
15461  * This routine processes an event queue entry from the fast-path event queue.
15462  * It checks the MajorCode and MinorCode to determine whether this is a
15463  * completion event on a completion queue; if not, an error is logged and
15464  * the routine returns. Otherwise, it finds the corresponding completion
15465  * queue, processes all the entries on that completion queue, rearms the
15466  * completion queue, and then returns.
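 *
 * CQ lookup is O(1) through the cq_lookup table when the CQ id is in
 * range (sketch of the fast path below):
 *
 *   cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
 *   if (cqid <= phba->sli4_hba.cq_max)
 *           cq = phba->sli4_hba.cq_lookup[cqid];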
15467 **/ 15468 static void 15469 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, 15470 struct lpfc_eqe *eqe, enum lpfc_poll_mode poll_mode) 15471 { 15472 struct lpfc_queue *cq = NULL; 15473 uint32_t qidx = eq->hdwq; 15474 uint16_t cqid, id; 15475 int ret; 15476 15477 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 15478 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15479 "0366 Not a valid completion " 15480 "event: majorcode=x%x, minorcode=x%x\n", 15481 bf_get_le32(lpfc_eqe_major_code, eqe), 15482 bf_get_le32(lpfc_eqe_minor_code, eqe)); 15483 return; 15484 } 15485 15486 /* Get the reference to the corresponding CQ */ 15487 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 15488 15489 /* Use the fast lookup method first */ 15490 if (cqid <= phba->sli4_hba.cq_max) { 15491 cq = phba->sli4_hba.cq_lookup[cqid]; 15492 if (cq) 15493 goto work_cq; 15494 } 15495 15496 /* Next check for NVMET completion */ 15497 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) { 15498 id = phba->sli4_hba.nvmet_cqset[0]->queue_id; 15499 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) { 15500 /* Process NVMET unsol rcv */ 15501 cq = phba->sli4_hba.nvmet_cqset[cqid - id]; 15502 goto process_cq; 15503 } 15504 } 15505 15506 if (phba->sli4_hba.nvmels_cq && 15507 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) { 15508 /* Process NVME unsol rcv */ 15509 cq = phba->sli4_hba.nvmels_cq; 15510 } 15511 15512 /* Otherwise this is a Slow path event */ 15513 if (cq == NULL) { 15514 lpfc_sli4_sp_handle_eqe(phba, eqe, 15515 phba->sli4_hba.hdwq[qidx].hba_eq); 15516 return; 15517 } 15518 15519 process_cq: 15520 if (unlikely(cqid != cq->queue_id)) { 15521 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15522 "0368 Miss-matched fast-path completion " 15523 "queue identifier: eqcqid=%d, fcpcqid=%d\n", 15524 cqid, cq->queue_id); 15525 return; 15526 } 15527 15528 work_cq: 15529 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS) 15530 if (phba->ktime_on) 15531 cq->isr_timestamp = ktime_get_ns(); 15532 else 15533 cq->isr_timestamp = 0; 15534 #endif 15535 15536 switch (poll_mode) { 15537 case LPFC_THREADED_IRQ: 15538 __lpfc_sli4_hba_process_cq(cq); 15539 break; 15540 case LPFC_QUEUE_WORK: 15541 default: 15542 if (is_kdump_kernel()) 15543 ret = queue_work(phba->wq, &cq->irqwork); 15544 else 15545 ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork); 15546 if (!ret) 15547 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15548 "0383 Cannot schedule queue work " 15549 "for CQ eqcqid=%d, cqid=%d on CPU %d\n", 15550 cqid, cq->queue_id, 15551 raw_smp_processor_id()); 15552 break; 15553 } 15554 } 15555 15556 /** 15557 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer 15558 * @work: pointer to work element 15559 * 15560 * translates from the work handler and calls the fast-path handler. 15561 **/ 15562 static void 15563 lpfc_sli4_dly_hba_process_cq(struct work_struct *work) 15564 { 15565 struct lpfc_queue *cq = container_of(to_delayed_work(work), 15566 struct lpfc_queue, sched_irqwork); 15567 15568 __lpfc_sli4_hba_process_cq(cq); 15569 } 15570 15571 /** 15572 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device 15573 * @irq: Interrupt number. 15574 * @dev_id: The device context pointer. 15575 * 15576 * This function is directly called from the PCI layer as an interrupt 15577 * service routine when device with SLI-4 interface spec is enabled with 15578 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 15579 * ring event in the HBA. 
However, when the device is enabled with either
15580  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
15581  * device-level interrupt handler. When the PCI slot is in error recovery
15582  * or the HBA is undergoing initialization, the interrupt handler will not
15583  * process the interrupt. The SCSI FCP fast-path ring events are handled in
15584  * the interrupt context. This function is called without any lock held.
15585  * It gets the hbalock to access and update SLI data structures. Note that
15586  * the FCP EQs and FCP CQs are mapped one-to-one, so the FCP EQ index is
15587  * equal to the FCP CQ index.
15588  *
15589  * The link attention and ELS ring attention events are handled
15590  * by the worker thread. The interrupt handler signals the worker thread
15591  * and returns for these events. This function is called without any lock
15592  * held. It gets the hbalock to access and update SLI data structures.
15593  *
15594  * This function returns IRQ_HANDLED when the interrupt is handled, IRQ_WAKE_THREAD
15595  * when the interrupt is scheduled to be handled from a threaded irq context, or
15596  * else returns IRQ_NONE.
15597  **/
15598 irqreturn_t
15599 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
15600 {
15601 	struct lpfc_hba *phba;
15602 	struct lpfc_hba_eq_hdl *hba_eq_hdl;
15603 	struct lpfc_queue *fpeq;
15604 	unsigned long iflag;
15605 	int hba_eqidx;
15606 	int ecount = 0;
15607 	struct lpfc_eq_intr_info *eqi;
15608 
15609 	/* Get the driver's phba structure from the dev_id */
15610 	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
15611 	phba = hba_eq_hdl->phba;
15612 	hba_eqidx = hba_eq_hdl->idx;
15613 
15614 	if (unlikely(!phba))
15615 		return IRQ_NONE;
15616 	if (unlikely(!phba->sli4_hba.hdwq))
15617 		return IRQ_NONE;
15618 
15619 	/* Get to the EQ struct associated with this vector */
15620 	fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
15621 	if (unlikely(!fpeq))
15622 		return IRQ_NONE;
15623 
15624 	/* Check device state for handling interrupt */
15625 	if (unlikely(lpfc_intr_state_check(phba))) {
15626 		/* Check again for link_state with lock held */
15627 		spin_lock_irqsave(&phba->hbalock, iflag);
15628 		if (phba->link_state < LPFC_LINK_DOWN)
15629 			/* Flush, clear interrupt, and rearm the EQ */
15630 			lpfc_sli4_eqcq_flush(phba, fpeq);
15631 		spin_unlock_irqrestore(&phba->hbalock, iflag);
15632 		return IRQ_NONE;
15633 	}
15634 
15635 	switch (fpeq->poll_mode) {
15636 	case LPFC_THREADED_IRQ:
15637 		/* CGN mgmt is mutually exclusive from irq processing */
15638 		if (phba->cmf_active_mode == LPFC_CFG_OFF)
15639 			return IRQ_WAKE_THREAD;
15640 		fallthrough;
15641 	case LPFC_QUEUE_WORK:
15642 	default:
15643 		eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
15644 		eqi->icnt++;
15645 
15646 		fpeq->last_cpu = raw_smp_processor_id();
15647 
15648 		if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
15649 		    fpeq->q_flag & HBA_EQ_DELAY_CHK &&
15650 		    phba->cfg_auto_imax &&
15651 		    fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
15652 		    phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
15653 			lpfc_sli4_mod_hba_eq_delay(phba, fpeq,
15654 						   LPFC_MAX_AUTO_EQ_DELAY);
15655 
15656 		/* process and rearm the EQ */
15657 		ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
15658 					      LPFC_QUEUE_WORK);
15659 
15660 		if (unlikely(ecount == 0)) {
15661 			fpeq->EQ_no_entry++;
15662 			if (phba->intr_type == MSIX)
15663 				/* MSI-X vector owns this EQ: no EQE means a spurious interrupt */
15664 				lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15665 						"0358 MSI-X interrupt with no EQE\n");
15666 			else
15667 				/* INTx/MSI may be shared: an empty EQ means it was not ours */
15668 				return IRQ_NONE;
15669 		}
15670 } 15671 15672 return IRQ_HANDLED; 15673 } /* lpfc_sli4_hba_intr_handler */ 15674 15675 /** 15676 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device 15677 * @irq: Interrupt number. 15678 * @dev_id: The device context pointer. 15679 * 15680 * This function is the device-level interrupt handler to device with SLI-4 15681 * interface spec, called from the PCI layer when either MSI or Pin-IRQ 15682 * interrupt mode is enabled and there is an event in the HBA which requires 15683 * driver attention. This function invokes the slow-path interrupt attention 15684 * handling function and fast-path interrupt attention handling function in 15685 * turn to process the relevant HBA attention events. This function is called 15686 * without any lock held. It gets the hbalock to access and update SLI data 15687 * structures. 15688 * 15689 * This function returns IRQ_HANDLED when interrupt is handled, else it 15690 * returns IRQ_NONE. 15691 **/ 15692 irqreturn_t 15693 lpfc_sli4_intr_handler(int irq, void *dev_id) 15694 { 15695 struct lpfc_hba *phba; 15696 irqreturn_t hba_irq_rc; 15697 bool hba_handled = false; 15698 int qidx; 15699 15700 /* Get the driver's phba structure from the dev_id */ 15701 phba = (struct lpfc_hba *)dev_id; 15702 15703 if (unlikely(!phba)) 15704 return IRQ_NONE; 15705 15706 /* 15707 * Invoke fast-path host attention interrupt handling as appropriate. 15708 */ 15709 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 15710 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq, 15711 &phba->sli4_hba.hba_eq_hdl[qidx]); 15712 if (hba_irq_rc == IRQ_HANDLED) 15713 hba_handled |= true; 15714 } 15715 15716 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE; 15717 } /* lpfc_sli4_intr_handler */ 15718 15719 void lpfc_sli4_poll_hbtimer(struct timer_list *t) 15720 { 15721 struct lpfc_hba *phba = timer_container_of(phba, t, cpuhp_poll_timer); 15722 struct lpfc_queue *eq; 15723 15724 rcu_read_lock(); 15725 15726 list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list) 15727 lpfc_sli4_poll_eq(eq); 15728 if (!list_empty(&phba->poll_list)) 15729 mod_timer(&phba->cpuhp_poll_timer, 15730 jiffies + msecs_to_jiffies(LPFC_POLL_HB)); 15731 15732 rcu_read_unlock(); 15733 } 15734 15735 static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq) 15736 { 15737 struct lpfc_hba *phba = eq->phba; 15738 15739 /* kickstart slowpath processing if needed */ 15740 if (list_empty(&phba->poll_list)) 15741 mod_timer(&phba->cpuhp_poll_timer, 15742 jiffies + msecs_to_jiffies(LPFC_POLL_HB)); 15743 15744 list_add_rcu(&eq->_poll_list, &phba->poll_list); 15745 synchronize_rcu(); 15746 } 15747 15748 static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq) 15749 { 15750 struct lpfc_hba *phba = eq->phba; 15751 15752 /* Disable slowpath processing for this eq. 
	 * expected to re-arm the eq so that the interrupt path takes
	 * over again ASAP.
	 */
	list_del_rcu(&eq->_poll_list);
	synchronize_rcu();

	if (list_empty(&phba->poll_list))
		timer_delete_sync(&phba->cpuhp_poll_timer);
}

void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
{
	struct lpfc_queue *eq, *next;

	list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
		list_del(&eq->_poll_list);

	INIT_LIST_HEAD(&phba->poll_list);
	synchronize_rcu();
}

static inline void
__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
{
	if (mode == eq->mode)
		return;
	/*
	 * Currently this function is only called during a hotplug event,
	 * and the cpu on which it is executing is going offline. By now
	 * the hotplug code has instructed the scheduler to remove this
	 * cpu from the cpu active mask, so we don't need to worry about
	 * being set aside by the scheduler for a high priority process.
	 * Interrupts can still arrive, but they are known to retire ASAP.
	 */

	/* Disable polling in the fastpath */
	WRITE_ONCE(eq->mode, mode);
	/* flush out the store buffer */
	smp_wmb();

	/*
	 * Add this eq to the polling list and start polling. For a grace
	 * period both the interrupt handler and the poller will try to
	 * process the eq _but_ that's fine. We have a synchronization
	 * mechanism in place (queue_claimed) to deal with it. This is
	 * just a draining phase for the interrupt handler (not the eq's),
	 * as we have guaranteed through the barrier that all the CPUs
	 * have seen the new CQ_POLLED state, which effectively disables
	 * the re-arming of the EQ. The whole idea is that the eq's die
	 * off eventually, as we are no longer re-arming them.
	 */
	mode ? lpfc_sli4_add_to_poll_list(eq) :
	       lpfc_sli4_remove_from_poll_list(eq);
}

void lpfc_sli4_start_polling(struct lpfc_queue *eq)
{
	__lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
}

void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
{
	struct lpfc_hba *phba = eq->phba;

	__lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);

	/* Kick start the pending I/Os in h/w. Once we switch back to
	 * interrupt processing on an eq, the I/O path only arms the eq
	 * when it receives a completion. But since the eq is in the
	 * disarmed state it never receives one, which would create a
	 * deadlock scenario. Re-arm the eq here to break the cycle.
	 */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
}

/**
 * lpfc_sli4_queue_free - free a queue structure and associated memory
 * @queue: The queue structure to free.
 *
 * This function frees a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called after destroying the
 * queue on the HBA.
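 *
 * Teardown sketch (illustrative only; assumes the queue was created on
 * the port, e.g. via lpfc_eq_create(), and pairs with the matching
 * destroy routine in this file):
 *
 *	rc = lpfc_eq_destroy(phba, eq);
 *	if (!rc)
 *		lpfc_sli4_queue_free(eq);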
 **/
void
lpfc_sli4_queue_free(struct lpfc_queue *queue)
{
	struct lpfc_dmabuf *dmabuf;

	if (!queue)
		return;

	if (!list_empty(&queue->wq_list))
		list_del(&queue->wq_list);

	while (!list_empty(&queue->page_list)) {
		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
				 list);
		dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	if (queue->rqbp) {
		lpfc_free_rq_buffer(queue->phba, queue);
		kfree(queue->rqbp);
	}

	if (!list_empty(&queue->cpu_list))
		list_del(&queue->cpu_list);

	kfree(queue);
}

/**
 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
 * @phba: The HBA that this queue is being created on.
 * @page_size: The size of a queue page.
 * @entry_size: The size of each queue entry for this queue.
 * @entry_count: The number of entries that this queue will handle.
 * @cpu: The cpu that will primarily utilize this queue.
 *
 * This function allocates a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called before creating the
 * queue on the HBA.
 **/
struct lpfc_queue *
lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
		      uint32_t entry_size, uint32_t entry_count, int cpu)
{
	struct lpfc_queue *queue;
	struct lpfc_dmabuf *dmabuf;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	uint16_t x, pgcnt;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = page_size;

	pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;

	/* If needed, adjust page count to match the max the adapter supports */
	if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
		pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;

	queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!queue)
		return NULL;

	INIT_LIST_HEAD(&queue->list);
	INIT_LIST_HEAD(&queue->_poll_list);
	INIT_LIST_HEAD(&queue->wq_list);
	INIT_LIST_HEAD(&queue->wqfull_list);
	INIT_LIST_HEAD(&queue->page_list);
	INIT_LIST_HEAD(&queue->child_list);
	INIT_LIST_HEAD(&queue->cpu_list);

	/* Set queue parameters now. If the system cannot provide memory
	 * resources, the free routine needs to know what was allocated.
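	 * As a sizing example: 1024 entries of 64 bytes each occupy
	 * 65536 bytes, which ALIGN() rounds to sixteen 4096-byte
	 * hardware pages, so pgcnt above would be 16.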
	 */
	queue->page_count = pgcnt;
	queue->q_pgs = (void **)&queue[1];
	queue->entry_cnt_per_pg = hw_page_size / entry_size;
	queue->entry_size = entry_size;
	queue->entry_count = entry_count;
	queue->page_size = hw_page_size;
	queue->phba = phba;

	for (x = 0; x < queue->page_count; x++) {
		dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
				      dev_to_node(&phba->pcidev->dev));
		if (!dmabuf)
			goto out_fail;
		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
						  hw_page_size, &dmabuf->phys,
						  GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			goto out_fail;
		}
		dmabuf->buffer_tag = x;
		list_add_tail(&dmabuf->list, &queue->page_list);
		/* use lpfc_sli4_qe to index a particular entry in this page */
		queue->q_pgs[x] = dmabuf->virt;
	}
	INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
	INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
	INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
	INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);

	/* notify_interval will be set during q creation */

	return queue;
out_fail:
	lpfc_sli4_queue_free(queue);
	return NULL;
}

/**
 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
 * @phba: HBA structure that indicates port to create a queue on.
 * @pci_barset: PCI BAR set flag.
 *
 * This function returns the host memory address previously iomapped for the
 * specified PCI BAR set, if any. The returned address may be NULL if the
 * BAR set is unknown or was never mapped.
 */
static void __iomem *
lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
{
	if (!phba->pcidev)
		return NULL;

	switch (pci_barset) {
	case WQ_PCI_BAR_0_AND_1:
		return phba->pci_bar0_memmap_p;
	case WQ_PCI_BAR_2_AND_3:
		return phba->pci_bar2_memmap_p;
	case WQ_PCI_BAR_4_AND_5:
		return phba->pci_bar4_memmap_p;
	default:
		break;
	}
	return NULL;
}

static __maybe_unused void __iomem *
lpfc_dpp_wc_map(struct lpfc_hba *phba, uint8_t dpp_barset)
{
	/* DPP region is supposed to cover 64-bit BAR2 */
	if (dpp_barset != WQ_PCI_BAR_4_AND_5) {
		lpfc_log_msg(phba, KERN_WARNING, LOG_INIT,
			     "3273 dpp_barset x%x != WQ_PCI_BAR_4_AND_5\n",
			     dpp_barset);
		return NULL;
	}

	if (!phba->sli4_hba.dpp_regs_memmap_wc_p) {
		void __iomem *dpp_map;

		dpp_map = ioremap_wc(phba->pci_bar2_map,
				     pci_resource_len(phba->pcidev,
						      PCI_64BIT_BAR4));
		if (dpp_map)
			phba->sli4_hba.dpp_regs_memmap_wc_p = dpp_map;
	}

	return phba->sli4_hba.dpp_regs_memmap_wc_p;
}

/**
 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
 * @phba: HBA structure that EQs are on.
 * @startq: The starting EQ index to modify
 * @numq: The number of EQs (consecutive indexes) to modify
 * @usdelay: amount of delay, in microseconds
 *
 * This function revises the EQ delay on 1 or more EQs. The EQ delay
 * is set either by writing to a register (if supported by the SLI Port)
 * or by mailbox command. The mailbox command allows several EQs to be
 * updated at once.
 *
 * The @phba struct is used to send a mailbox command to the HBA.
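 *
 * Usage sketch (illustrative): to request roughly 16 usec of interrupt
 * coalescing on every EQ, starting at index 0:
 *
 *	lpfc_modify_hba_eq_delay(phba, 0, phba->cfg_irq_chann, 16);
 *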
 * The @startq is used to get the starting EQ index to change. The @numq
 * value is used to specify how many consecutive EQ indexes, starting at
 * EQ index, are to be changed. When the mailbox path is used, the command
 * is issued in polled mode, so this function waits for it to finish
 * before returning.
 *
 * This function does not return a value; a failure to allocate the
 * mailbox or to issue the command is logged instead. Note that on such a
 * failure some EQs may already have had their delay multiplier changed.
 **/
void
lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
			 uint32_t numq, uint32_t usdelay)
{
	struct lpfc_mbx_modify_eq_delay *eq_delay;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_queue *eq;
	int cnt = 0, rc, length;
	uint32_t shdr_status, shdr_add_status;
	uint32_t dmult;
	int qidx;
	union lpfc_sli4_cfg_shdr *shdr;

	if (startq >= phba->cfg_irq_chann)
		return;

	if (usdelay > 0xFFFF) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
				"6429 usdelay %d too large. Scaled down to "
				"0xFFFF.\n", usdelay);
		usdelay = 0xFFFF;
	}

	/* set values by EQ_DELAY register if supported */
	if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
		for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
			eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
			if (!eq)
				continue;

			lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);

			if (++cnt >= numq)
				break;
		}
		return;
	}

	/* Otherwise, set values by mailbox cmd */

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6428 Failed allocating mailbox cmd buffer."
				" EQ delay was not set.\n");
		return;
	}
	length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_delay = &mbox->u.mqe.un.eq_delay;

	/* Calculate the delay multiplier from the max interrupts per second */
	dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
	if (dmult)
		dmult--;
	if (dmult > LPFC_DMULT_MAX)
		dmult = LPFC_DMULT_MAX;

	for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
		eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
		if (!eq)
			continue;
		eq->q_mode = usdelay;
		eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
		eq_delay->u.request.eq[cnt].phase = 0;
		eq_delay->u.request.eq[cnt].delay_multi = dmult;

		if (++cnt >= numq)
			break;
	}
	eq_delay->u.request.num_eq = cnt;

	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->ctx_ndlp = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2512 MODIFY_EQ_DELAY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
	}
	mempool_free(mbox, phba->mbox_mem_pool);
}

/**
 * lpfc_eq_create - Create an Event Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @eq: The queue structure to use to create the event queue.
 * @imax: The maximum interrupts per second limit.
 *
 * This function creates an event queue, as detailed in @eq, on a port,
 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @eq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function will send the EQ_CREATE mailbox command to the HBA to setup the
 * event queue. The mailbox is issued in polled mode, so this function waits
 * for the command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
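 *
 * Allocation/creation sketch (illustrative; eq_esize and eq_ecount are
 * the EQ sizing fields this driver uses elsewhere):
 *
 *	eq = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 *				   phba->sli4_hba.eq_esize,
 *				   phba->sli4_hba.eq_ecount, cpu);
 *	if (eq)
 *		rc = lpfc_eq_create(phba, eq, phba->cfg_fcp_imax);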
16140 **/ 16141 int 16142 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) 16143 { 16144 struct lpfc_mbx_eq_create *eq_create; 16145 LPFC_MBOXQ_t *mbox; 16146 int rc, length, status = 0; 16147 struct lpfc_dmabuf *dmabuf; 16148 uint32_t shdr_status, shdr_add_status; 16149 union lpfc_sli4_cfg_shdr *shdr; 16150 uint16_t dmult; 16151 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 16152 16153 /* sanity check on queue memory */ 16154 if (!eq) 16155 return -ENODEV; 16156 if (!phba->sli4_hba.pc_sli4_params.supported) 16157 hw_page_size = SLI4_PAGE_SIZE; 16158 16159 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16160 if (!mbox) 16161 return -ENOMEM; 16162 length = (sizeof(struct lpfc_mbx_eq_create) - 16163 sizeof(struct lpfc_sli4_cfg_mhdr)); 16164 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 16165 LPFC_MBOX_OPCODE_EQ_CREATE, 16166 length, LPFC_SLI4_MBX_EMBED); 16167 eq_create = &mbox->u.mqe.un.eq_create; 16168 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; 16169 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, 16170 eq->page_count); 16171 bf_set(lpfc_eq_context_size, &eq_create->u.request.context, 16172 LPFC_EQE_SIZE); 16173 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); 16174 16175 /* Use version 2 of CREATE_EQ if eqav is set */ 16176 if (phba->sli4_hba.pc_sli4_params.eqav) { 16177 bf_set(lpfc_mbox_hdr_version, &shdr->request, 16178 LPFC_Q_CREATE_VERSION_2); 16179 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context, 16180 phba->sli4_hba.pc_sli4_params.eqav); 16181 } 16182 16183 /* don't setup delay multiplier using EQ_CREATE */ 16184 dmult = 0; 16185 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, 16186 dmult); 16187 switch (eq->entry_count) { 16188 default: 16189 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16190 "0360 Unsupported EQ count. 
(%d)\n", 16191 eq->entry_count); 16192 if (eq->entry_count < 256) { 16193 status = -EINVAL; 16194 goto out; 16195 } 16196 fallthrough; /* otherwise default to smallest count */ 16197 case 256: 16198 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 16199 LPFC_EQ_CNT_256); 16200 break; 16201 case 512: 16202 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 16203 LPFC_EQ_CNT_512); 16204 break; 16205 case 1024: 16206 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 16207 LPFC_EQ_CNT_1024); 16208 break; 16209 case 2048: 16210 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 16211 LPFC_EQ_CNT_2048); 16212 break; 16213 case 4096: 16214 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 16215 LPFC_EQ_CNT_4096); 16216 break; 16217 } 16218 list_for_each_entry(dmabuf, &eq->page_list, list) { 16219 memset(dmabuf->virt, 0, hw_page_size); 16220 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 16221 putPaddrLow(dmabuf->phys); 16222 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 16223 putPaddrHigh(dmabuf->phys); 16224 } 16225 mbox->vport = phba->pport; 16226 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16227 mbox->ctx_buf = NULL; 16228 mbox->ctx_ndlp = NULL; 16229 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16230 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16231 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16232 if (shdr_status || shdr_add_status || rc) { 16233 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16234 "2500 EQ_CREATE mailbox failed with " 16235 "status x%x add_status x%x, mbx status x%x\n", 16236 shdr_status, shdr_add_status, rc); 16237 status = -ENXIO; 16238 } 16239 eq->type = LPFC_EQ; 16240 eq->subtype = LPFC_NONE; 16241 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); 16242 if (eq->queue_id == 0xFFFF) 16243 status = -ENXIO; 16244 eq->host_index = 0; 16245 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL; 16246 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT; 16247 out: 16248 mempool_free(mbox, phba->mbox_mem_pool); 16249 return status; 16250 } 16251 16252 /** 16253 * lpfc_sli4_hba_intr_handler_th - SLI4 HBA threaded interrupt handler 16254 * @irq: Interrupt number. 16255 * @dev_id: The device context pointer. 16256 * 16257 * This routine is a mirror of lpfc_sli4_hba_intr_handler, but executed within 16258 * threaded irq context. 
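 *
 * Wiring sketch (illustrative; the real request is made at attach time):
 * this routine is registered as the thread_fn of the same vector whose
 * hard handler can return IRQ_WAKE_THREAD:
 *
 *	rc = request_threaded_irq(irq, lpfc_sli4_hba_intr_handler,
 *				  lpfc_sli4_hba_intr_handler_th, 0, name,
 *				  &phba->sli4_hba.hba_eq_hdl[idx]);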
 *
 * Returns
 * IRQ_HANDLED - interrupt is handled
 * IRQ_NONE - otherwise
 **/
irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_hba_eq_hdl *hba_eq_hdl;
	struct lpfc_queue *fpeq;
	int ecount = 0;
	int hba_eqidx;
	struct lpfc_eq_intr_info *eqi;

	/* Get the driver's phba structure from the dev_id */
	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
	phba = hba_eq_hdl->phba;
	hba_eqidx = hba_eq_hdl->idx;

	if (unlikely(!phba))
		return IRQ_NONE;
	if (unlikely(!phba->sli4_hba.hdwq))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
	if (unlikely(!fpeq))
		return IRQ_NONE;

	eqi = per_cpu_ptr(phba->sli4_hba.eq_info, raw_smp_processor_id());
	eqi->icnt++;

	fpeq->last_cpu = raw_smp_processor_id();

	if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
	    fpeq->q_flag & HBA_EQ_DELAY_CHK &&
	    phba->cfg_auto_imax &&
	    fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
	    phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
		lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);

	/* process and rearm the EQ */
	ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
				      LPFC_THREADED_IRQ);

	if (unlikely(ecount == 0)) {
		fpeq->EQ_no_entry++;
		if (phba->intr_type == MSIX)
			/* An MSI-X vector is not shared, so no EQE means
			 * a spurious interrupt; just log it.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3358 MSI-X interrupt with no EQE\n");
		else
			/* INTx/MSI may be shared; the event may belong
			 * to another device on the line.
			 */
			return IRQ_NONE;
	}
	return IRQ_HANDLED;
}

/**
 * lpfc_cq_create - Create a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @cq: The queue structure to use to create the completion queue.
 * @eq: The event queue to bind this completion queue to.
 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
 *
 * This function creates a completion queue, as detailed in @cq, on a port,
 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @cq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @eq
 * is used to indicate which event queue to bind this completion queue to.
 * This function will send the CQ_CREATE mailbox command to the HBA to setup
 * the completion queue. The mailbox is issued in polled mode, so this
 * function waits for the command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
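 *
 * Binding sketch (illustrative): the EQ must already exist on the port,
 * e.g. for a slow-path ELS completion queue:
 *
 *	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
 *			    phba->sli4_hba.hdwq[0].hba_eq,
 *			    LPFC_WCQ, LPFC_ELS);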
16339 **/ 16340 int 16341 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, 16342 struct lpfc_queue *eq, uint32_t type, uint32_t subtype) 16343 { 16344 struct lpfc_mbx_cq_create *cq_create; 16345 struct lpfc_dmabuf *dmabuf; 16346 LPFC_MBOXQ_t *mbox; 16347 int rc, length, status = 0; 16348 uint32_t shdr_status, shdr_add_status; 16349 union lpfc_sli4_cfg_shdr *shdr; 16350 16351 /* sanity check on queue memory */ 16352 if (!cq || !eq) 16353 return -ENODEV; 16354 16355 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16356 if (!mbox) 16357 return -ENOMEM; 16358 length = (sizeof(struct lpfc_mbx_cq_create) - 16359 sizeof(struct lpfc_sli4_cfg_mhdr)); 16360 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 16361 LPFC_MBOX_OPCODE_CQ_CREATE, 16362 length, LPFC_SLI4_MBX_EMBED); 16363 cq_create = &mbox->u.mqe.un.cq_create; 16364 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; 16365 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, 16366 cq->page_count); 16367 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); 16368 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); 16369 bf_set(lpfc_mbox_hdr_version, &shdr->request, 16370 phba->sli4_hba.pc_sli4_params.cqv); 16371 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { 16372 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 16373 (cq->page_size / SLI4_PAGE_SIZE)); 16374 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, 16375 eq->queue_id); 16376 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context, 16377 phba->sli4_hba.pc_sli4_params.cqav); 16378 } else { 16379 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, 16380 eq->queue_id); 16381 } 16382 switch (cq->entry_count) { 16383 case 2048: 16384 case 4096: 16385 if (phba->sli4_hba.pc_sli4_params.cqv == 16386 LPFC_Q_CREATE_VERSION_2) { 16387 cq_create->u.request.context.lpfc_cq_context_count = 16388 cq->entry_count; 16389 bf_set(lpfc_cq_context_count, 16390 &cq_create->u.request.context, 16391 LPFC_CQ_CNT_WORD7); 16392 break; 16393 } 16394 fallthrough; 16395 default: 16396 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16397 "0361 Unsupported CQ count: " 16398 "entry cnt %d sz %d pg cnt %d\n", 16399 cq->entry_count, cq->entry_size, 16400 cq->page_count); 16401 if (cq->entry_count < 256) { 16402 status = -EINVAL; 16403 goto out; 16404 } 16405 fallthrough; /* otherwise default to smallest count */ 16406 case 256: 16407 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 16408 LPFC_CQ_CNT_256); 16409 break; 16410 case 512: 16411 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 16412 LPFC_CQ_CNT_512); 16413 break; 16414 case 1024: 16415 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 16416 LPFC_CQ_CNT_1024); 16417 break; 16418 } 16419 list_for_each_entry(dmabuf, &cq->page_list, list) { 16420 memset(dmabuf->virt, 0, cq->page_size); 16421 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 16422 putPaddrLow(dmabuf->phys); 16423 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 16424 putPaddrHigh(dmabuf->phys); 16425 } 16426 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16427 16428 /* The IOCTL status is embedded in the mailbox subheader. 
 */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2501 CQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
	if (cq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	/* link the cq onto the parent eq child list */
	list_add_tail(&cq->list, &eq->child_list);
	/* Set up completion queue's type and subtype */
	cq->type = type;
	cq->subtype = subtype;
	cq->assoc_qid = eq->queue_id;
	cq->assoc_qp = eq;
	cq->host_index = 0;
	cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
	cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);

	if (cq->queue_id > phba->sli4_hba.cq_max)
		phba->sli4_hba.cq_max = cq->queue_id;
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
 * @phba: HBA structure that indicates port to create a queue on.
 * @cqp: The queue structure array to use to create the completion queues.
 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
 *
 * This function creates a set of completion queues to support MRQ, as
 * detailed in @cqp, on a port described by @phba, by sending a
 * CREATE_CQ_SET mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @cqp array
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for each queue. The
 * @hdwq array is used to indicate which event queue to bind each completion
 * queue to. This function will send the CREATE_CQ_SET mailbox command to the
 * HBA to setup the completion queue set. The mailbox is issued in polled
 * mode, so this function waits for the command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
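 *
 * Call sketch (illustrative; mirrors how the NVMET MRQ completion queue
 * set is sized by phba->cfg_nvmet_mrq):
 *
 *	rc = lpfc_cq_create_set(phba, phba->sli4_hba.nvmet_cqset,
 *				phba->sli4_hba.hdwq, LPFC_WCQ, LPFC_NVMET);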
16486 **/ 16487 int 16488 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, 16489 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type, 16490 uint32_t subtype) 16491 { 16492 struct lpfc_queue *cq; 16493 struct lpfc_queue *eq; 16494 struct lpfc_mbx_cq_create_set *cq_set; 16495 struct lpfc_dmabuf *dmabuf; 16496 LPFC_MBOXQ_t *mbox; 16497 int rc, length, alloclen, status = 0; 16498 int cnt, idx, numcq, page_idx = 0; 16499 uint32_t shdr_status, shdr_add_status; 16500 union lpfc_sli4_cfg_shdr *shdr; 16501 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 16502 16503 /* sanity check on queue memory */ 16504 numcq = phba->cfg_nvmet_mrq; 16505 if (!cqp || !hdwq || !numcq) 16506 return -ENODEV; 16507 16508 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16509 if (!mbox) 16510 return -ENOMEM; 16511 16512 length = sizeof(struct lpfc_mbx_cq_create_set); 16513 length += ((numcq * cqp[0]->page_count) * 16514 sizeof(struct dma_address)); 16515 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16516 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length, 16517 LPFC_SLI4_MBX_NEMBED); 16518 if (alloclen < length) { 16519 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16520 "3098 Allocated DMA memory size (%d) is " 16521 "less than the requested DMA memory size " 16522 "(%d)\n", alloclen, length); 16523 status = -ENOMEM; 16524 goto out; 16525 } 16526 cq_set = mbox->sge_array->addr[0]; 16527 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr; 16528 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0); 16529 16530 for (idx = 0; idx < numcq; idx++) { 16531 cq = cqp[idx]; 16532 eq = hdwq[idx].hba_eq; 16533 if (!cq || !eq) { 16534 status = -ENOMEM; 16535 goto out; 16536 } 16537 if (!phba->sli4_hba.pc_sli4_params.supported) 16538 hw_page_size = cq->page_size; 16539 16540 switch (idx) { 16541 case 0: 16542 bf_set(lpfc_mbx_cq_create_set_page_size, 16543 &cq_set->u.request, 16544 (hw_page_size / SLI4_PAGE_SIZE)); 16545 bf_set(lpfc_mbx_cq_create_set_num_pages, 16546 &cq_set->u.request, cq->page_count); 16547 bf_set(lpfc_mbx_cq_create_set_evt, 16548 &cq_set->u.request, 1); 16549 bf_set(lpfc_mbx_cq_create_set_valid, 16550 &cq_set->u.request, 1); 16551 bf_set(lpfc_mbx_cq_create_set_cqe_size, 16552 &cq_set->u.request, 0); 16553 bf_set(lpfc_mbx_cq_create_set_num_cq, 16554 &cq_set->u.request, numcq); 16555 bf_set(lpfc_mbx_cq_create_set_autovalid, 16556 &cq_set->u.request, 16557 phba->sli4_hba.pc_sli4_params.cqav); 16558 switch (cq->entry_count) { 16559 case 2048: 16560 case 4096: 16561 if (phba->sli4_hba.pc_sli4_params.cqv == 16562 LPFC_Q_CREATE_VERSION_2) { 16563 bf_set(lpfc_mbx_cq_create_set_cqe_cnt_lo, 16564 &cq_set->u.request, 16565 cq->entry_count); 16566 bf_set(lpfc_mbx_cq_create_set_cqecnt, 16567 &cq_set->u.request, 16568 LPFC_CQ_CNT_WORD7); 16569 break; 16570 } 16571 fallthrough; 16572 default: 16573 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16574 "3118 Bad CQ count. 
(%d)\n", 16575 cq->entry_count); 16576 if (cq->entry_count < 256) { 16577 status = -EINVAL; 16578 goto out; 16579 } 16580 fallthrough; /* otherwise default to smallest */ 16581 case 256: 16582 bf_set(lpfc_mbx_cq_create_set_cqecnt, 16583 &cq_set->u.request, LPFC_CQ_CNT_256); 16584 break; 16585 case 512: 16586 bf_set(lpfc_mbx_cq_create_set_cqecnt, 16587 &cq_set->u.request, LPFC_CQ_CNT_512); 16588 break; 16589 case 1024: 16590 bf_set(lpfc_mbx_cq_create_set_cqecnt, 16591 &cq_set->u.request, LPFC_CQ_CNT_1024); 16592 break; 16593 } 16594 bf_set(lpfc_mbx_cq_create_set_eq_id0, 16595 &cq_set->u.request, eq->queue_id); 16596 break; 16597 case 1: 16598 bf_set(lpfc_mbx_cq_create_set_eq_id1, 16599 &cq_set->u.request, eq->queue_id); 16600 break; 16601 case 2: 16602 bf_set(lpfc_mbx_cq_create_set_eq_id2, 16603 &cq_set->u.request, eq->queue_id); 16604 break; 16605 case 3: 16606 bf_set(lpfc_mbx_cq_create_set_eq_id3, 16607 &cq_set->u.request, eq->queue_id); 16608 break; 16609 case 4: 16610 bf_set(lpfc_mbx_cq_create_set_eq_id4, 16611 &cq_set->u.request, eq->queue_id); 16612 break; 16613 case 5: 16614 bf_set(lpfc_mbx_cq_create_set_eq_id5, 16615 &cq_set->u.request, eq->queue_id); 16616 break; 16617 case 6: 16618 bf_set(lpfc_mbx_cq_create_set_eq_id6, 16619 &cq_set->u.request, eq->queue_id); 16620 break; 16621 case 7: 16622 bf_set(lpfc_mbx_cq_create_set_eq_id7, 16623 &cq_set->u.request, eq->queue_id); 16624 break; 16625 case 8: 16626 bf_set(lpfc_mbx_cq_create_set_eq_id8, 16627 &cq_set->u.request, eq->queue_id); 16628 break; 16629 case 9: 16630 bf_set(lpfc_mbx_cq_create_set_eq_id9, 16631 &cq_set->u.request, eq->queue_id); 16632 break; 16633 case 10: 16634 bf_set(lpfc_mbx_cq_create_set_eq_id10, 16635 &cq_set->u.request, eq->queue_id); 16636 break; 16637 case 11: 16638 bf_set(lpfc_mbx_cq_create_set_eq_id11, 16639 &cq_set->u.request, eq->queue_id); 16640 break; 16641 case 12: 16642 bf_set(lpfc_mbx_cq_create_set_eq_id12, 16643 &cq_set->u.request, eq->queue_id); 16644 break; 16645 case 13: 16646 bf_set(lpfc_mbx_cq_create_set_eq_id13, 16647 &cq_set->u.request, eq->queue_id); 16648 break; 16649 case 14: 16650 bf_set(lpfc_mbx_cq_create_set_eq_id14, 16651 &cq_set->u.request, eq->queue_id); 16652 break; 16653 case 15: 16654 bf_set(lpfc_mbx_cq_create_set_eq_id15, 16655 &cq_set->u.request, eq->queue_id); 16656 break; 16657 } 16658 16659 /* link the cq onto the parent eq child list */ 16660 list_add_tail(&cq->list, &eq->child_list); 16661 /* Set up completion queue's type and subtype */ 16662 cq->type = type; 16663 cq->subtype = subtype; 16664 cq->assoc_qid = eq->queue_id; 16665 cq->assoc_qp = eq; 16666 cq->host_index = 0; 16667 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL; 16668 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, 16669 cq->entry_count); 16670 cq->chann = idx; 16671 16672 rc = 0; 16673 list_for_each_entry(dmabuf, &cq->page_list, list) { 16674 memset(dmabuf->virt, 0, hw_page_size); 16675 cnt = page_idx + dmabuf->buffer_tag; 16676 cq_set->u.request.page[cnt].addr_lo = 16677 putPaddrLow(dmabuf->phys); 16678 cq_set->u.request.page[cnt].addr_hi = 16679 putPaddrHigh(dmabuf->phys); 16680 rc++; 16681 } 16682 page_idx += rc; 16683 } 16684 16685 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16686 16687 /* The IOCTL status is embedded in the mailbox subheader. 
 */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3119 CQ_CREATE_SET mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
	if (rc == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	for (idx = 0; idx < numcq; idx++) {
		cq = cqp[idx];
		cq->queue_id = rc + idx;
		if (cq->queue_id > phba->sli4_hba.cq_max)
			phba->sli4_hba.cq_max = cq->queue_id;
	}

out:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return status;
}

/**
 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 * @cq: The completion queue to associate with this mq.
 *
 * This function provides fallback (fb) functionality when the
 * mq_create_ext fails on older FW generations. Its purpose is otherwise
 * identical to mq_create_ext.
 *
 * This routine cannot fail as all attributes were previously accessed and
 * initialized in mq_create_ext.
 **/
static void
lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
		       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_dmabuf *dmabuf;
	int length;

	length = (sizeof(struct lpfc_mbx_mq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	mq_create = &mbox->u.mqe.un.mq_create;
	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
	       mq->page_count);
	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
	switch (mq->entry_count) {
	case 16:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);
		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}
}

/**
 * lpfc_mq_create - Create a mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to associate with this mq.
 * @subtype: The queue's subtype.
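 *
 * Call sketch (illustrative; the driver keeps a single MQ bound to the
 * mailbox completion queue):
 *
 *	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
 *			    phba->sli4_hba.mbx_cq, LPFC_MBOX);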
16781 * 16782 * This function creates a mailbox queue, as detailed in @mq, on a port, 16783 * described by @phba by sending a MQ_CREATE mailbox command to the HBA. 16784 * 16785 * The @phba struct is used to send mailbox command to HBA. The @cq struct 16786 * is used to get the entry count and entry size that are necessary to 16787 * determine the number of pages to allocate and use for this queue. This 16788 * function will send the MQ_CREATE mailbox command to the HBA to setup the 16789 * mailbox queue. This function is asynchronous and will wait for the mailbox 16790 * command to finish before continuing. 16791 * 16792 * On success this function will return a zero. If unable to allocate enough 16793 * memory this function will return -ENOMEM. If the queue create mailbox command 16794 * fails this function will return -ENXIO. 16795 **/ 16796 int32_t 16797 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, 16798 struct lpfc_queue *cq, uint32_t subtype) 16799 { 16800 struct lpfc_mbx_mq_create *mq_create; 16801 struct lpfc_mbx_mq_create_ext *mq_create_ext; 16802 struct lpfc_dmabuf *dmabuf; 16803 LPFC_MBOXQ_t *mbox; 16804 int rc, length, status = 0; 16805 uint32_t shdr_status, shdr_add_status; 16806 union lpfc_sli4_cfg_shdr *shdr; 16807 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 16808 16809 /* sanity check on queue memory */ 16810 if (!mq || !cq) 16811 return -ENODEV; 16812 if (!phba->sli4_hba.pc_sli4_params.supported) 16813 hw_page_size = SLI4_PAGE_SIZE; 16814 16815 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16816 if (!mbox) 16817 return -ENOMEM; 16818 length = (sizeof(struct lpfc_mbx_mq_create_ext) - 16819 sizeof(struct lpfc_sli4_cfg_mhdr)); 16820 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 16821 LPFC_MBOX_OPCODE_MQ_CREATE_EXT, 16822 length, LPFC_SLI4_MBX_EMBED); 16823 16824 mq_create_ext = &mbox->u.mqe.un.mq_create_ext; 16825 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; 16826 bf_set(lpfc_mbx_mq_create_ext_num_pages, 16827 &mq_create_ext->u.request, mq->page_count); 16828 bf_set(lpfc_mbx_mq_create_ext_async_evt_link, 16829 &mq_create_ext->u.request, 1); 16830 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip, 16831 &mq_create_ext->u.request, 1); 16832 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, 16833 &mq_create_ext->u.request, 1); 16834 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc, 16835 &mq_create_ext->u.request, 1); 16836 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, 16837 &mq_create_ext->u.request, 1); 16838 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); 16839 bf_set(lpfc_mbox_hdr_version, &shdr->request, 16840 phba->sli4_hba.pc_sli4_params.mqv); 16841 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) 16842 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, 16843 cq->queue_id); 16844 else 16845 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, 16846 cq->queue_id); 16847 switch (mq->entry_count) { 16848 default: 16849 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16850 "0362 Unsupported MQ count. 
(%d)\n", 16851 mq->entry_count); 16852 if (mq->entry_count < 16) { 16853 status = -EINVAL; 16854 goto out; 16855 } 16856 fallthrough; /* otherwise default to smallest count */ 16857 case 16: 16858 bf_set(lpfc_mq_context_ring_size, 16859 &mq_create_ext->u.request.context, 16860 LPFC_MQ_RING_SIZE_16); 16861 break; 16862 case 32: 16863 bf_set(lpfc_mq_context_ring_size, 16864 &mq_create_ext->u.request.context, 16865 LPFC_MQ_RING_SIZE_32); 16866 break; 16867 case 64: 16868 bf_set(lpfc_mq_context_ring_size, 16869 &mq_create_ext->u.request.context, 16870 LPFC_MQ_RING_SIZE_64); 16871 break; 16872 case 128: 16873 bf_set(lpfc_mq_context_ring_size, 16874 &mq_create_ext->u.request.context, 16875 LPFC_MQ_RING_SIZE_128); 16876 break; 16877 } 16878 list_for_each_entry(dmabuf, &mq->page_list, list) { 16879 memset(dmabuf->virt, 0, hw_page_size); 16880 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = 16881 putPaddrLow(dmabuf->phys); 16882 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = 16883 putPaddrHigh(dmabuf->phys); 16884 } 16885 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16886 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 16887 &mq_create_ext->u.response); 16888 if (rc != MBX_SUCCESS) { 16889 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 16890 "2795 MQ_CREATE_EXT failed with " 16891 "status x%x. Failback to MQ_CREATE.\n", 16892 rc); 16893 lpfc_mq_create_fb_init(phba, mq, mbox, cq); 16894 mq_create = &mbox->u.mqe.un.mq_create; 16895 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16896 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; 16897 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 16898 &mq_create->u.response); 16899 } 16900 16901 /* The IOCTL status is embedded in the mailbox subheader. */ 16902 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16903 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16904 if (shdr_status || shdr_add_status || rc) { 16905 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16906 "2502 MQ_CREATE mailbox failed with " 16907 "status x%x add_status x%x, mbx status x%x\n", 16908 shdr_status, shdr_add_status, rc); 16909 status = -ENXIO; 16910 goto out; 16911 } 16912 if (mq->queue_id == 0xFFFF) { 16913 status = -ENXIO; 16914 goto out; 16915 } 16916 mq->type = LPFC_MQ; 16917 mq->assoc_qid = cq->queue_id; 16918 mq->subtype = subtype; 16919 mq->host_index = 0; 16920 mq->hba_index = 0; 16921 16922 /* link the mq onto the parent cq child list */ 16923 list_add_tail(&mq->list, &cq->child_list); 16924 out: 16925 mempool_free(mbox, phba->mbox_mem_pool); 16926 return status; 16927 } 16928 16929 /** 16930 * lpfc_wq_create - Create a Work Queue on the HBA 16931 * @phba: HBA structure that indicates port to create a queue on. 16932 * @wq: The queue structure to use to create the work queue. 16933 * @cq: The completion queue to bind this work queue to. 16934 * @subtype: The subtype of the work queue indicating its functionality. 16935 * 16936 * This function creates a work queue, as detailed in @wq, on a port, described 16937 * by @phba by sending a WQ_CREATE mailbox command to the HBA. 16938 * 16939 * The @phba struct is used to send mailbox command to HBA. The @wq struct 16940 * is used to get the entry count and entry size that are necessary to 16941 * determine the number of pages to allocate and use for this queue. The @cq 16942 * is used to indicate which completion queue to bind this work queue to. This 16943 * function will send the WQ_CREATE mailbox command to the HBA to setup the 16944 * work queue. 
This function is asynchronous and will wait for the mailbox 16945 * command to finish before continuing. 16946 * 16947 * On success this function will return a zero. If unable to allocate enough 16948 * memory this function will return -ENOMEM. If the queue create mailbox command 16949 * fails this function will return -ENXIO. 16950 **/ 16951 int 16952 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, 16953 struct lpfc_queue *cq, uint32_t subtype) 16954 { 16955 struct lpfc_mbx_wq_create *wq_create; 16956 struct lpfc_dmabuf *dmabuf; 16957 LPFC_MBOXQ_t *mbox; 16958 int rc, length, status = 0; 16959 uint32_t shdr_status, shdr_add_status; 16960 union lpfc_sli4_cfg_shdr *shdr; 16961 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 16962 struct dma_address *page; 16963 void __iomem *bar_memmap_p; 16964 uint32_t db_offset; 16965 uint16_t pci_barset; 16966 uint8_t dpp_barset; 16967 uint32_t dpp_offset; 16968 uint8_t wq_create_version; 16969 16970 /* sanity check on queue memory */ 16971 if (!wq || !cq) 16972 return -ENODEV; 16973 if (!phba->sli4_hba.pc_sli4_params.supported) 16974 hw_page_size = wq->page_size; 16975 16976 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16977 if (!mbox) 16978 return -ENOMEM; 16979 length = (sizeof(struct lpfc_mbx_wq_create) - 16980 sizeof(struct lpfc_sli4_cfg_mhdr)); 16981 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16982 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, 16983 length, LPFC_SLI4_MBX_EMBED); 16984 wq_create = &mbox->u.mqe.un.wq_create; 16985 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; 16986 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, 16987 wq->page_count); 16988 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, 16989 cq->queue_id); 16990 16991 /* wqv is the earliest version supported, NOT the latest */ 16992 bf_set(lpfc_mbox_hdr_version, &shdr->request, 16993 phba->sli4_hba.pc_sli4_params.wqv); 16994 16995 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) || 16996 (wq->page_size > SLI4_PAGE_SIZE)) 16997 wq_create_version = LPFC_Q_CREATE_VERSION_1; 16998 else 16999 wq_create_version = LPFC_Q_CREATE_VERSION_0; 17000 17001 switch (wq_create_version) { 17002 case LPFC_Q_CREATE_VERSION_1: 17003 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, 17004 wq->entry_count); 17005 bf_set(lpfc_mbox_hdr_version, &shdr->request, 17006 LPFC_Q_CREATE_VERSION_1); 17007 17008 switch (wq->entry_size) { 17009 default: 17010 case 64: 17011 bf_set(lpfc_mbx_wq_create_wqe_size, 17012 &wq_create->u.request_1, 17013 LPFC_WQ_WQE_SIZE_64); 17014 break; 17015 case 128: 17016 bf_set(lpfc_mbx_wq_create_wqe_size, 17017 &wq_create->u.request_1, 17018 LPFC_WQ_WQE_SIZE_128); 17019 break; 17020 } 17021 /* Request DPP by default */ 17022 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1); 17023 bf_set(lpfc_mbx_wq_create_page_size, 17024 &wq_create->u.request_1, 17025 (wq->page_size / SLI4_PAGE_SIZE)); 17026 page = wq_create->u.request_1.page; 17027 break; 17028 default: 17029 page = wq_create->u.request.page; 17030 break; 17031 } 17032 17033 list_for_each_entry(dmabuf, &wq->page_list, list) { 17034 memset(dmabuf->virt, 0, hw_page_size); 17035 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); 17036 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); 17037 } 17038 17039 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 17040 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1); 17041 17042 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 17043 /* The 
IOCTL status is embedded in the mailbox subheader. */ 17044 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 17045 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 17046 if (shdr_status || shdr_add_status || rc) { 17047 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17048 "2503 WQ_CREATE mailbox failed with " 17049 "status x%x add_status x%x, mbx status x%x\n", 17050 shdr_status, shdr_add_status, rc); 17051 status = -ENXIO; 17052 goto out; 17053 } 17054 17055 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) 17056 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, 17057 &wq_create->u.response); 17058 else 17059 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id, 17060 &wq_create->u.response_1); 17061 17062 if (wq->queue_id == 0xFFFF) { 17063 status = -ENXIO; 17064 goto out; 17065 } 17066 17067 wq->db_format = LPFC_DB_LIST_FORMAT; 17068 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) { 17069 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 17070 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format, 17071 &wq_create->u.response); 17072 if ((wq->db_format != LPFC_DB_LIST_FORMAT) && 17073 (wq->db_format != LPFC_DB_RING_FORMAT)) { 17074 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17075 "3265 WQ[%d] doorbell format " 17076 "not supported: x%x\n", 17077 wq->queue_id, wq->db_format); 17078 status = -EINVAL; 17079 goto out; 17080 } 17081 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set, 17082 &wq_create->u.response); 17083 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 17084 pci_barset); 17085 if (!bar_memmap_p) { 17086 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17087 "3263 WQ[%d] failed to memmap " 17088 "pci barset:x%x\n", 17089 wq->queue_id, pci_barset); 17090 status = -ENOMEM; 17091 goto out; 17092 } 17093 db_offset = wq_create->u.response.doorbell_offset; 17094 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) && 17095 (db_offset != LPFC_ULP1_WQ_DOORBELL)) { 17096 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17097 "3252 WQ[%d] doorbell offset " 17098 "not supported: x%x\n", 17099 wq->queue_id, db_offset); 17100 status = -EINVAL; 17101 goto out; 17102 } 17103 wq->db_regaddr = bar_memmap_p + db_offset; 17104 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 17105 "3264 WQ[%d]: barset:x%x, offset:x%x, " 17106 "format:x%x\n", wq->queue_id, 17107 pci_barset, db_offset, wq->db_format); 17108 } else 17109 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 17110 } else { 17111 /* Check if DPP was honored by the firmware */ 17112 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp, 17113 &wq_create->u.response_1); 17114 if (wq->dpp_enable) { 17115 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set, 17116 &wq_create->u.response_1); 17117 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 17118 pci_barset); 17119 if (!bar_memmap_p) { 17120 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17121 "3267 WQ[%d] failed to memmap " 17122 "pci barset:x%x\n", 17123 wq->queue_id, pci_barset); 17124 status = -ENOMEM; 17125 goto out; 17126 } 17127 db_offset = wq_create->u.response_1.doorbell_offset; 17128 wq->db_regaddr = bar_memmap_p + db_offset; 17129 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id, 17130 &wq_create->u.response_1); 17131 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar, 17132 &wq_create->u.response_1); 17133 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 17134 dpp_barset); 17135 if (!bar_memmap_p) { 17136 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17137 "3268 WQ[%d] failed to memmap " 17138 "pci barset:x%x\n", 17139 wq->queue_id, dpp_barset); 17140 status = -ENOMEM; 17141 
				goto out;
			}
			dpp_offset = wq_create->u.response_1.dpp_offset;
			wq->dpp_regaddr = bar_memmap_p + dpp_offset;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3271 WQ[%d]: barset:x%x, offset:x%x, "
					"dpp_id:x%x dpp_barset:x%x "
					"dpp_offset:x%x\n",
					wq->queue_id, pci_barset, db_offset,
					wq->dpp_id, dpp_barset, dpp_offset);

#ifdef CONFIG_X86
			/* Enable combined writes for the DPP aperture */
			bar_memmap_p = lpfc_dpp_wc_map(phba, dpp_barset);
			if (!bar_memmap_p) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3272 Cannot setup Combined "
						"Write on WQ[%d] - disable DPP\n",
						wq->queue_id);
				phba->cfg_enable_dpp = 0;
			} else {
				wq->dpp_regaddr = bar_memmap_p + dpp_offset;
			}
#else
			phba->cfg_enable_dpp = 0;
#endif
		} else
			wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
	}
	wq->pring = kzalloc_obj(struct lpfc_sli_ring);
	if (wq->pring == NULL) {
		status = -ENOMEM;
		goto out;
	}
	wq->type = LPFC_WQ;
	wq->assoc_qid = cq->queue_id;
	wq->subtype = subtype;
	wq->host_index = 0;
	wq->hba_index = 0;
	wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;

	/* link the wq onto the parent cq child list */
	list_add_tail(&wq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_rq_create - Create a Receive Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrq: The queue structure to use to create the header receive queue.
 * @drq: The queue structure to use to create the data receive queue.
 * @cq: The completion queue to bind this receive queue pair to.
 * @subtype: The subtype of the receive queues indicating their functionality.
 *
 * This function creates a receive buffer queue pair, as detailed in @hrq and
 * @drq, on a port described by @phba, by sending a RQ_CREATE mailbox command
 * to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
 * structs are used to get the entry counts that are necessary to determine
 * the number of pages to use for each queue. The @cq is used to indicate
 * which completion queue the received buffers posted to these queues are
 * bound to. This function will send the RQ_CREATE mailbox command to the HBA
 * to setup the receive queue pair. The mailbox is issued in polled mode, so
 * this function waits for the command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
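 *
 * Call sketch (illustrative): the header and data RQs are created as a
 * pair against one CQ and must be sized identically, or -EINVAL is
 * returned:
 *
 *	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq,
 *			    phba->sli4_hba.dat_rq,
 *			    phba->sli4_hba.els_cq, LPFC_USOL);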
17212 **/ 17213 int 17214 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, 17215 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) 17216 { 17217 struct lpfc_mbx_rq_create *rq_create; 17218 struct lpfc_dmabuf *dmabuf; 17219 LPFC_MBOXQ_t *mbox; 17220 int rc, length, status = 0; 17221 uint32_t shdr_status, shdr_add_status; 17222 union lpfc_sli4_cfg_shdr *shdr; 17223 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 17224 void __iomem *bar_memmap_p; 17225 uint32_t db_offset; 17226 uint16_t pci_barset; 17227 17228 /* sanity check on queue memory */ 17229 if (!hrq || !drq || !cq) 17230 return -ENODEV; 17231 if (!phba->sli4_hba.pc_sli4_params.supported) 17232 hw_page_size = SLI4_PAGE_SIZE; 17233 17234 if (hrq->entry_count != drq->entry_count) 17235 return -EINVAL; 17236 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 17237 if (!mbox) 17238 return -ENOMEM; 17239 length = (sizeof(struct lpfc_mbx_rq_create) - 17240 sizeof(struct lpfc_sli4_cfg_mhdr)); 17241 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 17242 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 17243 length, LPFC_SLI4_MBX_EMBED); 17244 rq_create = &mbox->u.mqe.un.rq_create; 17245 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 17246 bf_set(lpfc_mbox_hdr_version, &shdr->request, 17247 phba->sli4_hba.pc_sli4_params.rqv); 17248 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 17249 bf_set(lpfc_rq_context_rqe_count_1, 17250 &rq_create->u.request.context, 17251 hrq->entry_count); 17252 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; 17253 bf_set(lpfc_rq_context_rqe_size, 17254 &rq_create->u.request.context, 17255 LPFC_RQE_SIZE_8); 17256 bf_set(lpfc_rq_context_page_size, 17257 &rq_create->u.request.context, 17258 LPFC_RQ_PAGE_SIZE_4096); 17259 } else { 17260 switch (hrq->entry_count) { 17261 default: 17262 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17263 "2535 Unsupported RQ count. (%d)\n", 17264 hrq->entry_count); 17265 if (hrq->entry_count < 512) { 17266 status = -EINVAL; 17267 goto out; 17268 } 17269 fallthrough; /* otherwise default to smallest count */ 17270 case 512: 17271 bf_set(lpfc_rq_context_rqe_count, 17272 &rq_create->u.request.context, 17273 LPFC_RQ_RING_SIZE_512); 17274 break; 17275 case 1024: 17276 bf_set(lpfc_rq_context_rqe_count, 17277 &rq_create->u.request.context, 17278 LPFC_RQ_RING_SIZE_1024); 17279 break; 17280 case 2048: 17281 bf_set(lpfc_rq_context_rqe_count, 17282 &rq_create->u.request.context, 17283 LPFC_RQ_RING_SIZE_2048); 17284 break; 17285 case 4096: 17286 bf_set(lpfc_rq_context_rqe_count, 17287 &rq_create->u.request.context, 17288 LPFC_RQ_RING_SIZE_4096); 17289 break; 17290 } 17291 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 17292 LPFC_HDR_BUF_SIZE); 17293 } 17294 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 17295 cq->queue_id); 17296 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 17297 hrq->page_count); 17298 list_for_each_entry(dmabuf, &hrq->page_list, list) { 17299 memset(dmabuf->virt, 0, hw_page_size); 17300 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 17301 putPaddrLow(dmabuf->phys); 17302 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 17303 putPaddrHigh(dmabuf->phys); 17304 } 17305 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 17306 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 17307 17308 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 17309 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 17310 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 17311 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 17312 if (shdr_status || shdr_add_status || rc) { 17313 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17314 "2504 RQ_CREATE mailbox failed with " 17315 "status x%x add_status x%x, mbx status x%x\n", 17316 shdr_status, shdr_add_status, rc); 17317 status = -ENXIO; 17318 goto out; 17319 } 17320 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 17321 if (hrq->queue_id == 0xFFFF) { 17322 status = -ENXIO; 17323 goto out; 17324 } 17325 17326 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 17327 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format, 17328 &rq_create->u.response); 17329 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) && 17330 (hrq->db_format != LPFC_DB_RING_FORMAT)) { 17331 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17332 "3262 RQ [%d] doorbell format not " 17333 "supported: x%x\n", hrq->queue_id, 17334 hrq->db_format); 17335 status = -EINVAL; 17336 goto out; 17337 } 17338 17339 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set, 17340 &rq_create->u.response); 17341 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); 17342 if (!bar_memmap_p) { 17343 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17344 "3269 RQ[%d] failed to memmap pci " 17345 "barset:x%x\n", hrq->queue_id, 17346 pci_barset); 17347 status = -ENOMEM; 17348 goto out; 17349 } 17350 17351 db_offset = rq_create->u.response.doorbell_offset; 17352 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) && 17353 (db_offset != LPFC_ULP1_RQ_DOORBELL)) { 17354 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17355 "3270 RQ[%d] doorbell offset not " 17356 "supported: x%x\n", hrq->queue_id, 17357 db_offset); 17358 status = -EINVAL; 17359 goto out; 17360 } 17361 hrq->db_regaddr = bar_memmap_p + db_offset; 17362 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 17363 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, " 17364 "format:x%x\n", hrq->queue_id, pci_barset, 17365 db_offset, hrq->db_format); 17366 } else { 17367 hrq->db_format = LPFC_DB_RING_FORMAT; 17368 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 17369 } 17370 hrq->type = LPFC_HRQ; 17371 hrq->assoc_qid = cq->queue_id; 17372 hrq->subtype = subtype; 17373 hrq->host_index = 0; 17374 hrq->hba_index = 0; 17375 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 17376 17377 /* now create the data queue */ 17378 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 17379 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 17380 length, LPFC_SLI4_MBX_EMBED); 17381 bf_set(lpfc_mbox_hdr_version, &shdr->request, 17382 phba->sli4_hba.pc_sli4_params.rqv); 17383 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 17384 bf_set(lpfc_rq_context_rqe_count_1, 17385 &rq_create->u.request.context, hrq->entry_count); 17386 if (subtype == LPFC_NVMET) 17387 rq_create->u.request.context.buffer_size = 17388 LPFC_NVMET_DATA_BUF_SIZE; 17389 else 17390 rq_create->u.request.context.buffer_size = 17391 LPFC_DATA_BUF_SIZE; 17392 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, 17393 LPFC_RQE_SIZE_8); 17394 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, 17395 (PAGE_SIZE/SLI4_PAGE_SIZE)); 17396 } else { 17397 switch (drq->entry_count) { 17398 default: 17399 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17400 "2536 Unsupported RQ count. 
(%d)\n", 17401 drq->entry_count); 17402 if (drq->entry_count < 512) { 17403 status = -EINVAL; 17404 goto out; 17405 } 17406 fallthrough; /* otherwise default to smallest count */ 17407 case 512: 17408 bf_set(lpfc_rq_context_rqe_count, 17409 &rq_create->u.request.context, 17410 LPFC_RQ_RING_SIZE_512); 17411 break; 17412 case 1024: 17413 bf_set(lpfc_rq_context_rqe_count, 17414 &rq_create->u.request.context, 17415 LPFC_RQ_RING_SIZE_1024); 17416 break; 17417 case 2048: 17418 bf_set(lpfc_rq_context_rqe_count, 17419 &rq_create->u.request.context, 17420 LPFC_RQ_RING_SIZE_2048); 17421 break; 17422 case 4096: 17423 bf_set(lpfc_rq_context_rqe_count, 17424 &rq_create->u.request.context, 17425 LPFC_RQ_RING_SIZE_4096); 17426 break; 17427 } 17428 if (subtype == LPFC_NVMET) 17429 bf_set(lpfc_rq_context_buf_size, 17430 &rq_create->u.request.context, 17431 LPFC_NVMET_DATA_BUF_SIZE); 17432 else 17433 bf_set(lpfc_rq_context_buf_size, 17434 &rq_create->u.request.context, 17435 LPFC_DATA_BUF_SIZE); 17436 } 17437 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 17438 cq->queue_id); 17439 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 17440 drq->page_count); 17441 list_for_each_entry(dmabuf, &drq->page_list, list) { 17442 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 17443 putPaddrLow(dmabuf->phys); 17444 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 17445 putPaddrHigh(dmabuf->phys); 17446 } 17447 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 17448 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 17449 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 17450 /* The IOCTL status is embedded in the mailbox subheader. */ 17451 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 17452 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 17453 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 17454 if (shdr_status || shdr_add_status || rc) { 17455 status = -ENXIO; 17456 goto out; 17457 } 17458 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 17459 if (drq->queue_id == 0xFFFF) { 17460 status = -ENXIO; 17461 goto out; 17462 } 17463 drq->type = LPFC_DRQ; 17464 drq->assoc_qid = cq->queue_id; 17465 drq->subtype = subtype; 17466 drq->host_index = 0; 17467 drq->hba_index = 0; 17468 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 17469 17470 /* link the header and data RQs onto the parent cq child list */ 17471 list_add_tail(&hrq->list, &cq->child_list); 17472 list_add_tail(&drq->list, &cq->child_list); 17473 17474 out: 17475 mempool_free(mbox, phba->mbox_mem_pool); 17476 return status; 17477 } 17478 17479 /** 17480 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA 17481 * @phba: HBA structure that indicates port to create a queue on. 17482 * @hrqp: The queue structure array to use to create the header receive queues. 17483 * @drqp: The queue structure array to use to create the data receive queues. 17484 * @cqp: The completion queue array to bind these receive queues to. 17485 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc). 17486 * 17487 * This function creates a receive buffer queue pair , as detailed in @hrq and 17488 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 17489 * to the HBA. 17490 * 17491 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 17492 * struct is used to get the entry count that is necessary to determine the 17493 * number of pages to use for this queue. 
The @cqp array is used to indicate
 * which completion queue the buffers received on each queue pair are bound
 * to. This function sends the RQ_CREATE mailbox command to the HBA to set up
 * the receive queue pairs, polling for the command to finish before
 * continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
		struct lpfc_queue **drqp, struct lpfc_queue **cqp,
		uint32_t subtype)
{
	struct lpfc_queue *hrq, *drq, *cq;
	struct lpfc_mbx_rq_create_v2 *rq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, alloclen, status = 0;
	int cnt, idx, numrq, page_idx = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	numrq = phba->cfg_nvmet_mrq;
	/* sanity check on array memory */
	if (!hrqp || !drqp || !cqp || !numrq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	length = sizeof(struct lpfc_mbx_rq_create_v2);
	length += ((2 * numrq * hrqp[0]->page_count) *
		   sizeof(struct dma_address));

	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
				    LPFC_SLI4_MBX_NEMBED);
	if (alloclen < length) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3099 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, length);
		status = -ENOMEM;
		goto out;
	}

	rq_create = mbox->sge_array->addr[0];
	shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;

	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
	cnt = 0;

	for (idx = 0; idx < numrq; idx++) {
		hrq = hrqp[idx];
		drq = drqp[idx];
		cq = cqp[idx];

		/* sanity check on queue memory */
		if (!hrq || !drq || !cq) {
			status = -ENODEV;
			goto out;
		}

		if (hrq->entry_count != drq->entry_count) {
			status = -EINVAL;
			goto out;
		}

		if (idx == 0) {
			bf_set(lpfc_mbx_rq_create_num_pages,
			       &rq_create->u.request,
			       hrq->page_count);
			bf_set(lpfc_mbx_rq_create_rq_cnt,
			       &rq_create->u.request, (numrq * 2));
			bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
			       1);
			bf_set(lpfc_rq_context_base_cq,
			       &rq_create->u.request.context,
			       cq->queue_id);
			bf_set(lpfc_rq_context_data_size,
			       &rq_create->u.request.context,
			       LPFC_NVMET_DATA_BUF_SIZE);
			bf_set(lpfc_rq_context_hdr_size,
			       &rq_create->u.request.context,
			       LPFC_HDR_BUF_SIZE);
			bf_set(lpfc_rq_context_rqe_count_1,
			       &rq_create->u.request.context,
			       hrq->entry_count);
			bf_set(lpfc_rq_context_rqe_size,
			       &rq_create->u.request.context,
			       LPFC_RQE_SIZE_8);
			bf_set(lpfc_rq_context_page_size,
			       &rq_create->u.request.context,
			       (PAGE_SIZE/SLI4_PAGE_SIZE));
		}
		rc = 0;
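		/*
		 * Pack this pair's header and data RQ pages into the single
		 * shared dma_address array of the non-embedded mailbox. Each
		 * dmabuf carries a per-queue buffer_tag, so page_idx re-bases
		 * the tag onto the running offset of this pair's pages: with
		 * two pairs of two pages each, the array lays out as
		 * [hrq0 pg0, hrq0 pg1, drq0 pg0, drq0 pg1, hrq1 pg0, ...].
		 */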
		list_for_each_entry(dmabuf, &hrq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			rq_create->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			rq_create->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;

		rc = 0;
		list_for_each_entry(dmabuf, &drq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			rq_create->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			rq_create->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;

		hrq->db_format = LPFC_DB_RING_FORMAT;
		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
		hrq->type = LPFC_HRQ;
		hrq->assoc_qid = cq->queue_id;
		hrq->subtype = subtype;
		hrq->host_index = 0;
		hrq->hba_index = 0;
		hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

		drq->db_format = LPFC_DB_RING_FORMAT;
		drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
		drq->type = LPFC_DRQ;
		drq->assoc_qid = cq->queue_id;
		drq->subtype = subtype;
		drq->host_index = 0;
		drq->hba_index = 0;
		drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

		list_add_tail(&hrq->list, &cq->child_list);
		list_add_tail(&drq->list, &cq->child_list);
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3120 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (rc == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	/* Initialize all RQs with associated queue id */
	for (idx = 0; idx < numrq; idx++) {
		hrq = hrqp[idx];
		hrq->queue_id = rc + (2 * idx);
		drq = drqp[idx];
		drq->queue_id = rc + (2 * idx) + 1;
	}

out:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return status;
}

/**
 * lpfc_eq_destroy - Destroy an Event Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @eq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @eq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @eq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!eq)
		return -ENODEV;

	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
		goto list_remove;

	mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
	       eq->queue_id);
	mbox->vport = eq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;

	rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2505 EQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	mempool_free(mbox, eq->phba->mbox_mem_pool);

list_remove:
	/* Remove eq from any list */
	list_del_init(&eq->list);

	return status;
}

/**
 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @cq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @cq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @cq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!cq)
		return -ENODEV;

	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
		goto list_remove;

	mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_cq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_CQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
	       cq->queue_id);
	mbox->vport = cq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader.
 */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2506 CQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	mempool_free(mbox, cq->phba->mbox_mem_pool);

list_remove:
	/* Remove cq from any list */
	list_del_init(&cq->list);
	return status;
}

/**
 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @mq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @mq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @mq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!mq)
		return -ENODEV;

	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
		goto list_remove;

	mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_mq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
	       mq->queue_id);
	mbox->vport = mq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2507 MQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	mempool_free(mbox, mq->phba->mbox_mem_pool);

list_remove:
	/* Remove mq from any list */
	list_del_init(&mq->list);
	return status;
}

/**
 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @wq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @wq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @wq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero.
If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!wq)
		return -ENODEV;

	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
		goto list_remove;

	mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_wq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
	       wq->queue_id);
	mbox->vport = wq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2508 WQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	mempool_free(mbox, wq->phba->mbox_mem_pool);

list_remove:
	/* Remove wq from any list */
	list_del_init(&wq->list);
	kfree(wq->pring);
	wq->pring = NULL;
	return status;
}

/**
 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @hrq: The header receive queue structure of the queue pair to destroy.
 * @drq: The data receive queue structure of the queue pair to destroy.
 *
 * This function destroys a receive queue pair, as detailed in @hrq and @drq,
 * by sending a mailbox command, specific to the type of queue, to the HBA.
 *
 * The @hrq and @drq structs are used to get the queue IDs of the queues to
 * destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		struct lpfc_queue *drq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!hrq || !drq)
		return -ENODEV;

	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
		goto list_remove;

	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       hrq->queue_id);
	mbox->vport = hrq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2509 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		mempool_free(mbox, hrq->phba->mbox_mem_pool);
		return -ENXIO;
	}
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       drq->queue_id);
	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2510 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	mempool_free(mbox, hrq->phba->mbox_mem_pool);

list_remove:
	list_del_init(&hrq->list);
	list_del_init(&drq->list);
	return status;
}

/**
 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
 * @phba: pointer to the lpfc hba data structure for the port executing
 *        this call.
 * @pdma_phys_addr0: Physical address of the 1st SGL page.
 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
 * @xritag: the xritag that ties this io to the SGL pages.
 *
 * This routine will post the sgl pages for the IO that has the xritag
 * that is in the iocbq structure. The xritag is assigned during iocbq
 * creation and persists for as long as the driver is loaded.
 * If the caller has fewer than 256 scatter gather segments to map then
 * @pdma_phys_addr1 should be 0; if more than 256 scatter gather segments
 * are needed then @pdma_phys_addr1 must be a valid physical address.
 * The physical addresses for the SGLs must be 64 byte aligned.
 * When two SGL pages are mapped, the first must hold exactly 256 entries
 * and the second can hold between 1 and 256 entries.
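 *
 * For example, the single-buffer repost path in
 * lpfc_sli4_post_io_sgl_list() below issues
 *   lpfc_sli4_post_sgl(phba, lpfc_ncmd->dma_phys_sgl, pdma_phys_sgl1,
 *		        cur_xritag);
 * with pdma_phys_sgl1 left at 0 whenever one SGL page is sufficient.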
 *
 * Return codes:
 *	0 - Success
 *	-ENXIO, -ENOMEM - Failure
 **/
int
lpfc_sli4_post_sgl(struct lpfc_hba *phba,
		   dma_addr_t pdma_phys_addr0,
		   dma_addr_t pdma_phys_addr1,
		   uint16_t xritag)
{
	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
	LPFC_MBOXQ_t *mbox;
	int rc;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;

	if (xritag == NO_XRI) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0364 Invalid param: xritag is NO_XRI\n");
		return -EINVAL;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
			 sizeof(struct lpfc_mbx_post_sgl_pages) -
			 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
			 &mbox->u.mqe.un.post_sgl_pages;
	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (!phba->sli4_hba.intr_enable)
		mempool_free(mbox, phba->mbox_mem_pool);
	else if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2511 POST_SGL mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		return -ENXIO;
	}
	return 0;
}

/**
 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the next available xri from the
 * driver's xri bitmask, consistent with the SLI-4 interface spec. The
 * xri is a logical index, so the search starts at 0 on every call.
 *
 * Returns
 * An available xri in the range 0 <= xri < max_xri if successful
 * NO_XRI if no xris are available.
 **/
static uint16_t
lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
{
	unsigned long xri;

	/*
	 * Fetch the next logical xri. Because this index is logical,
	 * the driver starts at 0 each time.
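	 * If no free bit is found, find_first_zero_bit() returns a value
	 * greater than or equal to max_xri, which is translated to NO_XRI
	 * for the caller below.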
	 */
	spin_lock_irq(&phba->hbalock);
	xri = find_first_zero_bit(phba->sli4_hba.xri_bmask,
				  phba->sli4_hba.max_cfg_param.max_xri);
	if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
		spin_unlock_irq(&phba->hbalock);
		return NO_XRI;
	} else {
		set_bit(xri, phba->sli4_hba.xri_bmask);
		phba->sli4_hba.max_cfg_param.xri_used++;
	}
	spin_unlock_irq(&phba->hbalock);
	return xri;
}

/**
 * __lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver.
 **/
static void
__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
		phba->sli4_hba.max_cfg_param.xri_used--;
	}
}

/**
 * lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver.
 **/
void
lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_xri(phba, xri);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_next_xritag - Get an xritag for the io
 * @phba: Pointer to HBA context object.
 *
 * This function gets an xritag for the iocb. It returns the allocated
 * xritag if successful, else it returns NO_XRI (0xffff); NO_XRI is not
 * a valid xritag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
	uint16_t xri_index;

	xri_index = lpfc_sli4_alloc_xri(phba);
	if (xri_index == NO_XRI)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2004 Failed to allocate XRI. Last XRITAG is %d"
				" Max XRI is %d, Used XRI is %d\n",
				xri_index,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.xri_used);
	return xri_index;
}

/**
 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
 * @phba: pointer to lpfc hba data structure.
 * @post_sgl_list: pointer to els sgl entry list.
 * @post_cnt: number of els sgl entries on the list.
 *
 * This routine is invoked to post a block of the driver's sgl pages to the
 * HBA using a non-embedded mailbox command. No Lock is held. This routine
 * is only called when the driver is loading and after all IO has been
 * stopped.
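 *
 * The mailbox payload is sized as @post_cnt sgl page pairs plus the
 * config header and one word, and must fit within a single SLI4_PAGE_SIZE
 * non-embedded page; larger requests are rejected with -ENOMEM.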
18189 **/ 18190 static int 18191 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba, 18192 struct list_head *post_sgl_list, 18193 int post_cnt) 18194 { 18195 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 18196 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 18197 struct sgl_page_pairs *sgl_pg_pairs; 18198 void *viraddr; 18199 LPFC_MBOXQ_t *mbox; 18200 uint32_t reqlen, alloclen, pg_pairs; 18201 uint32_t mbox_tmo; 18202 uint16_t xritag_start = 0; 18203 int rc = 0; 18204 uint32_t shdr_status, shdr_add_status; 18205 union lpfc_sli4_cfg_shdr *shdr; 18206 18207 reqlen = post_cnt * sizeof(struct sgl_page_pairs) + 18208 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 18209 if (reqlen > SLI4_PAGE_SIZE) { 18210 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 18211 "2559 Block sgl registration required DMA " 18212 "size (%d) great than a page\n", reqlen); 18213 return -ENOMEM; 18214 } 18215 18216 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18217 if (!mbox) 18218 return -ENOMEM; 18219 18220 /* Allocate DMA memory and set up the non-embedded mailbox command */ 18221 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 18222 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 18223 LPFC_SLI4_MBX_NEMBED); 18224 18225 if (alloclen < reqlen) { 18226 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 18227 "0285 Allocated DMA memory size (%d) is " 18228 "less than the requested DMA memory " 18229 "size (%d)\n", alloclen, reqlen); 18230 lpfc_sli4_mbox_cmd_free(phba, mbox); 18231 return -ENOMEM; 18232 } 18233 /* Set up the SGL pages in the non-embedded DMA pages */ 18234 viraddr = mbox->sge_array->addr[0]; 18235 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 18236 sgl_pg_pairs = &sgl->sgl_pg_pairs; 18237 18238 pg_pairs = 0; 18239 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) { 18240 /* Set up the sge entry */ 18241 sgl_pg_pairs->sgl_pg0_addr_lo = 18242 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 18243 sgl_pg_pairs->sgl_pg0_addr_hi = 18244 cpu_to_le32(putPaddrHigh(sglq_entry->phys)); 18245 sgl_pg_pairs->sgl_pg1_addr_lo = 18246 cpu_to_le32(putPaddrLow(0)); 18247 sgl_pg_pairs->sgl_pg1_addr_hi = 18248 cpu_to_le32(putPaddrHigh(0)); 18249 18250 /* Keep the first xritag on the list */ 18251 if (pg_pairs == 0) 18252 xritag_start = sglq_entry->sli4_xritag; 18253 sgl_pg_pairs++; 18254 pg_pairs++; 18255 } 18256 18257 /* Complete initialization and perform endian conversion. 
*/ 18258 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 18259 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt); 18260 sgl->word0 = cpu_to_le32(sgl->word0); 18261 18262 if (!phba->sli4_hba.intr_enable) 18263 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 18264 else { 18265 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 18266 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 18267 } 18268 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 18269 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 18270 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 18271 if (!phba->sli4_hba.intr_enable) 18272 lpfc_sli4_mbox_cmd_free(phba, mbox); 18273 else if (rc != MBX_TIMEOUT) 18274 lpfc_sli4_mbox_cmd_free(phba, mbox); 18275 if (shdr_status || shdr_add_status || rc) { 18276 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 18277 "2513 POST_SGL_BLOCK mailbox command failed " 18278 "status x%x add_status x%x mbx status x%x\n", 18279 shdr_status, shdr_add_status, rc); 18280 rc = -ENXIO; 18281 } 18282 return rc; 18283 } 18284 18285 /** 18286 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware 18287 * @phba: pointer to lpfc hba data structure. 18288 * @nblist: pointer to nvme buffer list. 18289 * @count: number of scsi buffers on the list. 18290 * 18291 * This routine is invoked to post a block of @count scsi sgl pages from a 18292 * SCSI buffer list @nblist to the HBA using non-embedded mailbox command. 18293 * No Lock is held. 18294 * 18295 **/ 18296 static int 18297 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist, 18298 int count) 18299 { 18300 struct lpfc_io_buf *lpfc_ncmd; 18301 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 18302 struct sgl_page_pairs *sgl_pg_pairs; 18303 void *viraddr; 18304 LPFC_MBOXQ_t *mbox; 18305 uint32_t reqlen, alloclen, pg_pairs; 18306 uint32_t mbox_tmo; 18307 uint16_t xritag_start = 0; 18308 int rc = 0; 18309 uint32_t shdr_status, shdr_add_status; 18310 dma_addr_t pdma_phys_bpl1; 18311 union lpfc_sli4_cfg_shdr *shdr; 18312 18313 /* Calculate the requested length of the dma memory */ 18314 reqlen = count * sizeof(struct sgl_page_pairs) + 18315 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 18316 if (reqlen > SLI4_PAGE_SIZE) { 18317 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 18318 "6118 Block sgl registration required DMA " 18319 "size (%d) great than a page\n", reqlen); 18320 return -ENOMEM; 18321 } 18322 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18323 if (!mbox) { 18324 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 18325 "6119 Failed to allocate mbox cmd memory\n"); 18326 return -ENOMEM; 18327 } 18328 18329 /* Allocate DMA memory and set up the non-embedded mailbox command */ 18330 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 18331 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 18332 reqlen, LPFC_SLI4_MBX_NEMBED); 18333 18334 if (alloclen < reqlen) { 18335 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 18336 "6120 Allocated DMA memory size (%d) is " 18337 "less than the requested DMA memory " 18338 "size (%d)\n", alloclen, reqlen); 18339 lpfc_sli4_mbox_cmd_free(phba, mbox); 18340 return -ENOMEM; 18341 } 18342 18343 /* Get the first SGE entry from the non-embedded DMA memory */ 18344 viraddr = mbox->sge_array->addr[0]; 18345 18346 /* Set up the SGL pages in the non-embedded DMA pages */ 18347 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 18348 sgl_pg_pairs = &sgl->sgl_pg_pairs; 18349 18350 pg_pairs = 0; 18351 list_for_each_entry(lpfc_ncmd, nblist, 
list) { 18352 /* Set up the sge entry */ 18353 sgl_pg_pairs->sgl_pg0_addr_lo = 18354 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl)); 18355 sgl_pg_pairs->sgl_pg0_addr_hi = 18356 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl)); 18357 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 18358 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl + 18359 SGL_PAGE_SIZE; 18360 else 18361 pdma_phys_bpl1 = 0; 18362 sgl_pg_pairs->sgl_pg1_addr_lo = 18363 cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 18364 sgl_pg_pairs->sgl_pg1_addr_hi = 18365 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 18366 /* Keep the first xritag on the list */ 18367 if (pg_pairs == 0) 18368 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag; 18369 sgl_pg_pairs++; 18370 pg_pairs++; 18371 } 18372 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 18373 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 18374 /* Perform endian conversion if necessary */ 18375 sgl->word0 = cpu_to_le32(sgl->word0); 18376 18377 if (!phba->sli4_hba.intr_enable) { 18378 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 18379 } else { 18380 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 18381 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 18382 } 18383 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr; 18384 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 18385 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 18386 if (!phba->sli4_hba.intr_enable) 18387 lpfc_sli4_mbox_cmd_free(phba, mbox); 18388 else if (rc != MBX_TIMEOUT) 18389 lpfc_sli4_mbox_cmd_free(phba, mbox); 18390 if (shdr_status || shdr_add_status || rc) { 18391 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 18392 "6125 POST_SGL_BLOCK mailbox command failed " 18393 "status x%x add_status x%x mbx status x%x\n", 18394 shdr_status, shdr_add_status, rc); 18395 rc = -ENXIO; 18396 } 18397 return rc; 18398 } 18399 18400 /** 18401 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list 18402 * @phba: pointer to lpfc hba data structure. 18403 * @post_nblist: pointer to the nvme buffer list. 18404 * @sb_count: number of nvme buffers. 18405 * 18406 * This routine walks a list of nvme buffers that was passed in. It attempts 18407 * to construct blocks of nvme buffer sgls which contains contiguous xris and 18408 * uses the non-embedded SGL block post mailbox commands to post to the port. 18409 * For single NVME buffer sgl with non-contiguous xri, if any, it shall use 18410 * embedded SGL post mailbox command for posting. The @post_nblist passed in 18411 * must be local list, thus no lock is needed when manipulate the list. 18412 * 18413 * Returns: 0 = failure, non-zero number of successfully posted buffers. 
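 *
 * Note: buffers whose xris are contiguous are batched, up to
 * LPFC_NEMBED_MBOX_SGL_CNT sgls per block post; a hole in the xri
 * sequence flushes the current block and starts a new one.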
18414 **/ 18415 int 18416 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba, 18417 struct list_head *post_nblist, int sb_count) 18418 { 18419 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; 18420 int status, sgl_size; 18421 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; 18422 dma_addr_t pdma_phys_sgl1; 18423 int last_xritag = NO_XRI; 18424 int cur_xritag; 18425 LIST_HEAD(prep_nblist); 18426 LIST_HEAD(blck_nblist); 18427 LIST_HEAD(nvme_nblist); 18428 18429 /* sanity check */ 18430 if (sb_count <= 0) 18431 return -EINVAL; 18432 18433 sgl_size = phba->cfg_sg_dma_buf_size; 18434 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) { 18435 list_del_init(&lpfc_ncmd->list); 18436 block_cnt++; 18437 if ((last_xritag != NO_XRI) && 18438 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) { 18439 /* a hole in xri block, form a sgl posting block */ 18440 list_splice_init(&prep_nblist, &blck_nblist); 18441 post_cnt = block_cnt - 1; 18442 /* prepare list for next posting block */ 18443 list_add_tail(&lpfc_ncmd->list, &prep_nblist); 18444 block_cnt = 1; 18445 } else { 18446 /* prepare list for next posting block */ 18447 list_add_tail(&lpfc_ncmd->list, &prep_nblist); 18448 /* enough sgls for non-embed sgl mbox command */ 18449 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { 18450 list_splice_init(&prep_nblist, &blck_nblist); 18451 post_cnt = block_cnt; 18452 block_cnt = 0; 18453 } 18454 } 18455 num_posting++; 18456 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; 18457 18458 /* end of repost sgl list condition for NVME buffers */ 18459 if (num_posting == sb_count) { 18460 if (post_cnt == 0) { 18461 /* last sgl posting block */ 18462 list_splice_init(&prep_nblist, &blck_nblist); 18463 post_cnt = block_cnt; 18464 } else if (block_cnt == 1) { 18465 /* last single sgl with non-contiguous xri */ 18466 if (sgl_size > SGL_PAGE_SIZE) 18467 pdma_phys_sgl1 = 18468 lpfc_ncmd->dma_phys_sgl + 18469 SGL_PAGE_SIZE; 18470 else 18471 pdma_phys_sgl1 = 0; 18472 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; 18473 status = lpfc_sli4_post_sgl( 18474 phba, lpfc_ncmd->dma_phys_sgl, 18475 pdma_phys_sgl1, cur_xritag); 18476 if (status) { 18477 /* Post error. Buffer unavailable. */ 18478 lpfc_ncmd->flags |= 18479 LPFC_SBUF_NOT_POSTED; 18480 } else { 18481 /* Post success. Bffer available. */ 18482 lpfc_ncmd->flags &= 18483 ~LPFC_SBUF_NOT_POSTED; 18484 lpfc_ncmd->status = IOSTAT_SUCCESS; 18485 num_posted++; 18486 } 18487 /* success, put on NVME buffer sgl list */ 18488 list_add_tail(&lpfc_ncmd->list, &nvme_nblist); 18489 } 18490 } 18491 18492 /* continue until a nembed page worth of sgls */ 18493 if (post_cnt == 0) 18494 continue; 18495 18496 /* post block of NVME buffer list sgls */ 18497 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist, 18498 post_cnt); 18499 18500 /* don't reset xirtag due to hole in xri block */ 18501 if (block_cnt == 0) 18502 last_xritag = NO_XRI; 18503 18504 /* reset NVME buffer post count for next round of posting */ 18505 post_cnt = 0; 18506 18507 /* put posted NVME buffer-sgl posted on NVME buffer sgl list */ 18508 while (!list_empty(&blck_nblist)) { 18509 list_remove_head(&blck_nblist, lpfc_ncmd, 18510 struct lpfc_io_buf, list); 18511 if (status) { 18512 /* Post error. Mark buffer unavailable. */ 18513 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED; 18514 } else { 18515 /* Post success, Mark buffer available. 
*/ 18516 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED; 18517 lpfc_ncmd->status = IOSTAT_SUCCESS; 18518 num_posted++; 18519 } 18520 list_add_tail(&lpfc_ncmd->list, &nvme_nblist); 18521 } 18522 } 18523 /* Push NVME buffers with sgl posted to the available list */ 18524 lpfc_io_buf_replenish(phba, &nvme_nblist); 18525 18526 return num_posted; 18527 } 18528 18529 /** 18530 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 18531 * @phba: pointer to lpfc_hba struct that the frame was received on 18532 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 18533 * 18534 * This function checks the fields in the @fc_hdr to see if the FC frame is a 18535 * valid type of frame that the LPFC driver will handle. This function will 18536 * return a zero if the frame is a valid frame or a non zero value when the 18537 * frame does not pass the check. 18538 **/ 18539 static int 18540 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) 18541 { 18542 /* make rctl_names static to save stack space */ 18543 struct fc_vft_header *fc_vft_hdr; 18544 struct fc_app_header *fc_app_hdr; 18545 uint32_t *header = (uint32_t *) fc_hdr; 18546 18547 #define FC_RCTL_MDS_DIAGS 0xF4 18548 18549 switch (fc_hdr->fh_r_ctl) { 18550 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 18551 case FC_RCTL_DD_SOL_DATA: /* solicited data */ 18552 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ 18553 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ 18554 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ 18555 case FC_RCTL_DD_DATA_DESC: /* data descriptor */ 18556 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ 18557 case FC_RCTL_DD_CMD_STATUS: /* command status */ 18558 case FC_RCTL_ELS_REQ: /* extended link services request */ 18559 case FC_RCTL_ELS_REP: /* extended link services reply */ 18560 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ 18561 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ 18562 case FC_RCTL_BA_ABTS: /* basic link service abort */ 18563 case FC_RCTL_BA_RMC: /* remove connection */ 18564 case FC_RCTL_BA_ACC: /* basic accept */ 18565 case FC_RCTL_BA_RJT: /* basic reject */ 18566 case FC_RCTL_BA_PRMT: 18567 case FC_RCTL_ACK_1: /* acknowledge_1 */ 18568 case FC_RCTL_ACK_0: /* acknowledge_0 */ 18569 case FC_RCTL_P_RJT: /* port reject */ 18570 case FC_RCTL_F_RJT: /* fabric reject */ 18571 case FC_RCTL_P_BSY: /* port busy */ 18572 case FC_RCTL_F_BSY: /* fabric busy to data frame */ 18573 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ 18574 case FC_RCTL_LCR: /* link credit reset */ 18575 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */ 18576 case FC_RCTL_END: /* end */ 18577 break; 18578 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ 18579 fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 18580 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; 18581 return lpfc_fc_frame_check(phba, fc_hdr); 18582 case FC_RCTL_BA_NOP: /* basic link service NOP */ 18583 default: 18584 goto drop; 18585 } 18586 18587 switch (fc_hdr->fh_type) { 18588 case FC_TYPE_BLS: 18589 case FC_TYPE_ELS: 18590 case FC_TYPE_FCP: 18591 case FC_TYPE_CT: 18592 case FC_TYPE_NVME: 18593 break; 18594 case FC_TYPE_IP: 18595 case FC_TYPE_ILS: 18596 default: 18597 goto drop; 18598 } 18599 18600 if (unlikely(phba->link_flag == LS_LOOPBACK_MODE && 18601 phba->cfg_vmid_app_header)) { 18602 /* Application header is 16B device header */ 18603 if (fc_hdr->fh_df_ctl & LPFC_FC_16B_DEVICE_HEADER) { 18604 fc_app_hdr = (struct fc_app_header *) (fc_hdr + 1); 18605 if 
(be32_to_cpu(fc_app_hdr->src_app_id) != 18606 LOOPBACK_SRC_APPID) { 18607 lpfc_printf_log(phba, KERN_WARNING, 18608 LOG_ELS | LOG_LIBDFC, 18609 "1932 Loopback src app id " 18610 "not matched, app_id:x%x\n", 18611 be32_to_cpu(fc_app_hdr->src_app_id)); 18612 18613 goto drop; 18614 } 18615 } else { 18616 lpfc_printf_log(phba, KERN_WARNING, 18617 LOG_ELS | LOG_LIBDFC, 18618 "1933 Loopback df_ctl bit not set, " 18619 "df_ctl:x%x\n", 18620 fc_hdr->fh_df_ctl); 18621 18622 goto drop; 18623 } 18624 } 18625 18626 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 18627 "2538 Received frame rctl:x%x, type:x%x, " 18628 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", 18629 fc_hdr->fh_r_ctl, fc_hdr->fh_type, 18630 be32_to_cpu(header[0]), be32_to_cpu(header[1]), 18631 be32_to_cpu(header[2]), be32_to_cpu(header[3]), 18632 be32_to_cpu(header[4]), be32_to_cpu(header[5]), 18633 be32_to_cpu(header[6])); 18634 return 0; 18635 drop: 18636 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 18637 "2539 Dropped frame rctl:x%x type:x%x\n", 18638 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 18639 return 1; 18640 } 18641 18642 /** 18643 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame 18644 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 18645 * 18646 * This function processes the FC header to retrieve the VFI from the VF 18647 * header, if one exists. This function will return the VFI if one exists 18648 * or 0 if no VSAN Header exists. 18649 **/ 18650 static uint32_t 18651 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) 18652 { 18653 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 18654 18655 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) 18656 return 0; 18657 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); 18658 } 18659 18660 /** 18661 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to 18662 * @phba: Pointer to the HBA structure to search for the vport on 18663 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 18664 * @fcfi: The FC Fabric ID that the frame came from 18665 * @did: Destination ID to match against 18666 * 18667 * This function searches the @phba for a vport that matches the content of the 18668 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the 18669 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function 18670 * returns the matching vport pointer or NULL if unable to match frame to a 18671 * vport. 18672 **/ 18673 static struct lpfc_vport * 18674 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, 18675 uint16_t fcfi, uint32_t did) 18676 { 18677 struct lpfc_vport **vports; 18678 struct lpfc_vport *vport = NULL; 18679 int i; 18680 18681 if (did == Fabric_DID) 18682 return phba->pport; 18683 if (test_bit(FC_PT2PT, &phba->pport->fc_flag) && 18684 phba->link_state != LPFC_HBA_READY) 18685 return phba->pport; 18686 18687 vports = lpfc_create_vport_work_array(phba); 18688 if (vports != NULL) { 18689 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 18690 if (phba->fcf.fcfi == fcfi && 18691 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && 18692 vports[i]->fc_myDID == did) { 18693 vport = vports[i]; 18694 break; 18695 } 18696 } 18697 } 18698 lpfc_destroy_vport_work_array(phba, vports); 18699 return vport; 18700 } 18701 18702 /** 18703 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp 18704 * @vport: The vport to work on. 18705 * 18706 * This function updates the receive sequence time stamp for this vport. 
The
 * receive sequence time stamp indicates the time that the last frame of
 * the sequence that has been idle for the longest amount of time was
 * received. The driver uses this time stamp to indicate if any received
 * sequences have timed out.
 **/
static void
lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *dmabuf = NULL;

	/* get the oldest sequence on the rcv list */
	h_buf = list_get_first(&vport->rcv_buffer_list,
			       struct lpfc_dmabuf, list);
	if (!h_buf)
		return;
	dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
	vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
}

/**
 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function cleans up all outstanding received sequences. This is called
 * by the driver when a link event or user action invalidates all the received
 * sequences.
 **/
void
lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;

	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
}

/**
 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function determines whether any received sequences have timed out by
 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
 * indicates that there is at least one timed out sequence this routine will
 * go through the received sequences one at a time from most inactive to most
 * active to determine which ones need to be cleaned up. Once it has determined
 * that a sequence needs to be cleaned up it will simply free up the resources
 * without sending an abort.
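 *
 * A sequence is considered timed out once jiffies passes the sequence's
 * time_stamp plus msecs_to_jiffies(fc_edtov).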
 **/
void
lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;
	unsigned long timeout;
	int abort_count = 0;

	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
		   vport->rcv_buffer_time_stamp);
	if (list_empty(&vport->rcv_buffer_list) ||
	    time_before(jiffies, timeout))
		return;
	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
			   dmabuf->time_stamp);
		if (time_before(jiffies, timeout))
			break;
		abort_count++;
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
	if (abort_count)
		lpfc_update_rcv_time_stamp(vport);
}

/**
 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
 *
 * This function searches through the existing incomplete sequences that have
 * been sent to this @vport. If the frame matches one of the incomplete
 * sequences then the dbuf in the @dmabuf is added to the list of frames that
 * make up that sequence. If no sequence is found that matches this frame then
 * the function will add the hbuf in the @dmabuf to the @vport's
 * rcv_buffer_list. This function returns a pointer to the first dmabuf in the
 * sequence list that the frame was linked to.
 **/
static struct hbq_dmabuf *
lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf;
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;
	struct hbq_dmabuf *temp_dmabuf = NULL;
	uint8_t found = 0;

	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	dmabuf->time_stamp = jiffies;
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;

	/* Use the hdr_buf to find the sequence that this frame belongs to */
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}
	if (!seq_dmabuf) {
		/*
		 * This indicates first frame received for this sequence.
		 * Queue the buffer on the vport's rcv_buffer_list.
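		 * Sequences are keyed by fh_seq_id, fh_ox_id and fh_s_id;
		 * later frames for the same key are spliced in below in
		 * ascending fh_seq_cnt order.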
		 */
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	temp_hdr = seq_dmabuf->hbuf.virt;
	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
	    be16_to_cpu(temp_hdr->fh_seq_cnt)) {
		list_del_init(&seq_dmabuf->hbuf.list);
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	/* move this sequence to the tail to indicate a young sequence */
	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
	seq_dmabuf->time_stamp = jiffies;
	lpfc_update_rcv_time_stamp(vport);
	if (list_empty(&seq_dmabuf->dbuf.list)) {
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		return seq_dmabuf;
	}
	/* find the correct place in the sequence to insert this frame */
	d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
	while (!found) {
		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
		/*
		 * If the frame's sequence count is greater than the frame on
		 * the list then insert the frame right after this frame
		 */
		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
		    be16_to_cpu(temp_hdr->fh_seq_cnt)) {
			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
			found = 1;
			break;
		}

		if (&d_buf->list == &seq_dmabuf->dbuf.list)
			break;
		d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
	}

	if (found)
		return seq_dmabuf;
	return NULL;
}

/**
 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the partially assembled sequence described
 * by the information in the basic abort @dmabuf. It checks whether such a
 * partially assembled sequence is held by the driver. If so, it frees up all
 * the frames from the partially assembled sequence.
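 *
 * A sequence matches the abort when its SEQ_ID, OX_ID and 3-byte S_ID all
 * match the corresponding fields of the ABTS frame header, the same key
 * lpfc_fc_frame_add() uses when assembling the sequence.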
 *
 * Return
 * true  -- if a matching partially assembled sequence was present and all
 *          its frames were freed with the sequence;
 * false -- if there is no matching partially assembled sequence present so
 *          nothing got aborted in the lower layer driver
 **/
static bool
lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
			    struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;

	/* Use the hdr_buf to find the sequence that matches this frame */
	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	INIT_LIST_HEAD(&dmabuf->hbuf.list);
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}

	/* Free up all the frames from the partially assembled sequence */
	if (seq_dmabuf) {
		list_for_each_entry_safe(d_buf, n_buf,
					 &seq_dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		return true;
	}
	return false;
}

/**
 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort a sequence that has already been assembled
 * and handed to the upper level protocol, described by the information in
 * the basic abort @dmabuf. It checks whether such a pending context exists
 * at the upper level protocol. If so, it cleans up the pending context.
 *
 * Return
 * true  -- if a matching pending context for the sequence was cleaned up
 *          at the ulp;
 * false -- if no matching pending context for the sequence was present
 *          at the ulp.
 **/
static bool
lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	int handled;

	/* Accepting abort at ulp with SLI4 only */
	if (phba->sli_rev < LPFC_SLI_REV4)
		return false;

	/* Notify all interested upper level protocols of the abort */
	handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
	if (handled)
		return true;

	return false;
}

/**
 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
 * @phba: Pointer to HBA context object.
 * @cmd_iocbq: pointer to the command iocbq structure.
 * @rsp_iocbq: pointer to the response iocbq structure.
 *
 * This function handles the sequence abort response iocb command complete
 * event. It properly releases the memory allocated to the sequence abort
 * accept iocb.
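 * It also drops the node reference that was taken on the ndlp when the
 * BLS response was submitted.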
 **/
static void
lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
			     struct lpfc_iocbq *cmd_iocbq,
			     struct lpfc_iocbq *rsp_iocbq)
{
	if (cmd_iocbq) {
		lpfc_nlp_put(cmd_iocbq->ndlp);
		lpfc_sli_release_iocbq(phba, cmd_iocbq);
	}

	/* Failure means BLS ABORT RSP did not get delivered to remote node */
	if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3154 BLS ABORT RSP failed, data: x%x/x%x\n",
				get_job_ulpstatus(phba, rsp_iocbq),
				get_job_word4(phba, rsp_iocbq));
}

/**
 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
 * @phba: Pointer to HBA context object.
 * @xri: xri id in transaction.
 *
 * This function validates that the xri maps to the known range of XRIs
 * allocated and used by the driver.
 **/
uint16_t
lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
		      uint16_t xri)
{
	uint16_t i;

	for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
		if (xri == phba->sli4_hba.xri_ids[i])
			return i;
	}
	return NO_XRI;
}

/**
 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
 * @vport: pointer to a virtual port.
 * @fc_hdr: pointer to a FC frame header.
 * @aborted: was the partially assembled receive sequence successfully aborted
 *
 * This function sends a basic response to a previous unsol sequence abort
 * event after aborting the sequence handling.
 **/
void
lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
			struct fc_frame_header *fc_hdr, bool aborted)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *ctiocb = NULL;
	struct lpfc_nodelist *ndlp;
	uint16_t oxid, rxid, xri, lxri;
	uint32_t sid, fctl;
	union lpfc_wqe128 *icmd;
	int rc;

	if (!lpfc_is_link_up(phba))
		return;

	sid = sli4_sid_from_fc_hdr(fc_hdr);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr->fh_rx_id);

	ndlp = lpfc_findnode_did(vport, sid);
	if (!ndlp) {
		ndlp = lpfc_nlp_init(vport, sid);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
					 "1268 Failed to allocate ndlp for "
					 "oxid:x%x SID:x%x\n", oxid, sid);
			return;
		}
		/* Put ndlp onto vport node list */
		lpfc_enqueue_node(vport, ndlp);
	}

	/* Allocate buffer for rsp iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb)
		return;

	icmd = &ctiocb->wqe;

	/* Extract the F_CTL field from FC_HDR */
	fctl = sli4_fctl_from_fc_hdr(fc_hdr);

	ctiocb->ndlp = lpfc_nlp_get(ndlp);
	if (!ctiocb->ndlp) {
		lpfc_sli_release_iocbq(phba, ctiocb);
		return;
	}

	ctiocb->vport = vport;
	ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
	ctiocb->sli4_lxritag = NO_XRI;
	ctiocb->sli4_xritag = NO_XRI;
	ctiocb->abort_rctl = FC_RCTL_BA_ACC;

	if (fctl & FC_FC_EX_CTX)
		/* Exchange responder sent the abort so we
		 * own the oxid.
		 */
		xri = oxid;
	else
		xri = rxid;
	lxri = lpfc_sli4_xri_inrange(phba, xri);
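	/* If the aborted exchange maps to an XRI the driver owns, mark the
	 * pair busy in the RRQ (Reinstate Recovery Qualifier) machinery so
	 * the XRI is not reused before the recovery window closes.
	 */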
	if (lxri != NO_XRI)
		lpfc_set_rrq_active(phba, ndlp, lxri,
				    (xri == oxid) ? rxid : oxid, 0);
	/* For BA_ABTS from exchange responder, if the logical xri with
	 * the oxid maps to the FCP XRI range, the port no longer has
	 * that exchange context, send a BLS_RJT. Override the IOCB for
	 * a BA_RJT.
	 */
	if ((fctl & FC_FC_EX_CTX) &&
	    (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
		ctiocb->abort_rctl = FC_RCTL_BA_RJT;
		bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
		bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
		       FC_BA_RJT_INV_XID);
		bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
		       FC_BA_RJT_UNABLE);
	}

	/* If BA_ABTS failed to abort a partially assembled receive sequence,
	 * the driver no longer has that exchange, send a BLS_RJT. Override
	 * the IOCB for a BA_RJT.
	 */
	if (!aborted) {
		ctiocb->abort_rctl = FC_RCTL_BA_RJT;
		bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
		bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
		       FC_BA_RJT_INV_XID);
		bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
		       FC_BA_RJT_UNABLE);
	}

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS sent by responder to CT exchange, construction
		 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
		 * field and RX_ID from ABTS for RX_ID field.
		 */
		ctiocb->abort_bls = LPFC_ABTS_UNSOL_RSP;
		bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
	} else {
		/* ABTS sent by initiator to CT exchange, construction
		 * of BA_ACC will need to allocate a new XRI as for the
		 * XRI_TAG field.
		 */
		ctiocb->abort_bls = LPFC_ABTS_UNSOL_INT;
	}

	/* OX_ID is invariant regardless of who sent the ABTS to the exchange */
	bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, oxid);
	bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);

	/* Use CT=VPI */
	bf_set(wqe_els_did, &icmd->xmit_bls_rsp.wqe_dest,
	       ndlp->nlp_DID);
	bf_set(xmit_bls_rsp64_temprpi, &icmd->xmit_bls_rsp,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_cmnd, &icmd->generic.wqe_com, CMD_XMIT_BLS_RSP64_CX);

	/* Xmit CT abts response on exchange <xid> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
			 ctiocb->abort_rctl, oxid, phba->link_state);

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2925 Failed to issue CT ABTS RSP x%x on "
				 "xri x%x, Data x%x\n",
				 ctiocb->abort_rctl, oxid,
				 phba->link_state);
		lpfc_nlp_put(ndlp);
		ctiocb->ndlp = NULL;
		lpfc_sli_release_iocbq(phba, ctiocb);
	}

	/* if only usage of this nodelist is BLS response, release initial ref
	 * to free ndlp when transmit completes
	 */
	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE &&
	    !test_bit(NLP_DROPPED, &ndlp->nlp_flag) &&
	    !(ndlp->fc4_xpt_flags & (NVME_XPT_REGD | SCSI_XPT_REGD))) {
		set_bit(NLP_DROPPED, &ndlp->nlp_flag);
		lpfc_nlp_put(ndlp);
	}
}

/**
 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
 * @vport: Pointer to the vport on which this sequence was received
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an SLI-4 unsolicited abort event.
 * If the unsolicited receive sequence is only partially assembled by the
 * driver, it shall abort the partially assembled frames for the sequence.
 * Otherwise, if the unsolicited receive sequence has been completely
 * assembled and passed to the Upper Layer Protocol (ULP), it marks the
 * per-oxid status to show that the unsolicited sequence has been aborted.
 * After that, it will issue a basic accept to accept the abort.
 **/
static void
lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
			     struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	struct fc_frame_header fc_hdr;
	uint32_t fctl;
	bool aborted;

	/* Make a copy of fc_hdr before the dmabuf being released */
	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS by responder to exchange, no cleanup needed */
		aborted = true;
	} else {
		/* ABTS by initiator to exchange, need to do cleanup */
		aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
		if (!aborted)
			aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
	}
	lpfc_in_buf_free(phba, &dmabuf->dbuf);

	if (phba->nvmet_support) {
		lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
		return;
	}

	/* Respond with BA_ACC or BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
}

/**
 * lpfc_seq_complete - Indicates if a sequence is complete
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function checks the sequence, starting with the frame described by
 * @dmabuf, to see if all the frames associated with this sequence are present.
 * The frames associated with this sequence are linked to the @dmabuf using the
 * dbuf list. This function looks for three major things: 1) that the first
 * frame has a sequence count of zero; 2) that there is a frame with the last
 * frame of the sequence bit set; 3) that there are no holes in the sequence
 * count. The function returns 1 when the sequence is complete, otherwise it
 * returns 0.
 **/
static int
lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *hdr;
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *seq_dmabuf;
	uint32_t fctl;
	int seq_count = 0;

	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* make sure the first frame of the sequence has a sequence count
	 * of zero
	 */
	if (hdr->fh_seq_cnt != seq_count)
		return 0;
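	/* F_CTL is a 24-bit field; assemble it from the three header bytes */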
	fctl = (hdr->fh_f_ctl[0] << 16 |
		hdr->fh_f_ctl[1] << 8 |
		hdr->fh_f_ctl[2]);
	/* If last frame of sequence we can return success. */
	if (fctl & FC_FC_END_SEQ)
		return 1;
	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
		/* If there is a hole in the sequence count then fail. */
		if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
			return 0;
		fctl = (hdr->fh_f_ctl[0] << 16 |
			hdr->fh_f_ctl[1] << 8 |
			hdr->fh_f_ctl[2]);
		/* If last frame of sequence we can return success. */
		if (fctl & FC_FC_END_SEQ)
			return 1;
	}
	return 0;
}

/**
 * lpfc_prep_seq - Prep sequence for ULP processing
 * @vport: Pointer to the vport on which this sequence was received
 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function takes a sequence, described by a list of frames, and creates
 * a list of iocbq structures to describe the sequence. This iocbq list will be
 * used to issue to the generic unsolicited sequence handler. This routine
 * returns a pointer to the first iocbq in the list. If the function is unable
 * to allocate an iocbq then it will throw out the received frames that were
 * not able to be described and return a pointer to the first iocbq. If unable
 * to allocate any iocbqs (including the first) this function will return NULL.
 **/
static struct lpfc_iocbq *
lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
{
	struct hbq_dmabuf *hbq_buf;
	struct lpfc_dmabuf *d_buf, *n_buf;
	struct lpfc_iocbq *first_iocbq, *iocbq;
	struct fc_frame_header *fc_hdr;
	uint32_t sid;
	uint32_t len, tot_len;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	/* remove from receive buffer list */
	list_del_init(&seq_dmabuf->hbuf.list);
	lpfc_update_rcv_time_stamp(vport);
	/* get the Remote Port's SID */
	sid = sli4_sid_from_fc_hdr(fc_hdr);
	tot_len = 0;
	/* Get an iocbq struct to fill in. */
	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
	if (first_iocbq) {
		/* Initialize the first IOCB. */
		first_iocbq->wcqe_cmpl.total_data_placed = 0;
		bf_set(lpfc_wcqe_c_status, &first_iocbq->wcqe_cmpl,
		       IOSTAT_SUCCESS);
		first_iocbq->vport = vport;

		/* Check FC Header to see what TYPE of frame we are rcv'ing */
		if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
			bf_set(els_rsp64_sid, &first_iocbq->wqe.xmit_els_rsp,
			       sli4_did_from_fc_hdr(fc_hdr));
		}

		bf_set(wqe_ctxt_tag, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
		       NO_XRI);
		bf_set(wqe_rcvoxid, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
		       be16_to_cpu(fc_hdr->fh_ox_id));

		/* put the first buffer into the first iocb */
		tot_len = bf_get(lpfc_rcqe_length,
				 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);

		first_iocbq->cmd_dmabuf = &seq_dmabuf->dbuf;
		first_iocbq->bpl_dmabuf = NULL;
		/* Keep track of the BDE count */
		first_iocbq->wcqe_cmpl.word3 = 1;

		if (tot_len > LPFC_DATA_BUF_SIZE)
			first_iocbq->wqe.gen_req.bde.tus.f.bdeSize =
				LPFC_DATA_BUF_SIZE;
		else
			first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = tot_len;

		first_iocbq->wcqe_cmpl.total_data_placed = tot_len;
		bf_set(wqe_els_did, &first_iocbq->wqe.xmit_els_rsp.wqe_dest,
		       sid);
	}
	iocbq = first_iocbq;
	/*
	 * Each IOCBq can have two Buffers assigned, so go through the list
	 * of buffers for this sequence and save two buffers in each IOCBq
	 */
	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
		if (!iocbq) {
			lpfc_in_buf_free(vport->phba, d_buf);
			continue;
		}
		if (!iocbq->bpl_dmabuf) {
			iocbq->bpl_dmabuf = d_buf;
			iocbq->wcqe_cmpl.word3++;
			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				     &hbq_buf->cq_event.cqe.rcqe_cmpl);
			iocbq->unsol_rcv_len = len;
			iocbq->wcqe_cmpl.total_data_placed += len;
			tot_len += len;
		} else {
			iocbq = lpfc_sli_get_iocbq(vport->phba);
			if (!iocbq) {
				if (first_iocbq) {
					bf_set(lpfc_wcqe_c_status,
					       &first_iocbq->wcqe_cmpl,
					       IOSTAT_SUCCESS);
					first_iocbq->wcqe_cmpl.parameter =
						IOERR_NO_RESOURCES;
				}
				lpfc_in_buf_free(vport->phba, d_buf);
				continue;
			}
			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				     &hbq_buf->cq_event.cqe.rcqe_cmpl);
			iocbq->cmd_dmabuf = d_buf;
			iocbq->bpl_dmabuf = NULL;
			iocbq->wcqe_cmpl.word3 = 1;

			if (len > LPFC_DATA_BUF_SIZE)
				iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
					LPFC_DATA_BUF_SIZE;
			else
				iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
					len;

			tot_len += len;
			iocbq->wcqe_cmpl.total_data_placed = tot_len;
			bf_set(wqe_els_did, &iocbq->wqe.xmit_els_rsp.wqe_dest,
			       sid);
			list_add_tail(&iocbq->list, &first_iocbq->list);
		}
	}
	/* Free the sequence's header buffer */
	if (!first_iocbq)
		lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);

	return first_iocbq;
}

static void
lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
			  struct hbq_dmabuf *seq_dmabuf)
{
	struct fc_frame_header *fc_hdr;
	struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
	struct lpfc_hba *phba = vport->phba;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
	if (!iocbq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2707 Ring %d handler: Failed to allocate "
				"iocb Rctl x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
		return;
	}
	if (!lpfc_complete_unsol_iocb(phba,
				      phba->sli4_hba.els_wq->pring,
				      iocbq, fc_hdr->fh_r_ctl,
				      fc_hdr->fh_type)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2540 Ring %d handler: unexpected Rctl "
				"x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
		lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
	}

	/* Free iocb created in lpfc_prep_seq */
	list_for_each_entry_safe(curr_iocb, next_iocb,
				 &iocbq->list, list) {
		list_del_init(&curr_iocb->list);
		lpfc_sli_release_iocbq(phba, curr_iocb);
	}
	lpfc_sli_release_iocbq(phba, iocbq);
}

static void
lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;

	if (pcmd && pcmd->virt)
		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
	kfree(pcmd);
	lpfc_sli_release_iocbq(phba, cmdiocb);
	lpfc_drain_txq(phba);
}

static void
lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
			      struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *fc_hdr;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq = NULL;
	union lpfc_wqe128 *pwqe;
	struct lpfc_dmabuf *pcmd = NULL;
	uint32_t frame_len;
	int rc;
	unsigned long iflags;

	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);

	/* Send the received frame back */
	iocbq = lpfc_sli_get_iocbq(phba);
	if (!iocbq) {
		/* Queue cq event and wakeup worker thread to process it */
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&dmabuf->cq_event.list,
			      &phba->sli4_hba.sp_queue_event);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
		lpfc_worker_wake_up(phba);
		return;
	}

	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
					    &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* copy in the payload */
	memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);

	iocbq->cmd_dmabuf = pcmd;
	iocbq->vport = vport;
	iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
	iocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
	iocbq->num_bdes = 0;

	pwqe = &iocbq->wqe;
	/* fill in BDE's for command */
	pwqe->gen_req.bde.addrHigh = putPaddrHigh(pcmd->phys);
	pwqe->gen_req.bde.addrLow = putPaddrLow(pcmd->phys);
	pwqe->gen_req.bde.tus.f.bdeSize = frame_len;
	pwqe->gen_req.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;

	pwqe->send_frame.frame_len = frame_len;
	pwqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((__be32 *)fc_hdr));
	pwqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((__be32 *)fc_hdr + 1));
	pwqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((__be32 *)fc_hdr + 2));
	pwqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((__be32 *)fc_hdr + 3));
	pwqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((__be32 *)fc_hdr + 4));
	pwqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((__be32 *)fc_hdr + 5));

	pwqe->generic.wqe_com.word7 = 0;
	pwqe->generic.wqe_com.word10 = 0;

	bf_set(wqe_cmnd, &pwqe->generic.wqe_com, CMD_SEND_FRAME);
	bf_set(wqe_sof, &pwqe->generic.wqe_com, 0x2E); /* SOF byte */
	bf_set(wqe_eof, &pwqe->generic.wqe_com, 0x41); /* EOF byte */
	bf_set(wqe_lenloc, &pwqe->generic.wqe_com, 1);
	bf_set(wqe_xbl, &pwqe->generic.wqe_com, 1);
	bf_set(wqe_dbde, &pwqe->generic.wqe_com, 1);
	bf_set(wqe_xc, &pwqe->generic.wqe_com, 1);
	bf_set(wqe_cmd_type, &pwqe->generic.wqe_com, 0xA);
	bf_set(wqe_cqid, &pwqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_xri_tag, &pwqe->generic.wqe_com, iocbq->sli4_xritag);
	bf_set(wqe_reqtag, &pwqe->generic.wqe_com, iocbq->iotag);
	bf_set(wqe_class, &pwqe->generic.wqe_com, CLASS3);
	pwqe->generic.wqe_com.abort_tag = iocbq->iotag;

	iocbq->cmd_cmpl = lpfc_sli4_mds_loopback_cmpl;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
	if (rc == IOCB_ERROR)
		goto exit;

	lpfc_in_buf_free(phba, &dmabuf->dbuf);
	return;

exit:
	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"2023 Unable to process MDS loopback frame\n");
	if (pcmd && pcmd->virt)
		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
	kfree(pcmd);
	if (iocbq)
		lpfc_sli_release_iocbq(phba, iocbq);
	lpfc_in_buf_free(phba, &dmabuf->dbuf);
}

/**
 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
 * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
 *
 * This function is called with no lock held. It processes all the received
 * buffers and hands a buffer to the upper layers when the buffer is the
 * final frame in the sequence. The interrupt service routine processes
 * received buffers at interrupt context. The worker thread calls
 * lpfc_sli4_handle_received_buffer, which in turn calls the appropriate
 * receive function when the final frame in a sequence is received.
 **/
void
lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
				 struct hbq_dmabuf *dmabuf)
{
	struct hbq_dmabuf *seq_dmabuf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_vport *vport;
	uint32_t fcfi;
	uint32_t did;

	/* Process each received buffer */
	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;

	if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
	    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
		vport = phba->pport;
		/* Handle MDS Loopback frames */
		if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
			lpfc_sli4_handle_mds_loopback(vport, dmabuf);
		else
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}

	/* check to see if this is a valid type of frame */
	if (lpfc_fc_frame_check(phba, fc_hdr)) {
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}

	if ((bf_get(lpfc_cqe_code,
		    &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
		fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
			      &dmabuf->cq_event.cqe.rcqe_cmpl);
	else
		fcfi = bf_get(lpfc_rcqe_fcf_id,
			      &dmabuf->cq_event.cqe.rcqe_cmpl);

	if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
		vport = phba->pport;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2023 MDS Loopback %d bytes\n",
				bf_get(lpfc_rcqe_length,
				       &dmabuf->cq_event.cqe.rcqe_cmpl));
		/* Handle MDS Loopback frames */
		lpfc_sli4_handle_mds_loopback(vport, dmabuf);
		return;
	}

	/* d_id this frame is directed to */
	did = sli4_did_from_fc_hdr(fc_hdr);

	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
	if (!vport) {
		/* throw out the frame */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}

	/* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
	if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
	    (did != Fabric_DID)) {
		/*
		 * Throw out the frame if we are not pt2pt.
		 * The pt2pt protocol allows for discovery frames
		 * to be received without a registered VPI.
		 */
		if (!test_bit(FC_PT2PT, &vport->fc_flag) ||
		    phba->link_state == LPFC_HBA_READY) {
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
			return;
		}
	}

	/* Handle the basic abort sequence (BA_ABTS) event */
	if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
		lpfc_sli4_handle_unsol_abort(vport, dmabuf);
		return;
	}

	/* Link this frame */
	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
	if (!seq_dmabuf) {
		/* unable to add frame to vport - throw it out */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
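	/* Frames of a sequence may arrive out of order; lpfc_seq_complete()
	 * only reports completion once there is a zero starting SEQ_CNT, no
	 * holes in the SEQ_CNT space, and an end-of-sequence frame.
	 */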
	/* If not the last frame in the sequence, continue processing frames. */
	if (!lpfc_seq_complete(seq_dmabuf))
		return;

	/* Send the complete sequence to the upper layer protocol */
	lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
}

/**
 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * HBA consistent with the SLI-4 interface spec. This routine
 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
 * SLI4_PAGE_SIZE / 64 rpi context headers.
 *
 * This routine does not require any locks. Its usage is expected
 * to be at driver load or reset recovery, when driver execution
 * is sequential.
 *
 * Return codes
 *	0 - successful
 *	-EIO - The mailbox failed to complete successfully.
 *	       When this error occurs, the driver is not guaranteed
 *	       to have any rpi regions posted to the device and
 *	       must either attempt to repost the regions or take a
 *	       fatal error.
 **/
int
lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_page;
	uint32_t rc = 0;
	uint16_t lrpi = 0;

	/* SLI4 ports that support extents do not require RPI headers. */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		goto exit;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		/*
		 * Assign the rpi headers a physical rpi only if the driver
		 * has not initialized those resources. A port reset only
		 * needs the headers posted.
		 */
		if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
		    LPFC_RPI_RSRC_RDY)
			rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];

		rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2008 Error %d posting all rpi "
					"headers\n", rc);
			rc = -EIO;
			break;
		}
	}

exit:
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
	       LPFC_RPI_RSRC_RDY);
	return rc;
}

/**
 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 * @rpi_page: pointer to the rpi memory region.
 *
 * This routine is invoked to post a single rpi header to the
 * HBA consistent with the SLI-4 interface spec. This memory region
 * maps up to 64 rpi context regions.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
	uint32_t rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* SLI4 ports that support extents do not require RPI headers. */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return rc;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	/* The port is notified of the header region via a mailbox command.
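	 * POST_HDR_TEMPLATE carries the region's physical address along
	 * with the starting physical RPI and page count that the headers
	 * cover.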
	 */
	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2001 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	/* Post all rpi memory regions to the port. */
	hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
			 sizeof(struct lpfc_sli4_cfg_mhdr),
			 LPFC_SLI4_MBX_EMBED);

	/* Post the physical rpi to the port for this rpi header. */
	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
	       rpi_page->start_rpi);
	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
	       hdr_tmpl, rpi_page->page_count);

	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)&hdr_tmpl->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2514 POST_RPI_HDR mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	} else {
		/*
		 * The next_rpi stores the next logical modulo-64 rpi value
		 * used to post physical rpis in subsequent rpi postings.
		 */
		spin_lock_irq(&phba->hbalock);
		phba->sli4_hba.next_rpi = rpi_page->next_rpi;
		spin_unlock_irq(&phba->hbalock);
	}
	return rc;
}

/**
 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the next available rpi from the
 * driver's logical rpi range. If the pool of rpis backed by posted rpi
 * headers runs low, the routine also grows the pool by creating and
 * posting another rpi header page.
 *
 * Returns
 *	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
 *	LPFC_RPI_ALLOC_ERROR if no rpis are available.
 **/
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
	unsigned long rpi;
	uint16_t max_rpi, rpi_limit;
	uint16_t rpi_remaining, lrpi = 0;
	struct lpfc_rpi_hdr *rpi_hdr;
	unsigned long iflag;

	/*
	 * Fetch the next logical rpi.  Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	rpi_limit = phba->sli4_hba.next_rpi;

	rpi = find_first_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit);
	if (rpi >= rpi_limit) {
		rpi = LPFC_RPI_ALLOC_ERROR;
	} else {
		set_bit(rpi, phba->sli4_hba.rpi_bmask);
		phba->sli4_hba.max_cfg_param.rpi_used++;
		phba->sli4_hba.rpi_count++;
	}
	lpfc_printf_log(phba, KERN_INFO,
			LOG_NODE | LOG_DISCOVERY,
			"0001 Allocated rpi:x%x max:x%x lim:x%x\n",
			(int)rpi, max_rpi, rpi_limit);

	/*
	 * Don't try to allocate more rpi header regions if the device limit
	 * has been exhausted.
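	 * A new header page is only grown below while the driver still has
	 * fewer than LPFC_RPI_LOW_WATER_MARK unallocated rpis on hand.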
	 */
	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
	    (phba->sli4_hba.rpi_count >= max_rpi)) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * RPI header postings are not required for SLI4 ports capable of
	 * extents.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * If the driver is running low on rpi resources, allocate another
	 * page now.  Note that the next_rpi value is used because
	 * it represents how many are actually in use whereas max_rpi notes
	 * how many are supported max by the device.
	 */
	rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
		if (!rpi_hdr) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2002 Error Could not grow rpi "
					"count\n");
		} else {
			lrpi = rpi_hdr->start_rpi;
			rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
		}
	}

	return rpi;
}

/**
 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to free
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.
 **/
static void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	/*
	 * if the rpi value indicates a prior unreg has already
	 * been done, skip the unreg.
	 */
	if (rpi == LPFC_RPI_ALLOC_ERROR)
		return;

	if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
		phba->sli4_hba.rpi_count--;
		phba->sli4_hba.max_cfg_param.rpi_used--;
	} else {
		lpfc_printf_log(phba, KERN_INFO,
				LOG_NODE | LOG_DISCOVERY,
				"2016 rpi %x not inuse\n",
				rpi);
	}
}

/**
 * lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to free
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.
 **/
void
lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_rpi(phba, rpi);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove the memory region that
 * provided rpi via a bitmask.
 **/
void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.rpi_bmask);
	kfree(phba->sli4_hba.rpi_ids);
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
}

/**
 * lpfc_sli4_resume_rpi - Resume traffic relative to an RPI
 * @ndlp: pointer to lpfc nodelist data structure.
 * @cmpl: completion call-back.
 * @iocbq: data to load as mbox ctx_u information
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
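 *
 * The routine takes its own reference on @ndlp for the mailbox; the
 * completion handler releases it, and a failed submission drops it
 * explicitly before returning.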
 **/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
		     void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *),
		     struct lpfc_iocbq *iocbq)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_hba *phba = ndlp->phba;
	int rc;

	/* The port is notified of the rpi resume via a mailbox command. */
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* If cmpl assigned, then this nlp_get pairs with
	 * lpfc_mbx_cmpl_resume_rpi.
	 *
	 * Else cmpl is NULL, then this nlp_get pairs with
	 * lpfc_sli_def_mbox_cmpl.
	 */
	if (!lpfc_nlp_get(ndlp)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2122 %s: Failed to get nlp ref\n",
				__func__);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}

	lpfc_resume_rpi(mboxq, ndlp);
	if (cmpl) {
		mboxq->mbox_cmpl = cmpl;
		mboxq->ctx_u.save_iocb = iocbq;
	} else {
		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	}
	mboxq->ctx_ndlp = ndlp;
	mboxq->vport = ndlp->vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2010 Resume RPI Mailbox failed "
				"status %d, mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		lpfc_nlp_put(ndlp);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}

/**
 * lpfc_sli4_init_vpi - Initialize a vpi with the port
 * @vport: Pointer to the vport for which the vpi is being initialized
 *
 * This routine is invoked to activate a vpi with the port.
 *
 * Returns:
 *	0 success
 *	-Evalue otherwise
 **/
int
lpfc_sli4_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc = 0;
	int retval = MBX_SUCCESS;
	uint32_t mbox_tmo;
	struct lpfc_hba *phba = vport->phba;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	lpfc_init_vpi(phba, mboxq, vport->vpi);
	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2022 INIT VPI Mailbox failed "
				 "status %d, mbxStatus x%x\n", rc,
				 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		retval = -EIO;
	}
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, vport->phba->mbox_mem_pool);

	return retval;
}

/**
 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: Pointer to mailbox object.
 *
 * This routine is invoked to manually add a single FCF record. The caller
 * must pass a completely initialized FCF_Record. This routine takes
 * care of the nonembedded mailbox operations.
 **/
static void
lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	void *virt_addr;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;

	virt_addr = mboxq->sge_array->addr[0];
	/* The IOCTL status is embedded in the mailbox subheader.
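	 * For a non-embedded command such as ADD_FCF, that subheader lives
	 * in the first external SGE buffer rather than in the MQE itself.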
	 */
	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	if ((shdr_status || shdr_add_status) &&
	    (shdr_status != STATUS_FCF_IN_USE))
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2558 ADD_FCF_RECORD mailbox failed with "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);

	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}

/**
 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the initialized fcf record to add.
 *
 * This routine is invoked to manually add a single FCF record. The caller
 * must pass a completely initialized FCF_Record. This routine takes
 * care of the nonembedded mailbox operations.
 **/
int
lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
{
	int rc = 0;
	LPFC_MBOXQ_t *mboxq;
	uint8_t *bytep;
	void *virt_addr;
	struct lpfc_mbx_sge sge;
	uint32_t alloc_len, req_len;
	uint32_t fcfindex;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2009 Failed to allocate mbox for ADD_FCF cmd\n");
		return -ENOMEM;
	}

	req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
		  sizeof(uint32_t);

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
				     req_len, LPFC_SLI4_MBX_NEMBED);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2523 Allocated DMA memory size (x%x) is "
				"less than the requested DMA memory "
				"size (x%x)\n", alloc_len, req_len);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return -ENOMEM;
	}

	/*
	 * Get the first SGE entry from the non-embedded DMA memory.  This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	virt_addr = mboxq->sge_array->addr[0];
	/*
	 * Configure the FCF record for FCFI 0.  This is the driver's
	 * hardcoded default and gets used in nonFIP mode.
	 */
	fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
	lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));

	/*
	 * Copy the fcf_index and the FCF Record Data.  The data starts after
	 * the FCoE header plus word10.  The data copy needs to be endian
	 * correct.
	 */
	bytep += sizeof(uint32_t);
	lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2515 ADD_FCF_RECORD mailbox failed with "
				"status 0x%x\n", rc);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		rc = -EIO;
	} else {
		rc = 0;
	}

	return rc;
}

/**
 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record to write the default data.
 * @fcf_index: FCF table entry index.
 *
 * This routine is invoked to build the driver's default FCF record. The
 * values used are hardcoded. This routine handles memory initialization.
 *
 **/
void
lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
				struct fcf_record *fcf_record,
				uint16_t fcf_index)
{
	memset(fcf_record, 0, sizeof(struct fcf_record));
	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
	       LPFC_FCF_FPMA | LPFC_FCF_SPMA);
	/* Set the VLAN bit map */
	if (phba->valid_vlan) {
		fcf_record->vlan_bitmap[phba->vlan_id / 8]
			= 1 << (phba->vlan_id % 8);
	}
}

/**
 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to scan the entire FCF table by reading FCF
 * record and processing it one at a time starting from the @fcf_index
 * for initial FCF discovery or fast FCF failover rediscovery.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
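 *
 * The routine sets FCF_TS_INPROG in hba_flag for the duration of the scan
 * submission and clears it again if the submission fails.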
 **/
int
lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
	phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2000 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_scan;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_scan;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;

	set_bit(FCF_TS_INPROG, &phba->hba_flag);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		error = -EIO;
	} else {
		/* Reset eligible FCF count for new scan */
		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
			phba->fcf.eligible_fcf_cnt = 0;
		error = 0;
	}
fail_fcf_scan:
	if (error) {
		if (mboxq)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		/* FCF scan failed, clear FCF_TS_INPROG flag */
		clear_bit(FCF_TS_INPROG, &phba->hba_flag);
	}
	return error;
}

/**
 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index
 * and to use it for FLOGI roundrobin FCF failover.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
int
lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
				"2763 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_read;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_read;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else
		error = 0;

fail_fcf_read:
	if (error && mboxq)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return error;
}

/**
 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index to
 * determine whether it's eligible for the FLOGI roundrobin failover list.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
int
lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
				"2758 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_read;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_read;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else
		error = 0;

fail_fcf_read:
	if (error && mboxq)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return error;
}

/**
 * lpfc_check_next_fcf_pri_level - Check next fcf priority level
 * @phba: pointer to the lpfc_hba struct for this port.
 *
 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
 * routine when the rr_bmask is empty. The FCF indices are put into the
 * rr_bmask based on their priority level, starting from the highest
 * priority and going down to the lowest. The most likely FCF candidate
 * will be in the highest priority group. When this routine is called it
 * searches the fcf_pri list for the next lowest priority group and
 * repopulates the rr_bmask with only those fcf_indexes.
 *
 * Returns:
 *	1=success 0=failure
 **/
static int
lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
{
	uint16_t next_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	int rc;
	int ret = 0;

	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
				    LPFC_SLI4_FCF_TBL_INDX_MAX);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3060 Last IDX %d\n", last_index);

	/* Verify the priority list has 2 or more entries */
	spin_lock_irq(&phba->hbalock);
	if (list_empty(&phba->fcf.fcf_pri_list) ||
	    list_is_singular(&phba->fcf.fcf_pri_list)) {
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"3061 Last IDX %d\n", last_index);
		return 0; /* Empty rr list */
	}
	spin_unlock_irq(&phba->hbalock);

	next_fcf_pri = 0;
	/*
	 * Clear the rr_bmask and set all of the bits that are at this
	 * priority.
	 */
	memset(phba->fcf.fcf_rr_bmask, 0,
	       sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
		if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
			continue;
		/*
		 * The first priority that has not failed FLOGI
		 * will be the highest.
		 */
		if (!next_fcf_pri)
			next_fcf_pri = fcf_pri->fcf_rec.priority;
		spin_unlock_irq(&phba->hbalock);
		if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
			rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
			if (rc)
				return 0;
		}
		spin_lock_irq(&phba->hbalock);
	}
	/*
	 * if next_fcf_pri was not set above and the list is not empty then
	 * we have failed flogis on all of them. So reset flogi failed
	 * and start at the beginning.
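	 * This second pass clears LPFC_FCF_FLOGI_FAILED on every entry and
	 * then rebuilds the bmask from the first (highest) priority found.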
	 */
	if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
		list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
			fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
			/*
			 * The first priority that has not failed FLOGI
			 * will be the highest.
			 */
			if (!next_fcf_pri)
				next_fcf_pri = fcf_pri->fcf_rec.priority;
			spin_unlock_irq(&phba->hbalock);
			if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
				rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
				if (rc)
					return 0;
			}
			spin_lock_irq(&phba->hbalock);
		}
	} else {
		ret = 1;
	}
	spin_unlock_irq(&phba->hbalock);

	return ret;
}

/**
 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get the next eligible FCF record index in a round
 * robin fashion. If the next eligible FCF record index equals to the
 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
 * shall be returned, otherwise, the next eligible FCF record's index
 * shall be returned.
 **/
uint16_t
lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
{
	uint16_t next;

	do {
		for_each_set_bit_wrap(next, phba->fcf.fcf_rr_bmask,
				      LPFC_SLI4_FCF_TBL_INDX_MAX,
				      phba->fcf.current_rec.fcf_indx) {
			if (next == phba->fcf.current_rec.fcf_indx)
				continue;

			if (!(phba->fcf.fcf_pri[next].fcf_rec.flag &
			      LPFC_FCF_FLOGI_FAILED)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
						"2845 Get next roundrobin "
						"failover FCF (x%x)\n", next);
				return next;
			}

			if (list_is_singular(&phba->fcf.fcf_pri_list))
				return LPFC_FCOE_FCF_NEXT_NONE;
		}

		/*
		 * If the next fcf index is not found, check if there are
		 * lower priority level fcf's in the fcf_priority list.
		 * Set up the rr_bmask with all of the available fcf bits
		 * at that level and continue the selection process.
		 */
	} while (lpfc_check_next_fcf_pri_level(phba));

	lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
			"2844 No roundrobin failover FCF available\n");

	return LPFC_FCOE_FCF_NEXT_NONE;
}

/**
 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index into the FCF table to 'set'
 *
 * This routine sets the FCF record index in to the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before setting the bit.
 *
 * Returns 0 if the index bit is successfully set, otherwise, it returns
 * -EINVAL.
20499 **/ 20500 int 20501 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) 20502 { 20503 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 20504 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 20505 "2610 FCF (x%x) reached driver's book " 20506 "keeping dimension:x%x\n", 20507 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 20508 return -EINVAL; 20509 } 20510 /* Set the eligible FCF record index bmask */ 20511 set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 20512 20513 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 20514 "2790 Set FCF (x%x) to roundrobin FCF failover " 20515 "bmask\n", fcf_index); 20516 20517 return 0; 20518 } 20519 20520 /** 20521 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index 20522 * @phba: pointer to lpfc hba data structure. 20523 * @fcf_index: index into the FCF table to 'clear' 20524 * 20525 * This routine clears the FCF record index from the eligible bmask for 20526 * roundrobin failover search. It checks to make sure that the index 20527 * does not go beyond the range of the driver allocated bmask dimension 20528 * before clearing the bit. 20529 **/ 20530 void 20531 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 20532 { 20533 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next; 20534 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 20535 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 20536 "2762 FCF (x%x) reached driver's book " 20537 "keeping dimension:x%x\n", 20538 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 20539 return; 20540 } 20541 /* Clear the eligible FCF record index bmask */ 20542 spin_lock_irq(&phba->hbalock); 20543 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list, 20544 list) { 20545 if (fcf_pri->fcf_rec.fcf_index == fcf_index) { 20546 list_del_init(&fcf_pri->list); 20547 break; 20548 } 20549 } 20550 spin_unlock_irq(&phba->hbalock); 20551 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 20552 20553 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 20554 "2791 Clear FCF (x%x) from roundrobin failover " 20555 "bmask\n", fcf_index); 20556 } 20557 20558 /** 20559 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table 20560 * @phba: pointer to lpfc hba data structure. 20561 * @mbox: An allocated pointer to type LPFC_MBOXQ_t 20562 * 20563 * This routine is the completion routine for the rediscover FCF table mailbox 20564 * command. If the mailbox command returned failure, it will try to stop the 20565 * FCF rediscover wait timer. 20566 **/ 20567 static void 20568 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 20569 { 20570 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 20571 uint32_t shdr_status, shdr_add_status; 20572 20573 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 20574 20575 shdr_status = bf_get(lpfc_mbox_hdr_status, 20576 &redisc_fcf->header.cfg_shdr.response); 20577 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 20578 &redisc_fcf->header.cfg_shdr.response); 20579 if (shdr_status || shdr_add_status) { 20580 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 20581 "2746 Requesting for FCF rediscovery failed " 20582 "status x%x add_status x%x\n", 20583 shdr_status, shdr_add_status); 20584 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { 20585 spin_lock_irq(&phba->hbalock); 20586 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 20587 spin_unlock_irq(&phba->hbalock); 20588 /* 20589 * CVL event triggered FCF rediscover request failed, 20590 * last resort to re-try current registered FCF entry. 
20591 */ 20592 lpfc_retry_pport_discovery(phba); 20593 } else { 20594 spin_lock_irq(&phba->hbalock); 20595 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 20596 spin_unlock_irq(&phba->hbalock); 20597 /* 20598 * DEAD FCF event triggered FCF rediscover request 20599 * failed, last resort to fail over as a link down 20600 * to FCF registration. 20601 */ 20602 lpfc_sli4_fcf_dead_failthrough(phba); 20603 } 20604 } else { 20605 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 20606 "2775 Start FCF rediscover quiescent timer\n"); 20607 /* 20608 * Start FCF rediscovery wait timer for pending FCF 20609 * before rescan FCF record table. 20610 */ 20611 lpfc_fcf_redisc_wait_start_timer(phba); 20612 } 20613 20614 mempool_free(mbox, phba->mbox_mem_pool); 20615 } 20616 20617 /** 20618 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port. 20619 * @phba: pointer to lpfc hba data structure. 20620 * 20621 * This routine is invoked to request for rediscovery of the entire FCF table 20622 * by the port. 20623 **/ 20624 int 20625 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) 20626 { 20627 LPFC_MBOXQ_t *mbox; 20628 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 20629 int rc, length; 20630 20631 /* Cancel retry delay timers to all vports before FCF rediscover */ 20632 lpfc_cancel_all_vport_retry_delay_timer(phba); 20633 20634 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 20635 if (!mbox) { 20636 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 20637 "2745 Failed to allocate mbox for " 20638 "requesting FCF rediscover.\n"); 20639 return -ENOMEM; 20640 } 20641 20642 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) - 20643 sizeof(struct lpfc_sli4_cfg_mhdr)); 20644 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 20645 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF, 20646 length, LPFC_SLI4_MBX_EMBED); 20647 20648 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 20649 /* Set count to 0 for invalidating the entire FCF database */ 20650 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0); 20651 20652 /* Issue the mailbox command asynchronously */ 20653 mbox->vport = phba->pport; 20654 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table; 20655 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 20656 20657 if (rc == MBX_NOT_FINISHED) { 20658 mempool_free(mbox, phba->mbox_mem_pool); 20659 return -EIO; 20660 } 20661 return 0; 20662 } 20663 20664 /** 20665 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event 20666 * @phba: pointer to lpfc hba data structure. 20667 * 20668 * This function is the failover routine as a last resort to the FCF DEAD 20669 * event when driver failed to perform fast FCF failover. 20670 **/ 20671 void 20672 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) 20673 { 20674 uint32_t link_state; 20675 20676 /* 20677 * Last resort as FCF DEAD event failover will treat this as 20678 * a link down, but save the link state because we don't want 20679 * it to be changed to Link Down unless it is already down. 20680 */ 20681 link_state = phba->link_state; 20682 lpfc_linkdown(phba); 20683 phba->link_state = link_state; 20684 20685 /* Unregister FCF if no devices connected to it */ 20686 lpfc_unregister_unused_fcf(phba); 20687 } 20688 20689 /** 20690 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data. 20691 * @phba: pointer to lpfc hba data structure. 20692 * @rgn23_data: pointer to configure region 23 data. 20693 * 20694 * This function gets SLI3 port configure region 23 data through memory dump 20695 * mailbox command. 
When it successfully retrieves data, the size of the data 20696 * will be returned, otherwise, 0 will be returned. 20697 **/ 20698 static uint32_t 20699 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 20700 { 20701 LPFC_MBOXQ_t *pmb = NULL; 20702 MAILBOX_t *mb; 20703 uint32_t offset = 0; 20704 int rc; 20705 20706 if (!rgn23_data) 20707 return 0; 20708 20709 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 20710 if (!pmb) { 20711 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 20712 "2600 failed to allocate mailbox memory\n"); 20713 return 0; 20714 } 20715 mb = &pmb->u.mb; 20716 20717 do { 20718 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23); 20719 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 20720 20721 if (rc != MBX_SUCCESS) { 20722 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 20723 "2601 failed to read config " 20724 "region 23, rc 0x%x Status 0x%x\n", 20725 rc, mb->mbxStatus); 20726 mb->un.varDmp.word_cnt = 0; 20727 } 20728 /* 20729 * The dump may return zero words when finished, or we got a 20730 * mailbox error; either way we are done. 20731 */ 20732 if (mb->un.varDmp.word_cnt == 0) 20733 break; 20734 20735 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset) 20736 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset; 20737 20738 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 20739 rgn23_data + offset, 20740 mb->un.varDmp.word_cnt); 20741 offset += mb->un.varDmp.word_cnt; 20742 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE); 20743 20744 mempool_free(pmb, phba->mbox_mem_pool); 20745 return offset; 20746 } 20747 20748 /** 20749 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data. 20750 * @phba: pointer to lpfc hba data structure. 20751 * @rgn23_data: pointer to configure region 23 data. 20752 * 20753 * This function gets SLI4 port configure region 23 data through memory dump 20754 * mailbox command. When it successfully retrieves data, the size of the data 20755 * will be returned, otherwise, 0 will be returned. 20756 **/ 20757 static uint32_t 20758 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 20759 { 20760 LPFC_MBOXQ_t *mboxq = NULL; 20761 struct lpfc_dmabuf *mp = NULL; 20762 struct lpfc_mqe *mqe; 20763 uint32_t data_length = 0; 20764 int rc; 20765 20766 if (!rgn23_data) 20767 return 0; 20768 20769 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 20770 if (!mboxq) { 20771 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 20772 "3105 failed to allocate mailbox memory\n"); 20773 return 0; 20774 } 20775 20776 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) 20777 goto out; 20778 mqe = &mboxq->u.mqe; 20779 mp = mboxq->ctx_buf; 20780 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 20781 if (rc) 20782 goto out; 20783 data_length = mqe->un.mb_words[5]; 20784 if (data_length == 0) 20785 goto out; 20786 if (data_length > DMP_RGN23_SIZE) { 20787 data_length = 0; 20788 goto out; 20789 } 20790 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length); 20791 out: 20792 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED); 20793 return data_length; 20794 } 20795 20796 /** 20797 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. 20798 * @phba: pointer to lpfc hba data structure. 20799 * 20800 * This function reads region 23 and parses the TLVs for port status to 20801 * decide if the user disabled the port. If a TLV indicates the 20802 * port is disabled, the hba_flag is set accordingly.
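 *
 * For reference, the region 23 layout assumed by the parsing below is,
 * informally:
 *
 *   bytes 0-3  : signature (LPFC_REGION23_SIGNATURE)
 *   bytes 4-7  : version (first byte compared to LPFC_REGION23_VERSION)
 *   then TLVs  : byte 0 = type, byte 1 = length in words, followed by
 *                (length * 4) bytes of value
 *
 * This summarizes only what the code below checks; it is not a formal
 * specification of the region format.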
20803 **/ 20804 void 20805 lpfc_sli_read_link_ste(struct lpfc_hba *phba) 20806 { 20807 uint8_t *rgn23_data = NULL; 20808 uint32_t if_type, data_size, sub_tlv_len, tlv_offset; 20809 uint32_t offset = 0; 20810 20811 /* Get adapter Region 23 data */ 20812 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL); 20813 if (!rgn23_data) 20814 goto out; 20815 20816 if (phba->sli_rev < LPFC_SLI_REV4) 20817 data_size = lpfc_sli_get_config_region23(phba, rgn23_data); 20818 else { 20819 if_type = bf_get(lpfc_sli_intf_if_type, 20820 &phba->sli4_hba.sli_intf); 20821 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) 20822 goto out; 20823 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data); 20824 } 20825 20826 if (!data_size) 20827 goto out; 20828 20829 /* Check the region signature first */ 20830 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) { 20831 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 20832 "2619 Config region 23 has bad signature\n"); 20833 goto out; 20834 } 20835 offset += 4; 20836 20837 /* Check the data structure version */ 20838 if (rgn23_data[offset] != LPFC_REGION23_VERSION) { 20839 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 20840 "2620 Config region 23 has bad version\n"); 20841 goto out; 20842 } 20843 offset += 4; 20844 20845 /* Parse TLV entries in the region */ 20846 while (offset < data_size) { 20847 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) 20848 break; 20849 /* 20850 * If the TLV is not driver specific TLV or driver id is 20851 * not linux driver id, skip the record. 20852 */ 20853 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) || 20854 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) || 20855 (rgn23_data[offset + 3] != 0)) { 20856 offset += rgn23_data[offset + 1] * 4 + 4; 20857 continue; 20858 } 20859 20860 /* Driver found a driver specific TLV in the config region */ 20861 sub_tlv_len = rgn23_data[offset + 1] * 4; 20862 offset += 4; 20863 tlv_offset = 0; 20864 20865 /* 20866 * Search for configured port state sub-TLV. 20867 */ 20868 while ((offset < data_size) && 20869 (tlv_offset < sub_tlv_len)) { 20870 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) { 20871 offset += 4; 20872 tlv_offset += 4; 20873 break; 20874 } 20875 if (rgn23_data[offset] != PORT_STE_TYPE) { 20876 offset += rgn23_data[offset + 1] * 4 + 4; 20877 tlv_offset += rgn23_data[offset + 1] * 4 + 4; 20878 continue; 20879 } 20880 20881 /* This HBA contains PORT_STE configured */ 20882 if (!rgn23_data[offset + 2]) 20883 set_bit(LINK_DISABLED, &phba->hba_flag); 20884 20885 goto out; 20886 } 20887 } 20888 20889 out: 20890 kfree(rgn23_data); 20891 return; 20892 } 20893 20894 /** 20895 * lpfc_log_fw_write_cmpl - logs firmware write completion status 20896 * @phba: pointer to lpfc hba data structure 20897 * @shdr_status: wr_object rsp's status field 20898 * @shdr_add_status: wr_object rsp's add_status field 20899 * @shdr_add_status_2: wr_object rsp's add_status_2 field 20900 * @shdr_change_status: wr_object rsp's change_status field 20901 * @shdr_csf: wr_object rsp's csf bit 20902 * 20903 * This routine is intended to be called after a firmware write completes. 20904 * It will log next action items to be performed by the user to instantiate 20905 * the newly downloaded firmware or reason for incompatibility. 
20906 **/ 20907 static void 20908 lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status, 20909 u32 shdr_add_status, u32 shdr_add_status_2, 20910 u32 shdr_change_status, u32 shdr_csf) 20911 { 20912 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 20913 "4198 %s: flash_id x%02x, asic_rev x%02x, " 20914 "status x%02x, add_status x%02x, add_status_2 x%02x, " 20915 "change_status x%02x, csf %01x\n", __func__, 20916 phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev, 20917 shdr_status, shdr_add_status, shdr_add_status_2, 20918 shdr_change_status, shdr_csf); 20919 20920 if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) { 20921 switch (shdr_add_status_2) { 20922 case LPFC_ADD_STATUS_2_INCOMPAT_FLASH: 20923 lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 20924 "4199 Firmware write failed: " 20925 "image incompatible with flash x%02x\n", 20926 phba->sli4_hba.flash_id); 20927 break; 20928 case LPFC_ADD_STATUS_2_INCORRECT_ASIC: 20929 lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 20930 "4200 Firmware write failed: " 20931 "image incompatible with ASIC " 20932 "architecture x%02x\n", 20933 phba->sli4_hba.asic_rev); 20934 break; 20935 default: 20936 lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 20937 "4210 Firmware write failed: " 20938 "add_status_2 x%02x\n", 20939 shdr_add_status_2); 20940 break; 20941 } 20942 } else if (!shdr_status && !shdr_add_status) { 20943 if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET || 20944 shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) { 20945 if (shdr_csf) 20946 shdr_change_status = 20947 LPFC_CHANGE_STATUS_PCI_RESET; 20948 } 20949 20950 switch (shdr_change_status) { 20951 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET): 20952 lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, 20953 "3198 Firmware write complete: System " 20954 "reboot required to instantiate\n"); 20955 break; 20956 case (LPFC_CHANGE_STATUS_FW_RESET): 20957 lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, 20958 "3199 Firmware write complete: " 20959 "Firmware reset required to " 20960 "instantiate\n"); 20961 break; 20962 case (LPFC_CHANGE_STATUS_PORT_MIGRATION): 20963 lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, 20964 "3200 Firmware write complete: Port " 20965 "Migration or PCI Reset required to " 20966 "instantiate\n"); 20967 break; 20968 case (LPFC_CHANGE_STATUS_PCI_RESET): 20969 lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, 20970 "3201 Firmware write complete: PCI " 20971 "Reset required to instantiate\n"); 20972 break; 20973 default: 20974 break; 20975 } 20976 } 20977 } 20978 20979 /** 20980 * lpfc_wr_object - write an object to the firmware 20981 * @phba: HBA structure that indicates port to create a queue on. 20982 * @dmabuf_list: list of dmabufs to write to the port. 20983 * @size: the total byte value of the objects to write to the port. 20984 * @offset: the current offset to be used to start the transfer. 20985 * 20986 * This routine will create a wr_object mailbox command to send to the port. 20987 * The mailbox command will be constructed using the dma buffers described in 20988 * @dmabuf_list to create a list of BDEs. This routine will fill in as many 20989 * BDEs as the embedded mailbox can support. The @offset variable will be 20990 * used to indicate the starting offset of the transfer and will also return 20991 * the offset after the write object mailbox has completed. @size is used to 20992 * determine the end of the object and whether the eof bit should be set.
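 *
 * A minimal sketch of a caller loop (illustrative only; the actual
 * callers elsewhere in the driver also manage the @dmabuf_list contents):
 *
 *   uint32_t offset = 0;
 *   while (offset < size) {
 *       rc = lpfc_wr_object(phba, &dmabuf_list, size, &offset);
 *       if (rc)
 *           break;  // on failure, offset holds the add_status value
 *   }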
20993 * 20994 * Return 0 if successful; @offset will contain the new offset to use 20995 * for the next write. 20996 * Return a negative value for error cases. 20997 **/ 20998 int 20999 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, 21000 uint32_t size, uint32_t *offset) 21001 { 21002 struct lpfc_mbx_wr_object *wr_object; 21003 LPFC_MBOXQ_t *mbox; 21004 int rc = 0, i = 0; 21005 int mbox_status = 0; 21006 uint32_t shdr_status, shdr_add_status, shdr_add_status_2; 21007 uint32_t shdr_change_status = 0, shdr_csf = 0; 21008 uint32_t mbox_tmo; 21009 struct lpfc_dmabuf *dmabuf; 21010 uint32_t written = 0; 21011 bool check_change_status = false; 21012 21013 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 21014 if (!mbox) 21015 return -ENOMEM; 21016 21017 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 21018 LPFC_MBOX_OPCODE_WRITE_OBJECT, 21019 sizeof(struct lpfc_mbx_wr_object) - 21020 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); 21021 21022 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object; 21023 wr_object->u.request.write_offset = *offset; 21024 sprintf((uint8_t *)wr_object->u.request.object_name, "/"); 21025 wr_object->u.request.object_name[0] = 21026 cpu_to_le32(wr_object->u.request.object_name[0]); 21027 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0); 21028 list_for_each_entry(dmabuf, dmabuf_list, list) { 21029 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size) 21030 break; 21031 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys); 21032 wr_object->u.request.bde[i].addrHigh = 21033 putPaddrHigh(dmabuf->phys); 21034 if (written + SLI4_PAGE_SIZE >= size) { 21035 wr_object->u.request.bde[i].tus.f.bdeSize = 21036 (size - written); 21037 written += (size - written); 21038 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1); 21039 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1); 21040 check_change_status = true; 21041 } else { 21042 wr_object->u.request.bde[i].tus.f.bdeSize = 21043 SLI4_PAGE_SIZE; 21044 written += SLI4_PAGE_SIZE; 21045 } 21046 i++; 21047 } 21048 wr_object->u.request.bde_count = i; 21049 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written); 21050 if (!phba->sli4_hba.intr_enable) 21051 mbox_status = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 21052 else { 21053 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 21054 mbox_status = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 21055 } 21056 21057 /* The mbox status needs to be maintained to detect MBOX_TIMEOUT. */ 21058 rc = mbox_status; 21059 21060 /* The IOCTL status is embedded in the mailbox subheader.
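 * Even when the mailbox transport itself succeeds, these subheader
 * status words determine whether the object data was actually accepted.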
*/ 21061 shdr_status = bf_get(lpfc_mbox_hdr_status, 21062 &wr_object->header.cfg_shdr.response); 21063 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 21064 &wr_object->header.cfg_shdr.response); 21065 shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2, 21066 &wr_object->header.cfg_shdr.response); 21067 if (check_change_status) { 21068 shdr_change_status = bf_get(lpfc_wr_object_change_status, 21069 &wr_object->u.response); 21070 shdr_csf = bf_get(lpfc_wr_object_csf, 21071 &wr_object->u.response); 21072 } 21073 21074 if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) { 21075 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 21076 "3025 Write Object mailbox failed with " 21077 "status x%x add_status x%x, add_status_2 x%x, " 21078 "mbx status x%x\n", 21079 shdr_status, shdr_add_status, shdr_add_status_2, 21080 rc); 21081 rc = -ENXIO; 21082 *offset = shdr_add_status; 21083 } else { 21084 *offset += wr_object->u.response.actual_write_length; 21085 } 21086 21087 if (rc || check_change_status) 21088 lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status, 21089 shdr_add_status_2, shdr_change_status, 21090 shdr_csf); 21091 21092 if (!phba->sli4_hba.intr_enable) 21093 mempool_free(mbox, phba->mbox_mem_pool); 21094 else if (mbox_status != MBX_TIMEOUT) 21095 mempool_free(mbox, phba->mbox_mem_pool); 21096 21097 return rc; 21098 } 21099 21100 /** 21101 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands. 21102 * @vport: pointer to vport data structure. 21103 * 21104 * This function iterates through the mailboxq and cleans up all REG_LOGIN 21105 * and REG_VPI mailbox commands associated with the vport. This function 21106 * is called when the driver wants to restart discovery of the vport due to 21107 * a Clear Virtual Link event. 21108 **/ 21109 void 21110 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) 21111 { 21112 struct lpfc_hba *phba = vport->phba; 21113 LPFC_MBOXQ_t *mb, *nextmb; 21114 struct lpfc_nodelist *ndlp; 21115 struct lpfc_nodelist *act_mbx_ndlp = NULL; 21116 LIST_HEAD(mbox_cmd_list); 21117 uint8_t restart_loop; 21118 21119 /* Clean up internally queued mailbox commands with the vport */ 21120 spin_lock_irq(&phba->hbalock); 21121 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 21122 if (mb->vport != vport) 21123 continue; 21124 21125 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 21126 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 21127 continue; 21128 21129 list_move_tail(&mb->list, &mbox_cmd_list); 21130 } 21131 /* Clean up active mailbox command with the vport */ 21132 mb = phba->sli.mbox_active; 21133 if (mb && (mb->vport == vport)) { 21134 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || 21135 (mb->u.mb.mbxCommand == MBX_REG_VPI)) 21136 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 21137 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 21138 act_mbx_ndlp = mb->ctx_ndlp; 21139 21140 /* This reference is local to this routine. The 21141 * reference is removed at routine exit. 21142 */ 21143 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp); 21144 21145 /* Unregister the RPI when mailbox complete */ 21146 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 21147 } 21148 } 21149 /* Cleanup any mailbox completions which are not yet processed */ 21150 do { 21151 restart_loop = 0; 21152 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { 21153 /* 21154 * If this mailbox is already processed or it is 21155 * for another vport, ignore it.
21156 */ 21157 if ((mb->vport != vport) || 21158 (mb->mbox_flag & LPFC_MBX_IMED_UNREG)) 21159 continue; 21160 21161 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 21162 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 21163 continue; 21164 21165 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 21166 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 21167 ndlp = mb->ctx_ndlp; 21168 /* Unregister the RPI when mailbox complete */ 21169 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 21170 restart_loop = 1; 21171 clear_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag); 21172 break; 21173 } 21174 } 21175 } while (restart_loop); 21176 21177 spin_unlock_irq(&phba->hbalock); 21178 21179 /* Release the cleaned-up mailbox commands */ 21180 while (!list_empty(&mbox_cmd_list)) { 21181 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list); 21182 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 21183 ndlp = mb->ctx_ndlp; 21184 mb->ctx_ndlp = NULL; 21185 if (ndlp) { 21186 clear_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag); 21187 lpfc_nlp_put(ndlp); 21188 } 21189 } 21190 lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_UNLOCKED); 21191 } 21192 21193 /* Release the ndlp with the cleaned-up active mailbox command */ 21194 if (act_mbx_ndlp) { 21195 clear_bit(NLP_IGNR_REG_CMPL, &act_mbx_ndlp->nlp_flag); 21196 lpfc_nlp_put(act_mbx_ndlp); 21197 } 21198 } 21199 21200 /** 21201 * lpfc_drain_txq - Drain the txq 21202 * @phba: Pointer to HBA context object. 21203 * 21204 * This function attempts to submit IOCBs on the txq 21205 * to the adapter. For SLI4 adapters, the txq contains 21206 * ELS IOCBs that have been deferred because there 21207 * are no SGLs. This congestion can occur with large 21208 * vport counts during node discovery. 21209 **/ 21210 21211 uint32_t 21212 lpfc_drain_txq(struct lpfc_hba *phba) 21213 { 21214 LIST_HEAD(completions); 21215 struct lpfc_sli_ring *pring; 21216 struct lpfc_iocbq *piocbq = NULL; 21217 unsigned long iflags = 0; 21218 char *fail_msg = NULL; 21219 uint32_t txq_cnt = 0; 21220 struct lpfc_queue *wq; 21221 int ret = 0; 21222 21223 if (phba->link_flag & LS_MDS_LOOPBACK) { 21224 /* MDS WQEs are posted only to the first WQ */ 21225 wq = phba->sli4_hba.hdwq[0].io_wq; 21226 if (unlikely(!wq)) 21227 return 0; 21228 pring = wq->pring; 21229 } else { 21230 wq = phba->sli4_hba.els_wq; 21231 if (unlikely(!wq)) 21232 return 0; 21233 pring = lpfc_phba_elsring(phba); 21234 } 21235 21236 if (unlikely(!pring) || list_empty(&pring->txq)) 21237 return 0; 21238 21239 spin_lock_irqsave(&pring->ring_lock, iflags); 21240 list_for_each_entry(piocbq, &pring->txq, list) { 21241 txq_cnt++; 21242 } 21243 21244 if (txq_cnt > pring->txq_max) 21245 pring->txq_max = txq_cnt; 21246 21247 spin_unlock_irqrestore(&pring->ring_lock, iflags); 21248 21249 while (!list_empty(&pring->txq)) { 21250 spin_lock_irqsave(&pring->ring_lock, iflags); 21251 21252 piocbq = lpfc_sli_ringtx_get(phba, pring); 21253 if (!piocbq) { 21254 spin_unlock_irqrestore(&pring->ring_lock, iflags); 21255 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 21256 "2823 txq empty and txq_cnt is %d\n", 21257 txq_cnt); 21258 break; 21259 } 21260 txq_cnt--; 21261 21262 ret = __lpfc_sli_issue_iocb(phba, pring->ringno, piocbq, 0); 21263 21264 if (ret && ret != IOCB_BUSY) { 21265 fail_msg = " - Cannot send IO "; 21266 piocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED; 21267 } 21268 if (fail_msg) { 21269 piocbq->cmd_flag |= LPFC_DRIVER_ABORTED; 21270 /* Failed means we can't issue and need to cancel */ 21271 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 21272 "2822 IOCB failed %s iotag 0x%x " 21273 "xri 0x%x %d flg 
x%x\n", 21274 fail_msg, piocbq->iotag, 21275 piocbq->sli4_xritag, ret, 21276 piocbq->cmd_flag); 21277 list_add_tail(&piocbq->list, &completions); 21278 fail_msg = NULL; 21279 } 21280 spin_unlock_irqrestore(&pring->ring_lock, iflags); 21281 if (txq_cnt == 0 || ret == IOCB_BUSY) 21282 break; 21283 } 21284 /* Cancel all the IOCBs that cannot be issued */ 21285 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 21286 IOERR_SLI_ABORTED); 21287 21288 return txq_cnt; 21289 } 21290 21291 /** 21292 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl. 21293 * @phba: Pointer to HBA context object. 21294 * @pwqeq: Pointer to command WQE. 21295 * @sglq: Pointer to the scatter gather queue object. 21296 * 21297 * This routine converts the bpl or bde that is in the WQE 21298 * to a sgl list for the sli4 hardware. The physical address 21299 * of the bpl/bde is converted back to a virtual address. 21300 * If the WQE contains a BPL then the list of BDE's is 21301 * converted to sli4_sge's. If the WQE contains a single 21302 * BDE then it is converted to a single sli_sge. 21303 * The WQE is still in cpu endianness so the contents of 21304 * the bpl can be used without byte swapping. 21305 * 21306 * Returns valid XRI = Success, NO_XRI = Failure. 21307 */ 21308 static uint16_t 21309 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq, 21310 struct lpfc_sglq *sglq) 21311 { 21312 uint16_t xritag = NO_XRI; 21313 struct ulp_bde64 *bpl = NULL; 21314 struct ulp_bde64 bde; 21315 struct sli4_sge *sgl = NULL; 21316 struct lpfc_dmabuf *dmabuf; 21317 union lpfc_wqe128 *wqe; 21318 int numBdes = 0; 21319 int i = 0; 21320 uint32_t offset = 0; /* accumulated offset in the sg request list */ 21321 int inbound = 0; /* number of sg reply entries inbound from firmware */ 21322 uint32_t cmd; 21323 21324 if (!pwqeq || !sglq) 21325 return xritag; 21326 21327 sgl = (struct sli4_sge *)sglq->sgl; 21328 wqe = &pwqeq->wqe; 21329 pwqeq->iocb.ulpIoTag = pwqeq->iotag; 21330 21331 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com); 21332 if (cmd == CMD_XMIT_BLS_RSP64_WQE) 21333 return sglq->sli4_xritag; 21334 numBdes = pwqeq->num_bdes; 21335 if (numBdes) { 21336 /* The addrHigh and addrLow fields within the WQE 21337 * have not been byteswapped yet so there is no 21338 * need to swap them back. 21339 */ 21340 if (pwqeq->bpl_dmabuf) 21341 dmabuf = pwqeq->bpl_dmabuf; 21342 else 21343 return xritag; 21344 21345 bpl = (struct ulp_bde64 *)dmabuf->virt; 21346 if (!bpl) 21347 return xritag; 21348 21349 for (i = 0; i < numBdes; i++) { 21350 /* Should already be byte swapped. */ 21351 sgl->addr_hi = bpl->addrHigh; 21352 sgl->addr_lo = bpl->addrLow; 21353 21354 sgl->word2 = le32_to_cpu(sgl->word2); 21355 if ((i+1) == numBdes) 21356 bf_set(lpfc_sli4_sge_last, sgl, 1); 21357 else 21358 bf_set(lpfc_sli4_sge_last, sgl, 0); 21359 /* swap the size field back to the cpu so we 21360 * can assign it to the sgl. 21361 */ 21362 bde.tus.w = le32_to_cpu(bpl->tus.w); 21363 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 21364 /* The offsets in the sgl need to be accumulated 21365 * separately for the request and reply lists. 21366 * The request is always first, the reply follows. 21367 */ 21368 switch (cmd) { 21369 case CMD_GEN_REQUEST64_WQE: 21370 /* add up the reply sg entries */ 21371 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 21372 inbound++; 21373 /* first inbound? 
reset the offset */ 21374 if (inbound == 1) 21375 offset = 0; 21376 bf_set(lpfc_sli4_sge_offset, sgl, offset); 21377 bf_set(lpfc_sli4_sge_type, sgl, 21378 LPFC_SGE_TYPE_DATA); 21379 offset += bde.tus.f.bdeSize; 21380 break; 21381 case CMD_FCP_TRSP64_WQE: 21382 bf_set(lpfc_sli4_sge_offset, sgl, 0); 21383 bf_set(lpfc_sli4_sge_type, sgl, 21384 LPFC_SGE_TYPE_DATA); 21385 break; 21386 case CMD_FCP_TSEND64_WQE: 21387 case CMD_FCP_TRECEIVE64_WQE: 21388 bf_set(lpfc_sli4_sge_type, sgl, 21389 bpl->tus.f.bdeFlags); 21390 if (i < 3) 21391 offset = 0; 21392 else 21393 offset += bde.tus.f.bdeSize; 21394 bf_set(lpfc_sli4_sge_offset, sgl, offset); 21395 break; 21396 } 21397 sgl->word2 = cpu_to_le32(sgl->word2); 21398 bpl++; 21399 sgl++; 21400 } 21401 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) { 21402 /* The addrHigh and addrLow fields of the BDE have not 21403 * been byteswapped yet so they need to be swapped 21404 * before putting them in the sgl. 21405 */ 21406 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh); 21407 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow); 21408 sgl->word2 = le32_to_cpu(sgl->word2); 21409 bf_set(lpfc_sli4_sge_last, sgl, 1); 21410 sgl->word2 = cpu_to_le32(sgl->word2); 21411 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize); 21412 } 21413 return sglq->sli4_xritag; 21414 } 21415 21416 /** 21417 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE) 21418 * @phba: Pointer to HBA context object. 21419 * @qp: Pointer to HDW queue. 21420 * @pwqe: Pointer to command WQE. 21421 **/ 21422 int 21423 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, 21424 struct lpfc_iocbq *pwqe) 21425 { 21426 union lpfc_wqe128 *wqe = &pwqe->wqe; 21427 struct lpfc_async_xchg_ctx *ctxp; 21428 struct lpfc_queue *wq; 21429 struct lpfc_sglq *sglq; 21430 struct lpfc_sli_ring *pring; 21431 unsigned long iflags; 21432 int ret = 0; 21433 21434 /* NVME_LS and NVME_LS ABTS requests. */ 21435 if (pwqe->cmd_flag & LPFC_IO_NVME_LS) { 21436 pring = phba->sli4_hba.nvmels_wq->pring; 21437 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, 21438 qp, wq_access); 21439 sglq = __lpfc_sli_get_els_sglq(phba, pwqe); 21440 if (!sglq) { 21441 spin_unlock_irqrestore(&pring->ring_lock, iflags); 21442 return WQE_BUSY; 21443 } 21444 pwqe->sli4_lxritag = sglq->sli4_lxritag; 21445 pwqe->sli4_xritag = sglq->sli4_xritag; 21446 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) { 21447 spin_unlock_irqrestore(&pring->ring_lock, iflags); 21448 return WQE_ERROR; 21449 } 21450 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, 21451 pwqe->sli4_xritag); 21452 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe); 21453 if (ret) { 21454 spin_unlock_irqrestore(&pring->ring_lock, iflags); 21455 return ret; 21456 } 21457 21458 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 21459 spin_unlock_irqrestore(&pring->ring_lock, iflags); 21460 21461 lpfc_sli4_poll_eq(qp->hba_eq); 21462 return 0; 21463 } 21464 21465 /* NVME_FCREQ and NVME_ABTS requests */ 21466 if (pwqe->cmd_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) { 21467 /* Get the IO distribution (hba_wqidx) for WQ assignment. 
*/ 21468 wq = qp->io_wq; 21469 pring = wq->pring; 21470 21471 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map); 21472 21473 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, 21474 qp, wq_access); 21475 ret = lpfc_sli4_wq_put(wq, wqe); 21476 if (ret) { 21477 spin_unlock_irqrestore(&pring->ring_lock, iflags); 21478 return ret; 21479 } 21480 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 21481 spin_unlock_irqrestore(&pring->ring_lock, iflags); 21482 21483 lpfc_sli4_poll_eq(qp->hba_eq); 21484 return 0; 21485 } 21486 21487 /* NVMET requests */ 21488 if (pwqe->cmd_flag & LPFC_IO_NVMET) { 21489 /* Get the IO distribution (hba_wqidx) for WQ assignment. */ 21490 wq = qp->io_wq; 21491 pring = wq->pring; 21492 21493 ctxp = pwqe->context_un.axchg; 21494 sglq = ctxp->ctxbuf->sglq; 21495 if (pwqe->sli4_xritag == NO_XRI) { 21496 pwqe->sli4_lxritag = sglq->sli4_lxritag; 21497 pwqe->sli4_xritag = sglq->sli4_xritag; 21498 } 21499 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, 21500 pwqe->sli4_xritag); 21501 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map); 21502 21503 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, 21504 qp, wq_access); 21505 ret = lpfc_sli4_wq_put(wq, wqe); 21506 if (ret) { 21507 spin_unlock_irqrestore(&pring->ring_lock, iflags); 21508 return ret; 21509 } 21510 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 21511 spin_unlock_irqrestore(&pring->ring_lock, iflags); 21512 21513 lpfc_sli4_poll_eq(qp->hba_eq); 21514 return 0; 21515 } 21516 return WQE_ERROR; 21517 } 21518 21519 /** 21520 * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort 21521 * @phba: Pointer to HBA context object. 21522 * @cmdiocb: Pointer to driver command iocb object. 21523 * @cmpl: completion function. 21524 * 21525 * Fill the appropriate fields for the abort WQE and call 21526 * internal routine lpfc_sli4_issue_wqe to send the WQE 21527 * This function is called with hbalock held and no ring_lock held. 21528 * 21529 * RETURNS 0 - SUCCESS 21530 **/ 21531 21532 int 21533 lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 21534 void *cmpl) 21535 { 21536 struct lpfc_vport *vport = cmdiocb->vport; 21537 struct lpfc_iocbq *abtsiocb = NULL; 21538 union lpfc_wqe128 *abtswqe; 21539 struct lpfc_io_buf *lpfc_cmd; 21540 int retval = IOCB_ERROR; 21541 u16 xritag = cmdiocb->sli4_xritag; 21542 21543 /* 21544 * The scsi command can not be in txq and it is in flight because the 21545 * pCmd is still pointing at the SCSI command we have to abort. There 21546 * is no need to search the txcmplq. Just send an abort to the FW. 21547 */ 21548 21549 abtsiocb = __lpfc_sli_get_iocbq(phba); 21550 if (!abtsiocb) 21551 return WQE_NORESOURCE; 21552 21553 /* Indicate the IO is being aborted by the driver. 
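 * The flag is cleared again near the bottom of this routine if the
 * abort WQE cannot be issued.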
*/ 21554 cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED; 21555 21556 abtswqe = &abtsiocb->wqe; 21557 memset(abtswqe, 0, sizeof(*abtswqe)); 21558 21559 if (!lpfc_is_link_up(phba) || (phba->link_flag & LS_EXTERNAL_LOOPBACK)) 21560 bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1); 21561 bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG); 21562 abtswqe->abort_cmd.rsrvd5 = 0; 21563 abtswqe->abort_cmd.wqe_com.abort_tag = xritag; 21564 bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag); 21565 bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 21566 bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0); 21567 bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1); 21568 bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); 21569 bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND); 21570 21571 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 21572 abtsiocb->hba_wqidx = cmdiocb->hba_wqidx; 21573 abtsiocb->cmd_flag |= LPFC_USE_FCPWQIDX; 21574 if (cmdiocb->cmd_flag & LPFC_IO_FCP) 21575 abtsiocb->cmd_flag |= LPFC_IO_FCP; 21576 if (cmdiocb->cmd_flag & LPFC_IO_NVME) 21577 abtsiocb->cmd_flag |= LPFC_IO_NVME; 21578 if (cmdiocb->cmd_flag & LPFC_IO_FOF) 21579 abtsiocb->cmd_flag |= LPFC_IO_FOF; 21580 abtsiocb->vport = vport; 21581 abtsiocb->cmd_cmpl = cmpl; 21582 21583 lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq); 21584 retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb); 21585 21586 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP, 21587 "0359 Abort xri x%x, original iotag x%x, " 21588 "abort cmd iotag x%x retval x%x\n", 21589 xritag, cmdiocb->iotag, abtsiocb->iotag, retval); 21590 21591 if (retval) { 21592 cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED; 21593 __lpfc_sli_release_iocbq(phba, abtsiocb); 21594 } 21595 21596 return retval; 21597 } 21598 21599 #ifdef LPFC_MXP_STAT 21600 /** 21601 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count 21602 * @phba: pointer to lpfc hba data structure. 21603 * @hwqid: belong to which HWQ. 21604 * 21605 * The purpose of this routine is to take a snapshot of the pbl, pvt and busy 21606 * counts 15 seconds after a test case starts running. 21607 * 21608 * The user should call lpfc_debugfs_multixripools_write before running a test 21609 * case to clear stat_snapshot_taken. Then the user starts a test case. While 21610 * the test case is running, stat_snapshot_taken is incremented by 1 every time 21611 * this routine is called from the heartbeat timer. When stat_snapshot_taken is 21612 * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
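 *
 * Rough outline of the expected test flow (an assumption drawn from the
 * description above, not a required procedure):
 *   1. write to the multixripools debugfs file to clear stat_snapshot_taken
 *   2. start the I/O test case
 *   3. after LPFC_MXP_SNAPSHOT_TAKEN heartbeat ticks, this routine records
 *      the pbl/pvt/busy counts
 *   4. read the multixripools debugfs file to view the snapshot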
21613 **/ 21614 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid) 21615 { 21616 struct lpfc_sli4_hdw_queue *qp; 21617 struct lpfc_multixri_pool *multixri_pool; 21618 struct lpfc_pvt_pool *pvt_pool; 21619 struct lpfc_pbl_pool *pbl_pool; 21620 u32 txcmplq_cnt; 21621 21622 qp = &phba->sli4_hba.hdwq[hwqid]; 21623 multixri_pool = qp->p_multixri_pool; 21624 if (!multixri_pool) 21625 return; 21626 21627 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) { 21628 pvt_pool = &qp->p_multixri_pool->pvt_pool; 21629 pbl_pool = &qp->p_multixri_pool->pbl_pool; 21630 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt; 21631 21632 multixri_pool->stat_pbl_count = pbl_pool->count; 21633 multixri_pool->stat_pvt_count = pvt_pool->count; 21634 multixri_pool->stat_busy_count = txcmplq_cnt; 21635 } 21636 21637 multixri_pool->stat_snapshot_taken++; 21638 } 21639 #endif 21640 21641 /** 21642 * lpfc_adjust_pvt_pool_count - Adjust private pool count 21643 * @phba: pointer to lpfc hba data structure. 21644 * @hwqid: belong to which HWQ. 21645 * 21646 * This routine moves some XRIs from private to public pool when private pool 21647 * is not busy. 21648 **/ 21649 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid) 21650 { 21651 struct lpfc_multixri_pool *multixri_pool; 21652 u32 io_req_count; 21653 u32 prev_io_req_count; 21654 21655 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool; 21656 if (!multixri_pool) 21657 return; 21658 io_req_count = multixri_pool->io_req_count; 21659 prev_io_req_count = multixri_pool->prev_io_req_count; 21660 21661 if (prev_io_req_count != io_req_count) { 21662 /* Private pool is busy */ 21663 multixri_pool->prev_io_req_count = io_req_count; 21664 } else { 21665 /* Private pool is not busy. 21666 * Move XRIs from private to public pool. 21667 */ 21668 lpfc_move_xri_pvt_to_pbl(phba, hwqid); 21669 } 21670 } 21671 21672 /** 21673 * lpfc_adjust_high_watermark - Adjust high watermark 21674 * @phba: pointer to lpfc hba data structure. 21675 * @hwqid: belong to which HWQ. 21676 * 21677 * This routine sets the high watermark to the number of outstanding XRIs, 21678 * but makes sure the new value is between xri_limit/2 and xri_limit. 21679 **/ 21680 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid) 21681 { 21682 u32 new_watermark; 21683 u32 watermark_max; 21684 u32 watermark_min; 21685 u32 xri_limit; 21686 u32 txcmplq_cnt; 21687 u32 abts_io_bufs; 21688 struct lpfc_multixri_pool *multixri_pool; 21689 struct lpfc_sli4_hdw_queue *qp; 21690 21691 qp = &phba->sli4_hba.hdwq[hwqid]; 21692 multixri_pool = qp->p_multixri_pool; 21693 if (!multixri_pool) 21694 return; 21695 xri_limit = multixri_pool->xri_limit; 21696 21697 watermark_max = xri_limit; 21698 watermark_min = xri_limit / 2; 21699 21700 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt; 21701 abts_io_bufs = qp->abts_scsi_io_bufs; 21702 abts_io_bufs += qp->abts_nvme_io_bufs; 21703 21704 new_watermark = txcmplq_cnt + abts_io_bufs; 21705 new_watermark = min(watermark_max, new_watermark); 21706 new_watermark = max(watermark_min, new_watermark); 21707 multixri_pool->pvt_pool.high_watermark = new_watermark; 21708 21709 #ifdef LPFC_MXP_STAT 21710 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm, 21711 new_watermark); 21712 #endif 21713 } 21714 21715 /** 21716 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool 21717 * @phba: pointer to lpfc hba data structure. 21718 * @hwqid: belong to which HWQ. 21719 * 21720 * This routine is called from the heartbeat timer when pvt_pool is idle.
21721 * All free XRIs are moved from private to public pool on hwqid with 2 steps. 21722 * The first step moves (all - low_watermark) XRIs. 21723 * The second step moves the rest of XRIs. 21724 **/ 21725 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid) 21726 { 21727 struct lpfc_pbl_pool *pbl_pool; 21728 struct lpfc_pvt_pool *pvt_pool; 21729 struct lpfc_sli4_hdw_queue *qp; 21730 struct lpfc_io_buf *lpfc_ncmd; 21731 struct lpfc_io_buf *lpfc_ncmd_next; 21732 unsigned long iflag; 21733 struct list_head tmp_list; 21734 u32 tmp_count; 21735 21736 qp = &phba->sli4_hba.hdwq[hwqid]; 21737 pbl_pool = &qp->p_multixri_pool->pbl_pool; 21738 pvt_pool = &qp->p_multixri_pool->pvt_pool; 21739 tmp_count = 0; 21740 21741 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool); 21742 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool); 21743 21744 if (pvt_pool->count > pvt_pool->low_watermark) { 21745 /* Step 1: move (all - low_watermark) from pvt_pool 21746 * to pbl_pool 21747 */ 21748 21749 /* Move low watermark of bufs from pvt_pool to tmp_list */ 21750 INIT_LIST_HEAD(&tmp_list); 21751 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 21752 &pvt_pool->list, list) { 21753 list_move_tail(&lpfc_ncmd->list, &tmp_list); 21754 tmp_count++; 21755 if (tmp_count >= pvt_pool->low_watermark) 21756 break; 21757 } 21758 21759 /* Move all bufs from pvt_pool to pbl_pool */ 21760 list_splice_init(&pvt_pool->list, &pbl_pool->list); 21761 21762 /* Move all bufs from tmp_list to pvt_pool */ 21763 list_splice(&tmp_list, &pvt_pool->list); 21764 21765 pbl_pool->count += (pvt_pool->count - tmp_count); 21766 pvt_pool->count = tmp_count; 21767 } else { 21768 /* Step 2: move the rest from pvt_pool to pbl_pool */ 21769 list_splice_init(&pvt_pool->list, &pbl_pool->list); 21770 pbl_pool->count += pvt_pool->count; 21771 pvt_pool->count = 0; 21772 } 21773 21774 spin_unlock(&pvt_pool->lock); 21775 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 21776 } 21777 21778 /** 21779 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool 21780 * @phba: pointer to lpfc hba data structure 21781 * @qp: pointer to HDW queue 21782 * @pbl_pool: specified public free XRI pool 21783 * @pvt_pool: specified private free XRI pool 21784 * @count: number of XRIs to move 21785 * 21786 * This routine tries to move some free common bufs from the specified pbl_pool 21787 * to the specified pvt_pool. It might move fewer than @count XRIs if there are 21788 * not enough in the public pool.
21789 * 21790 * Return: 21791 * true - if XRIs are successfully moved from the specified pbl_pool to the 21792 * specified pvt_pool 21793 * false - if the specified pbl_pool is empty or locked by someone else 21794 **/ 21795 static bool 21796 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, 21797 struct lpfc_pbl_pool *pbl_pool, 21798 struct lpfc_pvt_pool *pvt_pool, u32 count) 21799 { 21800 struct lpfc_io_buf *lpfc_ncmd; 21801 struct lpfc_io_buf *lpfc_ncmd_next; 21802 unsigned long iflag; 21803 int ret; 21804 21805 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag); 21806 if (ret) { 21807 if (pbl_pool->count) { 21808 /* Move a batch of XRIs from public to private pool */ 21809 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool); 21810 list_for_each_entry_safe(lpfc_ncmd, 21811 lpfc_ncmd_next, 21812 &pbl_pool->list, 21813 list) { 21814 list_move_tail(&lpfc_ncmd->list, 21815 &pvt_pool->list); 21816 pvt_pool->count++; 21817 pbl_pool->count--; 21818 count--; 21819 if (count == 0) 21820 break; 21821 } 21822 21823 spin_unlock(&pvt_pool->lock); 21824 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 21825 return true; 21826 } 21827 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 21828 } 21829 21830 return false; 21831 } 21832 21833 /** 21834 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool 21835 * @phba: pointer to lpfc hba data structure. 21836 * @hwqid: belong to which HWQ. 21837 * @count: number of XRIs to move 21838 * 21839 * This routine tries to find some free common bufs in one of the public pools 21840 * with a round-robin method. The search always starts from the local hwqid, 21841 * then the HWQ found last time (rrb_next_hwqid). Once a public pool is found, 21842 * a batch of free common bufs is moved to the private pool on hwqid. 21843 * It might move fewer than @count XRIs if there are not enough in the public pool.
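 *
 * Example of the search order (illustrative): with cfg_hdw_queue == 4,
 * hwqid == 1 and rrb_next_hwqid == 2, the local pbl_pool on HWQ 1 is
 * tried first; if that fails, the walk visits HWQ 3, 0, 1, 2 and stops
 * as soon as one trylock-and-move succeeds or every HWQ has been checked.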
21844 **/ 21845 void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count) 21846 { 21847 struct lpfc_multixri_pool *multixri_pool; 21848 struct lpfc_multixri_pool *next_multixri_pool; 21849 struct lpfc_pvt_pool *pvt_pool; 21850 struct lpfc_pbl_pool *pbl_pool; 21851 struct lpfc_sli4_hdw_queue *qp; 21852 u32 next_hwqid; 21853 u32 hwq_count; 21854 int ret; 21855 21856 qp = &phba->sli4_hba.hdwq[hwqid]; 21857 multixri_pool = qp->p_multixri_pool; 21858 pvt_pool = &multixri_pool->pvt_pool; 21859 pbl_pool = &multixri_pool->pbl_pool; 21860 21861 /* Check if local pbl_pool is available */ 21862 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count); 21863 if (ret) { 21864 #ifdef LPFC_MXP_STAT 21865 multixri_pool->local_pbl_hit_count++; 21866 #endif 21867 return; 21868 } 21869 21870 hwq_count = phba->cfg_hdw_queue; 21871 21872 /* Get the next hwqid which was found last time */ 21873 next_hwqid = multixri_pool->rrb_next_hwqid; 21874 21875 do { 21876 /* Go to next hwq */ 21877 next_hwqid = (next_hwqid + 1) % hwq_count; 21878 21879 next_multixri_pool = 21880 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool; 21881 pbl_pool = &next_multixri_pool->pbl_pool; 21882 21883 /* Check if the public free xri pool is available */ 21884 ret = _lpfc_move_xri_pbl_to_pvt( 21885 phba, qp, pbl_pool, pvt_pool, count); 21886 21887 /* Exit while-loop if success or all hwqid are checked */ 21888 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid); 21889 21890 /* Starting point for the next time */ 21891 multixri_pool->rrb_next_hwqid = next_hwqid; 21892 21893 if (!ret) { 21894 /* stats: all public pools are empty */ 21895 multixri_pool->pbl_empty_count++; 21896 } 21897 21898 #ifdef LPFC_MXP_STAT 21899 if (ret) { 21900 if (next_hwqid == hwqid) 21901 multixri_pool->local_pbl_hit_count++; 21902 else 21903 multixri_pool->other_pbl_hit_count++; 21904 } 21905 #endif 21906 } 21907 21908 /** 21909 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark 21910 * @phba: pointer to lpfc hba data structure. 21911 * @hwqid: belong to which HWQ. 21912 * 21913 * This routine gets a batch of XRIs from pbl_pool if pvt_pool is below the 21914 * low watermark. 21915 **/ 21916 void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid) 21917 { 21918 struct lpfc_multixri_pool *multixri_pool; 21919 struct lpfc_pvt_pool *pvt_pool; 21920 21921 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool; 21922 pvt_pool = &multixri_pool->pvt_pool; 21923 21924 if (pvt_pool->count < pvt_pool->low_watermark) 21925 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH); 21926 } 21927 21928 /** 21929 * lpfc_release_io_buf - Return one IO buf back to free pool 21930 * @phba: pointer to lpfc hba data structure. 21931 * @lpfc_ncmd: IO buf to be returned. 21932 * @qp: belong to which HWQ. 21933 * 21934 * This routine returns one IO buf back to free pool. If this is an urgent IO, 21935 * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1, 21936 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and 21937 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to 21938 * lpfc_io_buf_list_put.
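 *
 * Worked example of the placement rule below (numbers are made up):
 * with low_watermark 10, high_watermark 40 and xri_limit 80, a buffer
 * goes back to pvt_pool when pvt_pool->count < 10, or when the total
 * owned XRIs (pvt_pool->count + txcmplq_cnt + abts bufs) is under 80
 * while pvt_pool->count < 40; otherwise it goes back to pbl_pool.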
21939 **/ 21940 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd, 21941 struct lpfc_sli4_hdw_queue *qp) 21942 { 21943 unsigned long iflag; 21944 struct lpfc_pbl_pool *pbl_pool; 21945 struct lpfc_pvt_pool *pvt_pool; 21946 struct lpfc_epd_pool *epd_pool; 21947 u32 txcmplq_cnt; 21948 u32 xri_owned; 21949 u32 xri_limit; 21950 u32 abts_io_bufs; 21951 21952 /* MUST zero fields if buffer is reused by another protocol */ 21953 lpfc_ncmd->nvmeCmd = NULL; 21954 lpfc_ncmd->cur_iocbq.cmd_cmpl = NULL; 21955 21956 if (phba->cfg_xpsgl && !phba->nvmet_support && 21957 !list_empty(&lpfc_ncmd->dma_sgl_xtra_list)) 21958 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); 21959 21960 if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list)) 21961 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); 21962 21963 if (phba->cfg_xri_rebalancing) { 21964 if (lpfc_ncmd->expedite) { 21965 /* Return to expedite pool */ 21966 epd_pool = &phba->epd_pool; 21967 spin_lock_irqsave(&epd_pool->lock, iflag); 21968 list_add_tail(&lpfc_ncmd->list, &epd_pool->list); 21969 epd_pool->count++; 21970 spin_unlock_irqrestore(&epd_pool->lock, iflag); 21971 return; 21972 } 21973 21974 /* Avoid invalid access if an IO sneaks in and is being rejected 21975 * just _after_ xri pools are destroyed in lpfc_offline. 21976 * Nothing much can be done at this point. 21977 */ 21978 if (!qp->p_multixri_pool) 21979 return; 21980 21981 pbl_pool = &qp->p_multixri_pool->pbl_pool; 21982 pvt_pool = &qp->p_multixri_pool->pvt_pool; 21983 21984 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt; 21985 abts_io_bufs = qp->abts_scsi_io_bufs; 21986 abts_io_bufs += qp->abts_nvme_io_bufs; 21987 21988 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs; 21989 xri_limit = qp->p_multixri_pool->xri_limit; 21990 21991 #ifdef LPFC_MXP_STAT 21992 if (xri_owned <= xri_limit) 21993 qp->p_multixri_pool->below_limit_count++; 21994 else 21995 qp->p_multixri_pool->above_limit_count++; 21996 #endif 21997 21998 /* XRI goes to either public or private free xri pool 21999 * based on watermark and xri_limit 22000 */ 22001 if ((pvt_pool->count < pvt_pool->low_watermark) || 22002 (xri_owned < xri_limit && 22003 pvt_pool->count < pvt_pool->high_watermark)) { 22004 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, 22005 qp, free_pvt_pool); 22006 list_add_tail(&lpfc_ncmd->list, 22007 &pvt_pool->list); 22008 pvt_pool->count++; 22009 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 22010 } else { 22011 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, 22012 qp, free_pub_pool); 22013 list_add_tail(&lpfc_ncmd->list, 22014 &pbl_pool->list); 22015 pbl_pool->count++; 22016 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 22017 } 22018 } else { 22019 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag, 22020 qp, free_xri); 22021 list_add_tail(&lpfc_ncmd->list, 22022 &qp->lpfc_io_buf_list_put); 22023 qp->put_io_bufs++; 22024 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, 22025 iflag); 22026 } 22027 } 22028 22029 /** 22030 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool 22031 * @phba: pointer to lpfc hba data structure. 22032 * @qp: pointer to HDW queue 22033 * @pvt_pool: pointer to private pool data structure. 22034 * @ndlp: pointer to lpfc nodelist data structure. 22035 * 22036 * This routine tries to get one free IO buf from private pool. 
22037 * 22038 * Return: 22039 * pointer to one free IO buf - if private pool is not empty 22040 * NULL - if private pool is empty 22041 **/ 22042 static struct lpfc_io_buf * 22043 lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba, 22044 struct lpfc_sli4_hdw_queue *qp, 22045 struct lpfc_pvt_pool *pvt_pool, 22046 struct lpfc_nodelist *ndlp) 22047 { 22048 struct lpfc_io_buf *lpfc_ncmd; 22049 struct lpfc_io_buf *lpfc_ncmd_next; 22050 unsigned long iflag; 22051 22052 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool); 22053 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 22054 &pvt_pool->list, list) { 22055 if (lpfc_test_rrq_active( 22056 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag)) 22057 continue; 22058 list_del(&lpfc_ncmd->list); 22059 pvt_pool->count--; 22060 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 22061 return lpfc_ncmd; 22062 } 22063 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 22064 22065 return NULL; 22066 } 22067 22068 /** 22069 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool 22070 * @phba: pointer to lpfc hba data structure. 22071 * 22072 * This routine tries to get one free IO buf from expedite pool. 22073 * 22074 * Return: 22075 * pointer to one free IO buf - if expedite pool is not empty 22076 * NULL - if expedite pool is empty 22077 **/ 22078 static struct lpfc_io_buf * 22079 lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba) 22080 { 22081 struct lpfc_io_buf *lpfc_ncmd = NULL, *iter; 22082 struct lpfc_io_buf *lpfc_ncmd_next; 22083 unsigned long iflag; 22084 struct lpfc_epd_pool *epd_pool; 22085 22086 epd_pool = &phba->epd_pool; 22087 22088 spin_lock_irqsave(&epd_pool->lock, iflag); 22089 if (epd_pool->count > 0) { 22090 list_for_each_entry_safe(iter, lpfc_ncmd_next, 22091 &epd_pool->list, list) { 22092 list_del(&iter->list); 22093 epd_pool->count--; 22094 lpfc_ncmd = iter; 22095 break; 22096 } 22097 } 22098 spin_unlock_irqrestore(&epd_pool->lock, iflag); 22099 22100 return lpfc_ncmd; 22101 } 22102 22103 /** 22104 * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf 22105 * @phba: pointer to lpfc hba data structure. 22106 * @ndlp: pointer to lpfc nodelist data structure. 22107 * @hwqid: belong to which HWQ 22108 * @expedite: 1 means this request is urgent. 22109 * 22110 * This routine will do the following actions and then return a pointer to 22111 * one free IO buf. 22112 * 22113 * 1. If the private free xri pool is empty, move some XRIs from public to 22114 * private pool. 22115 * 2. Get one XRI from private free xri pool. 22116 * 3. If we fail to get one from pvt_pool and this is an expedite request, 22117 * get one free xri from expedite pool. 22118 * 22119 * Note: ndlp is only used on SCSI side for RRQ testing. 22120 * The caller should pass NULL for ndlp on NVME side.
22121 * 22122 * Return: 22123 * pointer to one free IO buf - if private pool is not empty 22124 * NULL - if private pool is empty 22125 **/ 22126 static struct lpfc_io_buf * 22127 lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba, 22128 struct lpfc_nodelist *ndlp, 22129 int hwqid, int expedite) 22130 { 22131 struct lpfc_sli4_hdw_queue *qp; 22132 struct lpfc_multixri_pool *multixri_pool; 22133 struct lpfc_pvt_pool *pvt_pool; 22134 struct lpfc_io_buf *lpfc_ncmd; 22135 22136 qp = &phba->sli4_hba.hdwq[hwqid]; 22137 lpfc_ncmd = NULL; 22138 if (!qp) { 22139 lpfc_printf_log(phba, KERN_INFO, 22140 LOG_SLI | LOG_NVME_ABTS | LOG_FCP, 22141 "5556 NULL qp for hwqid x%x\n", hwqid); 22142 return lpfc_ncmd; 22143 } 22144 multixri_pool = qp->p_multixri_pool; 22145 if (!multixri_pool) { 22146 lpfc_printf_log(phba, KERN_INFO, 22147 LOG_SLI | LOG_NVME_ABTS | LOG_FCP, 22148 "5557 NULL multixri for hwqid x%x\n", hwqid); 22149 return lpfc_ncmd; 22150 } 22151 pvt_pool = &multixri_pool->pvt_pool; 22152 if (!pvt_pool) { 22153 lpfc_printf_log(phba, KERN_INFO, 22154 LOG_SLI | LOG_NVME_ABTS | LOG_FCP, 22155 "5558 NULL pvt_pool for hwqid x%x\n", hwqid); 22156 return lpfc_ncmd; 22157 } 22158 multixri_pool->io_req_count++; 22159 22160 /* If pvt_pool is empty, move some XRIs from public to private pool */ 22161 if (pvt_pool->count == 0) 22162 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH); 22163 22164 /* Get one XRI from private free xri pool */ 22165 lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp); 22166 22167 if (lpfc_ncmd) { 22168 lpfc_ncmd->hdwq = qp; 22169 lpfc_ncmd->hdwq_no = hwqid; 22170 } else if (expedite) { 22171 /* If we fail to get one from pvt_pool and this is an expedite 22172 * request, get one free xri from expedite pool. 22173 */ 22174 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba); 22175 } 22176 22177 return lpfc_ncmd; 22178 } 22179 22180 static inline struct lpfc_io_buf * 22181 lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx) 22182 { 22183 struct lpfc_sli4_hdw_queue *qp; 22184 struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next; 22185 22186 qp = &phba->sli4_hba.hdwq[idx]; 22187 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, 22188 &qp->lpfc_io_buf_list_get, list) { 22189 if (lpfc_test_rrq_active(phba, ndlp, 22190 lpfc_cmd->cur_iocbq.sli4_lxritag)) 22191 continue; 22192 22193 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED) 22194 continue; 22195 22196 list_del_init(&lpfc_cmd->list); 22197 qp->get_io_bufs--; 22198 lpfc_cmd->hdwq = qp; 22199 lpfc_cmd->hdwq_no = idx; 22200 return lpfc_cmd; 22201 } 22202 return NULL; 22203 } 22204 22205 /** 22206 * lpfc_get_io_buf - Get one IO buffer from free pool 22207 * @phba: The HBA for which this call is being executed. 22208 * @ndlp: pointer to lpfc nodelist data structure. 22209 * @hwqid: belong to which HWQ 22210 * @expedite: 1 means this request is urgent. 22211 * 22212 * This routine gets one IO buffer from the free pool. If cfg_xri_rebalancing==1, 22213 * it removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing==0, it 22214 * removes an IO buffer from the head of the @hdwq io_buf_list and returns it to the caller. 22215 * 22216 * Note: ndlp is only used on SCSI side for RRQ testing. 22217 * The caller should pass NULL for ndlp on NVME side.
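 *
 * Sketch of the expected get/release pairing (illustrative only):
 *
 *   lpfc_cmd = lpfc_get_io_buf(phba, ndlp, hwqid, 0);
 *   if (!lpfc_cmd)
 *       return;  // caller-specific busy/retry handling
 *   // ... use the buffer for one IO ...
 *   lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);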
/**
 * lpfc_get_io_buf - Get one IO buffer from free pool
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: index of the HWQ this request belongs to.
 * @expedite: 1 means this request is urgent.
 *
 * This routine gets one IO buffer from the free pool. If cfg_xri_rebalancing
 * is set, it takes an IO buffer from the multi-XRI pools; otherwise it takes
 * an IO buffer from the head of the HWQ's io_buf_list and returns it to the
 * caller.
 *
 * Note: ndlp is only used on the SCSI side for RRQ testing.
 * The caller should pass NULL for ndlp on the NVME side.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/
struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
                                    struct lpfc_nodelist *ndlp,
                                    u32 hwqid, int expedite)
{
        struct lpfc_sli4_hdw_queue *qp;
        unsigned long iflag;
        struct lpfc_io_buf *lpfc_cmd;

        qp = &phba->sli4_hba.hdwq[hwqid];
        lpfc_cmd = NULL;
        if (!qp) {
                lpfc_printf_log(phba, KERN_WARNING,
                                LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
                                "5555 NULL qp for hwqid x%x\n", hwqid);
                return lpfc_cmd;
        }

        if (phba->cfg_xri_rebalancing)
                lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
                        phba, ndlp, hwqid, expedite);
        else {
                lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
                                          qp, alloc_xri_get);
                if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
                        lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
                if (!lpfc_cmd) {
                        /* The get list is empty or depleted below the
                         * expedite reserve; refill it from the put list.
                         */
                        lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
                                          qp, alloc_xri_put);
                        list_splice(&qp->lpfc_io_buf_list_put,
                                    &qp->lpfc_io_buf_list_get);
                        qp->get_io_bufs += qp->put_io_bufs;
                        INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
                        qp->put_io_bufs = 0;
                        spin_unlock(&qp->io_buf_list_put_lock);
                        if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
                            expedite)
                                lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
                }
                spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
        }

        return lpfc_cmd;
}
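/*
 * A hedged submission sketch (illustrative only): lpfc_get_io_buf() is
 * paired with the matching release helper, lpfc_release_io_buf(), once
 * the command completes or submission fails:
 *
 *	struct lpfc_io_buf *iobuf;
 *
 *	iobuf = lpfc_get_io_buf(phba, ndlp, hwqid, 0);
 *	if (!iobuf)
 *		return -EBUSY;
 *	... build the WQE in iobuf->cur_iocbq and issue it ...
 *	lpfc_release_io_buf(phba, iobuf, iobuf->hdwq);
 */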
/**
 * lpfc_read_object - Retrieve object data from HBA
 * @phba: The HBA for which this call is being executed.
 * @rdobject: Pathname of the object data we want to read.
 * @datap: Pointer to where the data will be copied to.
 * @datasz: size of the data area
 *
 * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less.
 * The data will be truncated if datasz is not large enough.
 * Version 1 is not supported with embedded mbox cmd, so we must use version 0.
 * Returns the actual bytes read from the object.
 *
 * This routine is hard coded to use a poll completion. Unlike other
 * sli4_config mailboxes, it uses lpfc_mbuf memory which is not
 * cleaned up in lpfc_sli4_cmd_mbox_free. If this routine is modified
 * to use interrupt-based completions, code is needed to fully clean up
 * the memory.
 */
int
lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
                 uint32_t datasz)
{
        struct lpfc_mbx_read_object *read_object;
        LPFC_MBOXQ_t *mbox;
        int rc, length, eof, j, byte_cnt = 0;
        uint32_t shdr_status, shdr_add_status;
        union lpfc_sli4_cfg_shdr *shdr;
        struct lpfc_dmabuf *pcmd;
        u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};

        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mbox)
                return -ENOMEM;
        length = (sizeof(struct lpfc_mbx_read_object) -
                  sizeof(struct lpfc_sli4_cfg_mhdr));
        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
                         LPFC_MBOX_OPCODE_READ_OBJECT,
                         length, LPFC_SLI4_MBX_EMBED);
        read_object = &mbox->u.mqe.un.read_object;
        shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr;

        bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0);
        bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz);
        read_object->u.request.rd_object_offset = 0;
        read_object->u.request.rd_object_cnt = 1;

        memset((void *)read_object->u.request.rd_object_name, 0,
               LPFC_OBJ_NAME_SZ);
        scnprintf((char *)rd_object_name, sizeof(rd_object_name), "%s",
                  rdobject);
        for (j = 0; j < strlen(rdobject); j++)
                read_object->u.request.rd_object_name[j] =
                        cpu_to_le32(rd_object_name[j]);

        pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
        if (pcmd)
                pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
        if (!pcmd || !pcmd->virt) {
                kfree(pcmd);
                mempool_free(mbox, phba->mbox_mem_pool);
                return -ENOMEM;
        }
        memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE);
        read_object->u.request.rd_object_hbuf[0].pa_lo =
                putPaddrLow(pcmd->phys);
        read_object->u.request.rd_object_hbuf[0].pa_hi =
                putPaddrHigh(pcmd->phys);
        read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE;

        mbox->vport = phba->pport;
        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        mbox->ctx_ndlp = NULL;

        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

        if (shdr_status == STATUS_FAILED &&
            shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
                                "4674 No port cfg file in FW.\n");
                byte_cnt = -ENOENT;
        } else if (shdr_status || shdr_add_status || rc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
                                "2625 READ_OBJECT mailbox failed with "
                                "status x%x add_status x%x, mbx status x%x\n",
                                shdr_status, shdr_add_status, rc);
                byte_cnt = -ENXIO;
        } else {
                /* Success */
                length = read_object->u.response.rd_object_actual_rlen;
                eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response);
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT,
                                "2626 READ_OBJECT Success len %d:%d, EOF %d\n",
                                length, datasz, eof);

                /* Detect if the port config file exists but is empty */
                if (!length && eof) {
                        byte_cnt = 0;
                        goto exit;
                }

                byte_cnt = length;
                lpfc_sli_pcimem_bcopy(pcmd->virt, datap, byte_cnt);
        }

 exit:
        /* This is an embedded SLI4 mailbox with an external buffer allocated.
         * Free the pcmd and then clean up with the correct routine.
         */
        lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
        kfree(pcmd);
        lpfc_sli4_mbox_cmd_free(phba, mbox);
        return byte_cnt;
}
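/*
 * A hedged usage sketch for lpfc_read_object() (illustrative only; the
 * object pathname below is a hypothetical placeholder, not a real
 * firmware object name):
 *
 *	u32 *buf;
 *	int len;
 *
 *	buf = kzalloc(LPFC_BPL_SIZE, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	len = lpfc_read_object(phba, "/driver/portcfg.bin", buf,
 *			       LPFC_BPL_SIZE);
 *	if (len == -ENOENT)
 *		... the object does not exist in firmware ...
 *	else if (len >= 0)
 *		... len bytes of object data now reside in buf ...
 *	kfree(buf);
 */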
/**
 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to append the SGL chunk
 *
 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
 * and will allocate an SGL chunk if the pool is empty.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to sli4_hybrid_sgl - Success
 **/
struct sli4_hybrid_sgl *
lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
{
        struct sli4_hybrid_sgl *list_entry = NULL;
        struct sli4_hybrid_sgl *tmp = NULL;
        struct sli4_hybrid_sgl *allocated_sgl = NULL;
        struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
        struct list_head *buf_list = &hdwq->sgl_list;
        unsigned long iflags;

        spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

        if (likely(!list_empty(buf_list))) {
                /* break off 1 chunk from the sgl_list */
                list_for_each_entry_safe(list_entry, tmp,
                                         buf_list, list_node) {
                        list_move_tail(&list_entry->list_node,
                                       &lpfc_buf->dma_sgl_xtra_list);
                        break;
                }
        } else {
                /* allocate more */
                spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
                tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
                                   cpu_to_node(hdwq->io_wq->chann));
                if (!tmp) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                        "8353 error kmalloc memory for HDWQ "
                                        "%d %s\n",
                                        lpfc_buf->hdwq_no, __func__);
                        return NULL;
                }

                tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
                                              GFP_ATOMIC, &tmp->dma_phys_sgl);
                if (!tmp->dma_sgl) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                        "8354 error pool_alloc memory for HDWQ "
                                        "%d %s\n",
                                        lpfc_buf->hdwq_no, __func__);
                        kfree(tmp);
                        return NULL;
                }

                spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
                list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
        }

        allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
                                        struct sli4_hybrid_sgl,
                                        list_node);

        spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);

        return allocated_sgl;
}
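/*
 * A hedged sketch of consuming the chunk returned above (illustrative
 * only; exactly how the chunk is chained into the base SGL is left to
 * the I/O build path):
 *
 *	struct sli4_hybrid_sgl *sgl;
 *
 *	sgl = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
 *	if (!sgl)
 *		return -ENOMEM;
 *
 * sgl->dma_sgl is the CPU address of the extra SGE array and
 * sgl->dma_phys_sgl its DMA address, which is what a chained SGE entry
 * in the base SGL would point at.
 */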
/**
 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the SGL chunk
 *
 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
 *
 * Return codes:
 *   0 - Success
 *   -EINVAL - Error
 **/
int
lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
{
        int rc = 0;
        struct sli4_hybrid_sgl *list_entry = NULL;
        struct sli4_hybrid_sgl *tmp = NULL;
        struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
        struct list_head *buf_list = &hdwq->sgl_list;
        unsigned long iflags;

        spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

        if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
                list_for_each_entry_safe(list_entry, tmp,
                                         &lpfc_buf->dma_sgl_xtra_list,
                                         list_node) {
                        list_move_tail(&list_entry->list_node,
                                       buf_list);
                }
        } else {
                rc = -EINVAL;
        }

        spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
        return rc;
}

/**
 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to clean up SGL buffer resources on
 *
 * This routine frees all SGL chunks of hdwq's SGL chunk pool.
 *
 * Return codes:
 *   None
 **/
void
lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
                       struct lpfc_sli4_hdw_queue *hdwq)
{
        struct list_head *buf_list = &hdwq->sgl_list;
        struct sli4_hybrid_sgl *list_entry = NULL;
        struct sli4_hybrid_sgl *tmp = NULL;
        unsigned long iflags;

        spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

        /* Free sgl pool */
        list_for_each_entry_safe(list_entry, tmp,
                                 buf_list, list_node) {
                list_del(&list_entry->list_node);
                dma_pool_free(phba->lpfc_sg_dma_buf_pool,
                              list_entry->dma_sgl,
                              list_entry->dma_phys_sgl);
                kfree(list_entry);
        }

        spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}
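/*
 * A hedged teardown sketch (illustrative only): on adapter reset or
 * unload, each hardware queue's chunk pool is drained in turn, using
 * the per-HBA HWQ count:
 *
 *	u32 idx;
 *
 *	for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
 *		lpfc_free_sgl_per_hdwq(phba, &phba->sli4_hba.hdwq[idx]);
 */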
/**
 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
 *
 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
 * and will allocate a CMD/RSP buffer if the pool is empty.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to fcp_cmd_rsp_buf - Success
 **/
struct fcp_cmd_rsp_buf *
lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
                              struct lpfc_io_buf *lpfc_buf)
{
        struct fcp_cmd_rsp_buf *list_entry = NULL;
        struct fcp_cmd_rsp_buf *tmp = NULL;
        struct fcp_cmd_rsp_buf *allocated_buf = NULL;
        struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
        struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
        unsigned long iflags;

        spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

        if (likely(!list_empty(buf_list))) {
                /* break off 1 chunk from the list */
                list_for_each_entry_safe(list_entry, tmp,
                                         buf_list,
                                         list_node) {
                        list_move_tail(&list_entry->list_node,
                                       &lpfc_buf->dma_cmd_rsp_list);
                        break;
                }
        } else {
                /* allocate more */
                spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
                tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
                                   cpu_to_node(hdwq->io_wq->chann));
                if (!tmp) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                        "8355 error kmalloc memory for HDWQ "
                                        "%d %s\n",
                                        lpfc_buf->hdwq_no, __func__);
                        return NULL;
                }

                tmp->fcp_cmnd = dma_pool_zalloc(phba->lpfc_cmd_rsp_buf_pool,
                                                GFP_ATOMIC,
                                                &tmp->fcp_cmd_rsp_dma_handle);

                if (!tmp->fcp_cmnd) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                        "8356 error pool_alloc memory for HDWQ "
                                        "%d %s\n",
                                        lpfc_buf->hdwq_no, __func__);
                        kfree(tmp);
                        return NULL;
                }

                /* The FCP_RSP immediately follows the FCP_CMND in the
                 * single DMA allocation.
                 */
                tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
                                                  sizeof(struct fcp_cmnd32));

                spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
                list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
        }

        allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
                                        struct fcp_cmd_rsp_buf,
                                        list_node);

        spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);

        return allocated_buf;
}

/**
 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the CMD/RSP buf
 *
 * This routine puts one CMD/RSP buffer into the hdwq's CMD/RSP pool.
 *
 * Return codes:
 *   0 - Success
 *   -EINVAL - Error
 **/
int
lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
                              struct lpfc_io_buf *lpfc_buf)
{
        int rc = 0;
        struct fcp_cmd_rsp_buf *list_entry = NULL;
        struct fcp_cmd_rsp_buf *tmp = NULL;
        struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
        struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
        unsigned long iflags;

        spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

        if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
                list_for_each_entry_safe(list_entry, tmp,
                                         &lpfc_buf->dma_cmd_rsp_list,
                                         list_node) {
                        list_move_tail(&list_entry->list_node,
                                       buf_list);
                }
        } else {
                rc = -EINVAL;
        }

        spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
        return rc;
}
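/*
 * A hedged sketch of the get/put pairing for CMD/RSP buffers
 * (illustrative only): each pool element is a single DMA allocation
 * holding the FCP_CMND with the FCP_RSP packed directly behind it, so
 * one DMA handle (fcp_cmd_rsp_dma_handle) covers both:
 *
 *	struct fcp_cmd_rsp_buf *crb;
 *
 *	crb = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
 *	if (!crb)
 *		return -ENOMEM;
 *	... crb->fcp_cmnd, crb->fcp_rsp and the DMA handle are used to
 *	    build the FCP command; when the IO is done: ...
 *	lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
 */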
/**
 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to clean up CMD/RSP buffer resources on
 *
 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buffer pool.
 *
 * Return codes:
 *   None
 **/
void
lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
                               struct lpfc_sli4_hdw_queue *hdwq)
{
        struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
        struct fcp_cmd_rsp_buf *list_entry = NULL;
        struct fcp_cmd_rsp_buf *tmp = NULL;
        unsigned long iflags;

        spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

        /* Free cmd_rsp buf pool */
        list_for_each_entry_safe(list_entry, tmp,
                                 buf_list,
                                 list_node) {
                list_del(&list_entry->list_node);
                dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
                              list_entry->fcp_cmnd,
                              list_entry->fcp_cmd_rsp_dma_handle);
                kfree(list_entry);
        }

        spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}
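/*
 * A hedged usage sketch for lpfc_sli_prep_wqe() below (illustrative
 * only): callers first set the opcode-specific words of job->wqe,
 * including the wqe_cmnd opcode the routine switches on, then let it
 * fill the common words before the WQE is posted, e.g. through the
 * SLI4 issue path:
 *
 *	bf_set(wqe_cmnd, &job->wqe.gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
 *	... opcode-specific words ...
 *	lpfc_sli_prep_wqe(phba, job);
 *	rc = lpfc_sli4_issue_wqe(phba, qp, job);
 */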
/**
 * lpfc_sli_prep_wqe - Prepare WQE for the command to be posted
 * @phba: phba object
 * @job: job entry of the command to be posted.
 *
 * Fill in the common fields of the WQE for the given command.
 *
 * Return codes:
 *   None
 **/
void
lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
{
        u8 cmnd;
        u32 *pcmd;
        u32 if_type = 0;
        u32 abort_tag;
        bool fip;
        struct lpfc_nodelist *ndlp = NULL;
        union lpfc_wqe128 *wqe = &job->wqe;
        u8 command_type = ELS_COMMAND_NON_FIP;

        fip = test_bit(HBA_FIP_SUPPORT, &phba->hba_flag);
        /* The fcp commands will set command type */
        if (job->cmd_flag & LPFC_IO_FCP)
                command_type = FCP_COMMAND;
        else if (fip && (job->cmd_flag & LPFC_FIP_ELS_ID_MASK))
                command_type = ELS_COMMAND_FIP;
        else
                command_type = ELS_COMMAND_NON_FIP;

        abort_tag = job->iotag;
        cmnd = bf_get(wqe_cmnd, &wqe->els_req.wqe_com);

        switch (cmnd) {
        case CMD_ELS_REQUEST64_WQE:
                ndlp = job->ndlp;

                if_type = bf_get(lpfc_sli_intf_if_type,
                                 &phba->sli4_hba.sli_intf);
                if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
                        pcmd = (u32 *)job->cmd_dmabuf->virt;
                        if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
                                     *pcmd == ELS_CMD_SCR ||
                                     *pcmd == ELS_CMD_RDF ||
                                     *pcmd == ELS_CMD_EDC ||
                                     *pcmd == ELS_CMD_RSCN_XMT ||
                                     *pcmd == ELS_CMD_FDISC ||
                                     *pcmd == ELS_CMD_LOGO ||
                                     *pcmd == ELS_CMD_QFPA ||
                                     *pcmd == ELS_CMD_UVEM ||
                                     *pcmd == ELS_CMD_PLOGI)) {
                                bf_set(els_req64_sp, &wqe->els_req, 1);
                                bf_set(els_req64_sid, &wqe->els_req,
                                       job->vport->fc_myDID);

                                if ((*pcmd == ELS_CMD_FLOGI) &&
                                    phba->fc_topology != LPFC_TOPOLOGY_LOOP)
                                        bf_set(els_req64_sid, &wqe->els_req, 0);

                                bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
                                bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
                                       phba->vpi_ids[job->vport->vpi]);
                        } else if (pcmd) {
                                bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
                                bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
                                       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
                        }
                }

                bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
                       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);

                bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
                bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
                bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
                bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
                bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
                break;
        case CMD_XMIT_ELS_RSP64_WQE:
                ndlp = job->ndlp;

                /* word4 */
                wqe->xmit_els_rsp.word4 = 0;

                if_type = bf_get(lpfc_sli_intf_if_type,
                                 &phba->sli4_hba.sli_intf);
                if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
                        if (test_bit(FC_PT2PT, &job->vport->fc_flag)) {
                                bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
                                bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
                                       job->vport->fc_myDID);
                                if (job->vport->fc_myDID == Fabric_DID) {
                                        bf_set(wqe_els_did,
                                               &wqe->xmit_els_rsp.wqe_dest, 0);
                                }
                        }
                }

                bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
                bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
                bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
                bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
                       LPFC_WQE_LENLOC_WORD3);
                bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);

                if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
                        bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
                        bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
                               job->vport->fc_myDID);
                        bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
                }

                if (phba->sli_rev == LPFC_SLI_REV4) {
                        bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
                               phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);

                        if (bf_get(wqe_ct, &wqe->xmit_els_rsp.wqe_com))
                                bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
                                       phba->vpi_ids[job->vport->vpi]);
                }
                command_type = OTHER_COMMAND;
                break;
        case CMD_GEN_REQUEST64_WQE:
                /* Word 10 */
                bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
                bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
                bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
                bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
                bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
                command_type = OTHER_COMMAND;
                break;
        case CMD_XMIT_SEQUENCE64_WQE:
                if (phba->link_flag & LS_LOOPBACK_MODE)
                        bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);

                wqe->xmit_sequence.rsvd3 = 0;
                bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
                bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
                bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
                       LPFC_WQE_IOD_WRITE);
                bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
                       LPFC_WQE_LENLOC_WORD12);
                bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
                command_type = OTHER_COMMAND;
                break;
        case CMD_XMIT_BLS_RSP64_WQE:
                bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
                bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
                bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
                bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
                       phba->vpi_ids[phba->pport->vpi]);
                bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
                bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
                       LPFC_WQE_LENLOC_NONE);
                /* Overwrite the pre-set command type with OTHER_COMMAND */
                command_type = OTHER_COMMAND;
                break;
        case CMD_FCP_ICMND64_WQE:       /* task mgmt commands */
        case CMD_ABORT_XRI_WQE:         /* abort iotag */
        case CMD_SEND_FRAME:            /* mds loopback */
                /* cases already formatted for sli4 wqe - no changes needed */
                return;
        default:
                dump_stack();
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "6207 Invalid command 0x%x\n",
                                cmnd);
                break;
        }

        wqe->generic.wqe_com.abort_tag = abort_tag;
        bf_set(wqe_reqtag, &wqe->generic.wqe_com, job->iotag);
        bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
        bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
}