1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 22 #include <linux/blkdev.h> 23 #include <linux/pci.h> 24 #include <linux/interrupt.h> 25 #include <linux/delay.h> 26 #include <linux/slab.h> 27 28 #include <scsi/scsi.h> 29 #include <scsi/scsi_cmnd.h> 30 #include <scsi/scsi_device.h> 31 #include <scsi/scsi_host.h> 32 #include <scsi/scsi_transport_fc.h> 33 #include <scsi/fc/fc_fs.h> 34 #include <linux/aer.h> 35 36 #include "lpfc_hw4.h" 37 #include "lpfc_hw.h" 38 #include "lpfc_sli.h" 39 #include "lpfc_sli4.h" 40 #include "lpfc_nl.h" 41 #include "lpfc_disc.h" 42 #include "lpfc_scsi.h" 43 #include "lpfc.h" 44 #include "lpfc_crtn.h" 45 #include "lpfc_logmsg.h" 46 #include "lpfc_compat.h" 47 #include "lpfc_debugfs.h" 48 #include "lpfc_vport.h" 49 50 /* There are only four IOCB completion types. */ 51 typedef enum _lpfc_iocb_type { 52 LPFC_UNKNOWN_IOCB, 53 LPFC_UNSOL_IOCB, 54 LPFC_SOL_IOCB, 55 LPFC_ABORT_IOCB 56 } lpfc_iocb_type; 57 58 59 /* Provide function prototypes local to this module. */ 60 static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *, 61 uint32_t); 62 static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *, 63 uint8_t *, uint32_t *); 64 static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *, 65 struct lpfc_iocbq *); 66 static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, 67 struct hbq_dmabuf *); 68 static IOCB_t * 69 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) 70 { 71 return &iocbq->iocb; 72 } 73 74 /** 75 * lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue 76 * @q: The Work Queue to operate on. 77 * @wqe: The work Queue Entry to put on the Work queue. 78 * 79 * This routine will copy the contents of @wqe to the next available entry on 80 * the @q. This function will then ring the Work Queue Doorbell to signal the 81 * HBA to start processing the Work Queue Entry. This function returns 0 if 82 * successful. If no entries are available on @q then this function will return 83 * -ENOMEM. 84 * The caller is expected to hold the hbalock when calling this routine. 
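 *
 * A minimal caller sketch, added here for illustration only (the ELS work
 * queue pointer used below is an assumption, and the hbalock is assumed to
 * already be held by the caller):
 *
 *	union lpfc_wqe wqe;
 *
 *	memset(&wqe, 0, sizeof(wqe));
 *	... build the command-specific WQE fields ...
 *	if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
 *		return IOCB_ERROR;	... queue full, caller backs off ...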
 **/
static uint32_t
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
	union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
		bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1);

	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
	bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
	readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */

	return 0;
}

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
153 **/ 154 static uint32_t 155 lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe) 156 { 157 struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe; 158 struct lpfc_register doorbell; 159 uint32_t host_index; 160 161 /* If the host has not yet processed the next entry then we are done */ 162 if (((q->host_index + 1) % q->entry_count) == q->hba_index) 163 return -ENOMEM; 164 lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size); 165 /* Save off the mailbox pointer for completion */ 166 q->phba->mbox = (MAILBOX_t *)temp_mqe; 167 168 /* Update the host index before invoking device */ 169 host_index = q->host_index; 170 q->host_index = ((q->host_index + 1) % q->entry_count); 171 172 /* Ring Doorbell */ 173 doorbell.word0 = 0; 174 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1); 175 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id); 176 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr); 177 readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */ 178 return 0; 179 } 180 181 /** 182 * lpfc_sli4_mq_release - Updates internal hba index for MQ 183 * @q: The Mailbox Queue to operate on. 184 * 185 * This routine will update the HBA index of a queue to reflect consumption of 186 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed 187 * an entry the host calls this function to update the queue's internal 188 * pointers. This routine returns the number of entries that were consumed by 189 * the HBA. 190 **/ 191 static uint32_t 192 lpfc_sli4_mq_release(struct lpfc_queue *q) 193 { 194 /* Clear the mailbox pointer for completion */ 195 q->phba->mbox = NULL; 196 q->hba_index = ((q->hba_index + 1) % q->entry_count); 197 return 1; 198 } 199 200 /** 201 * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ 202 * @q: The Event Queue to get the first valid EQE from 203 * 204 * This routine will get the first valid Event Queue Entry from @q, update 205 * the queue's internal hba index, and return the EQE. If no valid EQEs are in 206 * the Queue (no more work to do), or the Queue is full of EQEs that have been 207 * processed, but not popped back to the HBA then this routine will return NULL. 208 **/ 209 static struct lpfc_eqe * 210 lpfc_sli4_eq_get(struct lpfc_queue *q) 211 { 212 struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe; 213 214 /* If the next EQE is not valid then we are done */ 215 if (!bf_get_le32(lpfc_eqe_valid, eqe)) 216 return NULL; 217 /* If the host has not yet processed the next entry then we are done */ 218 if (((q->hba_index + 1) % q->entry_count) == q->host_index) 219 return NULL; 220 221 q->hba_index = ((q->hba_index + 1) % q->entry_count); 222 return eqe; 223 } 224 225 /** 226 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ 227 * @q: The Event Queue that the host has completed processing for. 228 * @arm: Indicates whether the host wants to arms this CQ. 229 * 230 * This routine will mark all Event Queue Entries on @q, from the last 231 * known completed entry to the last entry that was processed, as completed 232 * by clearing the valid bit for each completion queue entry. Then it will 233 * notify the HBA, by ringing the doorbell, that the EQEs have been processed. 234 * The internal host index in the @q will be updated by this routine to indicate 235 * that the host has finished processing the entries. The @arm parameter 236 * indicates that the queue should be rearmed when ringing the doorbell. 237 * 238 * This function will return the number of EQEs that were popped. 
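 *
 * A typical consumer loop, sketched for illustration (the event queue pointer
 * and the per-EQE handler are hypothetical names, not part of this file):
 *
 *	struct lpfc_eqe *eqe;
 *
 *	while ((eqe = lpfc_sli4_eq_get(eq)) != NULL)
 *		handle_eqe(phba, eqe);
 *	... then pop the processed entries back and re-arm the EQ ...
 *	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);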
239 **/ 240 uint32_t 241 lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm) 242 { 243 uint32_t released = 0; 244 struct lpfc_eqe *temp_eqe; 245 struct lpfc_register doorbell; 246 247 /* while there are valid entries */ 248 while (q->hba_index != q->host_index) { 249 temp_eqe = q->qe[q->host_index].eqe; 250 bf_set_le32(lpfc_eqe_valid, temp_eqe, 0); 251 released++; 252 q->host_index = ((q->host_index + 1) % q->entry_count); 253 } 254 if (unlikely(released == 0 && !arm)) 255 return 0; 256 257 /* ring doorbell for number popped */ 258 doorbell.word0 = 0; 259 if (arm) { 260 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); 261 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); 262 } 263 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released); 264 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); 265 bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id); 266 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr); 267 /* PCI read to flush PCI pipeline on re-arming for INTx mode */ 268 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) 269 readl(q->phba->sli4_hba.EQCQDBregaddr); 270 return released; 271 } 272 273 /** 274 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ 275 * @q: The Completion Queue to get the first valid CQE from 276 * 277 * This routine will get the first valid Completion Queue Entry from @q, update 278 * the queue's internal hba index, and return the CQE. If no valid CQEs are in 279 * the Queue (no more work to do), or the Queue is full of CQEs that have been 280 * processed, but not popped back to the HBA then this routine will return NULL. 281 **/ 282 static struct lpfc_cqe * 283 lpfc_sli4_cq_get(struct lpfc_queue *q) 284 { 285 struct lpfc_cqe *cqe; 286 287 /* If the next CQE is not valid then we are done */ 288 if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe)) 289 return NULL; 290 /* If the host has not yet processed the next entry then we are done */ 291 if (((q->hba_index + 1) % q->entry_count) == q->host_index) 292 return NULL; 293 294 cqe = q->qe[q->hba_index].cqe; 295 q->hba_index = ((q->hba_index + 1) % q->entry_count); 296 return cqe; 297 } 298 299 /** 300 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ 301 * @q: The Completion Queue that the host has completed processing for. 302 * @arm: Indicates whether the host wants to arms this CQ. 303 * 304 * This routine will mark all Completion queue entries on @q, from the last 305 * known completed entry to the last entry that was processed, as completed 306 * by clearing the valid bit for each completion queue entry. Then it will 307 * notify the HBA, by ringing the doorbell, that the CQEs have been processed. 308 * The internal host index in the @q will be updated by this routine to indicate 309 * that the host has finished processing the entries. The @arm parameter 310 * indicates that the queue should be rearmed when ringing the doorbell. 311 * 312 * This function will return the number of CQEs that were released. 
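 *
 * Expected usage, sketched for illustration (the completion queue pointer is
 * hypothetical): CQEs are first drained with lpfc_sli4_cq_get(), then the
 * whole batch is popped back and the CQ re-armed with one doorbell write:
 *
 *	struct lpfc_cqe *cqe;
 *
 *	while ((cqe = lpfc_sli4_cq_get(cq)) != NULL)
 *		... dispatch on the CQE type/code fields ...
 *	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);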
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the header queue.
 * @drqe: The Data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entries. This function returns the index that the rqe was copied to if
 * successful. If no entries are available on the queues then this function
 * will return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
	struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
	struct lpfc_register doorbell;
	int put_index = hq->host_index;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq->host_index != dq->host_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
	dq->host_index = ((dq->host_index + 1) % dq->entry_count);

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
		doorbell.word0 = 0;
		bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
		       LPFC_RQ_POST_BATCH);
		bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
		writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
	}
	return put_index;
}

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of both queues to reflect
 * consumption of one Receive Queue Entry by the HBA. When the HBA indicates
 * that it has consumed an entry the host calls this function to update the
 * queue's internal pointers. This routine returns the number of entries that
 * were consumed by the HBA.
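 *
 * For illustration, the post side pairs one header RQE with one data RQE
 * (hdr_buf and dat_buf below are hypothetical DMA buffers; hbalock is
 * assumed held):
 *
 *	struct lpfc_rqe hrqe, drqe;
 *
 *	hrqe.address_lo = putPaddrLow(hdr_buf->phys);
 *	hrqe.address_hi = putPaddrHigh(hdr_buf->phys);
 *	drqe.address_lo = putPaddrLow(dat_buf->phys);
 *	drqe.address_hi = putPaddrHigh(dat_buf->phys);
 *	rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
 *			      &hrqe, &drqe);
 *
 * Once the HBA consumes the pair, lpfc_sli4_rq_release(hdr_rq, dat_rq)
 * advances both hba indexes by one.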
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->cmdringaddr) +
			   pring->cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->rspringaddr) +
			   pring->rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object, else it returns NULL.
 **/
static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);

	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	uint16_t adj_xri;
	struct lpfc_sglq *sglq;

	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
	if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
		return NULL;
	sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
	phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	uint16_t adj_xri;
	struct lpfc_sglq *sglq;

	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
	if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
		return NULL;
	sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
	return sglq;
}

/**
 * __lpfc_sli_get_sglq - Allocates an sglq object from the sgl pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty, it returns a pointer to the newly
 * allocated sglq object, else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_sglq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	uint16_t adj_xri;

	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
	phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release a driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_sgl_list).
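 *
 * The usual allocate/release pairing, sketched for illustration only (error
 * handling omitted):
 *
 *	struct lpfc_iocbq *piocb;
 *
 *	piocb = lpfc_sli_get_iocbq(phba);	... takes hbalock internally ...
 *	... build and issue the iocb; when it completes or fails ...
 *	lpfc_sli_release_iocbq(phba, piocb);
 *
 * On SLI4 hardware that release path reaches this routine through the
 * phba->__lpfc_sli_release_iocbq function pointer.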
577 **/ 578 static void 579 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 580 { 581 struct lpfc_sglq *sglq; 582 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 583 unsigned long iflag = 0; 584 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 585 586 if (iocbq->sli4_xritag == NO_XRI) 587 sglq = NULL; 588 else 589 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag); 590 if (sglq) { 591 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && 592 (sglq->state != SGL_XRI_ABORTED)) { 593 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, 594 iflag); 595 list_add(&sglq->list, 596 &phba->sli4_hba.lpfc_abts_els_sgl_list); 597 spin_unlock_irqrestore( 598 &phba->sli4_hba.abts_sgl_list_lock, iflag); 599 } else { 600 sglq->state = SGL_FREED; 601 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list); 602 603 /* Check if TXQ queue needs to be serviced */ 604 if (pring->txq_cnt) 605 lpfc_worker_wake_up(phba); 606 } 607 } 608 609 610 /* 611 * Clean all volatile data fields, preserve iotag and node struct. 612 */ 613 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 614 iocbq->sli4_xritag = NO_XRI; 615 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 616 } 617 618 619 /** 620 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool 621 * @phba: Pointer to HBA context object. 622 * @iocbq: Pointer to driver iocb object. 623 * 624 * This function is called with hbalock held to release driver 625 * iocb object to the iocb pool. The iotag in the iocb object 626 * does not change for each use of the iocb object. This function 627 * clears all other fields of the iocb object when it is freed. 628 **/ 629 static void 630 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 631 { 632 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 633 634 /* 635 * Clean all volatile data fields, preserve iotag and node struct. 636 */ 637 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 638 iocbq->sli4_xritag = NO_XRI; 639 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 640 } 641 642 /** 643 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool 644 * @phba: Pointer to HBA context object. 645 * @iocbq: Pointer to driver iocb object. 646 * 647 * This function is called with hbalock held to release driver 648 * iocb object to the iocb pool. The iotag in the iocb object 649 * does not change for each use of the iocb object. This function 650 * clears all other fields of the iocb object when it is freed. 651 **/ 652 static void 653 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 654 { 655 phba->__lpfc_sli_release_iocbq(phba, iocbq); 656 phba->iocb_cnt--; 657 } 658 659 /** 660 * lpfc_sli_release_iocbq - Release iocb to the iocb pool 661 * @phba: Pointer to HBA context object. 662 * @iocbq: Pointer to driver iocb object. 663 * 664 * This function is called with no lock held to release the iocb to 665 * iocb pool. 666 **/ 667 void 668 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 669 { 670 unsigned long iflags; 671 672 /* 673 * Clean all volatile data fields, preserve iotag and node struct. 674 */ 675 spin_lock_irqsave(&phba->hbalock, iflags); 676 __lpfc_sli_release_iocbq(phba, iocbq); 677 spin_unlock_irqrestore(&phba->hbalock, iflags); 678 } 679 680 /** 681 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list. 682 * @phba: Pointer to HBA context object. 683 * @iocblist: List of IOCBs. 684 * @ulpstatus: ULP status in IOCB command field. 
685 * @ulpWord4: ULP word-4 in IOCB command field. 686 * 687 * This function is called with a list of IOCBs to cancel. It cancels the IOCB 688 * on the list by invoking the complete callback function associated with the 689 * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond 690 * fields. 691 **/ 692 void 693 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist, 694 uint32_t ulpstatus, uint32_t ulpWord4) 695 { 696 struct lpfc_iocbq *piocb; 697 698 while (!list_empty(iocblist)) { 699 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list); 700 701 if (!piocb->iocb_cmpl) 702 lpfc_sli_release_iocbq(phba, piocb); 703 else { 704 piocb->iocb.ulpStatus = ulpstatus; 705 piocb->iocb.un.ulpWord[4] = ulpWord4; 706 (piocb->iocb_cmpl) (phba, piocb, piocb); 707 } 708 } 709 return; 710 } 711 712 /** 713 * lpfc_sli_iocb_cmd_type - Get the iocb type 714 * @iocb_cmnd: iocb command code. 715 * 716 * This function is called by ring event handler function to get the iocb type. 717 * This function translates the iocb command to an iocb command type used to 718 * decide the final disposition of each completed IOCB. 719 * The function returns 720 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb 721 * LPFC_SOL_IOCB if it is a solicited iocb completion 722 * LPFC_ABORT_IOCB if it is an abort iocb 723 * LPFC_UNSOL_IOCB if it is an unsolicited iocb 724 * 725 * The caller is not required to hold any lock. 726 **/ 727 static lpfc_iocb_type 728 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) 729 { 730 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB; 731 732 if (iocb_cmnd > CMD_MAX_IOCB_CMD) 733 return 0; 734 735 switch (iocb_cmnd) { 736 case CMD_XMIT_SEQUENCE_CR: 737 case CMD_XMIT_SEQUENCE_CX: 738 case CMD_XMIT_BCAST_CN: 739 case CMD_XMIT_BCAST_CX: 740 case CMD_ELS_REQUEST_CR: 741 case CMD_ELS_REQUEST_CX: 742 case CMD_CREATE_XRI_CR: 743 case CMD_CREATE_XRI_CX: 744 case CMD_GET_RPI_CN: 745 case CMD_XMIT_ELS_RSP_CX: 746 case CMD_GET_RPI_CR: 747 case CMD_FCP_IWRITE_CR: 748 case CMD_FCP_IWRITE_CX: 749 case CMD_FCP_IREAD_CR: 750 case CMD_FCP_IREAD_CX: 751 case CMD_FCP_ICMND_CR: 752 case CMD_FCP_ICMND_CX: 753 case CMD_FCP_TSEND_CX: 754 case CMD_FCP_TRSP_CX: 755 case CMD_FCP_TRECEIVE_CX: 756 case CMD_FCP_AUTO_TRSP_CX: 757 case CMD_ADAPTER_MSG: 758 case CMD_ADAPTER_DUMP: 759 case CMD_XMIT_SEQUENCE64_CR: 760 case CMD_XMIT_SEQUENCE64_CX: 761 case CMD_XMIT_BCAST64_CN: 762 case CMD_XMIT_BCAST64_CX: 763 case CMD_ELS_REQUEST64_CR: 764 case CMD_ELS_REQUEST64_CX: 765 case CMD_FCP_IWRITE64_CR: 766 case CMD_FCP_IWRITE64_CX: 767 case CMD_FCP_IREAD64_CR: 768 case CMD_FCP_IREAD64_CX: 769 case CMD_FCP_ICMND64_CR: 770 case CMD_FCP_ICMND64_CX: 771 case CMD_FCP_TSEND64_CX: 772 case CMD_FCP_TRSP64_CX: 773 case CMD_FCP_TRECEIVE64_CX: 774 case CMD_GEN_REQUEST64_CR: 775 case CMD_GEN_REQUEST64_CX: 776 case CMD_XMIT_ELS_RSP64_CX: 777 case DSSCMD_IWRITE64_CR: 778 case DSSCMD_IWRITE64_CX: 779 case DSSCMD_IREAD64_CR: 780 case DSSCMD_IREAD64_CX: 781 type = LPFC_SOL_IOCB; 782 break; 783 case CMD_ABORT_XRI_CN: 784 case CMD_ABORT_XRI_CX: 785 case CMD_CLOSE_XRI_CN: 786 case CMD_CLOSE_XRI_CX: 787 case CMD_XRI_ABORTED_CX: 788 case CMD_ABORT_MXRI64_CN: 789 case CMD_XMIT_BLS_RSP64_CX: 790 type = LPFC_ABORT_IOCB; 791 break; 792 case CMD_RCV_SEQUENCE_CX: 793 case CMD_RCV_ELS_REQ_CX: 794 case CMD_RCV_SEQUENCE64_CX: 795 case CMD_RCV_ELS_REQ64_CX: 796 case CMD_ASYNC_STATUS: 797 case CMD_IOCB_RCV_SEQ64_CX: 798 case CMD_IOCB_RCV_ELS64_CX: 799 case CMD_IOCB_RCV_CONT64_CX: 800 case CMD_IOCB_RET_XRI64_CX: 801 type = LPFC_UNSOL_IOCB; 802 
break; 803 case CMD_IOCB_XMIT_MSEQ64_CR: 804 case CMD_IOCB_XMIT_MSEQ64_CX: 805 case CMD_IOCB_RCV_SEQ_LIST64_CX: 806 case CMD_IOCB_RCV_ELS_LIST64_CX: 807 case CMD_IOCB_CLOSE_EXTENDED_CN: 808 case CMD_IOCB_ABORT_EXTENDED_CN: 809 case CMD_IOCB_RET_HBQE64_CN: 810 case CMD_IOCB_FCP_IBIDIR64_CR: 811 case CMD_IOCB_FCP_IBIDIR64_CX: 812 case CMD_IOCB_FCP_ITASKMGT64_CX: 813 case CMD_IOCB_LOGENTRY_CN: 814 case CMD_IOCB_LOGENTRY_ASYNC_CN: 815 printk("%s - Unhandled SLI-3 Command x%x\n", 816 __func__, iocb_cmnd); 817 type = LPFC_UNKNOWN_IOCB; 818 break; 819 default: 820 type = LPFC_UNKNOWN_IOCB; 821 break; 822 } 823 824 return type; 825 } 826 827 /** 828 * lpfc_sli_ring_map - Issue config_ring mbox for all rings 829 * @phba: Pointer to HBA context object. 830 * 831 * This function is called from SLI initialization code 832 * to configure every ring of the HBA's SLI interface. The 833 * caller is not required to hold any lock. This function issues 834 * a config_ring mailbox command for each ring. 835 * This function returns zero if successful else returns a negative 836 * error code. 837 **/ 838 static int 839 lpfc_sli_ring_map(struct lpfc_hba *phba) 840 { 841 struct lpfc_sli *psli = &phba->sli; 842 LPFC_MBOXQ_t *pmb; 843 MAILBOX_t *pmbox; 844 int i, rc, ret = 0; 845 846 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 847 if (!pmb) 848 return -ENOMEM; 849 pmbox = &pmb->u.mb; 850 phba->link_state = LPFC_INIT_MBX_CMDS; 851 for (i = 0; i < psli->num_rings; i++) { 852 lpfc_config_ring(phba, i, pmb); 853 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 854 if (rc != MBX_SUCCESS) { 855 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 856 "0446 Adapter failed to init (%d), " 857 "mbxCmd x%x CFG_RING, mbxStatus x%x, " 858 "ring %d\n", 859 rc, pmbox->mbxCommand, 860 pmbox->mbxStatus, i); 861 phba->link_state = LPFC_HBA_ERROR; 862 ret = -ENXIO; 863 break; 864 } 865 } 866 mempool_free(pmb, phba->mbox_mem_pool); 867 return ret; 868 } 869 870 /** 871 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq 872 * @phba: Pointer to HBA context object. 873 * @pring: Pointer to driver SLI ring object. 874 * @piocb: Pointer to the driver iocb object. 875 * 876 * This function is called with hbalock held. The function adds the 877 * new iocb to txcmplq of the given ring. This function always returns 878 * 0. If this function is called for ELS ring, this function checks if 879 * there is a vport associated with the ELS command. This function also 880 * starts els_tmofunc timer if this is an ELS command. 881 **/ 882 static int 883 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 884 struct lpfc_iocbq *piocb) 885 { 886 list_add_tail(&piocb->list, &pring->txcmplq); 887 piocb->iocb_flag |= LPFC_IO_ON_Q; 888 pring->txcmplq_cnt++; 889 if (pring->txcmplq_cnt > pring->txcmplq_max) 890 pring->txcmplq_max = pring->txcmplq_cnt; 891 892 if ((unlikely(pring->ringno == LPFC_ELS_RING)) && 893 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 894 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 895 if (!piocb->vport) 896 BUG(); 897 else 898 mod_timer(&piocb->vport->els_tmofunc, 899 jiffies + HZ * (phba->fc_ratov << 1)); 900 } 901 902 903 return 0; 904 } 905 906 /** 907 * lpfc_sli_ringtx_get - Get first element of the txq 908 * @phba: Pointer to HBA context object. 909 * @pring: Pointer to driver SLI ring object. 910 * 911 * This function is called with hbalock held to get next 912 * iocb in txq of the given ring. 
If there is any iocb in 913 * the txq, the function returns first iocb in the list after 914 * removing the iocb from the list, else it returns NULL. 915 **/ 916 struct lpfc_iocbq * 917 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 918 { 919 struct lpfc_iocbq *cmd_iocb; 920 921 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); 922 if (cmd_iocb != NULL) 923 pring->txq_cnt--; 924 return cmd_iocb; 925 } 926 927 /** 928 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring 929 * @phba: Pointer to HBA context object. 930 * @pring: Pointer to driver SLI ring object. 931 * 932 * This function is called with hbalock held and the caller must post the 933 * iocb without releasing the lock. If the caller releases the lock, 934 * iocb slot returned by the function is not guaranteed to be available. 935 * The function returns pointer to the next available iocb slot if there 936 * is available slot in the ring, else it returns NULL. 937 * If the get index of the ring is ahead of the put index, the function 938 * will post an error attention event to the worker thread to take the 939 * HBA to offline state. 940 **/ 941 static IOCB_t * 942 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 943 { 944 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 945 uint32_t max_cmd_idx = pring->numCiocb; 946 if ((pring->next_cmdidx == pring->cmdidx) && 947 (++pring->next_cmdidx >= max_cmd_idx)) 948 pring->next_cmdidx = 0; 949 950 if (unlikely(pring->local_getidx == pring->next_cmdidx)) { 951 952 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 953 954 if (unlikely(pring->local_getidx >= max_cmd_idx)) { 955 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 956 "0315 Ring %d issue: portCmdGet %d " 957 "is bigger than cmd ring %d\n", 958 pring->ringno, 959 pring->local_getidx, max_cmd_idx); 960 961 phba->link_state = LPFC_HBA_ERROR; 962 /* 963 * All error attention handlers are posted to 964 * worker thread 965 */ 966 phba->work_ha |= HA_ERATT; 967 phba->work_hs = HS_FFER3; 968 969 lpfc_worker_wake_up(phba); 970 971 return NULL; 972 } 973 974 if (pring->local_getidx == pring->next_cmdidx) 975 return NULL; 976 } 977 978 return lpfc_cmd_iocb(phba, pring); 979 } 980 981 /** 982 * lpfc_sli_next_iotag - Get an iotag for the iocb 983 * @phba: Pointer to HBA context object. 984 * @iocbq: Pointer to driver iocb object. 985 * 986 * This function gets an iotag for the iocb. If there is no unused iotag and 987 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup 988 * array and assigns a new iotag. 989 * The function returns the allocated iotag if successful, else returns zero. 990 * Zero is not a valid iotag. 991 * The caller is not required to hold any lock. 
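 *
 * Illustrative use when preparing a new command (a sketch, not code taken
 * from this driver):
 *
 *	iotag = lpfc_sli_next_iotag(phba, piocb);
 *	if (!iotag)
 *		return IOCB_ERROR;	... lookup array could not be grown ...
 *	... piocb->iotag now indexes psli->iocbq_lookup for completion lookup ...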
992 **/ 993 uint16_t 994 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 995 { 996 struct lpfc_iocbq **new_arr; 997 struct lpfc_iocbq **old_arr; 998 size_t new_len; 999 struct lpfc_sli *psli = &phba->sli; 1000 uint16_t iotag; 1001 1002 spin_lock_irq(&phba->hbalock); 1003 iotag = psli->last_iotag; 1004 if(++iotag < psli->iocbq_lookup_len) { 1005 psli->last_iotag = iotag; 1006 psli->iocbq_lookup[iotag] = iocbq; 1007 spin_unlock_irq(&phba->hbalock); 1008 iocbq->iotag = iotag; 1009 return iotag; 1010 } else if (psli->iocbq_lookup_len < (0xffff 1011 - LPFC_IOCBQ_LOOKUP_INCREMENT)) { 1012 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; 1013 spin_unlock_irq(&phba->hbalock); 1014 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *), 1015 GFP_KERNEL); 1016 if (new_arr) { 1017 spin_lock_irq(&phba->hbalock); 1018 old_arr = psli->iocbq_lookup; 1019 if (new_len <= psli->iocbq_lookup_len) { 1020 /* highly unprobable case */ 1021 kfree(new_arr); 1022 iotag = psli->last_iotag; 1023 if(++iotag < psli->iocbq_lookup_len) { 1024 psli->last_iotag = iotag; 1025 psli->iocbq_lookup[iotag] = iocbq; 1026 spin_unlock_irq(&phba->hbalock); 1027 iocbq->iotag = iotag; 1028 return iotag; 1029 } 1030 spin_unlock_irq(&phba->hbalock); 1031 return 0; 1032 } 1033 if (psli->iocbq_lookup) 1034 memcpy(new_arr, old_arr, 1035 ((psli->last_iotag + 1) * 1036 sizeof (struct lpfc_iocbq *))); 1037 psli->iocbq_lookup = new_arr; 1038 psli->iocbq_lookup_len = new_len; 1039 psli->last_iotag = iotag; 1040 psli->iocbq_lookup[iotag] = iocbq; 1041 spin_unlock_irq(&phba->hbalock); 1042 iocbq->iotag = iotag; 1043 kfree(old_arr); 1044 return iotag; 1045 } 1046 } else 1047 spin_unlock_irq(&phba->hbalock); 1048 1049 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1050 "0318 Failed to allocate IOTAG.last IOTAG is %d\n", 1051 psli->last_iotag); 1052 1053 return 0; 1054 } 1055 1056 /** 1057 * lpfc_sli_submit_iocb - Submit an iocb to the firmware 1058 * @phba: Pointer to HBA context object. 1059 * @pring: Pointer to driver SLI ring object. 1060 * @iocb: Pointer to iocb slot in the ring. 1061 * @nextiocb: Pointer to driver iocb object which need to be 1062 * posted to firmware. 1063 * 1064 * This function is called with hbalock held to post a new iocb to 1065 * the firmware. This function copies the new iocb to ring iocb slot and 1066 * updates the ring pointers. It adds the new iocb to txcmplq if there is 1067 * a completion call back for this iocb else the function will free the 1068 * iocb object. 1069 **/ 1070 static void 1071 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1072 IOCB_t *iocb, struct lpfc_iocbq *nextiocb) 1073 { 1074 /* 1075 * Set up an iotag 1076 */ 1077 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0; 1078 1079 1080 if (pring->ringno == LPFC_ELS_RING) { 1081 lpfc_debugfs_slow_ring_trc(phba, 1082 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x", 1083 *(((uint32_t *) &nextiocb->iocb) + 4), 1084 *(((uint32_t *) &nextiocb->iocb) + 6), 1085 *(((uint32_t *) &nextiocb->iocb) + 7)); 1086 } 1087 1088 /* 1089 * Issue iocb command to adapter 1090 */ 1091 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size); 1092 wmb(); 1093 pring->stats.iocb_cmd++; 1094 1095 /* 1096 * If there is no completion routine to call, we can release the 1097 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF, 1098 * that have no rsp ring completion, iocb_cmpl MUST be NULL. 
1099 */ 1100 if (nextiocb->iocb_cmpl) 1101 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 1102 else 1103 __lpfc_sli_release_iocbq(phba, nextiocb); 1104 1105 /* 1106 * Let the HBA know what IOCB slot will be the next one the 1107 * driver will put a command into. 1108 */ 1109 pring->cmdidx = pring->next_cmdidx; 1110 writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); 1111 } 1112 1113 /** 1114 * lpfc_sli_update_full_ring - Update the chip attention register 1115 * @phba: Pointer to HBA context object. 1116 * @pring: Pointer to driver SLI ring object. 1117 * 1118 * The caller is not required to hold any lock for calling this function. 1119 * This function updates the chip attention bits for the ring to inform firmware 1120 * that there are pending work to be done for this ring and requests an 1121 * interrupt when there is space available in the ring. This function is 1122 * called when the driver is unable to post more iocbs to the ring due 1123 * to unavailability of space in the ring. 1124 **/ 1125 static void 1126 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1127 { 1128 int ringno = pring->ringno; 1129 1130 pring->flag |= LPFC_CALL_RING_AVAILABLE; 1131 1132 wmb(); 1133 1134 /* 1135 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register. 1136 * The HBA will tell us when an IOCB entry is available. 1137 */ 1138 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr); 1139 readl(phba->CAregaddr); /* flush */ 1140 1141 pring->stats.iocb_cmd_full++; 1142 } 1143 1144 /** 1145 * lpfc_sli_update_ring - Update chip attention register 1146 * @phba: Pointer to HBA context object. 1147 * @pring: Pointer to driver SLI ring object. 1148 * 1149 * This function updates the chip attention register bit for the 1150 * given ring to inform HBA that there is more work to be done 1151 * in this ring. The caller is not required to hold any lock. 1152 **/ 1153 static void 1154 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1155 { 1156 int ringno = pring->ringno; 1157 1158 /* 1159 * Tell the HBA that there is work to do in this ring. 1160 */ 1161 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) { 1162 wmb(); 1163 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); 1164 readl(phba->CAregaddr); /* flush */ 1165 } 1166 } 1167 1168 /** 1169 * lpfc_sli_resume_iocb - Process iocbs in the txq 1170 * @phba: Pointer to HBA context object. 1171 * @pring: Pointer to driver SLI ring object. 1172 * 1173 * This function is called with hbalock held to post pending iocbs 1174 * in the txq to the firmware. This function is called when driver 1175 * detects space available in the ring. 1176 **/ 1177 static void 1178 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1179 { 1180 IOCB_t *iocb; 1181 struct lpfc_iocbq *nextiocb; 1182 1183 /* 1184 * Check to see if: 1185 * (a) there is anything on the txq to send 1186 * (b) link is up 1187 * (c) link attention events can be processed (fcp ring only) 1188 * (d) IOCB processing is not blocked by the outstanding mbox command. 
1189 */ 1190 if (pring->txq_cnt && 1191 lpfc_is_link_up(phba) && 1192 (pring->ringno != phba->sli.fcp_ring || 1193 phba->sli.sli_flag & LPFC_PROCESS_LA)) { 1194 1195 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 1196 (nextiocb = lpfc_sli_ringtx_get(phba, pring))) 1197 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 1198 1199 if (iocb) 1200 lpfc_sli_update_ring(phba, pring); 1201 else 1202 lpfc_sli_update_full_ring(phba, pring); 1203 } 1204 1205 return; 1206 } 1207 1208 /** 1209 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ 1210 * @phba: Pointer to HBA context object. 1211 * @hbqno: HBQ number. 1212 * 1213 * This function is called with hbalock held to get the next 1214 * available slot for the given HBQ. If there is free slot 1215 * available for the HBQ it will return pointer to the next available 1216 * HBQ entry else it will return NULL. 1217 **/ 1218 static struct lpfc_hbq_entry * 1219 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) 1220 { 1221 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1222 1223 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && 1224 ++hbqp->next_hbqPutIdx >= hbqp->entry_count) 1225 hbqp->next_hbqPutIdx = 0; 1226 1227 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { 1228 uint32_t raw_index = phba->hbq_get[hbqno]; 1229 uint32_t getidx = le32_to_cpu(raw_index); 1230 1231 hbqp->local_hbqGetIdx = getidx; 1232 1233 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { 1234 lpfc_printf_log(phba, KERN_ERR, 1235 LOG_SLI | LOG_VPORT, 1236 "1802 HBQ %d: local_hbqGetIdx " 1237 "%u is > than hbqp->entry_count %u\n", 1238 hbqno, hbqp->local_hbqGetIdx, 1239 hbqp->entry_count); 1240 1241 phba->link_state = LPFC_HBA_ERROR; 1242 return NULL; 1243 } 1244 1245 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) 1246 return NULL; 1247 } 1248 1249 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt + 1250 hbqp->hbqPutIdx; 1251 } 1252 1253 /** 1254 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers 1255 * @phba: Pointer to HBA context object. 1256 * 1257 * This function is called with no lock held to free all the 1258 * hbq buffers while uninitializing the SLI interface. It also 1259 * frees the HBQ buffers returned by the firmware but not yet 1260 * processed by the upper layers. 
1261 **/ 1262 void 1263 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) 1264 { 1265 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 1266 struct hbq_dmabuf *hbq_buf; 1267 unsigned long flags; 1268 int i, hbq_count; 1269 uint32_t hbqno; 1270 1271 hbq_count = lpfc_sli_hbq_count(); 1272 /* Return all memory used by all HBQs */ 1273 spin_lock_irqsave(&phba->hbalock, flags); 1274 for (i = 0; i < hbq_count; ++i) { 1275 list_for_each_entry_safe(dmabuf, next_dmabuf, 1276 &phba->hbqs[i].hbq_buffer_list, list) { 1277 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1278 list_del(&hbq_buf->dbuf.list); 1279 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); 1280 } 1281 phba->hbqs[i].buffer_count = 0; 1282 } 1283 /* Return all HBQ buffer that are in-fly */ 1284 list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list, 1285 list) { 1286 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1287 list_del(&hbq_buf->dbuf.list); 1288 if (hbq_buf->tag == -1) { 1289 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) 1290 (phba, hbq_buf); 1291 } else { 1292 hbqno = hbq_buf->tag >> 16; 1293 if (hbqno >= LPFC_MAX_HBQS) 1294 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) 1295 (phba, hbq_buf); 1296 else 1297 (phba->hbqs[hbqno].hbq_free_buffer)(phba, 1298 hbq_buf); 1299 } 1300 } 1301 1302 /* Mark the HBQs not in use */ 1303 phba->hbq_in_use = 0; 1304 spin_unlock_irqrestore(&phba->hbalock, flags); 1305 } 1306 1307 /** 1308 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware 1309 * @phba: Pointer to HBA context object. 1310 * @hbqno: HBQ number. 1311 * @hbq_buf: Pointer to HBQ buffer. 1312 * 1313 * This function is called with the hbalock held to post a 1314 * hbq buffer to the firmware. If the function finds an empty 1315 * slot in the HBQ, it will post the buffer. The function will return 1316 * pointer to the hbq entry if it successfully post the buffer 1317 * else it will return NULL. 1318 **/ 1319 static int 1320 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 1321 struct hbq_dmabuf *hbq_buf) 1322 { 1323 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf); 1324 } 1325 1326 /** 1327 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware 1328 * @phba: Pointer to HBA context object. 1329 * @hbqno: HBQ number. 1330 * @hbq_buf: Pointer to HBQ buffer. 1331 * 1332 * This function is called with the hbalock held to post a hbq buffer to the 1333 * firmware. If the function finds an empty slot in the HBQ, it will post the 1334 * buffer and place it on the hbq_buffer_list. The function will return zero if 1335 * it successfully post the buffer else it will return an error. 
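 *
 * Note, for illustration: callers go through lpfc_sli_hbq_to_firmware(),
 * which dispatches to this routine or to lpfc_sli_hbq_to_firmware_s4()
 * via the phba->lpfc_sli_hbq_to_firmware function pointer; the SLI-rev
 * setup code is assumed to install the matching variant, e.g.
 *
 *	phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;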
1336 **/ 1337 static int 1338 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, 1339 struct hbq_dmabuf *hbq_buf) 1340 { 1341 struct lpfc_hbq_entry *hbqe; 1342 dma_addr_t physaddr = hbq_buf->dbuf.phys; 1343 1344 /* Get next HBQ entry slot to use */ 1345 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); 1346 if (hbqe) { 1347 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1348 1349 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 1350 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr)); 1351 hbqe->bde.tus.f.bdeSize = hbq_buf->size; 1352 hbqe->bde.tus.f.bdeFlags = 0; 1353 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w); 1354 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag); 1355 /* Sync SLIM */ 1356 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx; 1357 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno); 1358 /* flush */ 1359 readl(phba->hbq_put + hbqno); 1360 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); 1361 return 0; 1362 } else 1363 return -ENOMEM; 1364 } 1365 1366 /** 1367 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware 1368 * @phba: Pointer to HBA context object. 1369 * @hbqno: HBQ number. 1370 * @hbq_buf: Pointer to HBQ buffer. 1371 * 1372 * This function is called with the hbalock held to post an RQE to the SLI4 1373 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to 1374 * the hbq_buffer_list and return zero, otherwise it will return an error. 1375 **/ 1376 static int 1377 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, 1378 struct hbq_dmabuf *hbq_buf) 1379 { 1380 int rc; 1381 struct lpfc_rqe hrqe; 1382 struct lpfc_rqe drqe; 1383 1384 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); 1385 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); 1386 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); 1387 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys); 1388 rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 1389 &hrqe, &drqe); 1390 if (rc < 0) 1391 return rc; 1392 hbq_buf->tag = rc; 1393 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list); 1394 return 0; 1395 } 1396 1397 /* HBQ for ELS and CT traffic. */ 1398 static struct lpfc_hbq_init lpfc_els_hbq = { 1399 .rn = 1, 1400 .entry_count = 256, 1401 .mask_count = 0, 1402 .profile = 0, 1403 .ring_mask = (1 << LPFC_ELS_RING), 1404 .buffer_count = 0, 1405 .init_count = 40, 1406 .add_count = 40, 1407 }; 1408 1409 /* HBQ for the extra ring if needed */ 1410 static struct lpfc_hbq_init lpfc_extra_hbq = { 1411 .rn = 1, 1412 .entry_count = 200, 1413 .mask_count = 0, 1414 .profile = 0, 1415 .ring_mask = (1 << LPFC_EXTRA_RING), 1416 .buffer_count = 0, 1417 .init_count = 0, 1418 .add_count = 5, 1419 }; 1420 1421 /* Array of HBQs */ 1422 struct lpfc_hbq_init *lpfc_hbq_defs[] = { 1423 &lpfc_els_hbq, 1424 &lpfc_extra_hbq, 1425 }; 1426 1427 /** 1428 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ 1429 * @phba: Pointer to HBA context object. 1430 * @hbqno: HBQ number. 1431 * @count: Number of HBQ buffers to be posted. 1432 * 1433 * This function is called with no lock held to post more hbq buffers to the 1434 * given HBQ. The function returns the number of HBQ buffers successfully 1435 * posted. 
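 *
 * For illustration, the wrappers further down simply choose the count:
 * lpfc_sli_hbqbuf_init_hbqs() posts lpfc_hbq_defs[qno]->init_count buffers
 * (entry_count on SLI4) at initialization, and lpfc_sli_hbqbuf_add_hbqs()
 * tops an HBQ up with add_count more, e.g.
 *
 *	posted = lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);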
1436 **/ 1437 static int 1438 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) 1439 { 1440 uint32_t i, posted = 0; 1441 unsigned long flags; 1442 struct hbq_dmabuf *hbq_buffer; 1443 LIST_HEAD(hbq_buf_list); 1444 if (!phba->hbqs[hbqno].hbq_alloc_buffer) 1445 return 0; 1446 1447 if ((phba->hbqs[hbqno].buffer_count + count) > 1448 lpfc_hbq_defs[hbqno]->entry_count) 1449 count = lpfc_hbq_defs[hbqno]->entry_count - 1450 phba->hbqs[hbqno].buffer_count; 1451 if (!count) 1452 return 0; 1453 /* Allocate HBQ entries */ 1454 for (i = 0; i < count; i++) { 1455 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 1456 if (!hbq_buffer) 1457 break; 1458 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list); 1459 } 1460 /* Check whether HBQ is still in use */ 1461 spin_lock_irqsave(&phba->hbalock, flags); 1462 if (!phba->hbq_in_use) 1463 goto err; 1464 while (!list_empty(&hbq_buf_list)) { 1465 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 1466 dbuf.list); 1467 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 1468 (hbqno << 16)); 1469 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1470 phba->hbqs[hbqno].buffer_count++; 1471 posted++; 1472 } else 1473 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1474 } 1475 spin_unlock_irqrestore(&phba->hbalock, flags); 1476 return posted; 1477 err: 1478 spin_unlock_irqrestore(&phba->hbalock, flags); 1479 while (!list_empty(&hbq_buf_list)) { 1480 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 1481 dbuf.list); 1482 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1483 } 1484 return 0; 1485 } 1486 1487 /** 1488 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware 1489 * @phba: Pointer to HBA context object. 1490 * @qno: HBQ number. 1491 * 1492 * This function posts more buffers to the HBQ. This function 1493 * is called with no lock held. The function returns the number of HBQ entries 1494 * successfully allocated. 1495 **/ 1496 int 1497 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 1498 { 1499 if (phba->sli_rev == LPFC_SLI_REV4) 1500 return 0; 1501 else 1502 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1503 lpfc_hbq_defs[qno]->add_count); 1504 } 1505 1506 /** 1507 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ 1508 * @phba: Pointer to HBA context object. 1509 * @qno: HBQ queue number. 1510 * 1511 * This function is called from SLI initialization code path with 1512 * no lock held to post initial HBQ buffers to firmware. The 1513 * function returns the number of HBQ entries successfully allocated. 1514 **/ 1515 static int 1516 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) 1517 { 1518 if (phba->sli_rev == LPFC_SLI_REV4) 1519 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1520 lpfc_hbq_defs[qno]->entry_count); 1521 else 1522 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1523 lpfc_hbq_defs[qno]->init_count); 1524 } 1525 1526 /** 1527 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list 1528 * @phba: Pointer to HBA context object. 1529 * @hbqno: HBQ number. 1530 * 1531 * This function removes the first hbq buffer on an hbq list and returns a 1532 * pointer to that buffer. If it finds no buffers on the list it returns NULL. 
1533 **/ 1534 static struct hbq_dmabuf * 1535 lpfc_sli_hbqbuf_get(struct list_head *rb_list) 1536 { 1537 struct lpfc_dmabuf *d_buf; 1538 1539 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list); 1540 if (!d_buf) 1541 return NULL; 1542 return container_of(d_buf, struct hbq_dmabuf, dbuf); 1543 } 1544 1545 /** 1546 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag 1547 * @phba: Pointer to HBA context object. 1548 * @tag: Tag of the hbq buffer. 1549 * 1550 * This function is called with hbalock held. This function searches 1551 * for the hbq buffer associated with the given tag in the hbq buffer 1552 * list. If it finds the hbq buffer, it returns the hbq_buffer other wise 1553 * it returns NULL. 1554 **/ 1555 static struct hbq_dmabuf * 1556 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) 1557 { 1558 struct lpfc_dmabuf *d_buf; 1559 struct hbq_dmabuf *hbq_buf; 1560 uint32_t hbqno; 1561 1562 hbqno = tag >> 16; 1563 if (hbqno >= LPFC_MAX_HBQS) 1564 return NULL; 1565 1566 spin_lock_irq(&phba->hbalock); 1567 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { 1568 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 1569 if (hbq_buf->tag == tag) { 1570 spin_unlock_irq(&phba->hbalock); 1571 return hbq_buf; 1572 } 1573 } 1574 spin_unlock_irq(&phba->hbalock); 1575 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 1576 "1803 Bad hbq tag. Data: x%x x%x\n", 1577 tag, phba->hbqs[tag >> 16].buffer_count); 1578 return NULL; 1579 } 1580 1581 /** 1582 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware 1583 * @phba: Pointer to HBA context object. 1584 * @hbq_buffer: Pointer to HBQ buffer. 1585 * 1586 * This function is called with hbalock. This function gives back 1587 * the hbq buffer to firmware. If the HBQ does not have space to 1588 * post the buffer, it will free the buffer. 1589 **/ 1590 void 1591 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) 1592 { 1593 uint32_t hbqno; 1594 1595 if (hbq_buffer) { 1596 hbqno = hbq_buffer->tag >> 16; 1597 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) 1598 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1599 } 1600 } 1601 1602 /** 1603 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox 1604 * @mbxCommand: mailbox command code. 1605 * 1606 * This function is called by the mailbox event handler function to verify 1607 * that the completed mailbox command is a legitimate mailbox command. If the 1608 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN 1609 * and the mailbox event handler will take the HBA offline. 
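 *
 * Sketch of the caller's check (see lpfc_sli_handle_mb_event() later in this
 * file for the real use):
 *
 *	if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == MBX_SHUTDOWN) {
 *		phba->link_state = LPFC_HBA_ERROR;
 *		phba->work_hs = HS_FFER3;
 *		lpfc_handle_eratt(phba);
 *	}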
1610 **/ 1611 static int 1612 lpfc_sli_chk_mbx_command(uint8_t mbxCommand) 1613 { 1614 uint8_t ret; 1615 1616 switch (mbxCommand) { 1617 case MBX_LOAD_SM: 1618 case MBX_READ_NV: 1619 case MBX_WRITE_NV: 1620 case MBX_WRITE_VPARMS: 1621 case MBX_RUN_BIU_DIAG: 1622 case MBX_INIT_LINK: 1623 case MBX_DOWN_LINK: 1624 case MBX_CONFIG_LINK: 1625 case MBX_CONFIG_RING: 1626 case MBX_RESET_RING: 1627 case MBX_READ_CONFIG: 1628 case MBX_READ_RCONFIG: 1629 case MBX_READ_SPARM: 1630 case MBX_READ_STATUS: 1631 case MBX_READ_RPI: 1632 case MBX_READ_XRI: 1633 case MBX_READ_REV: 1634 case MBX_READ_LNK_STAT: 1635 case MBX_REG_LOGIN: 1636 case MBX_UNREG_LOGIN: 1637 case MBX_READ_LA: 1638 case MBX_CLEAR_LA: 1639 case MBX_DUMP_MEMORY: 1640 case MBX_DUMP_CONTEXT: 1641 case MBX_RUN_DIAGS: 1642 case MBX_RESTART: 1643 case MBX_UPDATE_CFG: 1644 case MBX_DOWN_LOAD: 1645 case MBX_DEL_LD_ENTRY: 1646 case MBX_RUN_PROGRAM: 1647 case MBX_SET_MASK: 1648 case MBX_SET_VARIABLE: 1649 case MBX_UNREG_D_ID: 1650 case MBX_KILL_BOARD: 1651 case MBX_CONFIG_FARP: 1652 case MBX_BEACON: 1653 case MBX_LOAD_AREA: 1654 case MBX_RUN_BIU_DIAG64: 1655 case MBX_CONFIG_PORT: 1656 case MBX_READ_SPARM64: 1657 case MBX_READ_RPI64: 1658 case MBX_REG_LOGIN64: 1659 case MBX_READ_LA64: 1660 case MBX_WRITE_WWN: 1661 case MBX_SET_DEBUG: 1662 case MBX_LOAD_EXP_ROM: 1663 case MBX_ASYNCEVT_ENABLE: 1664 case MBX_REG_VPI: 1665 case MBX_UNREG_VPI: 1666 case MBX_HEARTBEAT: 1667 case MBX_PORT_CAPABILITIES: 1668 case MBX_PORT_IOV_CONTROL: 1669 case MBX_SLI4_CONFIG: 1670 case MBX_SLI4_REQ_FTRS: 1671 case MBX_REG_FCFI: 1672 case MBX_UNREG_FCFI: 1673 case MBX_REG_VFI: 1674 case MBX_UNREG_VFI: 1675 case MBX_INIT_VPI: 1676 case MBX_INIT_VFI: 1677 case MBX_RESUME_RPI: 1678 case MBX_READ_EVENT_LOG_STATUS: 1679 case MBX_READ_EVENT_LOG: 1680 ret = mbxCommand; 1681 break; 1682 default: 1683 ret = MBX_SHUTDOWN; 1684 break; 1685 } 1686 return ret; 1687 } 1688 1689 /** 1690 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler 1691 * @phba: Pointer to HBA context object. 1692 * @pmboxq: Pointer to mailbox command. 1693 * 1694 * This is completion handler function for mailbox commands issued from 1695 * lpfc_sli_issue_mbox_wait function. This function is called by the 1696 * mailbox event handler function with no lock held. This function 1697 * will wake up thread waiting on the wait queue pointed by context1 1698 * of the mailbox. 1699 **/ 1700 void 1701 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 1702 { 1703 wait_queue_head_t *pdone_q; 1704 unsigned long drvr_flag; 1705 1706 /* 1707 * If pdone_q is empty, the driver thread gave up waiting and 1708 * continued running. 1709 */ 1710 pmboxq->mbox_flag |= LPFC_MBX_WAKE; 1711 spin_lock_irqsave(&phba->hbalock, drvr_flag); 1712 pdone_q = (wait_queue_head_t *) pmboxq->context1; 1713 if (pdone_q) 1714 wake_up_interruptible(pdone_q); 1715 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 1716 return; 1717 } 1718 1719 1720 /** 1721 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler 1722 * @phba: Pointer to HBA context object. 1723 * @pmb: Pointer to mailbox object. 1724 * 1725 * This function is the default mailbox completion handler. It 1726 * frees the memory resources associated with the completed mailbox 1727 * command. If the completed command is a REG_LOGIN mailbox command, 1728 * this function will issue a UREG_LOGIN to re-claim the RPI. 
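 *
 * Typical fire-and-forget use, sketched for illustration (the mailbox build
 * step stands in for any lpfc mailbox setup routine):
 *
 *	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	... build the mailbox command in pmb ...
 *	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 *	if (rc == MBX_NOT_FINISHED)
 *		mempool_free(pmb, phba->mbox_mem_pool);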
1729 **/
1730 void
1731 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1732 {
1733 struct lpfc_dmabuf *mp;
1734 uint16_t rpi, vpi;
1735 int rc;
1736 struct lpfc_vport *vport = pmb->vport;
1737
1738 mp = (struct lpfc_dmabuf *) (pmb->context1);
1739
1740 if (mp) {
1741 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1742 kfree(mp);
1743 }
1744
1745 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
1746 (phba->sli_rev == LPFC_SLI_REV4))
1747 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
1748
1749 /*
1750 * If a REG_LOGIN succeeded after the node was destroyed or the node
1751 * is in re-discovery, the driver needs to clean up the RPI.
1752 */
1753 if (!(phba->pport->load_flag & FC_UNLOADING) &&
1754 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
1755 !pmb->u.mb.mbxStatus) {
1756 rpi = pmb->u.mb.un.varWords[0];
1757 vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
1758 lpfc_unreg_login(phba, vpi, rpi, pmb);
1759 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1760 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1761 if (rc != MBX_NOT_FINISHED)
1762 return;
1763 }
1764
1765 /* Unreg the VPI if the REG_VPI succeeded after a VLink failure */
1766 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
1767 !(phba->pport->load_flag & FC_UNLOADING) &&
1768 !pmb->u.mb.mbxStatus) {
1769 lpfc_unreg_vpi(phba, pmb->u.mb.un.varRegVpi.vpi, pmb);
1770 pmb->vport = vport;
1771 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1772 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1773 if (rc != MBX_NOT_FINISHED)
1774 return;
1775 }
1776
1777 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
1778 lpfc_sli4_mbox_cmd_free(phba, pmb);
1779 else
1780 mempool_free(pmb, phba->mbox_mem_pool);
1781 }
1782
1783 /**
1784 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
1785 * @phba: Pointer to HBA context object.
1786 *
1787 * This function is called with no lock held. This function processes all
1788 * the completed mailbox commands and gives them to the upper layers. The
1789 * interrupt service routine processes the mailbox completion interrupt,
1790 * adds the completed mailbox commands to the mboxq_cmpl queue and signals
1791 * the worker thread. The worker thread calls lpfc_sli_handle_mb_event,
1792 * which returns the completed mailbox commands in the mboxq_cmpl queue to
1793 * the upper layers. This function returns the mailbox commands to the upper
1794 * layer by calling the completion handler function of each mailbox.
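 *
 * Simplified sketch of the worker-thread dispatch that is assumed to lead
 * here; the HA_MBATT test stands in for the real work-event handling:
 *
 *	if (phba->work_ha & HA_MBATT)
 *		lpfc_sli_handle_mb_event(phba);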
1795 **/ 1796 int 1797 lpfc_sli_handle_mb_event(struct lpfc_hba *phba) 1798 { 1799 MAILBOX_t *pmbox; 1800 LPFC_MBOXQ_t *pmb; 1801 int rc; 1802 LIST_HEAD(cmplq); 1803 1804 phba->sli.slistat.mbox_event++; 1805 1806 /* Get all completed mailboxe buffers into the cmplq */ 1807 spin_lock_irq(&phba->hbalock); 1808 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq); 1809 spin_unlock_irq(&phba->hbalock); 1810 1811 /* Get a Mailbox buffer to setup mailbox commands for callback */ 1812 do { 1813 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list); 1814 if (pmb == NULL) 1815 break; 1816 1817 pmbox = &pmb->u.mb; 1818 1819 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 1820 if (pmb->vport) { 1821 lpfc_debugfs_disc_trc(pmb->vport, 1822 LPFC_DISC_TRC_MBOX_VPORT, 1823 "MBOX cmpl vport: cmd:x%x mb:x%x x%x", 1824 (uint32_t)pmbox->mbxCommand, 1825 pmbox->un.varWords[0], 1826 pmbox->un.varWords[1]); 1827 } 1828 else { 1829 lpfc_debugfs_disc_trc(phba->pport, 1830 LPFC_DISC_TRC_MBOX, 1831 "MBOX cmpl: cmd:x%x mb:x%x x%x", 1832 (uint32_t)pmbox->mbxCommand, 1833 pmbox->un.varWords[0], 1834 pmbox->un.varWords[1]); 1835 } 1836 } 1837 1838 /* 1839 * It is a fatal error if unknown mbox command completion. 1840 */ 1841 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == 1842 MBX_SHUTDOWN) { 1843 /* Unknown mailbox command compl */ 1844 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 1845 "(%d):0323 Unknown Mailbox command " 1846 "x%x (x%x) Cmpl\n", 1847 pmb->vport ? pmb->vport->vpi : 0, 1848 pmbox->mbxCommand, 1849 lpfc_sli4_mbox_opcode_get(phba, pmb)); 1850 phba->link_state = LPFC_HBA_ERROR; 1851 phba->work_hs = HS_FFER3; 1852 lpfc_handle_eratt(phba); 1853 continue; 1854 } 1855 1856 if (pmbox->mbxStatus) { 1857 phba->sli.slistat.mbox_stat_err++; 1858 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 1859 /* Mbox cmd cmpl error - RETRYing */ 1860 lpfc_printf_log(phba, KERN_INFO, 1861 LOG_MBOX | LOG_SLI, 1862 "(%d):0305 Mbox cmd cmpl " 1863 "error - RETRYing Data: x%x " 1864 "(x%x) x%x x%x x%x\n", 1865 pmb->vport ? pmb->vport->vpi :0, 1866 pmbox->mbxCommand, 1867 lpfc_sli4_mbox_opcode_get(phba, 1868 pmb), 1869 pmbox->mbxStatus, 1870 pmbox->un.varWords[0], 1871 pmb->vport->port_state); 1872 pmbox->mbxStatus = 0; 1873 pmbox->mbxOwner = OWN_HOST; 1874 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 1875 if (rc != MBX_NOT_FINISHED) 1876 continue; 1877 } 1878 } 1879 1880 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 1881 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 1882 "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p " 1883 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 1884 pmb->vport ? pmb->vport->vpi : 0, 1885 pmbox->mbxCommand, 1886 lpfc_sli4_mbox_opcode_get(phba, pmb), 1887 pmb->mbox_cmpl, 1888 *((uint32_t *) pmbox), 1889 pmbox->un.varWords[0], 1890 pmbox->un.varWords[1], 1891 pmbox->un.varWords[2], 1892 pmbox->un.varWords[3], 1893 pmbox->un.varWords[4], 1894 pmbox->un.varWords[5], 1895 pmbox->un.varWords[6], 1896 pmbox->un.varWords[7]); 1897 1898 if (pmb->mbox_cmpl) 1899 pmb->mbox_cmpl(phba,pmb); 1900 } while (1); 1901 return 0; 1902 } 1903 1904 /** 1905 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag 1906 * @phba: Pointer to HBA context object. 1907 * @pring: Pointer to driver SLI ring object. 1908 * @tag: buffer tag. 1909 * 1910 * This function is called with no lock held. When QUE_BUFTAG_BIT bit 1911 * is set in the tag the buffer is posted for a particular exchange, 1912 * the function will return the buffer without replacing the buffer. 
1913 * If the buffer is for unsolicited ELS or CT traffic, this function 1914 * returns the buffer and also posts another buffer to the firmware. 1915 **/ 1916 static struct lpfc_dmabuf * 1917 lpfc_sli_get_buff(struct lpfc_hba *phba, 1918 struct lpfc_sli_ring *pring, 1919 uint32_t tag) 1920 { 1921 struct hbq_dmabuf *hbq_entry; 1922 1923 if (tag & QUE_BUFTAG_BIT) 1924 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); 1925 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); 1926 if (!hbq_entry) 1927 return NULL; 1928 return &hbq_entry->dbuf; 1929 } 1930 1931 /** 1932 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence 1933 * @phba: Pointer to HBA context object. 1934 * @pring: Pointer to driver SLI ring object. 1935 * @saveq: Pointer to the iocbq struct representing the sequence starting frame. 1936 * @fch_r_ctl: the r_ctl for the first frame of the sequence. 1937 * @fch_type: the type for the first frame of the sequence. 1938 * 1939 * This function is called with no lock held. This function uses the r_ctl and 1940 * type of the received sequence to find the correct callback function to call 1941 * to process the sequence. 1942 **/ 1943 static int 1944 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1945 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, 1946 uint32_t fch_type) 1947 { 1948 int i; 1949 1950 /* unSolicited Responses */ 1951 if (pring->prt[0].profile) { 1952 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 1953 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, 1954 saveq); 1955 return 1; 1956 } 1957 /* We must search, based on rctl / type 1958 for the right routine */ 1959 for (i = 0; i < pring->num_mask; i++) { 1960 if ((pring->prt[i].rctl == fch_r_ctl) && 1961 (pring->prt[i].type == fch_type)) { 1962 if (pring->prt[i].lpfc_sli_rcv_unsol_event) 1963 (pring->prt[i].lpfc_sli_rcv_unsol_event) 1964 (phba, pring, saveq); 1965 return 1; 1966 } 1967 } 1968 return 0; 1969 } 1970 1971 /** 1972 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 1973 * @phba: Pointer to HBA context object. 1974 * @pring: Pointer to driver SLI ring object. 1975 * @saveq: Pointer to the unsolicited iocb. 1976 * 1977 * This function is called with no lock held by the ring event handler 1978 * when there is an unsolicited iocb posted to the response ring by the 1979 * firmware. This function gets the buffer associated with the iocbs 1980 * and calls the event handler for the ring. This function handles both 1981 * qring buffers and hbq buffers. 1982 * When the function returns 1 the caller can free the iocb object otherwise 1983 * upper layer functions will free the iocb objects. 
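 *
 * Caller sketch, mirroring lpfc_sli_sp_handle_rspiocb() later in this
 * file; when 0 is returned the caller keeps the chained iocbs (clears
 * free_saveq) because the upper layer will free them:
 *
 *	rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
 *	if (!rc)
 *		free_saveq = 0;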
1984 **/ 1985 static int 1986 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1987 struct lpfc_iocbq *saveq) 1988 { 1989 IOCB_t * irsp; 1990 WORD5 * w5p; 1991 uint32_t Rctl, Type; 1992 uint32_t match; 1993 struct lpfc_iocbq *iocbq; 1994 struct lpfc_dmabuf *dmzbuf; 1995 1996 match = 0; 1997 irsp = &(saveq->iocb); 1998 1999 if (irsp->ulpCommand == CMD_ASYNC_STATUS) { 2000 if (pring->lpfc_sli_rcv_async_status) 2001 pring->lpfc_sli_rcv_async_status(phba, pring, saveq); 2002 else 2003 lpfc_printf_log(phba, 2004 KERN_WARNING, 2005 LOG_SLI, 2006 "0316 Ring %d handler: unexpected " 2007 "ASYNC_STATUS iocb received evt_code " 2008 "0x%x\n", 2009 pring->ringno, 2010 irsp->un.asyncstat.evt_code); 2011 return 1; 2012 } 2013 2014 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && 2015 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { 2016 if (irsp->ulpBdeCount > 0) { 2017 dmzbuf = lpfc_sli_get_buff(phba, pring, 2018 irsp->un.ulpWord[3]); 2019 lpfc_in_buf_free(phba, dmzbuf); 2020 } 2021 2022 if (irsp->ulpBdeCount > 1) { 2023 dmzbuf = lpfc_sli_get_buff(phba, pring, 2024 irsp->unsli3.sli3Words[3]); 2025 lpfc_in_buf_free(phba, dmzbuf); 2026 } 2027 2028 if (irsp->ulpBdeCount > 2) { 2029 dmzbuf = lpfc_sli_get_buff(phba, pring, 2030 irsp->unsli3.sli3Words[7]); 2031 lpfc_in_buf_free(phba, dmzbuf); 2032 } 2033 2034 return 1; 2035 } 2036 2037 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2038 if (irsp->ulpBdeCount != 0) { 2039 saveq->context2 = lpfc_sli_get_buff(phba, pring, 2040 irsp->un.ulpWord[3]); 2041 if (!saveq->context2) 2042 lpfc_printf_log(phba, 2043 KERN_ERR, 2044 LOG_SLI, 2045 "0341 Ring %d Cannot find buffer for " 2046 "an unsolicited iocb. tag 0x%x\n", 2047 pring->ringno, 2048 irsp->un.ulpWord[3]); 2049 } 2050 if (irsp->ulpBdeCount == 2) { 2051 saveq->context3 = lpfc_sli_get_buff(phba, pring, 2052 irsp->unsli3.sli3Words[7]); 2053 if (!saveq->context3) 2054 lpfc_printf_log(phba, 2055 KERN_ERR, 2056 LOG_SLI, 2057 "0342 Ring %d Cannot find buffer for an" 2058 " unsolicited iocb. tag 0x%x\n", 2059 pring->ringno, 2060 irsp->unsli3.sli3Words[7]); 2061 } 2062 list_for_each_entry(iocbq, &saveq->list, list) { 2063 irsp = &(iocbq->iocb); 2064 if (irsp->ulpBdeCount != 0) { 2065 iocbq->context2 = lpfc_sli_get_buff(phba, pring, 2066 irsp->un.ulpWord[3]); 2067 if (!iocbq->context2) 2068 lpfc_printf_log(phba, 2069 KERN_ERR, 2070 LOG_SLI, 2071 "0343 Ring %d Cannot find " 2072 "buffer for an unsolicited iocb" 2073 ". tag 0x%x\n", pring->ringno, 2074 irsp->un.ulpWord[3]); 2075 } 2076 if (irsp->ulpBdeCount == 2) { 2077 iocbq->context3 = lpfc_sli_get_buff(phba, pring, 2078 irsp->unsli3.sli3Words[7]); 2079 if (!iocbq->context3) 2080 lpfc_printf_log(phba, 2081 KERN_ERR, 2082 LOG_SLI, 2083 "0344 Ring %d Cannot find " 2084 "buffer for an unsolicited " 2085 "iocb. 
tag 0x%x\n", 2086 pring->ringno, 2087 irsp->unsli3.sli3Words[7]); 2088 } 2089 } 2090 } 2091 if (irsp->ulpBdeCount != 0 && 2092 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || 2093 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { 2094 int found = 0; 2095 2096 /* search continue save q for same XRI */ 2097 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { 2098 if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) { 2099 list_add_tail(&saveq->list, &iocbq->list); 2100 found = 1; 2101 break; 2102 } 2103 } 2104 if (!found) 2105 list_add_tail(&saveq->clist, 2106 &pring->iocb_continue_saveq); 2107 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { 2108 list_del_init(&iocbq->clist); 2109 saveq = iocbq; 2110 irsp = &(saveq->iocb); 2111 } else 2112 return 0; 2113 } 2114 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || 2115 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || 2116 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { 2117 Rctl = FC_RCTL_ELS_REQ; 2118 Type = FC_TYPE_ELS; 2119 } else { 2120 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); 2121 Rctl = w5p->hcsw.Rctl; 2122 Type = w5p->hcsw.Type; 2123 2124 /* Firmware Workaround */ 2125 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 2126 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 2127 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 2128 Rctl = FC_RCTL_ELS_REQ; 2129 Type = FC_TYPE_ELS; 2130 w5p->hcsw.Rctl = Rctl; 2131 w5p->hcsw.Type = Type; 2132 } 2133 } 2134 2135 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) 2136 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2137 "0313 Ring %d handler: unexpected Rctl x%x " 2138 "Type x%x received\n", 2139 pring->ringno, Rctl, Type); 2140 2141 return 1; 2142 } 2143 2144 /** 2145 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb 2146 * @phba: Pointer to HBA context object. 2147 * @pring: Pointer to driver SLI ring object. 2148 * @prspiocb: Pointer to response iocb object. 2149 * 2150 * This function looks up the iocb_lookup table to get the command iocb 2151 * corresponding to the given response iocb using the iotag of the 2152 * response iocb. This function is called with the hbalock held. 2153 * This function returns the command iocb object if it finds the command 2154 * iocb else returns NULL. 2155 **/ 2156 static struct lpfc_iocbq * 2157 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, 2158 struct lpfc_sli_ring *pring, 2159 struct lpfc_iocbq *prspiocb) 2160 { 2161 struct lpfc_iocbq *cmd_iocb = NULL; 2162 uint16_t iotag; 2163 2164 iotag = prspiocb->iocb.ulpIoTag; 2165 2166 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2167 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2168 list_del_init(&cmd_iocb->list); 2169 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) { 2170 pring->txcmplq_cnt--; 2171 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q; 2172 } 2173 return cmd_iocb; 2174 } 2175 2176 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2177 "0317 iotag x%x is out off " 2178 "range: max iotag x%x wd0 x%x\n", 2179 iotag, phba->sli.last_iotag, 2180 *(((uint32_t *) &prspiocb->iocb) + 7)); 2181 return NULL; 2182 } 2183 2184 /** 2185 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag 2186 * @phba: Pointer to HBA context object. 2187 * @pring: Pointer to driver SLI ring object. 2188 * @iotag: IOCB tag. 2189 * 2190 * This function looks up the iocb_lookup table to get the command iocb 2191 * corresponding to the given iotag. This function is called with the 2192 * hbalock held. 2193 * This function returns the command iocb object if it finds the command 2194 * iocb else returns NULL. 
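 *
 * Illustrative usage sketch only; the locking follows the requirement
 * stated above and the local names are assumed:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflag);
 *	cmd_iocb = lpfc_sli_iocbq_lookup_by_tag(phba, pring, iotag);
 *	spin_unlock_irqrestore(&phba->hbalock, iflag);
 *	if (!cmd_iocb)
 *		return;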
2195 **/ 2196 static struct lpfc_iocbq * 2197 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, 2198 struct lpfc_sli_ring *pring, uint16_t iotag) 2199 { 2200 struct lpfc_iocbq *cmd_iocb; 2201 2202 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2203 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2204 list_del_init(&cmd_iocb->list); 2205 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) { 2206 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q; 2207 pring->txcmplq_cnt--; 2208 } 2209 return cmd_iocb; 2210 } 2211 2212 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2213 "0372 iotag x%x is out off range: max iotag (x%x)\n", 2214 iotag, phba->sli.last_iotag); 2215 return NULL; 2216 } 2217 2218 /** 2219 * lpfc_sli_process_sol_iocb - process solicited iocb completion 2220 * @phba: Pointer to HBA context object. 2221 * @pring: Pointer to driver SLI ring object. 2222 * @saveq: Pointer to the response iocb to be processed. 2223 * 2224 * This function is called by the ring event handler for non-fcp 2225 * rings when there is a new response iocb in the response ring. 2226 * The caller is not required to hold any locks. This function 2227 * gets the command iocb associated with the response iocb and 2228 * calls the completion handler for the command iocb. If there 2229 * is no completion handler, the function will free the resources 2230 * associated with command iocb. If the response iocb is for 2231 * an already aborted command iocb, the status of the completion 2232 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. 2233 * This function always returns 1. 2234 **/ 2235 static int 2236 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2237 struct lpfc_iocbq *saveq) 2238 { 2239 struct lpfc_iocbq *cmdiocbp; 2240 int rc = 1; 2241 unsigned long iflag; 2242 2243 /* Based on the iotag field, get the cmd IOCB from the txcmplq */ 2244 spin_lock_irqsave(&phba->hbalock, iflag); 2245 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 2246 spin_unlock_irqrestore(&phba->hbalock, iflag); 2247 2248 if (cmdiocbp) { 2249 if (cmdiocbp->iocb_cmpl) { 2250 /* 2251 * If an ELS command failed send an event to mgmt 2252 * application. 2253 */ 2254 if (saveq->iocb.ulpStatus && 2255 (pring->ringno == LPFC_ELS_RING) && 2256 (cmdiocbp->iocb.ulpCommand == 2257 CMD_ELS_REQUEST64_CR)) 2258 lpfc_send_els_failure_event(phba, 2259 cmdiocbp, saveq); 2260 2261 /* 2262 * Post all ELS completions to the worker thread. 2263 * All other are passed to the completion callback. 2264 */ 2265 if (pring->ringno == LPFC_ELS_RING) { 2266 if ((phba->sli_rev < LPFC_SLI_REV4) && 2267 (cmdiocbp->iocb_flag & 2268 LPFC_DRIVER_ABORTED)) { 2269 spin_lock_irqsave(&phba->hbalock, 2270 iflag); 2271 cmdiocbp->iocb_flag &= 2272 ~LPFC_DRIVER_ABORTED; 2273 spin_unlock_irqrestore(&phba->hbalock, 2274 iflag); 2275 saveq->iocb.ulpStatus = 2276 IOSTAT_LOCAL_REJECT; 2277 saveq->iocb.un.ulpWord[4] = 2278 IOERR_SLI_ABORTED; 2279 2280 /* Firmware could still be in progress 2281 * of DMAing payload, so don't free data 2282 * buffer till after a hbeat. 2283 */ 2284 spin_lock_irqsave(&phba->hbalock, 2285 iflag); 2286 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 2287 spin_unlock_irqrestore(&phba->hbalock, 2288 iflag); 2289 } 2290 if (phba->sli_rev == LPFC_SLI_REV4) { 2291 if (saveq->iocb_flag & 2292 LPFC_EXCHANGE_BUSY) { 2293 /* Set cmdiocb flag for the 2294 * exchange busy so sgl (xri) 2295 * will not be released until 2296 * the abort xri is received 2297 * from hba. 
2298 */ 2299 spin_lock_irqsave( 2300 &phba->hbalock, iflag); 2301 cmdiocbp->iocb_flag |= 2302 LPFC_EXCHANGE_BUSY; 2303 spin_unlock_irqrestore( 2304 &phba->hbalock, iflag); 2305 } 2306 if (cmdiocbp->iocb_flag & 2307 LPFC_DRIVER_ABORTED) { 2308 /* 2309 * Clear LPFC_DRIVER_ABORTED 2310 * bit in case it was driver 2311 * initiated abort. 2312 */ 2313 spin_lock_irqsave( 2314 &phba->hbalock, iflag); 2315 cmdiocbp->iocb_flag &= 2316 ~LPFC_DRIVER_ABORTED; 2317 spin_unlock_irqrestore( 2318 &phba->hbalock, iflag); 2319 cmdiocbp->iocb.ulpStatus = 2320 IOSTAT_LOCAL_REJECT; 2321 cmdiocbp->iocb.un.ulpWord[4] = 2322 IOERR_ABORT_REQUESTED; 2323 /* 2324 * For SLI4, irsiocb contains 2325 * NO_XRI in sli_xritag, it 2326 * shall not affect releasing 2327 * sgl (xri) process. 2328 */ 2329 saveq->iocb.ulpStatus = 2330 IOSTAT_LOCAL_REJECT; 2331 saveq->iocb.un.ulpWord[4] = 2332 IOERR_SLI_ABORTED; 2333 spin_lock_irqsave( 2334 &phba->hbalock, iflag); 2335 saveq->iocb_flag |= 2336 LPFC_DELAY_MEM_FREE; 2337 spin_unlock_irqrestore( 2338 &phba->hbalock, iflag); 2339 } 2340 } 2341 } 2342 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 2343 } else 2344 lpfc_sli_release_iocbq(phba, cmdiocbp); 2345 } else { 2346 /* 2347 * Unknown initiating command based on the response iotag. 2348 * This could be the case on the ELS ring because of 2349 * lpfc_els_abort(). 2350 */ 2351 if (pring->ringno != LPFC_ELS_RING) { 2352 /* 2353 * Ring <ringno> handler: unexpected completion IoTag 2354 * <IoTag> 2355 */ 2356 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2357 "0322 Ring %d handler: " 2358 "unexpected completion IoTag x%x " 2359 "Data: x%x x%x x%x x%x\n", 2360 pring->ringno, 2361 saveq->iocb.ulpIoTag, 2362 saveq->iocb.ulpStatus, 2363 saveq->iocb.un.ulpWord[4], 2364 saveq->iocb.ulpCommand, 2365 saveq->iocb.ulpContext); 2366 } 2367 } 2368 2369 return rc; 2370 } 2371 2372 /** 2373 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler 2374 * @phba: Pointer to HBA context object. 2375 * @pring: Pointer to driver SLI ring object. 2376 * 2377 * This function is called from the iocb ring event handlers when 2378 * put pointer is ahead of the get pointer for a ring. This function signal 2379 * an error attention condition to the worker thread and the worker 2380 * thread will transition the HBA to offline state. 2381 **/ 2382 static void 2383 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 2384 { 2385 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 2386 /* 2387 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 2388 * rsp ring <portRspMax> 2389 */ 2390 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2391 "0312 Ring %d handler: portRspPut %d " 2392 "is bigger than rsp ring %d\n", 2393 pring->ringno, le32_to_cpu(pgp->rspPutInx), 2394 pring->numRiocb); 2395 2396 phba->link_state = LPFC_HBA_ERROR; 2397 2398 /* 2399 * All error attention handlers are posted to 2400 * worker thread 2401 */ 2402 phba->work_ha |= HA_ERATT; 2403 phba->work_hs = HS_FFER3; 2404 2405 lpfc_worker_wake_up(phba); 2406 2407 return; 2408 } 2409 2410 /** 2411 * lpfc_poll_eratt - Error attention polling timer timeout handler 2412 * @ptr: Pointer to address of HBA context object. 2413 * 2414 * This function is invoked by the Error Attention polling timer when the 2415 * timer times out. It will check the SLI Error Attention register for 2416 * possible attention events. If so, it will post an Error Attention event 2417 * and wake up worker thread to process it. 
Otherwise, it will set up the 2418 * Error Attention polling timer for the next poll. 2419 **/ 2420 void lpfc_poll_eratt(unsigned long ptr) 2421 { 2422 struct lpfc_hba *phba; 2423 uint32_t eratt = 0; 2424 2425 phba = (struct lpfc_hba *)ptr; 2426 2427 /* Check chip HA register for error event */ 2428 eratt = lpfc_sli_check_eratt(phba); 2429 2430 if (eratt) 2431 /* Tell the worker thread there is work to do */ 2432 lpfc_worker_wake_up(phba); 2433 else 2434 /* Restart the timer for next eratt poll */ 2435 mod_timer(&phba->eratt_poll, jiffies + 2436 HZ * LPFC_ERATT_POLL_INTERVAL); 2437 return; 2438 } 2439 2440 2441 /** 2442 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring 2443 * @phba: Pointer to HBA context object. 2444 * @pring: Pointer to driver SLI ring object. 2445 * @mask: Host attention register mask for this ring. 2446 * 2447 * This function is called from the interrupt context when there is a ring 2448 * event for the fcp ring. The caller does not hold any lock. 2449 * The function processes each response iocb in the response ring until it 2450 * finds an iocb with LE bit set and chains all the iocbs upto the iocb with 2451 * LE bit set. The function will call the completion handler of the command iocb 2452 * if the response iocb indicates a completion for a command iocb or it is 2453 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 2454 * function if this is an unsolicited iocb. 2455 * This routine presumes LPFC_FCP_RING handling and doesn't bother 2456 * to check it explicitly. 2457 */ 2458 int 2459 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 2460 struct lpfc_sli_ring *pring, uint32_t mask) 2461 { 2462 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 2463 IOCB_t *irsp = NULL; 2464 IOCB_t *entry = NULL; 2465 struct lpfc_iocbq *cmdiocbq = NULL; 2466 struct lpfc_iocbq rspiocbq; 2467 uint32_t status; 2468 uint32_t portRspPut, portRspMax; 2469 int rc = 1; 2470 lpfc_iocb_type type; 2471 unsigned long iflag; 2472 uint32_t rsp_cmpl = 0; 2473 2474 spin_lock_irqsave(&phba->hbalock, iflag); 2475 pring->stats.iocb_event++; 2476 2477 /* 2478 * The next available response entry should never exceed the maximum 2479 * entries. If it does, treat it as an adapter hardware error. 2480 */ 2481 portRspMax = pring->numRiocb; 2482 portRspPut = le32_to_cpu(pgp->rspPutInx); 2483 if (unlikely(portRspPut >= portRspMax)) { 2484 lpfc_sli_rsp_pointers_error(phba, pring); 2485 spin_unlock_irqrestore(&phba->hbalock, iflag); 2486 return 1; 2487 } 2488 if (phba->fcp_ring_in_use) { 2489 spin_unlock_irqrestore(&phba->hbalock, iflag); 2490 return 1; 2491 } else 2492 phba->fcp_ring_in_use = 1; 2493 2494 rmb(); 2495 while (pring->rspidx != portRspPut) { 2496 /* 2497 * Fetch an entry off the ring and copy it into a local data 2498 * structure. The copy involves a byte-swap since the 2499 * network byte order and pci byte orders are different. 2500 */ 2501 entry = lpfc_resp_iocb(phba, pring); 2502 phba->last_completion_time = jiffies; 2503 2504 if (++pring->rspidx >= portRspMax) 2505 pring->rspidx = 0; 2506 2507 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 2508 (uint32_t *) &rspiocbq.iocb, 2509 phba->iocb_rsp_size); 2510 INIT_LIST_HEAD(&(rspiocbq.list)); 2511 irsp = &rspiocbq.iocb; 2512 2513 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 2514 pring->stats.iocb_rsp++; 2515 rsp_cmpl++; 2516 2517 if (unlikely(irsp->ulpStatus)) { 2518 /* 2519 * If resource errors reported from HBA, reduce 2520 * queuedepths of the SCSI device. 
2521 */ 2522 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 2523 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 2524 spin_unlock_irqrestore(&phba->hbalock, iflag); 2525 phba->lpfc_rampdown_queue_depth(phba); 2526 spin_lock_irqsave(&phba->hbalock, iflag); 2527 } 2528 2529 /* Rsp ring <ringno> error: IOCB */ 2530 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2531 "0336 Rsp Ring %d error: IOCB Data: " 2532 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 2533 pring->ringno, 2534 irsp->un.ulpWord[0], 2535 irsp->un.ulpWord[1], 2536 irsp->un.ulpWord[2], 2537 irsp->un.ulpWord[3], 2538 irsp->un.ulpWord[4], 2539 irsp->un.ulpWord[5], 2540 *(uint32_t *)&irsp->un1, 2541 *((uint32_t *)&irsp->un1 + 1)); 2542 } 2543 2544 switch (type) { 2545 case LPFC_ABORT_IOCB: 2546 case LPFC_SOL_IOCB: 2547 /* 2548 * Idle exchange closed via ABTS from port. No iocb 2549 * resources need to be recovered. 2550 */ 2551 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 2552 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 2553 "0333 IOCB cmd 0x%x" 2554 " processed. Skipping" 2555 " completion\n", 2556 irsp->ulpCommand); 2557 break; 2558 } 2559 2560 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 2561 &rspiocbq); 2562 if (unlikely(!cmdiocbq)) 2563 break; 2564 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) 2565 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 2566 if (cmdiocbq->iocb_cmpl) { 2567 spin_unlock_irqrestore(&phba->hbalock, iflag); 2568 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 2569 &rspiocbq); 2570 spin_lock_irqsave(&phba->hbalock, iflag); 2571 } 2572 break; 2573 case LPFC_UNSOL_IOCB: 2574 spin_unlock_irqrestore(&phba->hbalock, iflag); 2575 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 2576 spin_lock_irqsave(&phba->hbalock, iflag); 2577 break; 2578 default: 2579 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 2580 char adaptermsg[LPFC_MAX_ADPTMSG]; 2581 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 2582 memcpy(&adaptermsg[0], (uint8_t *) irsp, 2583 MAX_MSG_DATA); 2584 dev_warn(&((phba->pcidev)->dev), 2585 "lpfc%d: %s\n", 2586 phba->brd_no, adaptermsg); 2587 } else { 2588 /* Unknown IOCB command */ 2589 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2590 "0334 Unknown IOCB command " 2591 "Data: x%x, x%x x%x x%x x%x\n", 2592 type, irsp->ulpCommand, 2593 irsp->ulpStatus, 2594 irsp->ulpIoTag, 2595 irsp->ulpContext); 2596 } 2597 break; 2598 } 2599 2600 /* 2601 * The response IOCB has been processed. Update the ring 2602 * pointer in SLIM. If the port response put pointer has not 2603 * been updated, sync the pgp->rspPutInx and fetch the new port 2604 * response put pointer. 
2605 */ 2606 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 2607 2608 if (pring->rspidx == portRspPut) 2609 portRspPut = le32_to_cpu(pgp->rspPutInx); 2610 } 2611 2612 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) { 2613 pring->stats.iocb_rsp_full++; 2614 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 2615 writel(status, phba->CAregaddr); 2616 readl(phba->CAregaddr); 2617 } 2618 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 2619 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 2620 pring->stats.iocb_cmd_empty++; 2621 2622 /* Force update of the local copy of cmdGetInx */ 2623 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 2624 lpfc_sli_resume_iocb(phba, pring); 2625 2626 if ((pring->lpfc_sli_cmd_available)) 2627 (pring->lpfc_sli_cmd_available) (phba, pring); 2628 2629 } 2630 2631 phba->fcp_ring_in_use = 0; 2632 spin_unlock_irqrestore(&phba->hbalock, iflag); 2633 return rc; 2634 } 2635 2636 /** 2637 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb 2638 * @phba: Pointer to HBA context object. 2639 * @pring: Pointer to driver SLI ring object. 2640 * @rspiocbp: Pointer to driver response IOCB object. 2641 * 2642 * This function is called from the worker thread when there is a slow-path 2643 * response IOCB to process. This function chains all the response iocbs until 2644 * seeing the iocb with the LE bit set. The function will call 2645 * lpfc_sli_process_sol_iocb function if the response iocb indicates a 2646 * completion of a command iocb. The function will call the 2647 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. 2648 * The function frees the resources or calls the completion handler if this 2649 * iocb is an abort completion. The function returns NULL when the response 2650 * iocb has the LE bit set and all the chained iocbs are processed, otherwise 2651 * this function shall chain the iocb on to the iocb_continueq and return the 2652 * response iocb passed in. 2653 **/ 2654 static struct lpfc_iocbq * 2655 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2656 struct lpfc_iocbq *rspiocbp) 2657 { 2658 struct lpfc_iocbq *saveq; 2659 struct lpfc_iocbq *cmdiocbp; 2660 struct lpfc_iocbq *next_iocb; 2661 IOCB_t *irsp = NULL; 2662 uint32_t free_saveq; 2663 uint8_t iocb_cmd_type; 2664 lpfc_iocb_type type; 2665 unsigned long iflag; 2666 int rc; 2667 2668 spin_lock_irqsave(&phba->hbalock, iflag); 2669 /* First add the response iocb to the countinueq list */ 2670 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 2671 pring->iocb_continueq_cnt++; 2672 2673 /* Now, determine whetehr the list is completed for processing */ 2674 irsp = &rspiocbp->iocb; 2675 if (irsp->ulpLe) { 2676 /* 2677 * By default, the driver expects to free all resources 2678 * associated with this iocb completion. 2679 */ 2680 free_saveq = 1; 2681 saveq = list_get_first(&pring->iocb_continueq, 2682 struct lpfc_iocbq, list); 2683 irsp = &(saveq->iocb); 2684 list_del_init(&pring->iocb_continueq); 2685 pring->iocb_continueq_cnt = 0; 2686 2687 pring->stats.iocb_rsp++; 2688 2689 /* 2690 * If resource errors reported from HBA, reduce 2691 * queuedepths of the SCSI device. 
2692 */ 2693 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 2694 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 2695 spin_unlock_irqrestore(&phba->hbalock, iflag); 2696 phba->lpfc_rampdown_queue_depth(phba); 2697 spin_lock_irqsave(&phba->hbalock, iflag); 2698 } 2699 2700 if (irsp->ulpStatus) { 2701 /* Rsp ring <ringno> error: IOCB */ 2702 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2703 "0328 Rsp Ring %d error: " 2704 "IOCB Data: " 2705 "x%x x%x x%x x%x " 2706 "x%x x%x x%x x%x " 2707 "x%x x%x x%x x%x " 2708 "x%x x%x x%x x%x\n", 2709 pring->ringno, 2710 irsp->un.ulpWord[0], 2711 irsp->un.ulpWord[1], 2712 irsp->un.ulpWord[2], 2713 irsp->un.ulpWord[3], 2714 irsp->un.ulpWord[4], 2715 irsp->un.ulpWord[5], 2716 *(((uint32_t *) irsp) + 6), 2717 *(((uint32_t *) irsp) + 7), 2718 *(((uint32_t *) irsp) + 8), 2719 *(((uint32_t *) irsp) + 9), 2720 *(((uint32_t *) irsp) + 10), 2721 *(((uint32_t *) irsp) + 11), 2722 *(((uint32_t *) irsp) + 12), 2723 *(((uint32_t *) irsp) + 13), 2724 *(((uint32_t *) irsp) + 14), 2725 *(((uint32_t *) irsp) + 15)); 2726 } 2727 2728 /* 2729 * Fetch the IOCB command type and call the correct completion 2730 * routine. Solicited and Unsolicited IOCBs on the ELS ring 2731 * get freed back to the lpfc_iocb_list by the discovery 2732 * kernel thread. 2733 */ 2734 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 2735 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 2736 switch (type) { 2737 case LPFC_SOL_IOCB: 2738 spin_unlock_irqrestore(&phba->hbalock, iflag); 2739 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); 2740 spin_lock_irqsave(&phba->hbalock, iflag); 2741 break; 2742 2743 case LPFC_UNSOL_IOCB: 2744 spin_unlock_irqrestore(&phba->hbalock, iflag); 2745 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); 2746 spin_lock_irqsave(&phba->hbalock, iflag); 2747 if (!rc) 2748 free_saveq = 0; 2749 break; 2750 2751 case LPFC_ABORT_IOCB: 2752 cmdiocbp = NULL; 2753 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) 2754 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, 2755 saveq); 2756 if (cmdiocbp) { 2757 /* Call the specified completion routine */ 2758 if (cmdiocbp->iocb_cmpl) { 2759 spin_unlock_irqrestore(&phba->hbalock, 2760 iflag); 2761 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, 2762 saveq); 2763 spin_lock_irqsave(&phba->hbalock, 2764 iflag); 2765 } else 2766 __lpfc_sli_release_iocbq(phba, 2767 cmdiocbp); 2768 } 2769 break; 2770 2771 case LPFC_UNKNOWN_IOCB: 2772 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 2773 char adaptermsg[LPFC_MAX_ADPTMSG]; 2774 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 2775 memcpy(&adaptermsg[0], (uint8_t *)irsp, 2776 MAX_MSG_DATA); 2777 dev_warn(&((phba->pcidev)->dev), 2778 "lpfc%d: %s\n", 2779 phba->brd_no, adaptermsg); 2780 } else { 2781 /* Unknown IOCB command */ 2782 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2783 "0335 Unknown IOCB " 2784 "command Data: x%x " 2785 "x%x x%x x%x\n", 2786 irsp->ulpCommand, 2787 irsp->ulpStatus, 2788 irsp->ulpIoTag, 2789 irsp->ulpContext); 2790 } 2791 break; 2792 } 2793 2794 if (free_saveq) { 2795 list_for_each_entry_safe(rspiocbp, next_iocb, 2796 &saveq->list, list) { 2797 list_del(&rspiocbp->list); 2798 __lpfc_sli_release_iocbq(phba, rspiocbp); 2799 } 2800 __lpfc_sli_release_iocbq(phba, saveq); 2801 } 2802 rspiocbp = NULL; 2803 } 2804 spin_unlock_irqrestore(&phba->hbalock, iflag); 2805 return rspiocbp; 2806 } 2807 2808 /** 2809 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs 2810 * @phba: Pointer to HBA context object. 2811 * @pring: Pointer to driver SLI ring object. 
2812 * @mask: Host attention register mask for this ring. 2813 * 2814 * This routine wraps the actual slow_ring event process routine from the 2815 * API jump table function pointer from the lpfc_hba struct. 2816 **/ 2817 void 2818 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 2819 struct lpfc_sli_ring *pring, uint32_t mask) 2820 { 2821 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); 2822 } 2823 2824 /** 2825 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings 2826 * @phba: Pointer to HBA context object. 2827 * @pring: Pointer to driver SLI ring object. 2828 * @mask: Host attention register mask for this ring. 2829 * 2830 * This function is called from the worker thread when there is a ring event 2831 * for non-fcp rings. The caller does not hold any lock. The function will 2832 * remove each response iocb in the response ring and calls the handle 2833 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 2834 **/ 2835 static void 2836 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, 2837 struct lpfc_sli_ring *pring, uint32_t mask) 2838 { 2839 struct lpfc_pgp *pgp; 2840 IOCB_t *entry; 2841 IOCB_t *irsp = NULL; 2842 struct lpfc_iocbq *rspiocbp = NULL; 2843 uint32_t portRspPut, portRspMax; 2844 unsigned long iflag; 2845 uint32_t status; 2846 2847 pgp = &phba->port_gp[pring->ringno]; 2848 spin_lock_irqsave(&phba->hbalock, iflag); 2849 pring->stats.iocb_event++; 2850 2851 /* 2852 * The next available response entry should never exceed the maximum 2853 * entries. If it does, treat it as an adapter hardware error. 2854 */ 2855 portRspMax = pring->numRiocb; 2856 portRspPut = le32_to_cpu(pgp->rspPutInx); 2857 if (portRspPut >= portRspMax) { 2858 /* 2859 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 2860 * rsp ring <portRspMax> 2861 */ 2862 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2863 "0303 Ring %d handler: portRspPut %d " 2864 "is bigger than rsp ring %d\n", 2865 pring->ringno, portRspPut, portRspMax); 2866 2867 phba->link_state = LPFC_HBA_ERROR; 2868 spin_unlock_irqrestore(&phba->hbalock, iflag); 2869 2870 phba->work_hs = HS_FFER3; 2871 lpfc_handle_eratt(phba); 2872 2873 return; 2874 } 2875 2876 rmb(); 2877 while (pring->rspidx != portRspPut) { 2878 /* 2879 * Build a completion list and call the appropriate handler. 2880 * The process is to get the next available response iocb, get 2881 * a free iocb from the list, copy the response data into the 2882 * free iocb, insert to the continuation list, and update the 2883 * next response index to slim. This process makes response 2884 * iocb's in the ring available to DMA as fast as possible but 2885 * pays a penalty for a copy operation. Since the iocb is 2886 * only 32 bytes, this penalty is considered small relative to 2887 * the PCI reads for register values and a slim write. When 2888 * the ulpLe field is set, the entire Command has been 2889 * received. 2890 */ 2891 entry = lpfc_resp_iocb(phba, pring); 2892 2893 phba->last_completion_time = jiffies; 2894 rspiocbp = __lpfc_sli_get_iocbq(phba); 2895 if (rspiocbp == NULL) { 2896 printk(KERN_ERR "%s: out of buffers! 
Failing " 2897 "completion.\n", __func__); 2898 break; 2899 } 2900 2901 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, 2902 phba->iocb_rsp_size); 2903 irsp = &rspiocbp->iocb; 2904 2905 if (++pring->rspidx >= portRspMax) 2906 pring->rspidx = 0; 2907 2908 if (pring->ringno == LPFC_ELS_RING) { 2909 lpfc_debugfs_slow_ring_trc(phba, 2910 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x", 2911 *(((uint32_t *) irsp) + 4), 2912 *(((uint32_t *) irsp) + 6), 2913 *(((uint32_t *) irsp) + 7)); 2914 } 2915 2916 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 2917 2918 spin_unlock_irqrestore(&phba->hbalock, iflag); 2919 /* Handle the response IOCB */ 2920 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); 2921 spin_lock_irqsave(&phba->hbalock, iflag); 2922 2923 /* 2924 * If the port response put pointer has not been updated, sync 2925 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port 2926 * response put pointer. 2927 */ 2928 if (pring->rspidx == portRspPut) { 2929 portRspPut = le32_to_cpu(pgp->rspPutInx); 2930 } 2931 } /* while (pring->rspidx != portRspPut) */ 2932 2933 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { 2934 /* At least one response entry has been freed */ 2935 pring->stats.iocb_rsp_full++; 2936 /* SET RxRE_RSP in Chip Att register */ 2937 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 2938 writel(status, phba->CAregaddr); 2939 readl(phba->CAregaddr); /* flush */ 2940 } 2941 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 2942 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 2943 pring->stats.iocb_cmd_empty++; 2944 2945 /* Force update of the local copy of cmdGetInx */ 2946 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 2947 lpfc_sli_resume_iocb(phba, pring); 2948 2949 if ((pring->lpfc_sli_cmd_available)) 2950 (pring->lpfc_sli_cmd_available) (phba, pring); 2951 2952 } 2953 2954 spin_unlock_irqrestore(&phba->hbalock, iflag); 2955 return; 2956 } 2957 2958 /** 2959 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events 2960 * @phba: Pointer to HBA context object. 2961 * @pring: Pointer to driver SLI ring object. 2962 * @mask: Host attention register mask for this ring. 2963 * 2964 * This function is called from the worker thread when there is a pending 2965 * ELS response iocb on the driver internal slow-path response iocb worker 2966 * queue. The caller does not hold any lock. The function will remove each 2967 * response iocb from the response worker queue and calls the handle 2968 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 
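 *
 * This routine is not called directly; a hedged sketch of the jump-table
 * wiring assumed to be done during per-revision API setup, after which the
 * lpfc_sli_handle_slow_ring_event() wrapper above dispatches here:
 *
 *	phba->lpfc_sli_handle_slow_ring_event =
 *			lpfc_sli_handle_slow_ring_event_s4;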
2969 **/ 2970 static void 2971 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, 2972 struct lpfc_sli_ring *pring, uint32_t mask) 2973 { 2974 struct lpfc_iocbq *irspiocbq; 2975 struct hbq_dmabuf *dmabuf; 2976 struct lpfc_cq_event *cq_event; 2977 unsigned long iflag; 2978 2979 spin_lock_irqsave(&phba->hbalock, iflag); 2980 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 2981 spin_unlock_irqrestore(&phba->hbalock, iflag); 2982 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 2983 /* Get the response iocb from the head of work queue */ 2984 spin_lock_irqsave(&phba->hbalock, iflag); 2985 list_remove_head(&phba->sli4_hba.sp_queue_event, 2986 cq_event, struct lpfc_cq_event, list); 2987 spin_unlock_irqrestore(&phba->hbalock, iflag); 2988 2989 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 2990 case CQE_CODE_COMPL_WQE: 2991 irspiocbq = container_of(cq_event, struct lpfc_iocbq, 2992 cq_event); 2993 /* Translate ELS WCQE to response IOCBQ */ 2994 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, 2995 irspiocbq); 2996 if (irspiocbq) 2997 lpfc_sli_sp_handle_rspiocb(phba, pring, 2998 irspiocbq); 2999 break; 3000 case CQE_CODE_RECEIVE: 3001 dmabuf = container_of(cq_event, struct hbq_dmabuf, 3002 cq_event); 3003 lpfc_sli4_handle_received_buffer(phba, dmabuf); 3004 break; 3005 default: 3006 break; 3007 } 3008 } 3009 } 3010 3011 /** 3012 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring 3013 * @phba: Pointer to HBA context object. 3014 * @pring: Pointer to driver SLI ring object. 3015 * 3016 * This function aborts all iocbs in the given ring and frees all the iocb 3017 * objects in txq. This function issues an abort iocb for all the iocb commands 3018 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3019 * the return of this function. The caller is not required to hold any locks. 3020 **/ 3021 void 3022 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3023 { 3024 LIST_HEAD(completions); 3025 struct lpfc_iocbq *iocb, *next_iocb; 3026 3027 if (pring->ringno == LPFC_ELS_RING) { 3028 lpfc_fabric_abort_hba(phba); 3029 } 3030 3031 /* Error everything on txq and txcmplq 3032 * First do the txq. 3033 */ 3034 spin_lock_irq(&phba->hbalock); 3035 list_splice_init(&pring->txq, &completions); 3036 pring->txq_cnt = 0; 3037 3038 /* Next issue ABTS for everything on the txcmplq */ 3039 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3040 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3041 3042 spin_unlock_irq(&phba->hbalock); 3043 3044 /* Cancel all the IOCBs from the completions list */ 3045 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 3046 IOERR_SLI_ABORTED); 3047 } 3048 3049 /** 3050 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring 3051 * @phba: Pointer to HBA context object. 3052 * 3053 * This function flushes all iocbs in the fcp ring and frees all the iocb 3054 * objects in txq and txcmplq. This function will not issue abort iocbs 3055 * for all the iocb commands in txcmplq, they will just be returned with 3056 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI 3057 * slot has been permanently disabled. 
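 *
 * Hedged sketch of the intended call site; the surrounding error marking
 * is an assumption for illustration only:
 *
 *	phba->link_state = LPFC_HBA_ERROR;
 *	lpfc_sli_flush_fcp_rings(phba);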
3058 **/
3059 void
3060 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3061 {
3062 LIST_HEAD(txq);
3063 LIST_HEAD(txcmplq);
3064 struct lpfc_sli *psli = &phba->sli;
3065 struct lpfc_sli_ring *pring;
3066
3067 /* Currently, only one fcp ring */
3068 pring = &psli->ring[psli->fcp_ring];
3069
3070 spin_lock_irq(&phba->hbalock);
3071 /* Retrieve everything on txq */
3072 list_splice_init(&pring->txq, &txq);
3073 pring->txq_cnt = 0;
3074
3075 /* Retrieve everything on the txcmplq */
3076 list_splice_init(&pring->txcmplq, &txcmplq);
3077 pring->txcmplq_cnt = 0;
3078 spin_unlock_irq(&phba->hbalock);
3079
3080 /* Flush the txq */
3081 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3082 IOERR_SLI_DOWN);
3083
3084 /* Flush the txcmplq */
3085 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
3086 IOERR_SLI_DOWN);
3087 }
3088
3089 /**
3090 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
3091 * @phba: Pointer to HBA context object.
3092 * @mask: Bit mask to be checked.
3093 *
3094 * This function reads the host status register and compares it
3095 * with the provided bit mask to check if the HBA completed
3096 * the restart. This function will wait in a loop for the
3097 * HBA to complete the restart. If the HBA does not restart within
3098 * 15 iterations, the function will reset the HBA again. The
3099 * function returns 1 if the HBA fails to restart; otherwise it
3100 * returns zero.
3101 **/
3102 static int
3103 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
3104 {
3105 uint32_t status;
3106 int i = 0;
3107 int retval = 0;
3108
3109 /* Read the HBA Host Status Register */
3110 status = readl(phba->HSregaddr);
3111
3112 /*
3113 * Check status register every 100ms for 5 retries, then every
3114 * 500ms for 5, then every 2.5 sec for 5, then reset the board and
3115 * every 2.5 sec for 4.
3116 * Break out of the loop if errors occurred during init.
3117 */
3118 while (((status & mask) != mask) &&
3119 !(status & HS_FFERM) &&
3120 i++ < 20) {
3121
3122 if (i <= 5)
3123 msleep(10);
3124 else if (i <= 10)
3125 msleep(500);
3126 else
3127 msleep(2500);
3128
3129 if (i == 15) {
3130 /* Do post */
3131 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3132 lpfc_sli_brdrestart(phba);
3133 }
3134 /* Read the HBA Host Status Register */
3135 status = readl(phba->HSregaddr);
3136 }
3137
3138 /* Check to see if any errors occurred during init */
3139 if ((status & HS_FFERM) || (i >= 20)) {
3140 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3141 "2751 Adapter failed to restart, "
3142 "status reg x%x, FW Data: A8 x%x AC x%x\n",
3143 status,
3144 readl(phba->MBslimaddr + 0xa8),
3145 readl(phba->MBslimaddr + 0xac));
3146 phba->link_state = LPFC_HBA_ERROR;
3147 retval = 1;
3148 }
3149
3150 return retval;
3151 }
3152
3153 /**
3154 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3155 * @phba: Pointer to HBA context object.
3156 * @mask: Bit mask to be checked.
3157 *
3158 * This function checks the host status register to see if the HBA is
3159 * ready. This function will wait in a loop for the HBA to become ready.
3160 * If the HBA is not ready, the function will reset the HBA PCI
3161 * function again. The function returns 1 if the HBA fails to become
3162 * ready; otherwise it returns zero.
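 *
 * Illustrative readiness poll through the SLI-revision wrapper; the mask
 * shown is an assumption borrowed from the SLI3 ready bits:
 *
 *	lpfc_sli_brdrestart(phba);
 *	if (lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY))
 *		return -EIO;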
3163 **/ 3164 static int 3165 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask) 3166 { 3167 uint32_t status; 3168 int retval = 0; 3169 3170 /* Read the HBA Host Status Register */ 3171 status = lpfc_sli4_post_status_check(phba); 3172 3173 if (status) { 3174 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3175 lpfc_sli_brdrestart(phba); 3176 status = lpfc_sli4_post_status_check(phba); 3177 } 3178 3179 /* Check to see if any errors occurred during init */ 3180 if (status) { 3181 phba->link_state = LPFC_HBA_ERROR; 3182 retval = 1; 3183 } else 3184 phba->sli4_hba.intr_enable = 0; 3185 3186 return retval; 3187 } 3188 3189 /** 3190 * lpfc_sli_brdready - Wrapper func for checking the hba readyness 3191 * @phba: Pointer to HBA context object. 3192 * @mask: Bit mask to be checked. 3193 * 3194 * This routine wraps the actual SLI3 or SLI4 hba readyness check routine 3195 * from the API jump table function pointer from the lpfc_hba struct. 3196 **/ 3197 int 3198 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) 3199 { 3200 return phba->lpfc_sli_brdready(phba, mask); 3201 } 3202 3203 #define BARRIER_TEST_PATTERN (0xdeadbeef) 3204 3205 /** 3206 * lpfc_reset_barrier - Make HBA ready for HBA reset 3207 * @phba: Pointer to HBA context object. 3208 * 3209 * This function is called before resetting an HBA. This 3210 * function requests HBA to quiesce DMAs before a reset. 3211 **/ 3212 void lpfc_reset_barrier(struct lpfc_hba *phba) 3213 { 3214 uint32_t __iomem *resp_buf; 3215 uint32_t __iomem *mbox_buf; 3216 volatile uint32_t mbox; 3217 uint32_t hc_copy; 3218 int i; 3219 uint8_t hdrtype; 3220 3221 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); 3222 if (hdrtype != 0x80 || 3223 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID && 3224 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID)) 3225 return; 3226 3227 /* 3228 * Tell the other part of the chip to suspend temporarily all 3229 * its DMA activity. 
3230 */ 3231 resp_buf = phba->MBslimaddr; 3232 3233 /* Disable the error attention */ 3234 hc_copy = readl(phba->HCregaddr); 3235 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 3236 readl(phba->HCregaddr); /* flush */ 3237 phba->link_flag |= LS_IGNORE_ERATT; 3238 3239 if (readl(phba->HAregaddr) & HA_ERATT) { 3240 /* Clear Chip error bit */ 3241 writel(HA_ERATT, phba->HAregaddr); 3242 phba->pport->stopped = 1; 3243 } 3244 3245 mbox = 0; 3246 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 3247 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 3248 3249 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 3250 mbox_buf = phba->MBslimaddr; 3251 writel(mbox, mbox_buf); 3252 3253 for (i = 0; 3254 readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++) 3255 mdelay(1); 3256 3257 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { 3258 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || 3259 phba->pport->stopped) 3260 goto restore_hc; 3261 else 3262 goto clear_errat; 3263 } 3264 3265 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 3266 for (i = 0; readl(resp_buf) != mbox && i < 500; i++) 3267 mdelay(1); 3268 3269 clear_errat: 3270 3271 while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500) 3272 mdelay(1); 3273 3274 if (readl(phba->HAregaddr) & HA_ERATT) { 3275 writel(HA_ERATT, phba->HAregaddr); 3276 phba->pport->stopped = 1; 3277 } 3278 3279 restore_hc: 3280 phba->link_flag &= ~LS_IGNORE_ERATT; 3281 writel(hc_copy, phba->HCregaddr); 3282 readl(phba->HCregaddr); /* flush */ 3283 } 3284 3285 /** 3286 * lpfc_sli_brdkill - Issue a kill_board mailbox command 3287 * @phba: Pointer to HBA context object. 3288 * 3289 * This function issues a kill_board mailbox command and waits for 3290 * the error attention interrupt. This function is called for stopping 3291 * the firmware processing. The caller is not required to hold any 3292 * locks. This function calls lpfc_hba_down_post function to free 3293 * any pending commands after the kill. The function will return 1 when it 3294 * fails to kill the board else will return 0. 
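 *
 * Hedged invocation sketch (for instance from a management-initiated
 * error path; the error return value is an assumption):
 *
 *	if (lpfc_sli_brdkill(phba))
 *		return -EIO;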
3295 **/ 3296 int 3297 lpfc_sli_brdkill(struct lpfc_hba *phba) 3298 { 3299 struct lpfc_sli *psli; 3300 LPFC_MBOXQ_t *pmb; 3301 uint32_t status; 3302 uint32_t ha_copy; 3303 int retval; 3304 int i = 0; 3305 3306 psli = &phba->sli; 3307 3308 /* Kill HBA */ 3309 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3310 "0329 Kill HBA Data: x%x x%x\n", 3311 phba->pport->port_state, psli->sli_flag); 3312 3313 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3314 if (!pmb) 3315 return 1; 3316 3317 /* Disable the error attention */ 3318 spin_lock_irq(&phba->hbalock); 3319 status = readl(phba->HCregaddr); 3320 status &= ~HC_ERINT_ENA; 3321 writel(status, phba->HCregaddr); 3322 readl(phba->HCregaddr); /* flush */ 3323 phba->link_flag |= LS_IGNORE_ERATT; 3324 spin_unlock_irq(&phba->hbalock); 3325 3326 lpfc_kill_board(phba, pmb); 3327 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3328 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 3329 3330 if (retval != MBX_SUCCESS) { 3331 if (retval != MBX_BUSY) 3332 mempool_free(pmb, phba->mbox_mem_pool); 3333 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3334 "2752 KILL_BOARD command failed retval %d\n", 3335 retval); 3336 spin_lock_irq(&phba->hbalock); 3337 phba->link_flag &= ~LS_IGNORE_ERATT; 3338 spin_unlock_irq(&phba->hbalock); 3339 return 1; 3340 } 3341 3342 spin_lock_irq(&phba->hbalock); 3343 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 3344 spin_unlock_irq(&phba->hbalock); 3345 3346 mempool_free(pmb, phba->mbox_mem_pool); 3347 3348 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 3349 * attention every 100ms for 3 seconds. If we don't get ERATT after 3350 * 3 seconds we still set HBA_ERROR state because the status of the 3351 * board is now undefined. 3352 */ 3353 ha_copy = readl(phba->HAregaddr); 3354 3355 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 3356 mdelay(100); 3357 ha_copy = readl(phba->HAregaddr); 3358 } 3359 3360 del_timer_sync(&psli->mbox_tmo); 3361 if (ha_copy & HA_ERATT) { 3362 writel(HA_ERATT, phba->HAregaddr); 3363 phba->pport->stopped = 1; 3364 } 3365 spin_lock_irq(&phba->hbalock); 3366 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3367 psli->mbox_active = NULL; 3368 phba->link_flag &= ~LS_IGNORE_ERATT; 3369 spin_unlock_irq(&phba->hbalock); 3370 3371 lpfc_hba_down_post(phba); 3372 phba->link_state = LPFC_HBA_ERROR; 3373 3374 return ha_copy & HA_ERATT ? 0 : 1; 3375 } 3376 3377 /** 3378 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA 3379 * @phba: Pointer to HBA context object. 3380 * 3381 * This function resets the HBA by writing HC_INITFF to the control 3382 * register. After the HBA resets, this function resets all the iocb ring 3383 * indices. This function disables PCI layer parity checking during 3384 * the reset. 3385 * This function returns 0 always. 3386 * The caller is not required to hold any locks. 
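 *
 * Condensed invocation sketch, mirroring lpfc_sli_brdrestart_s3() later
 * in this file (the MBX_RESTART word is written to SLIM between the two
 * calls shown):
 *
 *	lpfc_reset_barrier(phba);
 *	lpfc_sli_brdreset(phba);
 *	phba->pport->stopped = 0;
 *	phba->link_state = LPFC_INIT_START;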
3387 **/ 3388 int 3389 lpfc_sli_brdreset(struct lpfc_hba *phba) 3390 { 3391 struct lpfc_sli *psli; 3392 struct lpfc_sli_ring *pring; 3393 uint16_t cfg_value; 3394 int i; 3395 3396 psli = &phba->sli; 3397 3398 /* Reset HBA */ 3399 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3400 "0325 Reset HBA Data: x%x x%x\n", 3401 phba->pport->port_state, psli->sli_flag); 3402 3403 /* perform board reset */ 3404 phba->fc_eventTag = 0; 3405 phba->link_events = 0; 3406 phba->pport->fc_myDID = 0; 3407 phba->pport->fc_prevDID = 0; 3408 3409 /* Turn off parity checking and serr during the physical reset */ 3410 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 3411 pci_write_config_word(phba->pcidev, PCI_COMMAND, 3412 (cfg_value & 3413 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3414 3415 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA); 3416 3417 /* Now toggle INITFF bit in the Host Control Register */ 3418 writel(HC_INITFF, phba->HCregaddr); 3419 mdelay(1); 3420 readl(phba->HCregaddr); /* flush */ 3421 writel(0, phba->HCregaddr); 3422 readl(phba->HCregaddr); /* flush */ 3423 3424 /* Restore PCI cmd register */ 3425 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 3426 3427 /* Initialize relevant SLI info */ 3428 for (i = 0; i < psli->num_rings; i++) { 3429 pring = &psli->ring[i]; 3430 pring->flag = 0; 3431 pring->rspidx = 0; 3432 pring->next_cmdidx = 0; 3433 pring->local_getidx = 0; 3434 pring->cmdidx = 0; 3435 pring->missbufcnt = 0; 3436 } 3437 3438 phba->link_state = LPFC_WARM_START; 3439 return 0; 3440 } 3441 3442 /** 3443 * lpfc_sli4_brdreset - Reset a sli-4 HBA 3444 * @phba: Pointer to HBA context object. 3445 * 3446 * This function resets a SLI4 HBA. This function disables PCI layer parity 3447 * checking during resets the device. The caller is not required to hold 3448 * any locks. 3449 * 3450 * This function returns 0 always. 
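 *
 * Invocation sketch, mirroring lpfc_sli_brdrestart_s4() later in this
 * file:
 *
 *	lpfc_sli4_brdreset(phba);
 *	spin_lock_irq(&phba->hbalock);
 *	phba->pport->stopped = 0;
 *	phba->link_state = LPFC_INIT_START;
 *	phba->hba_flag = 0;
 *	spin_unlock_irq(&phba->hbalock);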
3451 **/ 3452 int 3453 lpfc_sli4_brdreset(struct lpfc_hba *phba) 3454 { 3455 struct lpfc_sli *psli = &phba->sli; 3456 uint16_t cfg_value; 3457 uint8_t qindx; 3458 3459 /* Reset HBA */ 3460 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3461 "0295 Reset HBA Data: x%x x%x\n", 3462 phba->pport->port_state, psli->sli_flag); 3463 3464 /* perform board reset */ 3465 phba->fc_eventTag = 0; 3466 phba->link_events = 0; 3467 phba->pport->fc_myDID = 0; 3468 phba->pport->fc_prevDID = 0; 3469 3470 /* Turn off parity checking and serr during the physical reset */ 3471 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 3472 pci_write_config_word(phba->pcidev, PCI_COMMAND, 3473 (cfg_value & 3474 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3475 3476 spin_lock_irq(&phba->hbalock); 3477 psli->sli_flag &= ~(LPFC_PROCESS_LA); 3478 phba->fcf.fcf_flag = 0; 3479 /* Clean up the child queue list for the CQs */ 3480 list_del_init(&phba->sli4_hba.mbx_wq->list); 3481 list_del_init(&phba->sli4_hba.els_wq->list); 3482 list_del_init(&phba->sli4_hba.hdr_rq->list); 3483 list_del_init(&phba->sli4_hba.dat_rq->list); 3484 list_del_init(&phba->sli4_hba.mbx_cq->list); 3485 list_del_init(&phba->sli4_hba.els_cq->list); 3486 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++) 3487 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list); 3488 for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++) 3489 list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list); 3490 spin_unlock_irq(&phba->hbalock); 3491 3492 /* Now physically reset the device */ 3493 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3494 "0389 Performing PCI function reset!\n"); 3495 /* Perform FCoE PCI function reset */ 3496 lpfc_pci_function_reset(phba); 3497 3498 return 0; 3499 } 3500 3501 /** 3502 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba 3503 * @phba: Pointer to HBA context object. 3504 * 3505 * This function is called in the SLI initialization code path to 3506 * restart the HBA. The caller is not required to hold any lock. 3507 * This function writes MBX_RESTART mailbox command to the SLIM and 3508 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post 3509 * function to free any pending commands. The function enables 3510 * POST only during the first initialization. The function returns zero. 3511 * The function does not guarantee completion of MBX_RESTART mailbox 3512 * command before the return of this function. 
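 *
 * Caller sketch, mirroring the ready-wait loops in this file; the restart
 * is always reached through the lpfc_sli_brdrestart() wrapper:
 *
 *	if (i == 15) {
 *		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
 *		lpfc_sli_brdrestart(phba);
 *	}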
3513 **/ 3514 static int 3515 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) 3516 { 3517 MAILBOX_t *mb; 3518 struct lpfc_sli *psli; 3519 volatile uint32_t word0; 3520 void __iomem *to_slim; 3521 uint32_t hba_aer_enabled; 3522 3523 spin_lock_irq(&phba->hbalock); 3524 3525 /* Take PCIe device Advanced Error Reporting (AER) state */ 3526 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 3527 3528 psli = &phba->sli; 3529 3530 /* Restart HBA */ 3531 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3532 "0337 Restart HBA Data: x%x x%x\n", 3533 phba->pport->port_state, psli->sli_flag); 3534 3535 word0 = 0; 3536 mb = (MAILBOX_t *) &word0; 3537 mb->mbxCommand = MBX_RESTART; 3538 mb->mbxHc = 1; 3539 3540 lpfc_reset_barrier(phba); 3541 3542 to_slim = phba->MBslimaddr; 3543 writel(*(uint32_t *) mb, to_slim); 3544 readl(to_slim); /* flush */ 3545 3546 /* Only skip post after fc_ffinit is completed */ 3547 if (phba->pport->port_state) 3548 word0 = 1; /* This is really setting up word1 */ 3549 else 3550 word0 = 0; /* This is really setting up word1 */ 3551 to_slim = phba->MBslimaddr + sizeof (uint32_t); 3552 writel(*(uint32_t *) mb, to_slim); 3553 readl(to_slim); /* flush */ 3554 3555 lpfc_sli_brdreset(phba); 3556 phba->pport->stopped = 0; 3557 phba->link_state = LPFC_INIT_START; 3558 phba->hba_flag = 0; 3559 spin_unlock_irq(&phba->hbalock); 3560 3561 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 3562 psli->stats_start = get_seconds(); 3563 3564 /* Give the INITFF and Post time to settle. */ 3565 mdelay(100); 3566 3567 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 3568 if (hba_aer_enabled) 3569 pci_disable_pcie_error_reporting(phba->pcidev); 3570 3571 lpfc_hba_down_post(phba); 3572 3573 return 0; 3574 } 3575 3576 /** 3577 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba 3578 * @phba: Pointer to HBA context object. 3579 * 3580 * This function is called in the SLI initialization code path to restart 3581 * a SLI4 HBA. The caller is not required to hold any lock. 3582 * At the end of the function, it calls lpfc_hba_down_post function to 3583 * free any pending commands. 3584 **/ 3585 static int 3586 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) 3587 { 3588 struct lpfc_sli *psli = &phba->sli; 3589 uint32_t hba_aer_enabled; 3590 3591 /* Restart HBA */ 3592 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3593 "0296 Restart HBA Data: x%x x%x\n", 3594 phba->pport->port_state, psli->sli_flag); 3595 3596 /* Take PCIe device Advanced Error Reporting (AER) state */ 3597 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 3598 3599 lpfc_sli4_brdreset(phba); 3600 3601 spin_lock_irq(&phba->hbalock); 3602 phba->pport->stopped = 0; 3603 phba->link_state = LPFC_INIT_START; 3604 phba->hba_flag = 0; 3605 spin_unlock_irq(&phba->hbalock); 3606 3607 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 3608 psli->stats_start = get_seconds(); 3609 3610 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 3611 if (hba_aer_enabled) 3612 pci_disable_pcie_error_reporting(phba->pcidev); 3613 3614 lpfc_hba_down_post(phba); 3615 3616 return 0; 3617 } 3618 3619 /** 3620 * lpfc_sli_brdrestart - Wrapper func for restarting hba 3621 * @phba: Pointer to HBA context object. 3622 * 3623 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the 3624 * API jump table function pointer from the lpfc_hba struct. 
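 *
 * A minimal sketch of how the jump table entry is expected to be set up
 * during driver initialization (the exact location and the test used here
 * are assumptions, shown only to illustrate the dispatch):
 *
 *	if (phba->sli_rev < LPFC_SLI_REV4)
 *		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
 *	else
 *		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;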
3625 **/ 3626 int 3627 lpfc_sli_brdrestart(struct lpfc_hba *phba) 3628 { 3629 return phba->lpfc_sli_brdrestart(phba); 3630 } 3631 3632 /** 3633 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart 3634 * @phba: Pointer to HBA context object. 3635 * 3636 * This function is called after a HBA restart to wait for successful 3637 * restart of the HBA. Successful restart of the HBA is indicated by 3638 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15 3639 * iteration, the function will restart the HBA again. The function returns 3640 * zero if HBA successfully restarted else returns negative error code. 3641 **/ 3642 static int 3643 lpfc_sli_chipset_init(struct lpfc_hba *phba) 3644 { 3645 uint32_t status, i = 0; 3646 3647 /* Read the HBA Host Status Register */ 3648 status = readl(phba->HSregaddr); 3649 3650 /* Check status register to see what current state is */ 3651 i = 0; 3652 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) { 3653 3654 /* Check every 100ms for 5 retries, then every 500ms for 5, then 3655 * every 2.5 sec for 5, then reset board and every 2.5 sec for 3656 * 4. 3657 */ 3658 if (i++ >= 20) { 3659 /* Adapter failed to init, timeout, status reg 3660 <status> */ 3661 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3662 "0436 Adapter failed to init, " 3663 "timeout, status reg x%x, " 3664 "FW Data: A8 x%x AC x%x\n", status, 3665 readl(phba->MBslimaddr + 0xa8), 3666 readl(phba->MBslimaddr + 0xac)); 3667 phba->link_state = LPFC_HBA_ERROR; 3668 return -ETIMEDOUT; 3669 } 3670 3671 /* Check to see if any errors occurred during init */ 3672 if (status & HS_FFERM) { 3673 /* ERROR: During chipset initialization */ 3674 /* Adapter failed to init, chipset, status reg 3675 <status> */ 3676 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3677 "0437 Adapter failed to init, " 3678 "chipset, status reg x%x, " 3679 "FW Data: A8 x%x AC x%x\n", status, 3680 readl(phba->MBslimaddr + 0xa8), 3681 readl(phba->MBslimaddr + 0xac)); 3682 phba->link_state = LPFC_HBA_ERROR; 3683 return -EIO; 3684 } 3685 3686 if (i <= 5) { 3687 msleep(10); 3688 } else if (i <= 10) { 3689 msleep(500); 3690 } else { 3691 msleep(2500); 3692 } 3693 3694 if (i == 15) { 3695 /* Do post */ 3696 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3697 lpfc_sli_brdrestart(phba); 3698 } 3699 /* Read the HBA Host Status Register */ 3700 status = readl(phba->HSregaddr); 3701 } 3702 3703 /* Check to see if any errors occurred during init */ 3704 if (status & HS_FFERM) { 3705 /* ERROR: During chipset initialization */ 3706 /* Adapter failed to init, chipset, status reg <status> */ 3707 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3708 "0438 Adapter failed to init, chipset, " 3709 "status reg x%x, " 3710 "FW Data: A8 x%x AC x%x\n", status, 3711 readl(phba->MBslimaddr + 0xa8), 3712 readl(phba->MBslimaddr + 0xac)); 3713 phba->link_state = LPFC_HBA_ERROR; 3714 return -EIO; 3715 } 3716 3717 /* Clear all interrupt enable conditions */ 3718 writel(0, phba->HCregaddr); 3719 readl(phba->HCregaddr); /* flush */ 3720 3721 /* setup host attn register */ 3722 writel(0xffffffff, phba->HAregaddr); 3723 readl(phba->HAregaddr); /* flush */ 3724 return 0; 3725 } 3726 3727 /** 3728 * lpfc_sli_hbq_count - Get the number of HBQs to be configured 3729 * 3730 * This function calculates and returns the number of HBQs required to be 3731 * configured. 
3732 **/ 3733 int 3734 lpfc_sli_hbq_count(void) 3735 { 3736 return ARRAY_SIZE(lpfc_hbq_defs); 3737 } 3738 3739 /** 3740 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries 3741 * 3742 * This function adds the number of hbq entries in every HBQ to get 3743 * the total number of hbq entries required for the HBA and returns 3744 * the total count. 3745 **/ 3746 static int 3747 lpfc_sli_hbq_entry_count(void) 3748 { 3749 int hbq_count = lpfc_sli_hbq_count(); 3750 int count = 0; 3751 int i; 3752 3753 for (i = 0; i < hbq_count; ++i) 3754 count += lpfc_hbq_defs[i]->entry_count; 3755 return count; 3756 } 3757 3758 /** 3759 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries 3760 * 3761 * This function calculates amount of memory required for all hbq entries 3762 * to be configured and returns the total memory required. 3763 **/ 3764 int 3765 lpfc_sli_hbq_size(void) 3766 { 3767 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 3768 } 3769 3770 /** 3771 * lpfc_sli_hbq_setup - configure and initialize HBQs 3772 * @phba: Pointer to HBA context object. 3773 * 3774 * This function is called during the SLI initialization to configure 3775 * all the HBQs and post buffers to the HBQ. The caller is not 3776 * required to hold any locks. This function will return zero if successful 3777 * else it will return negative error code. 3778 **/ 3779 static int 3780 lpfc_sli_hbq_setup(struct lpfc_hba *phba) 3781 { 3782 int hbq_count = lpfc_sli_hbq_count(); 3783 LPFC_MBOXQ_t *pmb; 3784 MAILBOX_t *pmbox; 3785 uint32_t hbqno; 3786 uint32_t hbq_entry_index; 3787 3788 /* Get a Mailbox buffer to setup mailbox 3789 * commands for HBA initialization 3790 */ 3791 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3792 3793 if (!pmb) 3794 return -ENOMEM; 3795 3796 pmbox = &pmb->u.mb; 3797 3798 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 3799 phba->link_state = LPFC_INIT_MBX_CMDS; 3800 phba->hbq_in_use = 1; 3801 3802 hbq_entry_index = 0; 3803 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 3804 phba->hbqs[hbqno].next_hbqPutIdx = 0; 3805 phba->hbqs[hbqno].hbqPutIdx = 0; 3806 phba->hbqs[hbqno].local_hbqGetIdx = 0; 3807 phba->hbqs[hbqno].entry_count = 3808 lpfc_hbq_defs[hbqno]->entry_count; 3809 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], 3810 hbq_entry_index, pmb); 3811 hbq_entry_index += phba->hbqs[hbqno].entry_count; 3812 3813 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 3814 /* Adapter failed to init, mbxCmd <cmd> CFG_RING, 3815 mbxStatus <status>, ring <num> */ 3816 3817 lpfc_printf_log(phba, KERN_ERR, 3818 LOG_SLI | LOG_VPORT, 3819 "1805 Adapter failed to init. " 3820 "Data: x%x x%x x%x\n", 3821 pmbox->mbxCommand, 3822 pmbox->mbxStatus, hbqno); 3823 3824 phba->link_state = LPFC_HBA_ERROR; 3825 mempool_free(pmb, phba->mbox_mem_pool); 3826 return -ENXIO; 3827 } 3828 } 3829 phba->hbq_count = hbq_count; 3830 3831 mempool_free(pmb, phba->mbox_mem_pool); 3832 3833 /* Initially populate or replenish the HBQs */ 3834 for (hbqno = 0; hbqno < hbq_count; ++hbqno) 3835 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno); 3836 return 0; 3837 } 3838 3839 /** 3840 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA 3841 * @phba: Pointer to HBA context object. 3842 * 3843 * This function is called during the SLI initialization to configure 3844 * all the HBQs and post buffers to the HBQ. The caller is not 3845 * required to hold any locks. This function will return zero if successful 3846 * else it will return negative error code. 
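 *
 * Note that on SLI-4 only a single receive-buffer HBQ (index 0) is set up
 * here: the entry count is taken from lpfc_hbq_defs[0] and the buffers are
 * posted through lpfc_sli_hbqbuf_init_hbqs(), just as in the SLI-3 path
 * above.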
3847 **/ 3848 static int 3849 lpfc_sli4_rb_setup(struct lpfc_hba *phba) 3850 { 3851 phba->hbq_in_use = 1; 3852 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count; 3853 phba->hbq_count = 1; 3854 /* Initially populate or replenish the HBQs */ 3855 lpfc_sli_hbqbuf_init_hbqs(phba, 0); 3856 return 0; 3857 } 3858 3859 /** 3860 * lpfc_sli_config_port - Issue config port mailbox command 3861 * @phba: Pointer to HBA context object. 3862 * @sli_mode: sli mode - 2/3 3863 * 3864 * This function is called by the sli intialization code path 3865 * to issue config_port mailbox command. This function restarts the 3866 * HBA firmware and issues a config_port mailbox command to configure 3867 * the SLI interface in the sli mode specified by sli_mode 3868 * variable. The caller is not required to hold any locks. 3869 * The function returns 0 if successful, else returns negative error 3870 * code. 3871 **/ 3872 int 3873 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) 3874 { 3875 LPFC_MBOXQ_t *pmb; 3876 uint32_t resetcount = 0, rc = 0, done = 0; 3877 3878 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3879 if (!pmb) { 3880 phba->link_state = LPFC_HBA_ERROR; 3881 return -ENOMEM; 3882 } 3883 3884 phba->sli_rev = sli_mode; 3885 while (resetcount < 2 && !done) { 3886 spin_lock_irq(&phba->hbalock); 3887 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 3888 spin_unlock_irq(&phba->hbalock); 3889 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3890 lpfc_sli_brdrestart(phba); 3891 rc = lpfc_sli_chipset_init(phba); 3892 if (rc) 3893 break; 3894 3895 spin_lock_irq(&phba->hbalock); 3896 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3897 spin_unlock_irq(&phba->hbalock); 3898 resetcount++; 3899 3900 /* Call pre CONFIG_PORT mailbox command initialization. A 3901 * value of 0 means the call was successful. Any other 3902 * nonzero value is a failure, but if ERESTART is returned, 3903 * the driver may reset the HBA and try again. 3904 */ 3905 rc = lpfc_config_port_prep(phba); 3906 if (rc == -ERESTART) { 3907 phba->link_state = LPFC_LINK_UNKNOWN; 3908 continue; 3909 } else if (rc) 3910 break; 3911 phba->link_state = LPFC_INIT_MBX_CMDS; 3912 lpfc_config_port(phba, pmb); 3913 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 3914 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 3915 LPFC_SLI3_HBQ_ENABLED | 3916 LPFC_SLI3_CRP_ENABLED | 3917 LPFC_SLI3_BG_ENABLED | 3918 LPFC_SLI3_DSS_ENABLED); 3919 if (rc != MBX_SUCCESS) { 3920 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3921 "0442 Adapter failed to init, mbxCmd x%x " 3922 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 3923 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); 3924 spin_lock_irq(&phba->hbalock); 3925 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 3926 spin_unlock_irq(&phba->hbalock); 3927 rc = -ENXIO; 3928 } else { 3929 /* Allow asynchronous mailbox command to go through */ 3930 spin_lock_irq(&phba->hbalock); 3931 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 3932 spin_unlock_irq(&phba->hbalock); 3933 done = 1; 3934 } 3935 } 3936 if (!done) { 3937 rc = -EINVAL; 3938 goto do_prep_failed; 3939 } 3940 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { 3941 if (!pmb->u.mb.un.varCfgPort.cMA) { 3942 rc = -ENXIO; 3943 goto do_prep_failed; 3944 } 3945 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { 3946 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 3947 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; 3948 phba->max_vports = (phba->max_vpi > phba->max_vports) ? 
3949 phba->max_vpi : phba->max_vports; 3950 3951 } else 3952 phba->max_vpi = 0; 3953 phba->fips_level = 0; 3954 phba->fips_spec_rev = 0; 3955 if (pmb->u.mb.un.varCfgPort.gdss) { 3956 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; 3957 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level; 3958 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev; 3959 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3960 "2850 Security Crypto Active. FIPS x%d " 3961 "(Spec Rev: x%d)", 3962 phba->fips_level, phba->fips_spec_rev); 3963 } 3964 if (pmb->u.mb.un.varCfgPort.sec_err) { 3965 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3966 "2856 Config Port Security Crypto " 3967 "Error: x%x ", 3968 pmb->u.mb.un.varCfgPort.sec_err); 3969 } 3970 if (pmb->u.mb.un.varCfgPort.gerbm) 3971 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 3972 if (pmb->u.mb.un.varCfgPort.gcrp) 3973 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 3974 3975 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; 3976 phba->port_gp = phba->mbox->us.s3_pgp.port; 3977 3978 if (phba->cfg_enable_bg) { 3979 if (pmb->u.mb.un.varCfgPort.gbg) 3980 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 3981 else 3982 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3983 "0443 Adapter did not grant " 3984 "BlockGuard\n"); 3985 } 3986 } else { 3987 phba->hbq_get = NULL; 3988 phba->port_gp = phba->mbox->us.s2.port; 3989 phba->max_vpi = 0; 3990 } 3991 do_prep_failed: 3992 mempool_free(pmb, phba->mbox_mem_pool); 3993 return rc; 3994 } 3995 3996 3997 /** 3998 * lpfc_sli_hba_setup - SLI intialization function 3999 * @phba: Pointer to HBA context object. 4000 * 4001 * This function is the main SLI intialization function. This function 4002 * is called by the HBA intialization code, HBA reset code and HBA 4003 * error attention handler code. Caller is not required to hold any 4004 * locks. This function issues config_port mailbox command to configure 4005 * the SLI, setup iocb rings and HBQ rings. In the end the function 4006 * calls the config_port_post function to issue init_link mailbox 4007 * command and to start the discovery. The function will return zero 4008 * if successful, else it will return negative error code. 4009 **/ 4010 int 4011 lpfc_sli_hba_setup(struct lpfc_hba *phba) 4012 { 4013 uint32_t rc; 4014 int mode = 3; 4015 4016 switch (lpfc_sli_mode) { 4017 case 2: 4018 if (phba->cfg_enable_npiv) { 4019 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4020 "1824 NPIV enabled: Override lpfc_sli_mode " 4021 "parameter (%d) to auto (0).\n", 4022 lpfc_sli_mode); 4023 break; 4024 } 4025 mode = 2; 4026 break; 4027 case 0: 4028 case 3: 4029 break; 4030 default: 4031 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4032 "1819 Unrecognized lpfc_sli_mode " 4033 "parameter: %d.\n", lpfc_sli_mode); 4034 4035 break; 4036 } 4037 4038 rc = lpfc_sli_config_port(phba, mode); 4039 4040 if (rc && lpfc_sli_mode == 3) 4041 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4042 "1820 Unable to select SLI-3. 
" 4043 "Not supported by adapter.\n"); 4044 if (rc && mode != 2) 4045 rc = lpfc_sli_config_port(phba, 2); 4046 if (rc) 4047 goto lpfc_sli_hba_setup_error; 4048 4049 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 4050 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 4051 rc = pci_enable_pcie_error_reporting(phba->pcidev); 4052 if (!rc) { 4053 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4054 "2709 This device supports " 4055 "Advanced Error Reporting (AER)\n"); 4056 spin_lock_irq(&phba->hbalock); 4057 phba->hba_flag |= HBA_AER_ENABLED; 4058 spin_unlock_irq(&phba->hbalock); 4059 } else { 4060 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4061 "2708 This device does not support " 4062 "Advanced Error Reporting (AER)\n"); 4063 phba->cfg_aer_support = 0; 4064 } 4065 } 4066 4067 if (phba->sli_rev == 3) { 4068 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 4069 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 4070 } else { 4071 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 4072 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 4073 phba->sli3_options = 0; 4074 } 4075 4076 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4077 "0444 Firmware in SLI %x mode. Max_vpi %d\n", 4078 phba->sli_rev, phba->max_vpi); 4079 rc = lpfc_sli_ring_map(phba); 4080 4081 if (rc) 4082 goto lpfc_sli_hba_setup_error; 4083 4084 /* Init HBQs */ 4085 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 4086 rc = lpfc_sli_hbq_setup(phba); 4087 if (rc) 4088 goto lpfc_sli_hba_setup_error; 4089 } 4090 spin_lock_irq(&phba->hbalock); 4091 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4092 spin_unlock_irq(&phba->hbalock); 4093 4094 rc = lpfc_config_port_post(phba); 4095 if (rc) 4096 goto lpfc_sli_hba_setup_error; 4097 4098 return rc; 4099 4100 lpfc_sli_hba_setup_error: 4101 phba->link_state = LPFC_HBA_ERROR; 4102 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4103 "0445 Firmware initialization failed\n"); 4104 return rc; 4105 } 4106 4107 /** 4108 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region 4109 * @phba: Pointer to HBA context object. 4110 * @mboxq: mailbox pointer. 4111 * This function issue a dump mailbox command to read config region 4112 * 23 and parse the records in the region and populate driver 4113 * data structure. 4114 **/ 4115 static int 4116 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba, 4117 LPFC_MBOXQ_t *mboxq) 4118 { 4119 struct lpfc_dmabuf *mp; 4120 struct lpfc_mqe *mqe; 4121 uint32_t data_length; 4122 int rc; 4123 4124 /* Program the default value of vlan_id and fc_map */ 4125 phba->valid_vlan = 0; 4126 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 4127 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 4128 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 4129 4130 mqe = &mboxq->u.mqe; 4131 if (lpfc_dump_fcoe_param(phba, mboxq)) 4132 return -ENOMEM; 4133 4134 mp = (struct lpfc_dmabuf *) mboxq->context1; 4135 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4136 4137 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 4138 "(%d):2571 Mailbox cmd x%x Status x%x " 4139 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 4140 "x%x x%x x%x x%x x%x x%x x%x x%x x%x " 4141 "CQ: x%x x%x x%x x%x\n", 4142 mboxq->vport ? 
mboxq->vport->vpi : 0, 4143 bf_get(lpfc_mqe_command, mqe), 4144 bf_get(lpfc_mqe_status, mqe), 4145 mqe->un.mb_words[0], mqe->un.mb_words[1], 4146 mqe->un.mb_words[2], mqe->un.mb_words[3], 4147 mqe->un.mb_words[4], mqe->un.mb_words[5], 4148 mqe->un.mb_words[6], mqe->un.mb_words[7], 4149 mqe->un.mb_words[8], mqe->un.mb_words[9], 4150 mqe->un.mb_words[10], mqe->un.mb_words[11], 4151 mqe->un.mb_words[12], mqe->un.mb_words[13], 4152 mqe->un.mb_words[14], mqe->un.mb_words[15], 4153 mqe->un.mb_words[16], mqe->un.mb_words[50], 4154 mboxq->mcqe.word0, 4155 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 4156 mboxq->mcqe.trailer); 4157 4158 if (rc) { 4159 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4160 kfree(mp); 4161 return -EIO; 4162 } 4163 data_length = mqe->un.mb_words[5]; 4164 if (data_length > DMP_RGN23_SIZE) { 4165 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4166 kfree(mp); 4167 return -EIO; 4168 } 4169 4170 lpfc_parse_fcoe_conf(phba, mp->virt, data_length); 4171 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4172 kfree(mp); 4173 return 0; 4174 } 4175 4176 /** 4177 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data 4178 * @phba: pointer to lpfc hba data structure. 4179 * @mboxq: pointer to the LPFC_MBOXQ_t structure. 4180 * @vpd: pointer to the memory to hold resulting port vpd data. 4181 * @vpd_size: On input, the number of bytes allocated to @vpd. 4182 * On output, the number of data bytes in @vpd. 4183 * 4184 * This routine executes a READ_REV SLI4 mailbox command. In 4185 * addition, this routine gets the port vpd data. 4186 * 4187 * Return codes 4188 * 0 - successful 4189 * ENOMEM - could not allocated memory. 4190 **/ 4191 static int 4192 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 4193 uint8_t *vpd, uint32_t *vpd_size) 4194 { 4195 int rc = 0; 4196 uint32_t dma_size; 4197 struct lpfc_dmabuf *dmabuf; 4198 struct lpfc_mqe *mqe; 4199 4200 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4201 if (!dmabuf) 4202 return -ENOMEM; 4203 4204 /* 4205 * Get a DMA buffer for the vpd data resulting from the READ_REV 4206 * mailbox command. 4207 */ 4208 dma_size = *vpd_size; 4209 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 4210 dma_size, 4211 &dmabuf->phys, 4212 GFP_KERNEL); 4213 if (!dmabuf->virt) { 4214 kfree(dmabuf); 4215 return -ENOMEM; 4216 } 4217 memset(dmabuf->virt, 0, dma_size); 4218 4219 /* 4220 * The SLI4 implementation of READ_REV conflicts at word1, 4221 * bits 31:16 and SLI4 adds vpd functionality not present 4222 * in SLI3. This code corrects the conflicts. 4223 */ 4224 lpfc_read_rev(phba, mboxq); 4225 mqe = &mboxq->u.mqe; 4226 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); 4227 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); 4228 mqe->un.read_rev.word1 &= 0x0000FFFF; 4229 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); 4230 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); 4231 4232 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4233 if (rc) { 4234 dma_free_coherent(&phba->pcidev->dev, dma_size, 4235 dmabuf->virt, dmabuf->phys); 4236 kfree(dmabuf); 4237 return -EIO; 4238 } 4239 4240 /* 4241 * The available vpd length cannot be bigger than the 4242 * DMA buffer passed to the port. Catch the less than 4243 * case and update the caller's size. 
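 * For example, if the port reports an avail_vpd_len of 1024 bytes against
 * the page-sized buffer supplied by the caller, *vpd_size is reduced to
 * 1024 before the copy below (hypothetical sizes, for illustration).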
4244 */ 4245 if (mqe->un.read_rev.avail_vpd_len < *vpd_size) 4246 *vpd_size = mqe->un.read_rev.avail_vpd_len; 4247 4248 memcpy(vpd, dmabuf->virt, *vpd_size); 4249 4250 dma_free_coherent(&phba->pcidev->dev, dma_size, 4251 dmabuf->virt, dmabuf->phys); 4252 kfree(dmabuf); 4253 return 0; 4254 } 4255 4256 /** 4257 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues 4258 * @phba: pointer to lpfc hba data structure. 4259 * 4260 * This routine is called to explicitly arm the SLI4 device's completion and 4261 * event queues 4262 **/ 4263 static void 4264 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) 4265 { 4266 uint8_t fcp_eqidx; 4267 4268 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); 4269 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); 4270 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) 4271 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], 4272 LPFC_QUEUE_REARM); 4273 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM); 4274 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) 4275 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx], 4276 LPFC_QUEUE_REARM); 4277 } 4278 4279 /** 4280 * lpfc_sli4_hba_setup - SLI4 device intialization PCI function 4281 * @phba: Pointer to HBA context object. 4282 * 4283 * This function is the main SLI4 device intialization PCI function. This 4284 * function is called by the HBA intialization code, HBA reset code and 4285 * HBA error attention handler code. Caller is not required to hold any 4286 * locks. 4287 **/ 4288 int 4289 lpfc_sli4_hba_setup(struct lpfc_hba *phba) 4290 { 4291 int rc; 4292 LPFC_MBOXQ_t *mboxq; 4293 struct lpfc_mqe *mqe; 4294 uint8_t *vpd; 4295 uint32_t vpd_size; 4296 uint32_t ftr_rsp = 0; 4297 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); 4298 struct lpfc_vport *vport = phba->pport; 4299 struct lpfc_dmabuf *mp; 4300 4301 /* Perform a PCI function reset to start from clean */ 4302 rc = lpfc_pci_function_reset(phba); 4303 if (unlikely(rc)) 4304 return -ENODEV; 4305 4306 /* Check the HBA Host Status Register for readyness */ 4307 rc = lpfc_sli4_post_status_check(phba); 4308 if (unlikely(rc)) 4309 return -ENODEV; 4310 else { 4311 spin_lock_irq(&phba->hbalock); 4312 phba->sli.sli_flag |= LPFC_SLI_ACTIVE; 4313 spin_unlock_irq(&phba->hbalock); 4314 } 4315 4316 /* 4317 * Allocate a single mailbox container for initializing the 4318 * port. 4319 */ 4320 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4321 if (!mboxq) 4322 return -ENOMEM; 4323 4324 /* 4325 * Continue initialization with default values even if driver failed 4326 * to read FCoE param config regions 4327 */ 4328 if (lpfc_sli4_read_fcoe_params(phba, mboxq)) 4329 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 4330 "2570 Failed to read FCoE parameters\n"); 4331 4332 /* Issue READ_REV to collect vpd and FW information. 
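 * The vpd buffer allocated just below is sized to one SLI4 page; on return,
 * lpfc_sli4_read_rev() will have trimmed vpd_size down to the number of
 * bytes the port actually reported.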
*/ 4333 vpd_size = SLI4_PAGE_SIZE; 4334 vpd = kzalloc(vpd_size, GFP_KERNEL); 4335 if (!vpd) { 4336 rc = -ENOMEM; 4337 goto out_free_mbox; 4338 } 4339 4340 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); 4341 if (unlikely(rc)) 4342 goto out_free_vpd; 4343 4344 mqe = &mboxq->u.mqe; 4345 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 4346 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) 4347 phba->hba_flag |= HBA_FCOE_SUPPORT; 4348 4349 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == 4350 LPFC_DCBX_CEE_MODE) 4351 phba->hba_flag |= HBA_FIP_SUPPORT; 4352 else 4353 phba->hba_flag &= ~HBA_FIP_SUPPORT; 4354 4355 if (phba->sli_rev != LPFC_SLI_REV4 || 4356 !(phba->hba_flag & HBA_FCOE_SUPPORT)) { 4357 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4358 "0376 READ_REV Error. SLI Level %d " 4359 "FCoE enabled %d\n", 4360 phba->sli_rev, phba->hba_flag & HBA_FCOE_SUPPORT); 4361 rc = -EIO; 4362 goto out_free_vpd; 4363 } 4364 /* 4365 * Evaluate the read rev and vpd data. Populate the driver 4366 * state with the results. If this routine fails, the failure 4367 * is not fatal as the driver will use generic values. 4368 */ 4369 rc = lpfc_parse_vpd(phba, vpd, vpd_size); 4370 if (unlikely(!rc)) { 4371 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4372 "0377 Error %d parsing vpd. " 4373 "Using defaults.\n", rc); 4374 rc = 0; 4375 } 4376 4377 /* Save information as VPD data */ 4378 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; 4379 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; 4380 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; 4381 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, 4382 &mqe->un.read_rev); 4383 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, 4384 &mqe->un.read_rev); 4385 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, 4386 &mqe->un.read_rev); 4387 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, 4388 &mqe->un.read_rev); 4389 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; 4390 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); 4391 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; 4392 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); 4393 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; 4394 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); 4395 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 4396 "(%d):0380 READ_REV Status x%x " 4397 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", 4398 mboxq->vport ? mboxq->vport->vpi : 0, 4399 bf_get(lpfc_mqe_status, mqe), 4400 phba->vpd.rev.opFwName, 4401 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, 4402 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); 4403 4404 /* 4405 * Discover the port's supported feature set and match it against the 4406 * hosts requests. 4407 */ 4408 lpfc_request_features(phba, mboxq); 4409 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4410 if (unlikely(rc)) { 4411 rc = -EIO; 4412 goto out_free_vpd; 4413 } 4414 4415 /* 4416 * The port must support FCP initiator mode as this is the 4417 * only mode running in the host. 4418 */ 4419 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { 4420 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 4421 "0378 No support for fcpi mode.\n"); 4422 ftr_rsp++; 4423 } 4424 4425 /* 4426 * If the port cannot support the host's requested features 4427 * then turn off the global config parameters to disable the 4428 * feature in the driver. This is not a fatal error. 
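 * For example, if the port does not grant the DIF feature that
 * cfg_enable_bg requested, ftr_rsp is bumped and cfg_enable_bg is cleared
 * below; the same applies to NPIV through cfg_enable_npiv.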
4429 */ 4430 if ((phba->cfg_enable_bg) && 4431 !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 4432 ftr_rsp++; 4433 4434 if (phba->max_vpi && phba->cfg_enable_npiv && 4435 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 4436 ftr_rsp++; 4437 4438 if (ftr_rsp) { 4439 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 4440 "0379 Feature Mismatch Data: x%08x %08x " 4441 "x%x x%x x%x\n", mqe->un.req_ftrs.word2, 4442 mqe->un.req_ftrs.word3, phba->cfg_enable_bg, 4443 phba->cfg_enable_npiv, phba->max_vpi); 4444 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 4445 phba->cfg_enable_bg = 0; 4446 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 4447 phba->cfg_enable_npiv = 0; 4448 } 4449 4450 /* These SLI3 features are assumed in SLI4 */ 4451 spin_lock_irq(&phba->hbalock); 4452 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 4453 spin_unlock_irq(&phba->hbalock); 4454 4455 /* Read the port's service parameters. */ 4456 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 4457 if (rc) { 4458 phba->link_state = LPFC_HBA_ERROR; 4459 rc = -ENOMEM; 4460 goto out_free_vpd; 4461 } 4462 4463 mboxq->vport = vport; 4464 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4465 mp = (struct lpfc_dmabuf *) mboxq->context1; 4466 if (rc == MBX_SUCCESS) { 4467 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); 4468 rc = 0; 4469 } 4470 4471 /* 4472 * This memory was allocated by the lpfc_read_sparam routine. Release 4473 * it to the mbuf pool. 4474 */ 4475 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4476 kfree(mp); 4477 mboxq->context1 = NULL; 4478 if (unlikely(rc)) { 4479 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4480 "0382 READ_SPARAM command failed " 4481 "status %d, mbxStatus x%x\n", 4482 rc, bf_get(lpfc_mqe_status, mqe)); 4483 phba->link_state = LPFC_HBA_ERROR; 4484 rc = -EIO; 4485 goto out_free_vpd; 4486 } 4487 4488 if (phba->cfg_soft_wwnn) 4489 u64_to_wwn(phba->cfg_soft_wwnn, 4490 vport->fc_sparam.nodeName.u.wwn); 4491 if (phba->cfg_soft_wwpn) 4492 u64_to_wwn(phba->cfg_soft_wwpn, 4493 vport->fc_sparam.portName.u.wwn); 4494 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 4495 sizeof(struct lpfc_name)); 4496 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 4497 sizeof(struct lpfc_name)); 4498 4499 /* Update the fc_host data structures with new wwn. */ 4500 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 4501 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 4502 4503 /* Register SGL pool to the device using non-embedded mailbox command */ 4504 rc = lpfc_sli4_post_sgl_list(phba); 4505 if (unlikely(rc)) { 4506 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4507 "0582 Error %d during sgl post operation\n", 4508 rc); 4509 rc = -ENODEV; 4510 goto out_free_vpd; 4511 } 4512 4513 /* Register SCSI SGL pool to the device */ 4514 rc = lpfc_sli4_repost_scsi_sgl_list(phba); 4515 if (unlikely(rc)) { 4516 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 4517 "0383 Error %d during scsi sgl post " 4518 "operation\n", rc); 4519 /* Some Scsi buffers were moved to the abort scsi list */ 4520 /* A pci function reset will repost them */ 4521 rc = -ENODEV; 4522 goto out_free_vpd; 4523 } 4524 4525 /* Post the rpi header region to the device. 
*/ 4526 rc = lpfc_sli4_post_all_rpi_hdrs(phba); 4527 if (unlikely(rc)) { 4528 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4529 "0393 Error %d during rpi post operation\n", 4530 rc); 4531 rc = -ENODEV; 4532 goto out_free_vpd; 4533 } 4534 4535 /* Set up all the queues to the device */ 4536 rc = lpfc_sli4_queue_setup(phba); 4537 if (unlikely(rc)) { 4538 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4539 "0381 Error %d during queue setup.\n ", rc); 4540 goto out_stop_timers; 4541 } 4542 4543 /* Arm the CQs and then EQs on device */ 4544 lpfc_sli4_arm_cqeq_intr(phba); 4545 4546 /* Indicate device interrupt mode */ 4547 phba->sli4_hba.intr_enable = 1; 4548 4549 /* Allow asynchronous mailbox command to go through */ 4550 spin_lock_irq(&phba->hbalock); 4551 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 4552 spin_unlock_irq(&phba->hbalock); 4553 4554 /* Post receive buffers to the device */ 4555 lpfc_sli4_rb_setup(phba); 4556 4557 /* Reset HBA FCF states after HBA reset */ 4558 phba->fcf.fcf_flag = 0; 4559 phba->fcf.current_rec.flag = 0; 4560 4561 /* Start the ELS watchdog timer */ 4562 mod_timer(&vport->els_tmofunc, 4563 jiffies + HZ * (phba->fc_ratov * 2)); 4564 4565 /* Start heart beat timer */ 4566 mod_timer(&phba->hb_tmofunc, 4567 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 4568 phba->hb_outstanding = 0; 4569 phba->last_completion_time = jiffies; 4570 4571 /* Start error attention (ERATT) polling timer */ 4572 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); 4573 4574 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 4575 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 4576 rc = pci_enable_pcie_error_reporting(phba->pcidev); 4577 if (!rc) { 4578 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4579 "2829 This device supports " 4580 "Advanced Error Reporting (AER)\n"); 4581 spin_lock_irq(&phba->hbalock); 4582 phba->hba_flag |= HBA_AER_ENABLED; 4583 spin_unlock_irq(&phba->hbalock); 4584 } else { 4585 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4586 "2830 This device does not support " 4587 "Advanced Error Reporting (AER)\n"); 4588 phba->cfg_aer_support = 0; 4589 } 4590 } 4591 4592 /* 4593 * The port is ready, set the host's link state to LINK_DOWN 4594 * in preparation for link interrupts. 4595 */ 4596 lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed); 4597 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4598 lpfc_set_loopback_flag(phba); 4599 /* Change driver state to LPFC_LINK_DOWN right before init link */ 4600 spin_lock_irq(&phba->hbalock); 4601 phba->link_state = LPFC_LINK_DOWN; 4602 spin_unlock_irq(&phba->hbalock); 4603 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 4604 if (unlikely(rc != MBX_NOT_FINISHED)) { 4605 kfree(vpd); 4606 return 0; 4607 } else 4608 rc = -EIO; 4609 4610 /* Unset all the queues set up in this routine when error out */ 4611 if (rc) 4612 lpfc_sli4_queue_unset(phba); 4613 4614 out_stop_timers: 4615 if (rc) 4616 lpfc_stop_hba_timers(phba); 4617 out_free_vpd: 4618 kfree(vpd); 4619 out_free_mbox: 4620 mempool_free(mboxq, phba->mbox_mem_pool); 4621 return rc; 4622 } 4623 4624 /** 4625 * lpfc_mbox_timeout - Timeout call back function for mbox timer 4626 * @ptr: context object - pointer to hba structure. 4627 * 4628 * This is the callback function for mailbox timer. The mailbox 4629 * timer is armed when a new mailbox command is issued and the timer 4630 * is deleted when the mailbox complete. 
The function is called by 4631 * the kernel timer code when a mailbox does not complete within 4632 * expected time. This function wakes up the worker thread to 4633 * process the mailbox timeout and returns. All the processing is 4634 * done by the worker thread function lpfc_mbox_timeout_handler. 4635 **/ 4636 void 4637 lpfc_mbox_timeout(unsigned long ptr) 4638 { 4639 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 4640 unsigned long iflag; 4641 uint32_t tmo_posted; 4642 4643 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 4644 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; 4645 if (!tmo_posted) 4646 phba->pport->work_port_events |= WORKER_MBOX_TMO; 4647 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 4648 4649 if (!tmo_posted) 4650 lpfc_worker_wake_up(phba); 4651 return; 4652 } 4653 4654 4655 /** 4656 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout 4657 * @phba: Pointer to HBA context object. 4658 * 4659 * This function is called from worker thread when a mailbox command times out. 4660 * The caller is not required to hold any locks. This function will reset the 4661 * HBA and recover all the pending commands. 4662 **/ 4663 void 4664 lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 4665 { 4666 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 4667 MAILBOX_t *mb = &pmbox->u.mb; 4668 struct lpfc_sli *psli = &phba->sli; 4669 struct lpfc_sli_ring *pring; 4670 4671 /* Check the pmbox pointer first. There is a race condition 4672 * between the mbox timeout handler getting executed in the 4673 * worklist and the mailbox actually completing. When this 4674 * race condition occurs, the mbox_active will be NULL. 4675 */ 4676 spin_lock_irq(&phba->hbalock); 4677 if (pmbox == NULL) { 4678 lpfc_printf_log(phba, KERN_WARNING, 4679 LOG_MBOX | LOG_SLI, 4680 "0353 Active Mailbox cleared - mailbox timeout " 4681 "exiting\n"); 4682 spin_unlock_irq(&phba->hbalock); 4683 return; 4684 } 4685 4686 /* Mbox cmd <mbxCommand> timeout */ 4687 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4688 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 4689 mb->mbxCommand, 4690 phba->pport->port_state, 4691 phba->sli.sli_flag, 4692 phba->sli.mbox_active); 4693 spin_unlock_irq(&phba->hbalock); 4694 4695 /* Setting state unknown so lpfc_sli_abort_iocb_ring 4696 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 4697 * it to fail all oustanding SCSI IO. 4698 */ 4699 spin_lock_irq(&phba->pport->work_port_lock); 4700 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 4701 spin_unlock_irq(&phba->pport->work_port_lock); 4702 spin_lock_irq(&phba->hbalock); 4703 phba->link_state = LPFC_LINK_UNKNOWN; 4704 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 4705 spin_unlock_irq(&phba->hbalock); 4706 4707 pring = &psli->ring[psli->fcp_ring]; 4708 lpfc_sli_abort_iocb_ring(phba, pring); 4709 4710 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4711 "0345 Resetting board due to mailbox timeout\n"); 4712 4713 /* Reset the HBA device */ 4714 lpfc_reset_hba(phba); 4715 } 4716 4717 /** 4718 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware 4719 * @phba: Pointer to HBA context object. 4720 * @pmbox: Pointer to mailbox object. 4721 * @flag: Flag indicating how the mailbox need to be processed. 4722 * 4723 * This function is called by discovery code and HBA management code 4724 * to submit a mailbox command to firmware with SLI-3 interface spec. This 4725 * function gets the hbalock to protect the data structures. 
4726 * The mailbox command can be submitted in polling mode, in which case 4727 * this function will wait in a polling loop for the completion of the 4728 * mailbox. 4729 * If the mailbox is submitted in no_wait mode (not polling) the 4730 * function will submit the command and returns immediately without waiting 4731 * for the mailbox completion. The no_wait is supported only when HBA 4732 * is in SLI2/SLI3 mode - interrupts are enabled. 4733 * The SLI interface allows only one mailbox pending at a time. If the 4734 * mailbox is issued in polling mode and there is already a mailbox 4735 * pending, then the function will return an error. If the mailbox is issued 4736 * in NO_WAIT mode and there is a mailbox pending already, the function 4737 * will return MBX_BUSY after queuing the mailbox into mailbox queue. 4738 * The sli layer owns the mailbox object until the completion of mailbox 4739 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other 4740 * return codes the caller owns the mailbox command after the return of 4741 * the function. 4742 **/ 4743 static int 4744 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, 4745 uint32_t flag) 4746 { 4747 MAILBOX_t *mb; 4748 struct lpfc_sli *psli = &phba->sli; 4749 uint32_t status, evtctr; 4750 uint32_t ha_copy; 4751 int i; 4752 unsigned long timeout; 4753 unsigned long drvr_flag = 0; 4754 uint32_t word0, ldata; 4755 void __iomem *to_slim; 4756 int processing_queue = 0; 4757 4758 spin_lock_irqsave(&phba->hbalock, drvr_flag); 4759 if (!pmbox) { 4760 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4761 /* processing mbox queue from intr_handler */ 4762 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 4763 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4764 return MBX_SUCCESS; 4765 } 4766 processing_queue = 1; 4767 pmbox = lpfc_mbox_get(phba); 4768 if (!pmbox) { 4769 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4770 return MBX_SUCCESS; 4771 } 4772 } 4773 4774 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && 4775 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { 4776 if(!pmbox->vport) { 4777 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4778 lpfc_printf_log(phba, KERN_ERR, 4779 LOG_MBOX | LOG_VPORT, 4780 "1806 Mbox x%x failed. No vport\n", 4781 pmbox->u.mb.mbxCommand); 4782 dump_stack(); 4783 goto out_not_finished; 4784 } 4785 } 4786 4787 /* If the PCI channel is in offline state, do not post mbox. */ 4788 if (unlikely(pci_channel_offline(phba->pcidev))) { 4789 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4790 goto out_not_finished; 4791 } 4792 4793 /* If HBA has a deferred error attention, fail the iocb. */ 4794 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 4795 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4796 goto out_not_finished; 4797 } 4798 4799 psli = &phba->sli; 4800 4801 mb = &pmbox->u.mb; 4802 status = MBX_SUCCESS; 4803 4804 if (phba->link_state == LPFC_HBA_ERROR) { 4805 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4806 4807 /* Mbox command <mbxCommand> cannot issue */ 4808 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4809 "(%d):0311 Mailbox command x%x cannot " 4810 "issue Data: x%x x%x\n", 4811 pmbox->vport ? 
pmbox->vport->vpi : 0, 4812 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 4813 goto out_not_finished; 4814 } 4815 4816 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && 4817 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { 4818 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4819 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4820 "(%d):2528 Mailbox command x%x cannot " 4821 "issue Data: x%x x%x\n", 4822 pmbox->vport ? pmbox->vport->vpi : 0, 4823 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 4824 goto out_not_finished; 4825 } 4826 4827 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 4828 /* Polling for a mbox command when another one is already active 4829 * is not allowed in SLI. Also, the driver must have established 4830 * SLI2 mode to queue and process multiple mbox commands. 4831 */ 4832 4833 if (flag & MBX_POLL) { 4834 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4835 4836 /* Mbox command <mbxCommand> cannot issue */ 4837 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4838 "(%d):2529 Mailbox command x%x " 4839 "cannot issue Data: x%x x%x\n", 4840 pmbox->vport ? pmbox->vport->vpi : 0, 4841 pmbox->u.mb.mbxCommand, 4842 psli->sli_flag, flag); 4843 goto out_not_finished; 4844 } 4845 4846 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { 4847 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4848 /* Mbox command <mbxCommand> cannot issue */ 4849 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4850 "(%d):2530 Mailbox command x%x " 4851 "cannot issue Data: x%x x%x\n", 4852 pmbox->vport ? pmbox->vport->vpi : 0, 4853 pmbox->u.mb.mbxCommand, 4854 psli->sli_flag, flag); 4855 goto out_not_finished; 4856 } 4857 4858 /* Another mailbox command is still being processed, queue this 4859 * command to be processed later. 4860 */ 4861 lpfc_mbox_put(phba, pmbox); 4862 4863 /* Mbox cmd issue - BUSY */ 4864 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 4865 "(%d):0308 Mbox cmd issue - BUSY Data: " 4866 "x%x x%x x%x x%x\n", 4867 pmbox->vport ? pmbox->vport->vpi : 0xffffff, 4868 mb->mbxCommand, phba->pport->port_state, 4869 psli->sli_flag, flag); 4870 4871 psli->slistat.mbox_busy++; 4872 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4873 4874 if (pmbox->vport) { 4875 lpfc_debugfs_disc_trc(pmbox->vport, 4876 LPFC_DISC_TRC_MBOX_VPORT, 4877 "MBOX Bsy vport: cmd:x%x mb:x%x x%x", 4878 (uint32_t)mb->mbxCommand, 4879 mb->un.varWords[0], mb->un.varWords[1]); 4880 } 4881 else { 4882 lpfc_debugfs_disc_trc(phba->pport, 4883 LPFC_DISC_TRC_MBOX, 4884 "MBOX Bsy: cmd:x%x mb:x%x x%x", 4885 (uint32_t)mb->mbxCommand, 4886 mb->un.varWords[0], mb->un.varWords[1]); 4887 } 4888 4889 return MBX_BUSY; 4890 } 4891 4892 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 4893 4894 /* If we are not polling, we MUST be in SLI2 mode */ 4895 if (flag != MBX_POLL) { 4896 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && 4897 (mb->mbxCommand != MBX_KILL_BOARD)) { 4898 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4899 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4900 /* Mbox command <mbxCommand> cannot issue */ 4901 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4902 "(%d):2531 Mailbox command x%x " 4903 "cannot issue Data: x%x x%x\n", 4904 pmbox->vport ? 
pmbox->vport->vpi : 0, 4905 pmbox->u.mb.mbxCommand, 4906 psli->sli_flag, flag); 4907 goto out_not_finished; 4908 } 4909 /* timeout active mbox command */ 4910 mod_timer(&psli->mbox_tmo, (jiffies + 4911 (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand)))); 4912 } 4913 4914 /* Mailbox cmd <cmd> issue */ 4915 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 4916 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " 4917 "x%x\n", 4918 pmbox->vport ? pmbox->vport->vpi : 0, 4919 mb->mbxCommand, phba->pport->port_state, 4920 psli->sli_flag, flag); 4921 4922 if (mb->mbxCommand != MBX_HEARTBEAT) { 4923 if (pmbox->vport) { 4924 lpfc_debugfs_disc_trc(pmbox->vport, 4925 LPFC_DISC_TRC_MBOX_VPORT, 4926 "MBOX Send vport: cmd:x%x mb:x%x x%x", 4927 (uint32_t)mb->mbxCommand, 4928 mb->un.varWords[0], mb->un.varWords[1]); 4929 } 4930 else { 4931 lpfc_debugfs_disc_trc(phba->pport, 4932 LPFC_DISC_TRC_MBOX, 4933 "MBOX Send: cmd:x%x mb:x%x x%x", 4934 (uint32_t)mb->mbxCommand, 4935 mb->un.varWords[0], mb->un.varWords[1]); 4936 } 4937 } 4938 4939 psli->slistat.mbox_cmd++; 4940 evtctr = psli->slistat.mbox_event; 4941 4942 /* next set own bit for the adapter and copy over command word */ 4943 mb->mbxOwner = OWN_CHIP; 4944 4945 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 4946 /* Populate mbox extension offset word. */ 4947 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { 4948 *(((uint32_t *)mb) + pmbox->mbox_offset_word) 4949 = (uint8_t *)phba->mbox_ext 4950 - (uint8_t *)phba->mbox; 4951 } 4952 4953 /* Copy the mailbox extension data */ 4954 if (pmbox->in_ext_byte_len && pmbox->context2) { 4955 lpfc_sli_pcimem_bcopy(pmbox->context2, 4956 (uint8_t *)phba->mbox_ext, 4957 pmbox->in_ext_byte_len); 4958 } 4959 /* Copy command data to host SLIM area */ 4960 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); 4961 } else { 4962 /* Populate mbox extension offset word. 
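 * In this non-SLI2-active path the extension area lives in adapter SLIM at
 * MAILBOX_HBA_EXT_OFFSET, so the offset word gets that fixed constant
 * rather than the host-mailbox-relative offset used in the branch above.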
*/ 4963 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) 4964 *(((uint32_t *)mb) + pmbox->mbox_offset_word) 4965 = MAILBOX_HBA_EXT_OFFSET; 4966 4967 /* Copy the mailbox extension data */ 4968 if (pmbox->in_ext_byte_len && pmbox->context2) { 4969 lpfc_memcpy_to_slim(phba->MBslimaddr + 4970 MAILBOX_HBA_EXT_OFFSET, 4971 pmbox->context2, pmbox->in_ext_byte_len); 4972 4973 } 4974 if (mb->mbxCommand == MBX_CONFIG_PORT) { 4975 /* copy command data into host mbox for cmpl */ 4976 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); 4977 } 4978 4979 /* First copy mbox command data to HBA SLIM, skip past first 4980 word */ 4981 to_slim = phba->MBslimaddr + sizeof (uint32_t); 4982 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0], 4983 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 4984 4985 /* Next copy over first word, with mbxOwner set */ 4986 ldata = *((uint32_t *)mb); 4987 to_slim = phba->MBslimaddr; 4988 writel(ldata, to_slim); 4989 readl(to_slim); /* flush */ 4990 4991 if (mb->mbxCommand == MBX_CONFIG_PORT) { 4992 /* switch over to host mailbox */ 4993 psli->sli_flag |= LPFC_SLI_ACTIVE; 4994 } 4995 } 4996 4997 wmb(); 4998 4999 switch (flag) { 5000 case MBX_NOWAIT: 5001 /* Set up reference to mailbox command */ 5002 psli->mbox_active = pmbox; 5003 /* Interrupt board to do it */ 5004 writel(CA_MBATT, phba->CAregaddr); 5005 readl(phba->CAregaddr); /* flush */ 5006 /* Don't wait for it to finish, just return */ 5007 break; 5008 5009 case MBX_POLL: 5010 /* Set up null reference to mailbox command */ 5011 psli->mbox_active = NULL; 5012 /* Interrupt board to do it */ 5013 writel(CA_MBATT, phba->CAregaddr); 5014 readl(phba->CAregaddr); /* flush */ 5015 5016 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 5017 /* First read mbox status word */ 5018 word0 = *((uint32_t *)phba->mbox); 5019 word0 = le32_to_cpu(word0); 5020 } else { 5021 /* First read mbox status word */ 5022 word0 = readl(phba->MBslimaddr); 5023 } 5024 5025 /* Read the HBA Host Attention Register */ 5026 ha_copy = readl(phba->HAregaddr); 5027 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 5028 mb->mbxCommand) * 5029 1000) + jiffies; 5030 i = 0; 5031 /* Wait for command to complete */ 5032 while (((word0 & OWN_CHIP) == OWN_CHIP) || 5033 (!(ha_copy & HA_MBATT) && 5034 (phba->link_state > LPFC_WARM_START))) { 5035 if (time_after(jiffies, timeout)) { 5036 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 5037 spin_unlock_irqrestore(&phba->hbalock, 5038 drvr_flag); 5039 goto out_not_finished; 5040 } 5041 5042 /* Check if we took a mbox interrupt while we were 5043 polling */ 5044 if (((word0 & OWN_CHIP) != OWN_CHIP) 5045 && (evtctr != psli->slistat.mbox_event)) 5046 break; 5047 5048 if (i++ > 10) { 5049 spin_unlock_irqrestore(&phba->hbalock, 5050 drvr_flag); 5051 msleep(1); 5052 spin_lock_irqsave(&phba->hbalock, drvr_flag); 5053 } 5054 5055 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 5056 /* First copy command data */ 5057 word0 = *((uint32_t *)phba->mbox); 5058 word0 = le32_to_cpu(word0); 5059 if (mb->mbxCommand == MBX_CONFIG_PORT) { 5060 MAILBOX_t *slimmb; 5061 uint32_t slimword0; 5062 /* Check real SLIM for any errors */ 5063 slimword0 = readl(phba->MBslimaddr); 5064 slimmb = (MAILBOX_t *) & slimword0; 5065 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 5066 && slimmb->mbxStatus) { 5067 psli->sli_flag &= 5068 ~LPFC_SLI_ACTIVE; 5069 word0 = slimword0; 5070 } 5071 } 5072 } else { 5073 /* First copy command data */ 5074 word0 = readl(phba->MBslimaddr); 5075 } 5076 /* Read the HBA Host Attention Register */ 5077 ha_copy = readl(phba->HAregaddr); 5078 } 5079 5080 if 
(psli->sli_flag & LPFC_SLI_ACTIVE) { 5081 /* copy results back to user */ 5082 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE); 5083 /* Copy the mailbox extension data */ 5084 if (pmbox->out_ext_byte_len && pmbox->context2) { 5085 lpfc_sli_pcimem_bcopy(phba->mbox_ext, 5086 pmbox->context2, 5087 pmbox->out_ext_byte_len); 5088 } 5089 } else { 5090 /* First copy command data */ 5091 lpfc_memcpy_from_slim(mb, phba->MBslimaddr, 5092 MAILBOX_CMD_SIZE); 5093 /* Copy the mailbox extension data */ 5094 if (pmbox->out_ext_byte_len && pmbox->context2) { 5095 lpfc_memcpy_from_slim(pmbox->context2, 5096 phba->MBslimaddr + 5097 MAILBOX_HBA_EXT_OFFSET, 5098 pmbox->out_ext_byte_len); 5099 } 5100 } 5101 5102 writel(HA_MBATT, phba->HAregaddr); 5103 readl(phba->HAregaddr); /* flush */ 5104 5105 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 5106 status = mb->mbxStatus; 5107 } 5108 5109 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 5110 return status; 5111 5112 out_not_finished: 5113 if (processing_queue) { 5114 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; 5115 lpfc_mbox_cmpl_put(phba, pmbox); 5116 } 5117 return MBX_NOT_FINISHED; 5118 } 5119 5120 /** 5121 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command 5122 * @phba: Pointer to HBA context object. 5123 * 5124 * The function blocks the posting of SLI4 asynchronous mailbox commands from 5125 * the driver internal pending mailbox queue. It will then try to wait out any 5126 * outstanding mailbox command before returning. 5127 * 5128 * Returns: 5129 * 0 - the outstanding mailbox command completed; 1 - the wait for 5130 * the outstanding mailbox command timed out. 5131 **/ 5132 static int 5133 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) 5134 { 5135 struct lpfc_sli *psli = &phba->sli; 5136 uint8_t actcmd = MBX_HEARTBEAT; 5137 int rc = 0; 5138 unsigned long timeout; 5139 5140 /* Mark the asynchronous mailbox command posting as blocked */ 5141 spin_lock_irq(&phba->hbalock); 5142 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 5143 if (phba->sli.mbox_active) 5144 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 5145 spin_unlock_irq(&phba->hbalock); 5146 /* Determine how long we might wait for the active mailbox 5147 * command to be gracefully completed by firmware. 5148 */ 5149 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) + 5150 jiffies; 5151 /* Wait for the outstanding mailbox command to complete */ 5152 while (phba->sli.mbox_active) { 5153 /* Check active mailbox complete status every 2ms */ 5154 msleep(2); 5155 if (time_after(jiffies, timeout)) { 5156 /* Timeout: mark the outstanding cmd as not complete */ 5157 rc = 1; 5158 break; 5159 } 5160 } 5161 5162 /* Cannot cleanly block async mailbox commands; undo the block and fail */ 5163 if (rc) { 5164 spin_lock_irq(&phba->hbalock); 5165 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 5166 spin_unlock_irq(&phba->hbalock); 5167 } 5168 return rc; 5169 } 5170 5171 /** 5172 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command 5173 * @phba: Pointer to HBA context object. 5174 * 5175 * The function unblocks and resumes posting of SLI4 asynchronous mailbox 5176 * commands from the driver internal pending mailbox queue. Any synchronous 5177 * mailbox command that was outstanding when posting was blocked has, by this 5178 * point, either completed or timed out, so the block flag is simply cleared 5179 * and the worker thread is woken to post the next pending asynchronous 5180 * mailbox command.
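 *
 * Typical pairing, as used by the synchronous SLI4 mailbox path later in
 * this file (sketch only):
 *
 *	rc = lpfc_sli4_async_mbox_block(phba);
 *	if (!rc) {
 *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *		lpfc_sli4_async_mbox_unblock(phba);
 *	}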
5181 **/ 5182 static void 5183 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba) 5184 { 5185 struct lpfc_sli *psli = &phba->sli; 5186 5187 spin_lock_irq(&phba->hbalock); 5188 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 5189 /* Asynchronous mailbox posting is not blocked, do nothing */ 5190 spin_unlock_irq(&phba->hbalock); 5191 return; 5192 } 5193 5194 /* Outstanding synchronous mailbox command is guaranteed to be done, 5195 * successful or timeout, after timing-out the outstanding mailbox 5196 * command shall always be removed, so just unblock posting async 5197 * mailbox command and resume 5198 */ 5199 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 5200 spin_unlock_irq(&phba->hbalock); 5201 5202 /* wake up worker thread to post asynchronlous mailbox command */ 5203 lpfc_worker_wake_up(phba); 5204 } 5205 5206 /** 5207 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox 5208 * @phba: Pointer to HBA context object. 5209 * @mboxq: Pointer to mailbox object. 5210 * 5211 * The function posts a mailbox to the port. The mailbox is expected 5212 * to be comletely filled in and ready for the port to operate on it. 5213 * This routine executes a synchronous completion operation on the 5214 * mailbox by polling for its completion. 5215 * 5216 * The caller must not be holding any locks when calling this routine. 5217 * 5218 * Returns: 5219 * MBX_SUCCESS - mailbox posted successfully 5220 * Any of the MBX error values. 5221 **/ 5222 static int 5223 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 5224 { 5225 int rc = MBX_SUCCESS; 5226 unsigned long iflag; 5227 uint32_t db_ready; 5228 uint32_t mcqe_status; 5229 uint32_t mbx_cmnd; 5230 unsigned long timeout; 5231 struct lpfc_sli *psli = &phba->sli; 5232 struct lpfc_mqe *mb = &mboxq->u.mqe; 5233 struct lpfc_bmbx_create *mbox_rgn; 5234 struct dma_address *dma_address; 5235 struct lpfc_register bmbx_reg; 5236 5237 /* 5238 * Only one mailbox can be active to the bootstrap mailbox region 5239 * at a time and there is no queueing provided. 5240 */ 5241 spin_lock_irqsave(&phba->hbalock, iflag); 5242 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 5243 spin_unlock_irqrestore(&phba->hbalock, iflag); 5244 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5245 "(%d):2532 Mailbox command x%x (x%x) " 5246 "cannot issue Data: x%x x%x\n", 5247 mboxq->vport ? mboxq->vport->vpi : 0, 5248 mboxq->u.mb.mbxCommand, 5249 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5250 psli->sli_flag, MBX_POLL); 5251 return MBXERR_ERROR; 5252 } 5253 /* The server grabs the token and owns it until release */ 5254 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 5255 phba->sli.mbox_active = mboxq; 5256 spin_unlock_irqrestore(&phba->hbalock, iflag); 5257 5258 /* 5259 * Initialize the bootstrap memory region to avoid stale data areas 5260 * in the mailbox post. Then copy the caller's mailbox contents to 5261 * the bmbx mailbox region. 5262 */ 5263 mbx_cmnd = bf_get(lpfc_mqe_command, mb); 5264 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create)); 5265 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt, 5266 sizeof(struct lpfc_mqe)); 5267 5268 /* Post the high mailbox dma address to the port and wait for ready. 
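 * The bootstrap mailbox doorbell is written in two steps: the high half of
 * the bmbx DMA address first, then the low half, and after each write the
 * lpfc_bmbx_rdy bit is polled (with a mailbox-timeout bound) before moving
 * on.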
*/ 5269 dma_address = &phba->sli4_hba.bmbx.dma_address; 5270 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); 5271 5272 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd) 5273 * 1000) + jiffies; 5274 do { 5275 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 5276 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); 5277 if (!db_ready) 5278 msleep(2); 5279 5280 if (time_after(jiffies, timeout)) { 5281 rc = MBXERR_ERROR; 5282 goto exit; 5283 } 5284 } while (!db_ready); 5285 5286 /* Post the low mailbox dma address to the port. */ 5287 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); 5288 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd) 5289 * 1000) + jiffies; 5290 do { 5291 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 5292 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); 5293 if (!db_ready) 5294 msleep(2); 5295 5296 if (time_after(jiffies, timeout)) { 5297 rc = MBXERR_ERROR; 5298 goto exit; 5299 } 5300 } while (!db_ready); 5301 5302 /* 5303 * Read the CQ to ensure the mailbox has completed. 5304 * If so, update the mailbox status so that the upper layers 5305 * can complete the request normally. 5306 */ 5307 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, 5308 sizeof(struct lpfc_mqe)); 5309 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; 5310 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, 5311 sizeof(struct lpfc_mcqe)); 5312 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); 5313 5314 /* Prefix the mailbox status with range x4000 to note SLI4 status. */ 5315 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 5316 bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status); 5317 rc = MBXERR_ERROR; 5318 } else 5319 lpfc_sli4_swap_str(phba, mboxq); 5320 5321 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5322 "(%d):0356 Mailbox cmd x%x (x%x) Status x%x " 5323 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" 5324 " x%x x%x CQ: x%x x%x x%x x%x\n", 5325 mboxq->vport ? mboxq->vport->vpi : 0, 5326 mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq), 5327 bf_get(lpfc_mqe_status, mb), 5328 mb->un.mb_words[0], mb->un.mb_words[1], 5329 mb->un.mb_words[2], mb->un.mb_words[3], 5330 mb->un.mb_words[4], mb->un.mb_words[5], 5331 mb->un.mb_words[6], mb->un.mb_words[7], 5332 mb->un.mb_words[8], mb->un.mb_words[9], 5333 mb->un.mb_words[10], mb->un.mb_words[11], 5334 mb->un.mb_words[12], mboxq->mcqe.word0, 5335 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 5336 mboxq->mcqe.trailer); 5337 exit: 5338 /* We are holding the token, no needed for lock when release */ 5339 spin_lock_irqsave(&phba->hbalock, iflag); 5340 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 5341 phba->sli.mbox_active = NULL; 5342 spin_unlock_irqrestore(&phba->hbalock, iflag); 5343 return rc; 5344 } 5345 5346 /** 5347 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware 5348 * @phba: Pointer to HBA context object. 5349 * @pmbox: Pointer to mailbox object. 5350 * @flag: Flag indicating how the mailbox need to be processed. 5351 * 5352 * This function is called by discovery code and HBA management code to submit 5353 * a mailbox command to firmware with SLI-4 interface spec. 5354 * 5355 * Return codes the caller owns the mailbox command after the return of the 5356 * function. 
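 *
 * Because the caller owns the mailbox command once this routine returns, a
 * polled caller typically frees the command itself after consuming the
 * results. A sketch only; the mempool_free()/mbox_mem_pool pairing is the
 * usual lpfc convention and is assumed here rather than mandated:
 *
 *   rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *   (consume mboxq->u.mqe / returned status as needed)
 *   mempool_free(mboxq, phba->mbox_mem_pool);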
5357 **/ 5358 static int 5359 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 5360 uint32_t flag) 5361 { 5362 struct lpfc_sli *psli = &phba->sli; 5363 unsigned long iflags; 5364 int rc; 5365 5366 rc = lpfc_mbox_dev_check(phba); 5367 if (unlikely(rc)) { 5368 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5369 "(%d):2544 Mailbox command x%x (x%x) " 5370 "cannot issue Data: x%x x%x\n", 5371 mboxq->vport ? mboxq->vport->vpi : 0, 5372 mboxq->u.mb.mbxCommand, 5373 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5374 psli->sli_flag, flag); 5375 goto out_not_finished; 5376 } 5377 5378 /* Detect polling mode and jump to a handler */ 5379 if (!phba->sli4_hba.intr_enable) { 5380 if (flag == MBX_POLL) 5381 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 5382 else 5383 rc = -EIO; 5384 if (rc != MBX_SUCCESS) 5385 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5386 "(%d):2541 Mailbox command x%x " 5387 "(x%x) cannot issue Data: x%x x%x\n", 5388 mboxq->vport ? mboxq->vport->vpi : 0, 5389 mboxq->u.mb.mbxCommand, 5390 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5391 psli->sli_flag, flag); 5392 return rc; 5393 } else if (flag == MBX_POLL) { 5394 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 5395 "(%d):2542 Try to issue mailbox command " 5396 "x%x (x%x) synchronously ahead of async" 5397 "mailbox command queue: x%x x%x\n", 5398 mboxq->vport ? mboxq->vport->vpi : 0, 5399 mboxq->u.mb.mbxCommand, 5400 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5401 psli->sli_flag, flag); 5402 /* Try to block the asynchronous mailbox posting */ 5403 rc = lpfc_sli4_async_mbox_block(phba); 5404 if (!rc) { 5405 /* Successfully blocked, now issue sync mbox cmd */ 5406 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 5407 if (rc != MBX_SUCCESS) 5408 lpfc_printf_log(phba, KERN_ERR, 5409 LOG_MBOX | LOG_SLI, 5410 "(%d):2597 Mailbox command " 5411 "x%x (x%x) cannot issue " 5412 "Data: x%x x%x\n", 5413 mboxq->vport ? 5414 mboxq->vport->vpi : 0, 5415 mboxq->u.mb.mbxCommand, 5416 lpfc_sli4_mbox_opcode_get(phba, 5417 mboxq), 5418 psli->sli_flag, flag); 5419 /* Unblock the async mailbox posting afterward */ 5420 lpfc_sli4_async_mbox_unblock(phba); 5421 } 5422 return rc; 5423 } 5424 5425 /* Now, interrupt mode asynchrous mailbox command */ 5426 rc = lpfc_mbox_cmd_check(phba, mboxq); 5427 if (rc) { 5428 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5429 "(%d):2543 Mailbox command x%x (x%x) " 5430 "cannot issue Data: x%x x%x\n", 5431 mboxq->vport ? mboxq->vport->vpi : 0, 5432 mboxq->u.mb.mbxCommand, 5433 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5434 psli->sli_flag, flag); 5435 goto out_not_finished; 5436 } 5437 5438 /* Put the mailbox command to the driver internal FIFO */ 5439 psli->slistat.mbox_busy++; 5440 spin_lock_irqsave(&phba->hbalock, iflags); 5441 lpfc_mbox_put(phba, mboxq); 5442 spin_unlock_irqrestore(&phba->hbalock, iflags); 5443 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5444 "(%d):0354 Mbox cmd issue - Enqueue Data: " 5445 "x%x (x%x) x%x x%x x%x\n", 5446 mboxq->vport ? mboxq->vport->vpi : 0xffffff, 5447 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 5448 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5449 phba->pport->port_state, 5450 psli->sli_flag, MBX_NOWAIT); 5451 /* Wake up worker thread to transport mailbox command from head */ 5452 lpfc_worker_wake_up(phba); 5453 5454 return MBX_BUSY; 5455 5456 out_not_finished: 5457 return MBX_NOT_FINISHED; 5458 } 5459 5460 /** 5461 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device 5462 * @phba: Pointer to HBA context object. 
5463 * 5464 * This function is called by worker thread to send a mailbox command to 5465 * SLI4 HBA firmware. 5466 * 5467 **/ 5468 int 5469 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) 5470 { 5471 struct lpfc_sli *psli = &phba->sli; 5472 LPFC_MBOXQ_t *mboxq; 5473 int rc = MBX_SUCCESS; 5474 unsigned long iflags; 5475 struct lpfc_mqe *mqe; 5476 uint32_t mbx_cmnd; 5477 5478 /* Check interrupt mode before post async mailbox command */ 5479 if (unlikely(!phba->sli4_hba.intr_enable)) 5480 return MBX_NOT_FINISHED; 5481 5482 /* Check for mailbox command service token */ 5483 spin_lock_irqsave(&phba->hbalock, iflags); 5484 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 5485 spin_unlock_irqrestore(&phba->hbalock, iflags); 5486 return MBX_NOT_FINISHED; 5487 } 5488 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 5489 spin_unlock_irqrestore(&phba->hbalock, iflags); 5490 return MBX_NOT_FINISHED; 5491 } 5492 if (unlikely(phba->sli.mbox_active)) { 5493 spin_unlock_irqrestore(&phba->hbalock, iflags); 5494 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5495 "0384 There is pending active mailbox cmd\n"); 5496 return MBX_NOT_FINISHED; 5497 } 5498 /* Take the mailbox command service token */ 5499 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 5500 5501 /* Get the next mailbox command from head of queue */ 5502 mboxq = lpfc_mbox_get(phba); 5503 5504 /* If no more mailbox command waiting for post, we're done */ 5505 if (!mboxq) { 5506 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 5507 spin_unlock_irqrestore(&phba->hbalock, iflags); 5508 return MBX_SUCCESS; 5509 } 5510 phba->sli.mbox_active = mboxq; 5511 spin_unlock_irqrestore(&phba->hbalock, iflags); 5512 5513 /* Check device readiness for posting mailbox command */ 5514 rc = lpfc_mbox_dev_check(phba); 5515 if (unlikely(rc)) 5516 /* Driver clean routine will clean up pending mailbox */ 5517 goto out_not_finished; 5518 5519 /* Prepare the mbox command to be posted */ 5520 mqe = &mboxq->u.mqe; 5521 mbx_cmnd = bf_get(lpfc_mqe_command, mqe); 5522 5523 /* Start timer for the mbox_tmo and log some mailbox post messages */ 5524 mod_timer(&psli->mbox_tmo, (jiffies + 5525 (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd)))); 5526 5527 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5528 "(%d):0355 Mailbox cmd x%x (x%x) issue Data: " 5529 "x%x x%x\n", 5530 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 5531 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5532 phba->pport->port_state, psli->sli_flag); 5533 5534 if (mbx_cmnd != MBX_HEARTBEAT) { 5535 if (mboxq->vport) { 5536 lpfc_debugfs_disc_trc(mboxq->vport, 5537 LPFC_DISC_TRC_MBOX_VPORT, 5538 "MBOX Send vport: cmd:x%x mb:x%x x%x", 5539 mbx_cmnd, mqe->un.mb_words[0], 5540 mqe->un.mb_words[1]); 5541 } else { 5542 lpfc_debugfs_disc_trc(phba->pport, 5543 LPFC_DISC_TRC_MBOX, 5544 "MBOX Send: cmd:x%x mb:x%x x%x", 5545 mbx_cmnd, mqe->un.mb_words[0], 5546 mqe->un.mb_words[1]); 5547 } 5548 } 5549 psli->slistat.mbox_cmd++; 5550 5551 /* Post the mailbox command to the port */ 5552 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); 5553 if (rc != MBX_SUCCESS) { 5554 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5555 "(%d):2533 Mailbox command x%x (x%x) " 5556 "cannot issue Data: x%x x%x\n", 5557 mboxq->vport ? 
mboxq->vport->vpi : 0, 5558 mboxq->u.mb.mbxCommand, 5559 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5560 psli->sli_flag, MBX_NOWAIT); 5561 goto out_not_finished; 5562 } 5563 5564 return rc; 5565 5566 out_not_finished: 5567 spin_lock_irqsave(&phba->hbalock, iflags); 5568 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 5569 __lpfc_mbox_cmpl_put(phba, mboxq); 5570 /* Release the token */ 5571 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 5572 phba->sli.mbox_active = NULL; 5573 spin_unlock_irqrestore(&phba->hbalock, iflags); 5574 5575 return MBX_NOT_FINISHED; 5576 } 5577 5578 /** 5579 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command 5580 * @phba: Pointer to HBA context object. 5581 * @pmbox: Pointer to mailbox object. 5582 * @flag: Flag indicating how the mailbox need to be processed. 5583 * 5584 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from 5585 * the API jump table function pointer from the lpfc_hba struct. 5586 * 5587 * Return codes the caller owns the mailbox command after the return of the 5588 * function. 5589 **/ 5590 int 5591 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 5592 { 5593 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); 5594 } 5595 5596 /** 5597 * lpfc_mbox_api_table_setup - Set up mbox api fucntion jump table 5598 * @phba: The hba struct for which this call is being executed. 5599 * @dev_grp: The HBA PCI-Device group number. 5600 * 5601 * This routine sets up the mbox interface API function jump table in @phba 5602 * struct. 5603 * Returns: 0 - success, -ENODEV - failure. 5604 **/ 5605 int 5606 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 5607 { 5608 5609 switch (dev_grp) { 5610 case LPFC_PCI_DEV_LP: 5611 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; 5612 phba->lpfc_sli_handle_slow_ring_event = 5613 lpfc_sli_handle_slow_ring_event_s3; 5614 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; 5615 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; 5616 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; 5617 break; 5618 case LPFC_PCI_DEV_OC: 5619 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; 5620 phba->lpfc_sli_handle_slow_ring_event = 5621 lpfc_sli_handle_slow_ring_event_s4; 5622 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; 5623 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; 5624 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; 5625 break; 5626 default: 5627 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5628 "1420 Invalid HBA PCI-device group: 0x%x\n", 5629 dev_grp); 5630 return -ENODEV; 5631 break; 5632 } 5633 return 0; 5634 } 5635 5636 /** 5637 * __lpfc_sli_ringtx_put - Add an iocb to the txq 5638 * @phba: Pointer to HBA context object. 5639 * @pring: Pointer to driver SLI ring object. 5640 * @piocb: Pointer to address of newly added command iocb. 5641 * 5642 * This function is called with hbalock held to add a command 5643 * iocb to the txq when SLI layer cannot submit the command iocb 5644 * to the ring. 5645 **/ 5646 void 5647 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 5648 struct lpfc_iocbq *piocb) 5649 { 5650 /* Insert the caller's iocb in the txq tail for later processing. */ 5651 list_add_tail(&piocb->list, &pring->txq); 5652 pring->txq_cnt++; 5653 } 5654 5655 /** 5656 * lpfc_sli_next_iocb - Get the next iocb in the txq 5657 * @phba: Pointer to HBA context object. 5658 * @pring: Pointer to driver SLI ring object. 5659 * @piocb: Pointer to address of newly added command iocb. 
5660 * 5661 * This function is called with hbalock held before a new 5662 * iocb is submitted to the firmware. This function checks 5663 * txq to flush the iocbs in txq to Firmware before 5664 * submitting new iocbs to the Firmware. 5665 * If there are iocbs in the txq which need to be submitted 5666 * to firmware, lpfc_sli_next_iocb returns the first element 5667 * of the txq after dequeuing it from txq. 5668 * If there is no iocb in the txq then the function will return 5669 * *piocb and *piocb is set to NULL. Caller needs to check 5670 * *piocb to find if there are more commands in the txq. 5671 **/ 5672 static struct lpfc_iocbq * 5673 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 5674 struct lpfc_iocbq **piocb) 5675 { 5676 struct lpfc_iocbq * nextiocb; 5677 5678 nextiocb = lpfc_sli_ringtx_get(phba, pring); 5679 if (!nextiocb) { 5680 nextiocb = *piocb; 5681 *piocb = NULL; 5682 } 5683 5684 return nextiocb; 5685 } 5686 5687 /** 5688 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb 5689 * @phba: Pointer to HBA context object. 5690 * @ring_number: SLI ring number to issue iocb on. 5691 * @piocb: Pointer to command iocb. 5692 * @flag: Flag indicating if this command can be put into txq. 5693 * 5694 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue 5695 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is 5696 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT 5697 * flag is turned on, the function returns IOCB_ERROR. When the link is down, 5698 * this function allows only iocbs for posting buffers. This function finds 5699 * next available slot in the command ring and posts the command to the 5700 * available slot and writes the port attention register to request HBA start 5701 * processing new iocb. If there is no slot available in the ring and 5702 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise 5703 * the function returns IOCB_BUSY. 5704 * 5705 * This function is called with hbalock held. The function will return success 5706 * after it successfully submit the iocb to firmware or after adding to the 5707 * txq. 5708 **/ 5709 static int 5710 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, 5711 struct lpfc_iocbq *piocb, uint32_t flag) 5712 { 5713 struct lpfc_iocbq *nextiocb; 5714 IOCB_t *iocb; 5715 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; 5716 5717 if (piocb->iocb_cmpl && (!piocb->vport) && 5718 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 5719 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 5720 lpfc_printf_log(phba, KERN_ERR, 5721 LOG_SLI | LOG_VPORT, 5722 "1807 IOCB x%x failed. No vport\n", 5723 piocb->iocb.ulpCommand); 5724 dump_stack(); 5725 return IOCB_ERROR; 5726 } 5727 5728 5729 /* If the PCI channel is in offline state, do not post iocbs. */ 5730 if (unlikely(pci_channel_offline(phba->pcidev))) 5731 return IOCB_ERROR; 5732 5733 /* If HBA has a deferred error attention, fail the iocb. */ 5734 if (unlikely(phba->hba_flag & DEFER_ERATT)) 5735 return IOCB_ERROR; 5736 5737 /* 5738 * We should never get an IOCB if we are in a < LINK_DOWN state 5739 */ 5740 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 5741 return IOCB_ERROR; 5742 5743 /* 5744 * Check to see if we are blocking IOCB processing because of a 5745 * outstanding event. 
5746 */ 5747 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) 5748 goto iocb_busy; 5749 5750 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { 5751 /* 5752 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF 5753 * can be issued if the link is not up. 5754 */ 5755 switch (piocb->iocb.ulpCommand) { 5756 case CMD_GEN_REQUEST64_CR: 5757 case CMD_GEN_REQUEST64_CX: 5758 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || 5759 (piocb->iocb.un.genreq64.w5.hcsw.Rctl != 5760 FC_RCTL_DD_UNSOL_CMD) || 5761 (piocb->iocb.un.genreq64.w5.hcsw.Type != 5762 MENLO_TRANSPORT_TYPE)) 5763 5764 goto iocb_busy; 5765 break; 5766 case CMD_QUE_RING_BUF_CN: 5767 case CMD_QUE_RING_BUF64_CN: 5768 /* 5769 * For IOCBs, like QUE_RING_BUF, that have no rsp ring 5770 * completion, iocb_cmpl MUST be 0. 5771 */ 5772 if (piocb->iocb_cmpl) 5773 piocb->iocb_cmpl = NULL; 5774 /*FALLTHROUGH*/ 5775 case CMD_CREATE_XRI_CR: 5776 case CMD_CLOSE_XRI_CN: 5777 case CMD_CLOSE_XRI_CX: 5778 break; 5779 default: 5780 goto iocb_busy; 5781 } 5782 5783 /* 5784 * For FCP commands, we must be in a state where we can process link 5785 * attention events. 5786 */ 5787 } else if (unlikely(pring->ringno == phba->sli.fcp_ring && 5788 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { 5789 goto iocb_busy; 5790 } 5791 5792 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 5793 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 5794 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 5795 5796 if (iocb) 5797 lpfc_sli_update_ring(phba, pring); 5798 else 5799 lpfc_sli_update_full_ring(phba, pring); 5800 5801 if (!piocb) 5802 return IOCB_SUCCESS; 5803 5804 goto out_busy; 5805 5806 iocb_busy: 5807 pring->stats.iocb_cmd_delay++; 5808 5809 out_busy: 5810 5811 if (!(flag & SLI_IOCB_RET_IOCB)) { 5812 __lpfc_sli_ringtx_put(phba, pring, piocb); 5813 return IOCB_SUCCESS; 5814 } 5815 5816 return IOCB_BUSY; 5817 } 5818 5819 /** 5820 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl. 5821 * @phba: Pointer to HBA context object. 5822 * @piocb: Pointer to command iocb. 5823 * @sglq: Pointer to the scatter gather queue object. 5824 * 5825 * This routine converts the bpl or bde that is in the IOCB 5826 * to a sgl list for the sli4 hardware. The physical address 5827 * of the bpl/bde is converted back to a virtual address. 5828 * If the IOCB contains a BPL then the list of BDE's is 5829 * converted to sli4_sge's. If the IOCB contains a single 5830 * BDE then it is converted to a single sli_sge. 5831 * The IOCB is still in cpu endianess so the contents of 5832 * the bpl can be used without byte swapping. 5833 * 5834 * Returns valid XRI = Success, NO_XRI = Failure. 5835 **/ 5836 static uint16_t 5837 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, 5838 struct lpfc_sglq *sglq) 5839 { 5840 uint16_t xritag = NO_XRI; 5841 struct ulp_bde64 *bpl = NULL; 5842 struct ulp_bde64 bde; 5843 struct sli4_sge *sgl = NULL; 5844 IOCB_t *icmd; 5845 int numBdes = 0; 5846 int i = 0; 5847 5848 if (!piocbq || !sglq) 5849 return xritag; 5850 5851 sgl = (struct sli4_sge *)sglq->sgl; 5852 icmd = &piocbq->iocb; 5853 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 5854 numBdes = icmd->un.genreq64.bdl.bdeSize / 5855 sizeof(struct ulp_bde64); 5856 /* The addrHigh and addrLow fields within the IOCB 5857 * have not been byteswapped yet so there is no 5858 * need to swap them back. 
5859 */ 5860 bpl = (struct ulp_bde64 *) 5861 ((struct lpfc_dmabuf *)piocbq->context3)->virt; 5862 5863 if (!bpl) 5864 return xritag; 5865 5866 for (i = 0; i < numBdes; i++) { 5867 /* Should already be byte swapped. */ 5868 sgl->addr_hi = bpl->addrHigh; 5869 sgl->addr_lo = bpl->addrLow; 5870 5871 if ((i+1) == numBdes) 5872 bf_set(lpfc_sli4_sge_last, sgl, 1); 5873 else 5874 bf_set(lpfc_sli4_sge_last, sgl, 0); 5875 sgl->word2 = cpu_to_le32(sgl->word2); 5876 /* swap the size field back to the cpu so we 5877 * can assign it to the sgl. 5878 */ 5879 bde.tus.w = le32_to_cpu(bpl->tus.w); 5880 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 5881 bpl++; 5882 sgl++; 5883 } 5884 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) { 5885 /* The addrHigh and addrLow fields of the BDE have not 5886 * been byteswapped yet so they need to be swapped 5887 * before putting them in the sgl. 5888 */ 5889 sgl->addr_hi = 5890 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); 5891 sgl->addr_lo = 5892 cpu_to_le32(icmd->un.genreq64.bdl.addrLow); 5893 bf_set(lpfc_sli4_sge_last, sgl, 1); 5894 sgl->word2 = cpu_to_le32(sgl->word2); 5895 sgl->sge_len = 5896 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize); 5897 } 5898 return sglq->sli4_xritag; 5899 } 5900 5901 /** 5902 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution 5903 * @phba: Pointer to HBA context object. 5904 * 5905 * This routine performs a round robin SCSI command to SLI4 FCP WQ index 5906 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock 5907 * held. 5908 * 5909 * Return: index into SLI4 fast-path FCP queue index. 5910 **/ 5911 static uint32_t 5912 lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba) 5913 { 5914 ++phba->fcp_qidx; 5915 if (phba->fcp_qidx >= phba->cfg_fcp_wq_count) 5916 phba->fcp_qidx = 0; 5917 5918 return phba->fcp_qidx; 5919 } 5920 5921 /** 5922 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry. 5923 * @phba: Pointer to HBA context object. 5924 * @piocb: Pointer to command iocb. 5925 * @wqe: Pointer to the work queue entry. 5926 * 5927 * This routine converts the iocb command to its Work Queue Entry 5928 * equivalent. The wqe pointer should not have any fields set when 5929 * this routine is called because it will memcpy over them. 5930 * This routine does not set the CQ_ID or the WQEC bits in the 5931 * wqe. 5932 * 5933 * Returns: 0 = Success, IOCB_ERROR = Failure. 
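 *
 * Illustrative call sequence (a condensed sketch of what
 * __lpfc_sli_issue_iocb_s4() below already does, not a separate API):
 *
 *   union lpfc_wqe wqe;
 *
 *   if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
 *           return IOCB_ERROR;
 *   if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
 *           return IOCB_ERROR;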
5934 **/ 5935 static int 5936 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, 5937 union lpfc_wqe *wqe) 5938 { 5939 uint32_t xmit_len = 0, total_len = 0; 5940 uint8_t ct = 0; 5941 uint32_t fip; 5942 uint32_t abort_tag; 5943 uint8_t command_type = ELS_COMMAND_NON_FIP; 5944 uint8_t cmnd; 5945 uint16_t xritag; 5946 struct ulp_bde64 *bpl = NULL; 5947 uint32_t els_id = ELS_ID_DEFAULT; 5948 int numBdes, i; 5949 struct ulp_bde64 bde; 5950 5951 fip = phba->hba_flag & HBA_FIP_SUPPORT; 5952 /* The fcp commands will set command type */ 5953 if (iocbq->iocb_flag & LPFC_IO_FCP) 5954 command_type = FCP_COMMAND; 5955 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)) 5956 command_type = ELS_COMMAND_FIP; 5957 else 5958 command_type = ELS_COMMAND_NON_FIP; 5959 5960 /* Some of the fields are in the right position already */ 5961 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 5962 abort_tag = (uint32_t) iocbq->iotag; 5963 xritag = iocbq->sli4_xritag; 5964 wqe->words[7] = 0; /* The ct field has moved so reset */ 5965 /* words0-2 bpl convert bde */ 5966 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 5967 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 5968 sizeof(struct ulp_bde64); 5969 bpl = (struct ulp_bde64 *) 5970 ((struct lpfc_dmabuf *)iocbq->context3)->virt; 5971 if (!bpl) 5972 return IOCB_ERROR; 5973 5974 /* Should already be byte swapped. */ 5975 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh); 5976 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow); 5977 /* swap the size field back to the cpu so we 5978 * can assign it to the sgl. 5979 */ 5980 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); 5981 xmit_len = wqe->generic.bde.tus.f.bdeSize; 5982 total_len = 0; 5983 for (i = 0; i < numBdes; i++) { 5984 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 5985 total_len += bde.tus.f.bdeSize; 5986 } 5987 } else 5988 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; 5989 5990 iocbq->iocb.ulpIoTag = iocbq->iotag; 5991 cmnd = iocbq->iocb.ulpCommand; 5992 5993 switch (iocbq->iocb.ulpCommand) { 5994 case CMD_ELS_REQUEST64_CR: 5995 if (!iocbq->iocb.ulpLe) { 5996 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5997 "2007 Only Limited Edition cmd Format" 5998 " supported 0x%x\n", 5999 iocbq->iocb.ulpCommand); 6000 return IOCB_ERROR; 6001 } 6002 wqe->els_req.payload_len = xmit_len; 6003 /* Els_reguest64 has a TMO */ 6004 bf_set(wqe_tmo, &wqe->els_req.wqe_com, 6005 iocbq->iocb.ulpTimeout); 6006 /* Need a VF for word 4 set the vf bit*/ 6007 bf_set(els_req64_vf, &wqe->els_req, 0); 6008 /* And a VFID for word 12 */ 6009 bf_set(els_req64_vfid, &wqe->els_req, 0); 6010 /* 6011 * Set ct field to 3, indicates that the context_tag field 6012 * contains the FCFI and remote N_Port_ID is 6013 * in word 5. 
6014 */ 6015 6016 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 6017 bf_set(lpfc_wqe_gen_context, &wqe->generic, 6018 iocbq->iocb.ulpContext); 6019 6020 bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct); 6021 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); 6022 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 6023 6024 if (command_type == ELS_COMMAND_FIP) { 6025 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 6026 >> LPFC_FIP_ELS_ID_SHIFT); 6027 } 6028 bf_set(lpfc_wqe_gen_els_id, &wqe->generic, els_id); 6029 6030 break; 6031 case CMD_XMIT_SEQUENCE64_CX: 6032 bf_set(lpfc_wqe_gen_context, &wqe->generic, 6033 iocbq->iocb.un.ulpWord[3]); 6034 wqe->generic.word3 = 0; 6035 bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext); 6036 /* The entire sequence is transmitted for this IOCB */ 6037 xmit_len = total_len; 6038 cmnd = CMD_XMIT_SEQUENCE64_CR; 6039 case CMD_XMIT_SEQUENCE64_CR: 6040 /* word3 iocb=io_tag32 wqe=payload_offset */ 6041 /* payload offset used for multilpe outstanding 6042 * sequences on the same exchange 6043 */ 6044 wqe->words[3] = 0; 6045 /* word4 relative_offset memcpy */ 6046 /* word5 r_ctl/df_ctl memcpy */ 6047 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); 6048 wqe->xmit_sequence.xmit_len = xmit_len; 6049 command_type = OTHER_COMMAND; 6050 break; 6051 case CMD_XMIT_BCAST64_CN: 6052 /* word3 iocb=iotag32 wqe=payload_len */ 6053 wqe->words[3] = 0; /* no definition for this in wqe */ 6054 /* word4 iocb=rsvd wqe=rsvd */ 6055 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ 6056 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ 6057 bf_set(lpfc_wqe_gen_ct, &wqe->generic, 6058 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 6059 break; 6060 case CMD_FCP_IWRITE64_CR: 6061 command_type = FCP_COMMAND_DATA_OUT; 6062 /* The struct for wqe fcp_iwrite has 3 fields that are somewhat 6063 * confusing. 6064 * word3 is payload_len: byte offset to the sgl entry for the 6065 * fcp_command. 6066 * word4 is total xfer len, same as the IOCB->ulpParameter. 6067 * word5 is initial xfer len 0 = wait for xfer-ready 6068 */ 6069 6070 /* Always wait for xfer-ready before sending data */ 6071 wqe->fcp_iwrite.initial_xfer_len = 0; 6072 /* word 4 (xfer length) should have been set on the memcpy */ 6073 6074 /* allow write to fall through to read */ 6075 case CMD_FCP_IREAD64_CR: 6076 /* FCP_CMD is always the 1st sgl entry */ 6077 wqe->fcp_iread.payload_len = 6078 xmit_len + sizeof(struct fcp_rsp); 6079 6080 /* word 4 (xfer length) should have been set on the memcpy */ 6081 6082 bf_set(lpfc_wqe_gen_erp, &wqe->generic, 6083 iocbq->iocb.ulpFCP2Rcvy); 6084 bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS); 6085 /* The XC bit and the XS bit are similar. The driver never 6086 * tracked whether or not the exchange was previouslly open. 6087 * XC = Exchange create, 0 is create. 1 is already open. 6088 * XS = link cmd: 1 do not close the exchange after command. 6089 * XS = 0 close exchange when command completes. 6090 * The only time we would not set the XC bit is when the XS bit 6091 * is set and we are sending our 2nd or greater command on 6092 * this exchange. 
6093 */ 6094 /* Always open the exchange */ 6095 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); 6096 6097 wqe->words[10] &= 0xffff0000; /* zero out ebde count */ 6098 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); 6099 break; 6100 case CMD_FCP_ICMND64_CR: 6101 /* Always open the exchange */ 6102 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); 6103 6104 wqe->words[4] = 0; 6105 wqe->words[10] &= 0xffff0000; /* zero out ebde count */ 6106 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); 6107 break; 6108 case CMD_GEN_REQUEST64_CR: 6109 /* word3 command length is described as byte offset to the 6110 * rsp_data. Would always be 16, sizeof(struct sli4_sge) 6111 * sgl[0] = cmnd 6112 * sgl[1] = rsp. 6113 * 6114 */ 6115 wqe->gen_req.command_len = xmit_len; 6116 /* Word4 parameter copied in the memcpy */ 6117 /* Word5 [rctl, type, df_ctl, la] copied in memcpy */ 6118 /* word6 context tag copied in memcpy */ 6119 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { 6120 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 6121 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6122 "2015 Invalid CT %x command 0x%x\n", 6123 ct, iocbq->iocb.ulpCommand); 6124 return IOCB_ERROR; 6125 } 6126 bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0); 6127 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, 6128 iocbq->iocb.ulpTimeout); 6129 6130 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); 6131 command_type = OTHER_COMMAND; 6132 break; 6133 case CMD_XMIT_ELS_RSP64_CX: 6134 /* words0-2 BDE memcpy */ 6135 /* word3 iocb=iotag32 wqe=rsvd */ 6136 wqe->words[3] = 0; 6137 /* word4 iocb=did wge=rsvd. */ 6138 wqe->words[4] = 0; 6139 /* word5 iocb=rsvd wge=did */ 6140 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, 6141 iocbq->iocb.un.elsreq64.remoteID); 6142 6143 bf_set(lpfc_wqe_gen_ct, &wqe->generic, 6144 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 6145 6146 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); 6147 bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext); 6148 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 6149 bf_set(lpfc_wqe_gen_context, &wqe->generic, 6150 iocbq->vport->vpi + phba->vpi_base); 6151 command_type = OTHER_COMMAND; 6152 break; 6153 case CMD_CLOSE_XRI_CN: 6154 case CMD_ABORT_XRI_CN: 6155 case CMD_ABORT_XRI_CX: 6156 /* words 0-2 memcpy should be 0 rserved */ 6157 /* port will send abts */ 6158 if (iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 6159 /* 6160 * The link is down so the fw does not need to send abts 6161 * on the wire. 6162 */ 6163 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); 6164 else 6165 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 6166 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 6167 wqe->words[5] = 0; 6168 bf_set(lpfc_wqe_gen_ct, &wqe->generic, 6169 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 6170 abort_tag = iocbq->iocb.un.acxri.abortIoTag; 6171 /* 6172 * The abort handler will send us CMD_ABORT_XRI_CN or 6173 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX 6174 */ 6175 bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX); 6176 cmnd = CMD_ABORT_XRI_CX; 6177 command_type = OTHER_COMMAND; 6178 xritag = 0; 6179 break; 6180 case CMD_XMIT_BLS_RSP64_CX: 6181 /* As BLS ABTS-ACC WQE is very different from other WQEs, 6182 * we re-construct this WQE here based on information in 6183 * iocbq from scratch. 
6184 */
6185 memset(wqe, 0, sizeof(union lpfc_wqe));
6186 /* OX_ID is the same regardless of who sent ABTS to the CT exchange */
6187 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
6188 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_acc));
6189 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_acc) ==
6190 LPFC_ABTS_UNSOL_INT) {
6191 /* ABTS sent by initiator to CT exchange, the
6192 * RX_ID field will be filled with the newly
6193 * allocated responder XRI.
6194 */
6195 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
6196 iocbq->sli4_xritag);
6197 } else {
6198 /* ABTS sent by responder to CT exchange, the
6199 * RX_ID field will be filled with the responder
6200 * RX_ID from ABTS.
6201 */
6202 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
6203 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_acc));
6204 }
6205 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
6206 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
6207 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
6208 iocbq->iocb.ulpContext);
6209 /* Overwrite the pre-set command type with OTHER_COMMAND */
6210 command_type = OTHER_COMMAND;
6211 break;
6212 case CMD_XRI_ABORTED_CX:
6213 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
6214 /* words0-2 are all 0's no bde */
6215 /* word3 and word4 are reserved */
6216 wqe->words[3] = 0;
6217 wqe->words[4] = 0;
6218 /* word5 iocb=rsvd wqe=did */
6219 /* There is no remote port id in the IOCB? */
6220 /* Let this fall through and fail */
6221 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
6222 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
6223 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
6224 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
6225 default:
6226 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6227 "2014 Invalid command 0x%x\n",
6228 iocbq->iocb.ulpCommand);
6229 return IOCB_ERROR;
6230 break;
6231
6232 }
6233 bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag);
6234 bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag);
6235 wqe->generic.abort_tag = abort_tag;
6236 bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type);
6237 bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd);
6238 bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass);
6239 bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT);
6240
6241 return 0;
6242 }
6243
6244 /**
6245 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
6246 * @phba: Pointer to HBA context object.
6247 * @ring_number: SLI ring number to issue iocb on.
6248 * @piocb: Pointer to command iocb.
6249 * @flag: Flag indicating if this command can be put into txq.
6250 *
6251 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
6252 * an iocb command to an HBA with SLI-4 interface spec.
6253 *
6254 * This function is called with hbalock held. The function will return success
6255 * after it successfully submits the iocb to firmware or after adding it to the
6256 * txq.
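 *
 * Callers that do not already hold the hbalock are expected to go through
 * the locked wrapper instead; roughly (see lpfc_sli_issue_iocb() below):
 *
 *   spin_lock_irqsave(&phba->hbalock, iflags);
 *   rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
 *   spin_unlock_irqrestore(&phba->hbalock, iflags);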
6257 **/
6258 static int
6259 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6260 struct lpfc_iocbq *piocb, uint32_t flag)
6261 {
6262 struct lpfc_sglq *sglq;
6263 union lpfc_wqe wqe;
6264 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
6265
6266 if (piocb->sli4_xritag == NO_XRI) {
6267 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
6268 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
6269 sglq = NULL;
6270 else {
6271 if (pring->txq_cnt) {
6272 if (!(flag & SLI_IOCB_RET_IOCB)) {
6273 __lpfc_sli_ringtx_put(phba,
6274 pring, piocb);
6275 return IOCB_SUCCESS;
6276 } else {
6277 return IOCB_BUSY;
6278 }
6279 } else {
6280 sglq = __lpfc_sli_get_sglq(phba);
6281 if (!sglq) {
6282 if (!(flag & SLI_IOCB_RET_IOCB)) {
6283 __lpfc_sli_ringtx_put(phba,
6284 pring,
6285 piocb);
6286 return IOCB_SUCCESS;
6287 } else
6288 return IOCB_BUSY;
6289 }
6290 }
6291 }
6292 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
6293 sglq = NULL; /* These IOs already have an XRI and
6294 * a mapped sgl.
6295 */
6296 } else {
6297 /* This is a continuation of a command (CX), so this
6298 * sglq is on the active list
6299 */
6300 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
6301 if (!sglq)
6302 return IOCB_ERROR;
6303 }
6304
6305 if (sglq) {
6306 piocb->sli4_xritag = sglq->sli4_xritag;
6307
6308 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
6309 return IOCB_ERROR;
6310 }
6311
6312 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
6313 return IOCB_ERROR;
6314
6315 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
6316 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
6317 /*
6318 * For an FCP command IOCB, get a new WQ index to distribute
6319 * the WQE across the WQs. An abort IOCB, on the other hand,
6320 * carries the same WQ index as the original command
6321 * IOCB.
6322 */
6323 if (piocb->iocb_flag & LPFC_IO_FCP)
6324 piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
6325 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
6326 &wqe))
6327 return IOCB_ERROR;
6328 } else {
6329 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
6330 return IOCB_ERROR;
6331 }
6332 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
6333
6334 return 0;
6335 }
6336
6337 /**
6338 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
6339 *
6340 * This routine wraps the actual lockless version for issuing an IOCB through
6341 * the function pointer from the lpfc_hba struct.
6342 *
6343 * Return codes:
6344 * IOCB_ERROR - Error
6345 * IOCB_SUCCESS - Success
6346 * IOCB_BUSY - Busy
6347 **/
6348 int
6349 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
6350 struct lpfc_iocbq *piocb, uint32_t flag)
6351 {
6352 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
6353 }
6354
6355 /**
6356 * lpfc_sli_api_table_setup - Set up sli api function jump table
6357 * @phba: The hba struct for which this call is being executed.
6358 * @dev_grp: The HBA PCI-Device group number.
6359 *
6360 * This routine sets up the SLI interface API function jump table in @phba
6361 * struct.
6362 * Returns: 0 - success, -ENODEV - failure.
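 *
 * Once the table is set up, generic code dispatches through the stored
 * pointers instead of branching on the SLI revision, e.g. (sketch only):
 *
 *   lpfc_sli_api_table_setup(phba, LPFC_PCI_DEV_OC);
 *   ...
 *   rc = phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);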
6363 **/ 6364 int 6365 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 6366 { 6367 6368 switch (dev_grp) { 6369 case LPFC_PCI_DEV_LP: 6370 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3; 6371 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3; 6372 break; 6373 case LPFC_PCI_DEV_OC: 6374 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4; 6375 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4; 6376 break; 6377 default: 6378 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6379 "1419 Invalid HBA PCI-device group: 0x%x\n", 6380 dev_grp); 6381 return -ENODEV; 6382 break; 6383 } 6384 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq; 6385 return 0; 6386 } 6387 6388 /** 6389 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb 6390 * @phba: Pointer to HBA context object. 6391 * @pring: Pointer to driver SLI ring object. 6392 * @piocb: Pointer to command iocb. 6393 * @flag: Flag indicating if this command can be put into txq. 6394 * 6395 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb 6396 * function. This function gets the hbalock and calls 6397 * __lpfc_sli_issue_iocb function and will return the error returned 6398 * by __lpfc_sli_issue_iocb function. This wrapper is used by 6399 * functions which do not hold hbalock. 6400 **/ 6401 int 6402 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 6403 struct lpfc_iocbq *piocb, uint32_t flag) 6404 { 6405 unsigned long iflags; 6406 int rc; 6407 6408 spin_lock_irqsave(&phba->hbalock, iflags); 6409 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 6410 spin_unlock_irqrestore(&phba->hbalock, iflags); 6411 6412 return rc; 6413 } 6414 6415 /** 6416 * lpfc_extra_ring_setup - Extra ring setup function 6417 * @phba: Pointer to HBA context object. 6418 * 6419 * This function is called while driver attaches with the 6420 * HBA to setup the extra ring. The extra ring is used 6421 * only when driver needs to support target mode functionality 6422 * or IP over FC functionalities. 6423 * 6424 * This function is called with no lock held. 6425 **/ 6426 static int 6427 lpfc_extra_ring_setup( struct lpfc_hba *phba) 6428 { 6429 struct lpfc_sli *psli; 6430 struct lpfc_sli_ring *pring; 6431 6432 psli = &phba->sli; 6433 6434 /* Adjust cmd/rsp ring iocb entries more evenly */ 6435 6436 /* Take some away from the FCP ring */ 6437 pring = &psli->ring[psli->fcp_ring]; 6438 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 6439 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 6440 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 6441 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 6442 6443 /* and give them to the extra ring */ 6444 pring = &psli->ring[psli->extra_ring]; 6445 6446 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 6447 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 6448 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 6449 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 6450 6451 /* Setup default profile for this ring */ 6452 pring->iotag_max = 4096; 6453 pring->num_mask = 1; 6454 pring->prt[0].profile = 0; /* Mask 0 */ 6455 pring->prt[0].rctl = phba->cfg_multi_ring_rctl; 6456 pring->prt[0].type = phba->cfg_multi_ring_type; 6457 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 6458 return 0; 6459 } 6460 6461 /** 6462 * lpfc_sli_async_event_handler - ASYNC iocb handler function 6463 * @phba: Pointer to HBA context object. 6464 * @pring: Pointer to driver SLI ring object. 6465 * @iocbq: Pointer to iocb object. 
6466 * 6467 * This function is called by the slow ring event handler 6468 * function when there is an ASYNC event iocb in the ring. 6469 * This function is called with no lock held. 6470 * Currently this function handles only temperature related 6471 * ASYNC events. The function decodes the temperature sensor 6472 * event message and posts events for the management applications. 6473 **/ 6474 static void 6475 lpfc_sli_async_event_handler(struct lpfc_hba * phba, 6476 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) 6477 { 6478 IOCB_t *icmd; 6479 uint16_t evt_code; 6480 uint16_t temp; 6481 struct temp_event temp_event_data; 6482 struct Scsi_Host *shost; 6483 uint32_t *iocb_w; 6484 6485 icmd = &iocbq->iocb; 6486 evt_code = icmd->un.asyncstat.evt_code; 6487 temp = icmd->ulpContext; 6488 6489 if ((evt_code != ASYNC_TEMP_WARN) && 6490 (evt_code != ASYNC_TEMP_SAFE)) { 6491 iocb_w = (uint32_t *) icmd; 6492 lpfc_printf_log(phba, 6493 KERN_ERR, 6494 LOG_SLI, 6495 "0346 Ring %d handler: unexpected ASYNC_STATUS" 6496 " evt_code 0x%x\n" 6497 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" 6498 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" 6499 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" 6500 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n", 6501 pring->ringno, 6502 icmd->un.asyncstat.evt_code, 6503 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3], 6504 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7], 6505 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11], 6506 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]); 6507 6508 return; 6509 } 6510 temp_event_data.data = (uint32_t)temp; 6511 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 6512 if (evt_code == ASYNC_TEMP_WARN) { 6513 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 6514 lpfc_printf_log(phba, 6515 KERN_ERR, 6516 LOG_TEMP, 6517 "0347 Adapter is very hot, please take " 6518 "corrective action. temperature : %d Celsius\n", 6519 temp); 6520 } 6521 if (evt_code == ASYNC_TEMP_SAFE) { 6522 temp_event_data.event_code = LPFC_NORMAL_TEMP; 6523 lpfc_printf_log(phba, 6524 KERN_ERR, 6525 LOG_TEMP, 6526 "0340 Adapter temperature is OK now. " 6527 "temperature : %d Celsius\n", 6528 temp); 6529 } 6530 6531 /* Send temperature change event to applications */ 6532 shost = lpfc_shost_from_vport(phba->pport); 6533 fc_host_post_vendor_event(shost, fc_get_event_number(), 6534 sizeof(temp_event_data), (char *) &temp_event_data, 6535 LPFC_NL_VENDOR_ID); 6536 6537 } 6538 6539 6540 /** 6541 * lpfc_sli_setup - SLI ring setup function 6542 * @phba: Pointer to HBA context object. 6543 * 6544 * lpfc_sli_setup sets up rings of the SLI interface with 6545 * number of iocbs per ring and iotags. This function is 6546 * called while driver attach to the HBA and before the 6547 * interrupts are enabled. So there is no need for locking. 6548 * 6549 * This function always returns 0. 
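 *
 * For example, an attach path would typically call this routine and then
 * lpfc_sli_queue_setup() before enabling interrupts (illustrative ordering
 * only, not a requirement stated here):
 *
 *   lpfc_sli_setup(phba);
 *   lpfc_sli_queue_setup(phba);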
6550 **/ 6551 int 6552 lpfc_sli_setup(struct lpfc_hba *phba) 6553 { 6554 int i, totiocbsize = 0; 6555 struct lpfc_sli *psli = &phba->sli; 6556 struct lpfc_sli_ring *pring; 6557 6558 psli->num_rings = MAX_CONFIGURED_RINGS; 6559 psli->sli_flag = 0; 6560 psli->fcp_ring = LPFC_FCP_RING; 6561 psli->next_ring = LPFC_FCP_NEXT_RING; 6562 psli->extra_ring = LPFC_EXTRA_RING; 6563 6564 psli->iocbq_lookup = NULL; 6565 psli->iocbq_lookup_len = 0; 6566 psli->last_iotag = 0; 6567 6568 for (i = 0; i < psli->num_rings; i++) { 6569 pring = &psli->ring[i]; 6570 switch (i) { 6571 case LPFC_FCP_RING: /* ring 0 - FCP */ 6572 /* numCiocb and numRiocb are used in config_port */ 6573 pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 6574 pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 6575 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 6576 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 6577 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 6578 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 6579 pring->sizeCiocb = (phba->sli_rev == 3) ? 6580 SLI3_IOCB_CMD_SIZE : 6581 SLI2_IOCB_CMD_SIZE; 6582 pring->sizeRiocb = (phba->sli_rev == 3) ? 6583 SLI3_IOCB_RSP_SIZE : 6584 SLI2_IOCB_RSP_SIZE; 6585 pring->iotag_ctr = 0; 6586 pring->iotag_max = 6587 (phba->cfg_hba_queue_depth * 2); 6588 pring->fast_iotag = pring->iotag_max; 6589 pring->num_mask = 0; 6590 break; 6591 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ 6592 /* numCiocb and numRiocb are used in config_port */ 6593 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 6594 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 6595 pring->sizeCiocb = (phba->sli_rev == 3) ? 6596 SLI3_IOCB_CMD_SIZE : 6597 SLI2_IOCB_CMD_SIZE; 6598 pring->sizeRiocb = (phba->sli_rev == 3) ? 6599 SLI3_IOCB_RSP_SIZE : 6600 SLI2_IOCB_RSP_SIZE; 6601 pring->iotag_max = phba->cfg_hba_queue_depth; 6602 pring->num_mask = 0; 6603 break; 6604 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 6605 /* numCiocb and numRiocb are used in config_port */ 6606 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 6607 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 6608 pring->sizeCiocb = (phba->sli_rev == 3) ? 6609 SLI3_IOCB_CMD_SIZE : 6610 SLI2_IOCB_CMD_SIZE; 6611 pring->sizeRiocb = (phba->sli_rev == 3) ? 
6612 SLI3_IOCB_RSP_SIZE : 6613 SLI2_IOCB_RSP_SIZE; 6614 pring->fast_iotag = 0; 6615 pring->iotag_ctr = 0; 6616 pring->iotag_max = 4096; 6617 pring->lpfc_sli_rcv_async_status = 6618 lpfc_sli_async_event_handler; 6619 pring->num_mask = LPFC_MAX_RING_MASK; 6620 pring->prt[0].profile = 0; /* Mask 0 */ 6621 pring->prt[0].rctl = FC_RCTL_ELS_REQ; 6622 pring->prt[0].type = FC_TYPE_ELS; 6623 pring->prt[0].lpfc_sli_rcv_unsol_event = 6624 lpfc_els_unsol_event; 6625 pring->prt[1].profile = 0; /* Mask 1 */ 6626 pring->prt[1].rctl = FC_RCTL_ELS_REP; 6627 pring->prt[1].type = FC_TYPE_ELS; 6628 pring->prt[1].lpfc_sli_rcv_unsol_event = 6629 lpfc_els_unsol_event; 6630 pring->prt[2].profile = 0; /* Mask 2 */ 6631 /* NameServer Inquiry */ 6632 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; 6633 /* NameServer */ 6634 pring->prt[2].type = FC_TYPE_CT; 6635 pring->prt[2].lpfc_sli_rcv_unsol_event = 6636 lpfc_ct_unsol_event; 6637 pring->prt[3].profile = 0; /* Mask 3 */ 6638 /* NameServer response */ 6639 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; 6640 /* NameServer */ 6641 pring->prt[3].type = FC_TYPE_CT; 6642 pring->prt[3].lpfc_sli_rcv_unsol_event = 6643 lpfc_ct_unsol_event; 6644 /* abort unsolicited sequence */ 6645 pring->prt[4].profile = 0; /* Mask 4 */ 6646 pring->prt[4].rctl = FC_RCTL_BA_ABTS; 6647 pring->prt[4].type = FC_TYPE_BLS; 6648 pring->prt[4].lpfc_sli_rcv_unsol_event = 6649 lpfc_sli4_ct_abort_unsol_event; 6650 break; 6651 } 6652 totiocbsize += (pring->numCiocb * pring->sizeCiocb) + 6653 (pring->numRiocb * pring->sizeRiocb); 6654 } 6655 if (totiocbsize > MAX_SLIM_IOCB_SIZE) { 6656 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 6657 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in " 6658 "SLI2 SLIM Data: x%x x%lx\n", 6659 phba->brd_no, totiocbsize, 6660 (unsigned long) MAX_SLIM_IOCB_SIZE); 6661 } 6662 if (phba->cfg_multi_ring_support == 2) 6663 lpfc_extra_ring_setup(phba); 6664 6665 return 0; 6666 } 6667 6668 /** 6669 * lpfc_sli_queue_setup - Queue initialization function 6670 * @phba: Pointer to HBA context object. 6671 * 6672 * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each 6673 * ring. This function also initializes ring indices of each ring. 6674 * This function is called during the initialization of the SLI 6675 * interface of an HBA. 6676 * This function is called with no lock held and always returns 6677 * 1. 6678 **/ 6679 int 6680 lpfc_sli_queue_setup(struct lpfc_hba *phba) 6681 { 6682 struct lpfc_sli *psli; 6683 struct lpfc_sli_ring *pring; 6684 int i; 6685 6686 psli = &phba->sli; 6687 spin_lock_irq(&phba->hbalock); 6688 INIT_LIST_HEAD(&psli->mboxq); 6689 INIT_LIST_HEAD(&psli->mboxq_cmpl); 6690 /* Initialize list headers for txq and txcmplq as double linked lists */ 6691 for (i = 0; i < psli->num_rings; i++) { 6692 pring = &psli->ring[i]; 6693 pring->ringno = i; 6694 pring->next_cmdidx = 0; 6695 pring->local_getidx = 0; 6696 pring->cmdidx = 0; 6697 INIT_LIST_HEAD(&pring->txq); 6698 INIT_LIST_HEAD(&pring->txcmplq); 6699 INIT_LIST_HEAD(&pring->iocb_continueq); 6700 INIT_LIST_HEAD(&pring->iocb_continue_saveq); 6701 INIT_LIST_HEAD(&pring->postbufq); 6702 } 6703 spin_unlock_irq(&phba->hbalock); 6704 return 1; 6705 } 6706 6707 /** 6708 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system 6709 * @phba: Pointer to HBA context object. 6710 * 6711 * This routine flushes the mailbox command subsystem. 
It will unconditionally 6712 * flush all the mailbox commands in the three possible stages in the mailbox 6713 * command sub-system: pending mailbox command queue; the outstanding mailbox 6714 * command; and completed mailbox command queue. It is caller's responsibility 6715 * to make sure that the driver is in the proper state to flush the mailbox 6716 * command sub-system. Namely, the posting of mailbox commands into the 6717 * pending mailbox command queue from the various clients must be stopped; 6718 * either the HBA is in a state that it will never works on the outstanding 6719 * mailbox command (such as in EEH or ERATT conditions) or the outstanding 6720 * mailbox command has been completed. 6721 **/ 6722 static void 6723 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) 6724 { 6725 LIST_HEAD(completions); 6726 struct lpfc_sli *psli = &phba->sli; 6727 LPFC_MBOXQ_t *pmb; 6728 unsigned long iflag; 6729 6730 /* Flush all the mailbox commands in the mbox system */ 6731 spin_lock_irqsave(&phba->hbalock, iflag); 6732 /* The pending mailbox command queue */ 6733 list_splice_init(&phba->sli.mboxq, &completions); 6734 /* The outstanding active mailbox command */ 6735 if (psli->mbox_active) { 6736 list_add_tail(&psli->mbox_active->list, &completions); 6737 psli->mbox_active = NULL; 6738 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6739 } 6740 /* The completed mailbox command queue */ 6741 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 6742 spin_unlock_irqrestore(&phba->hbalock, iflag); 6743 6744 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ 6745 while (!list_empty(&completions)) { 6746 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); 6747 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; 6748 if (pmb->mbox_cmpl) 6749 pmb->mbox_cmpl(phba, pmb); 6750 } 6751 } 6752 6753 /** 6754 * lpfc_sli_host_down - Vport cleanup function 6755 * @vport: Pointer to virtual port object. 6756 * 6757 * lpfc_sli_host_down is called to clean up the resources 6758 * associated with a vport before destroying virtual 6759 * port data structures. 6760 * This function does following operations: 6761 * - Free discovery resources associated with this virtual 6762 * port. 6763 * - Free iocbs associated with this virtual port in 6764 * the txq. 6765 * - Send abort for all iocb commands associated with this 6766 * vport in txcmplq. 6767 * 6768 * This function is called with no lock held and always returns 1. 6769 **/ 6770 int 6771 lpfc_sli_host_down(struct lpfc_vport *vport) 6772 { 6773 LIST_HEAD(completions); 6774 struct lpfc_hba *phba = vport->phba; 6775 struct lpfc_sli *psli = &phba->sli; 6776 struct lpfc_sli_ring *pring; 6777 struct lpfc_iocbq *iocb, *next_iocb; 6778 int i; 6779 unsigned long flags = 0; 6780 uint16_t prev_pring_flag; 6781 6782 lpfc_cleanup_discovery_resources(vport); 6783 6784 spin_lock_irqsave(&phba->hbalock, flags); 6785 for (i = 0; i < psli->num_rings; i++) { 6786 pring = &psli->ring[i]; 6787 prev_pring_flag = pring->flag; 6788 /* Only slow rings */ 6789 if (pring->ringno == LPFC_ELS_RING) { 6790 pring->flag |= LPFC_DEFERRED_RING_EVENT; 6791 /* Set the lpfc data pending flag */ 6792 set_bit(LPFC_DATA_READY, &phba->data_flags); 6793 } 6794 /* 6795 * Error everything on the txq since these iocbs have not been 6796 * given to the FW yet. 
6797 */ 6798 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 6799 if (iocb->vport != vport) 6800 continue; 6801 list_move_tail(&iocb->list, &completions); 6802 pring->txq_cnt--; 6803 } 6804 6805 /* Next issue ABTS for everything on the txcmplq */ 6806 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, 6807 list) { 6808 if (iocb->vport != vport) 6809 continue; 6810 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 6811 } 6812 6813 pring->flag = prev_pring_flag; 6814 } 6815 6816 spin_unlock_irqrestore(&phba->hbalock, flags); 6817 6818 /* Cancel all the IOCBs from the completions list */ 6819 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 6820 IOERR_SLI_DOWN); 6821 return 1; 6822 } 6823 6824 /** 6825 * lpfc_sli_hba_down - Resource cleanup function for the HBA 6826 * @phba: Pointer to HBA context object. 6827 * 6828 * This function cleans up all iocb, buffers, mailbox commands 6829 * while shutting down the HBA. This function is called with no 6830 * lock held and always returns 1. 6831 * This function does the following to cleanup driver resources: 6832 * - Free discovery resources for each virtual port 6833 * - Cleanup any pending fabric iocbs 6834 * - Iterate through the iocb txq and free each entry 6835 * in the list. 6836 * - Free up any buffer posted to the HBA 6837 * - Free mailbox commands in the mailbox queue. 6838 **/ 6839 int 6840 lpfc_sli_hba_down(struct lpfc_hba *phba) 6841 { 6842 LIST_HEAD(completions); 6843 struct lpfc_sli *psli = &phba->sli; 6844 struct lpfc_sli_ring *pring; 6845 struct lpfc_dmabuf *buf_ptr; 6846 unsigned long flags = 0; 6847 int i; 6848 6849 /* Shutdown the mailbox command sub-system */ 6850 lpfc_sli_mbox_sys_shutdown(phba); 6851 6852 lpfc_hba_down_prep(phba); 6853 6854 lpfc_fabric_abort_hba(phba); 6855 6856 spin_lock_irqsave(&phba->hbalock, flags); 6857 for (i = 0; i < psli->num_rings; i++) { 6858 pring = &psli->ring[i]; 6859 /* Only slow rings */ 6860 if (pring->ringno == LPFC_ELS_RING) { 6861 pring->flag |= LPFC_DEFERRED_RING_EVENT; 6862 /* Set the lpfc data pending flag */ 6863 set_bit(LPFC_DATA_READY, &phba->data_flags); 6864 } 6865 6866 /* 6867 * Error everything on the txq since these iocbs have not been 6868 * given to the FW yet. 6869 */ 6870 list_splice_init(&pring->txq, &completions); 6871 pring->txq_cnt = 0; 6872 6873 } 6874 spin_unlock_irqrestore(&phba->hbalock, flags); 6875 6876 /* Cancel all the IOCBs from the completions list */ 6877 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 6878 IOERR_SLI_DOWN); 6879 6880 spin_lock_irqsave(&phba->hbalock, flags); 6881 list_splice_init(&phba->elsbuf, &completions); 6882 phba->elsbuf_cnt = 0; 6883 phba->elsbuf_prev_cnt = 0; 6884 spin_unlock_irqrestore(&phba->hbalock, flags); 6885 6886 while (!list_empty(&completions)) { 6887 list_remove_head(&completions, buf_ptr, 6888 struct lpfc_dmabuf, list); 6889 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 6890 kfree(buf_ptr); 6891 } 6892 6893 /* Return any active mbox cmds */ 6894 del_timer_sync(&psli->mbox_tmo); 6895 6896 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 6897 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 6898 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 6899 6900 return 1; 6901 } 6902 6903 /** 6904 * lpfc_sli4_hba_down - PCI function resource cleanup for the SLI4 HBA 6905 * @phba: Pointer to HBA context object. 6906 * 6907 * This function cleans up all queues, iocb, buffers, mailbox commands while 6908 * shutting down the SLI4 HBA FCoE function. 
This function is called with no 6909 * lock held and always returns 1. 6910 * 6911 * This function does the following to cleanup driver FCoE function resources: 6912 * - Free discovery resources for each virtual port 6913 * - Cleanup any pending fabric iocbs 6914 * - Iterate through the iocb txq and free each entry in the list. 6915 * - Free up any buffer posted to the HBA. 6916 * - Clean up all the queue entries: WQ, RQ, MQ, EQ, CQ, etc. 6917 * - Free mailbox commands in the mailbox queue. 6918 **/ 6919 int 6920 lpfc_sli4_hba_down(struct lpfc_hba *phba) 6921 { 6922 /* Stop the SLI4 device port */ 6923 lpfc_stop_port(phba); 6924 6925 /* Tear down the queues in the HBA */ 6926 lpfc_sli4_queue_unset(phba); 6927 6928 /* unregister default FCFI from the HBA */ 6929 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi); 6930 6931 return 1; 6932 } 6933 6934 /** 6935 * lpfc_sli_pcimem_bcopy - SLI memory copy function 6936 * @srcp: Source memory pointer. 6937 * @destp: Destination memory pointer. 6938 * @cnt: Number of words required to be copied. 6939 * 6940 * This function is used for copying data between driver memory 6941 * and the SLI memory. This function also changes the endianness 6942 * of each word if native endianness is different from SLI 6943 * endianness. This function can be called with or without 6944 * lock. 6945 **/ 6946 void 6947 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) 6948 { 6949 uint32_t *src = srcp; 6950 uint32_t *dest = destp; 6951 uint32_t ldata; 6952 int i; 6953 6954 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { 6955 ldata = *src; 6956 ldata = le32_to_cpu(ldata); 6957 *dest = ldata; 6958 src++; 6959 dest++; 6960 } 6961 } 6962 6963 6964 /** 6965 * lpfc_sli_bemem_bcopy - SLI memory copy function 6966 * @srcp: Source memory pointer. 6967 * @destp: Destination memory pointer. 6968 * @cnt: Number of words required to be copied. 6969 * 6970 * This function is used for copying data between a data structure 6971 * with big endian representation to local endianness. 6972 * This function can be called with or without lock. 6973 **/ 6974 void 6975 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt) 6976 { 6977 uint32_t *src = srcp; 6978 uint32_t *dest = destp; 6979 uint32_t ldata; 6980 int i; 6981 6982 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) { 6983 ldata = *src; 6984 ldata = be32_to_cpu(ldata); 6985 *dest = ldata; 6986 src++; 6987 dest++; 6988 } 6989 } 6990 6991 /** 6992 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq 6993 * @phba: Pointer to HBA context object. 6994 * @pring: Pointer to driver SLI ring object. 6995 * @mp: Pointer to driver buffer object. 6996 * 6997 * This function is called with no lock held. 6998 * It always return zero after adding the buffer to the postbufq 6999 * buffer list. 7000 **/ 7001 int 7002 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7003 struct lpfc_dmabuf *mp) 7004 { 7005 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up 7006 later */ 7007 spin_lock_irq(&phba->hbalock); 7008 list_add_tail(&mp->list, &pring->postbufq); 7009 pring->postbufq_cnt++; 7010 spin_unlock_irq(&phba->hbalock); 7011 return 0; 7012 } 7013 7014 /** 7015 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer 7016 * @phba: Pointer to HBA context object. 7017 * 7018 * When HBQ is enabled, buffers are searched based on tags. This function 7019 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. 
The 7020 * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag 7021 * does not conflict with tags of buffer posted for unsolicited events. 7022 * The function returns the allocated tag. The function is called with 7023 * no locks held. 7024 **/ 7025 uint32_t 7026 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba) 7027 { 7028 spin_lock_irq(&phba->hbalock); 7029 phba->buffer_tag_count++; 7030 /* 7031 * Always set the QUE_BUFTAG_BIT to distinguish this tag from 7032 * a tag assigned by the HBQ. 7033 */ 7034 phba->buffer_tag_count |= QUE_BUFTAG_BIT; 7035 spin_unlock_irq(&phba->hbalock); 7036 return phba->buffer_tag_count; 7037 } 7038 7039 /** 7040 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag 7041 * @phba: Pointer to HBA context object. 7042 * @pring: Pointer to driver SLI ring object. 7043 * @tag: Buffer tag. 7044 * 7045 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq 7046 * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX 7047 * iocb is posted to the response ring with the tag of the buffer. 7048 * This function searches the pring->postbufq list using the tag 7049 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX 7050 * iocb. If the buffer is found then the lpfc_dmabuf object of the 7051 * buffer is returned to the caller, else NULL is returned. 7052 * This function is called with no lock held. 7053 **/ 7054 struct lpfc_dmabuf * 7055 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7056 uint32_t tag) 7057 { 7058 struct lpfc_dmabuf *mp, *next_mp; 7059 struct list_head *slp = &pring->postbufq; 7060 7061 /* Search postbufq, from the beginning, looking for a match on tag */ 7062 spin_lock_irq(&phba->hbalock); 7063 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 7064 if (mp->buffer_tag == tag) { 7065 list_del_init(&mp->list); 7066 pring->postbufq_cnt--; 7067 spin_unlock_irq(&phba->hbalock); 7068 return mp; 7069 } 7070 } 7071 7072 spin_unlock_irq(&phba->hbalock); 7073 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7074 "0402 Cannot find virtual addr for buffer tag on " 7075 "ring %d Data x%lx x%p x%p x%x\n", 7076 pring->ringno, (unsigned long) tag, 7077 slp->next, slp->prev, pring->postbufq_cnt); 7078 7079 return NULL; 7080 } 7081 7082 /** 7083 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events 7084 * @phba: Pointer to HBA context object. 7085 * @pring: Pointer to driver SLI ring object. 7086 * @phys: DMA address of the buffer. 7087 * 7088 * This function searches the buffer list using the dma_address 7089 * of the unsolicited event to find the driver's lpfc_dmabuf object 7090 * corresponding to the dma_address. The function returns the 7091 * lpfc_dmabuf object if a buffer is found else it returns NULL. 7092 * This function is called by the CT and ELS unsolicited event 7093 * handlers to get the buffer associated with the unsolicited 7094 * event. 7095 * 7096 * This function is called with no lock held.
7097 **/ 7098 struct lpfc_dmabuf * 7099 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7100 dma_addr_t phys) 7101 { 7102 struct lpfc_dmabuf *mp, *next_mp; 7103 struct list_head *slp = &pring->postbufq; 7104 7105 /* Search postbufq, from the begining, looking for a match on phys */ 7106 spin_lock_irq(&phba->hbalock); 7107 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 7108 if (mp->phys == phys) { 7109 list_del_init(&mp->list); 7110 pring->postbufq_cnt--; 7111 spin_unlock_irq(&phba->hbalock); 7112 return mp; 7113 } 7114 } 7115 7116 spin_unlock_irq(&phba->hbalock); 7117 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7118 "0410 Cannot find virtual addr for mapped buf on " 7119 "ring %d Data x%llx x%p x%p x%x\n", 7120 pring->ringno, (unsigned long long)phys, 7121 slp->next, slp->prev, pring->postbufq_cnt); 7122 return NULL; 7123 } 7124 7125 /** 7126 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs 7127 * @phba: Pointer to HBA context object. 7128 * @cmdiocb: Pointer to driver command iocb object. 7129 * @rspiocb: Pointer to driver response iocb object. 7130 * 7131 * This function is the completion handler for the abort iocbs for 7132 * ELS commands. This function is called from the ELS ring event 7133 * handler with no lock held. This function frees memory resources 7134 * associated with the abort iocb. 7135 **/ 7136 static void 7137 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 7138 struct lpfc_iocbq *rspiocb) 7139 { 7140 IOCB_t *irsp = &rspiocb->iocb; 7141 uint16_t abort_iotag, abort_context; 7142 struct lpfc_iocbq *abort_iocb; 7143 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 7144 7145 abort_iocb = NULL; 7146 7147 if (irsp->ulpStatus) { 7148 abort_context = cmdiocb->iocb.un.acxri.abortContextTag; 7149 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 7150 7151 spin_lock_irq(&phba->hbalock); 7152 if (phba->sli_rev < LPFC_SLI_REV4) { 7153 if (abort_iotag != 0 && 7154 abort_iotag <= phba->sli.last_iotag) 7155 abort_iocb = 7156 phba->sli.iocbq_lookup[abort_iotag]; 7157 } else 7158 /* For sli4 the abort_tag is the XRI, 7159 * so the abort routine puts the iotag of the iocb 7160 * being aborted in the context field of the abort 7161 * IOCB. 7162 */ 7163 abort_iocb = phba->sli.iocbq_lookup[abort_context]; 7164 7165 /* 7166 * If the iocb is not found in Firmware queue the iocb 7167 * might have completed already. Do not free it again. 7168 */ 7169 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 7170 if (irsp->un.ulpWord[4] != IOERR_NO_XRI) { 7171 spin_unlock_irq(&phba->hbalock); 7172 lpfc_sli_release_iocbq(phba, cmdiocb); 7173 return; 7174 } 7175 /* For SLI4 the ulpContext field for abort IOCB 7176 * holds the iotag of the IOCB being aborted so 7177 * the local abort_context needs to be reset to 7178 * match the aborted IOCBs ulpContext. 7179 */ 7180 if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4) 7181 abort_context = abort_iocb->iocb.ulpContext; 7182 } 7183 7184 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, 7185 "0327 Cannot abort els iocb %p " 7186 "with tag %x context %x, abort status %x, " 7187 "abort code %x\n", 7188 abort_iocb, abort_iotag, abort_context, 7189 irsp->ulpStatus, irsp->un.ulpWord[4]); 7190 /* 7191 * make sure we have the right iocbq before taking it 7192 * off the txcmplq and try to call completion routine. 
7193 */ 7194 if (!abort_iocb || 7195 abort_iocb->iocb.ulpContext != abort_context || 7196 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0) 7197 spin_unlock_irq(&phba->hbalock); 7198 else if (phba->sli_rev < LPFC_SLI_REV4) { 7199 /* 7200 * leave the SLI4 aborted command on the txcmplq 7201 * list and the command complete WCQE's XB bit 7202 * will tell whether the SGL (XRI) can be released 7203 * immediately or to the aborted SGL list for the 7204 * following abort XRI from the HBA. 7205 */ 7206 list_del_init(&abort_iocb->list); 7207 if (abort_iocb->iocb_flag & LPFC_IO_ON_Q) { 7208 abort_iocb->iocb_flag &= ~LPFC_IO_ON_Q; 7209 pring->txcmplq_cnt--; 7210 } 7211 7212 /* Firmware could still be in progress of DMAing 7213 * payload, so don't free data buffer till after 7214 * a hbeat. 7215 */ 7216 abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE; 7217 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; 7218 spin_unlock_irq(&phba->hbalock); 7219 7220 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; 7221 abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED; 7222 (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb); 7223 } else 7224 spin_unlock_irq(&phba->hbalock); 7225 } 7226 7227 lpfc_sli_release_iocbq(phba, cmdiocb); 7228 return; 7229 } 7230 7231 /** 7232 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command 7233 * @phba: Pointer to HBA context object. 7234 * @cmdiocb: Pointer to driver command iocb object. 7235 * @rspiocb: Pointer to driver response iocb object. 7236 * 7237 * The function is called from SLI ring event handler with no 7238 * lock held. This function is the completion handler for ELS commands 7239 * which are aborted. The function frees memory resources used for 7240 * the aborted ELS commands. 7241 **/ 7242 static void 7243 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 7244 struct lpfc_iocbq *rspiocb) 7245 { 7246 IOCB_t *irsp = &rspiocb->iocb; 7247 7248 /* ELS cmd tag <ulpIoTag> completes */ 7249 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 7250 "0139 Ignoring ELS cmd tag x%x completion Data: " 7251 "x%x x%x x%x\n", 7252 irsp->ulpIoTag, irsp->ulpStatus, 7253 irsp->un.ulpWord[4], irsp->ulpTimeout); 7254 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) 7255 lpfc_ct_free_iocb(phba, cmdiocb); 7256 else 7257 lpfc_els_free_iocb(phba, cmdiocb); 7258 return; 7259 } 7260 7261 /** 7262 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb 7263 * @phba: Pointer to HBA context object. 7264 * @pring: Pointer to driver SLI ring object. 7265 * @cmdiocb: Pointer to driver command iocb object. 7266 * 7267 * This function issues an abort iocb for the provided command 7268 * iocb. This function is called with hbalock held. 7269 * The function returns 0 when it fails due to memory allocation 7270 * failure or when the command iocb is an abort request. 7271 **/ 7272 int 7273 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7274 struct lpfc_iocbq *cmdiocb) 7275 { 7276 struct lpfc_vport *vport = cmdiocb->vport; 7277 struct lpfc_iocbq *abtsiocbp; 7278 IOCB_t *icmd = NULL; 7279 IOCB_t *iabt = NULL; 7280 int retval = IOCB_ERROR; 7281 7282 /* 7283 * There are certain command types we don't want to abort. And we 7284 * don't want to abort commands that are already in the process of 7285 * being aborted. 
7286 */ 7287 icmd = &cmdiocb->iocb; 7288 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 7289 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 7290 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 7291 return 0; 7292 7293 /* If we're unloading, don't abort iocb on the ELS ring, but change the 7294 * callback so that nothing happens when it finishes. 7295 */ 7296 if ((vport->load_flag & FC_UNLOADING) && 7297 (pring->ringno == LPFC_ELS_RING)) { 7298 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 7299 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 7300 else 7301 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 7302 goto abort_iotag_exit; 7303 } 7304 7305 /* issue ABTS for this IOCB based on iotag */ 7306 abtsiocbp = __lpfc_sli_get_iocbq(phba); 7307 if (abtsiocbp == NULL) 7308 return 0; 7309 7310 /* This signals the response to set the correct status 7311 * before calling the completion handler 7312 */ 7313 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 7314 7315 iabt = &abtsiocbp->iocb; 7316 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 7317 iabt->un.acxri.abortContextTag = icmd->ulpContext; 7318 if (phba->sli_rev == LPFC_SLI_REV4) { 7319 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; 7320 iabt->un.acxri.abortContextTag = cmdiocb->iotag; 7321 } 7322 else 7323 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 7324 iabt->ulpLe = 1; 7325 iabt->ulpClass = icmd->ulpClass; 7326 7327 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 7328 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx; 7329 if (cmdiocb->iocb_flag & LPFC_IO_FCP) 7330 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX; 7331 7332 if (phba->link_state >= LPFC_LINK_UP) 7333 iabt->ulpCommand = CMD_ABORT_XRI_CN; 7334 else 7335 iabt->ulpCommand = CMD_CLOSE_XRI_CN; 7336 7337 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; 7338 7339 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 7340 "0339 Abort xri x%x, original iotag x%x, " 7341 "abort cmd iotag x%x\n", 7342 iabt->un.acxri.abortIoTag, 7343 iabt->un.acxri.abortContextTag, 7344 abtsiocbp->iotag); 7345 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0); 7346 7347 if (retval) 7348 __lpfc_sli_release_iocbq(phba, abtsiocbp); 7349 abort_iotag_exit: 7350 /* 7351 * Caller to this routine should check for IOCB_ERROR 7352 * and handle it properly. This routine no longer removes 7353 * iocb off txcmplq and call compl in case of IOCB_ERROR. 7354 */ 7355 return retval; 7356 } 7357 7358 /** 7359 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN 7360 * @iocbq: Pointer to driver iocb object. 7361 * @vport: Pointer to driver virtual port object. 7362 * @tgt_id: SCSI ID of the target. 7363 * @lun_id: LUN ID of the scsi device. 7364 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST 7365 * 7366 * This function acts as an iocb filter for functions which abort or count 7367 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return 7368 * 0 if the filtering criteria is met for the given iocb and will return 7369 * 1 if the filtering criteria is not met. 7370 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the 7371 * given iocb is for the SCSI device specified by vport, tgt_id and 7372 * lun_id parameter. 7373 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the 7374 * given iocb is for the SCSI target specified by vport and tgt_id 7375 * parameters. 7376 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the 7377 * given iocb is for the SCSI host associated with the given vport. 7378 * This function is called with no locks held. 
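 *
 * Usage sketch (illustrative only, not part of the original source; the
 * local variables "iocbq" and "count" are hypothetical):
 *
 *	if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
 *				       LPFC_CTX_TGT) == 0)
 *		count++;
 *
 * This is the filtering pattern used by the iotag-walk routines that
 * follow, e.g. when counting or aborting the FCP commands of one target.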
7379 **/ 7380 static int 7381 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, 7382 uint16_t tgt_id, uint64_t lun_id, 7383 lpfc_ctx_cmd ctx_cmd) 7384 { 7385 struct lpfc_scsi_buf *lpfc_cmd; 7386 int rc = 1; 7387 7388 if (!(iocbq->iocb_flag & LPFC_IO_FCP)) 7389 return rc; 7390 7391 if (iocbq->vport != vport) 7392 return rc; 7393 7394 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 7395 7396 if (lpfc_cmd->pCmd == NULL) 7397 return rc; 7398 7399 switch (ctx_cmd) { 7400 case LPFC_CTX_LUN: 7401 if ((lpfc_cmd->rdata->pnode) && 7402 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && 7403 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) 7404 rc = 0; 7405 break; 7406 case LPFC_CTX_TGT: 7407 if ((lpfc_cmd->rdata->pnode) && 7408 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) 7409 rc = 0; 7410 break; 7411 case LPFC_CTX_HOST: 7412 rc = 0; 7413 break; 7414 default: 7415 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 7416 __func__, ctx_cmd); 7417 break; 7418 } 7419 7420 return rc; 7421 } 7422 7423 /** 7424 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending 7425 * @vport: Pointer to virtual port. 7426 * @tgt_id: SCSI ID of the target. 7427 * @lun_id: LUN ID of the scsi device. 7428 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 7429 * 7430 * This function returns number of FCP commands pending for the vport. 7431 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP 7432 * commands pending on the vport associated with SCSI device specified 7433 * by tgt_id and lun_id parameters. 7434 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP 7435 * commands pending on the vport associated with SCSI target specified 7436 * by tgt_id parameter. 7437 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP 7438 * commands pending on the vport. 7439 * This function returns the number of iocbs which satisfy the filter. 7440 * This function is called without any lock held. 7441 **/ 7442 int 7443 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 7444 lpfc_ctx_cmd ctx_cmd) 7445 { 7446 struct lpfc_hba *phba = vport->phba; 7447 struct lpfc_iocbq *iocbq; 7448 int sum, i; 7449 7450 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 7451 iocbq = phba->sli.iocbq_lookup[i]; 7452 7453 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id, 7454 ctx_cmd) == 0) 7455 sum++; 7456 } 7457 7458 return sum; 7459 } 7460 7461 /** 7462 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs 7463 * @phba: Pointer to HBA context object 7464 * @cmdiocb: Pointer to command iocb object. 7465 * @rspiocb: Pointer to response iocb object. 7466 * 7467 * This function is called when an aborted FCP iocb completes. This 7468 * function is called by the ring event handler with no lock held. 7469 * This function frees the iocb. 7470 **/ 7471 void 7472 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 7473 struct lpfc_iocbq *rspiocb) 7474 { 7475 lpfc_sli_release_iocbq(phba, cmdiocb); 7476 return; 7477 } 7478 7479 /** 7480 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN 7481 * @vport: Pointer to virtual port. 7482 * @pring: Pointer to driver SLI ring object. 7483 * @tgt_id: SCSI ID of the target. 7484 * @lun_id: LUN ID of the scsi device. 7485 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 
7486 * 7487 * This function sends an abort command for every SCSI command 7488 * associated with the given virtual port pending on the ring 7489 * filtered by lpfc_sli_validate_fcp_iocb function. 7490 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the 7491 * FCP iocbs associated with lun specified by tgt_id and lun_id 7492 * parameters 7493 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the 7494 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 7495 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all 7496 * FCP iocbs associated with virtual port. 7497 * This function returns number of iocbs it failed to abort. 7498 * This function is called with no locks held. 7499 **/ 7500 int 7501 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 7502 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) 7503 { 7504 struct lpfc_hba *phba = vport->phba; 7505 struct lpfc_iocbq *iocbq; 7506 struct lpfc_iocbq *abtsiocb; 7507 IOCB_t *cmd = NULL; 7508 int errcnt = 0, ret_val = 0; 7509 int i; 7510 7511 for (i = 1; i <= phba->sli.last_iotag; i++) { 7512 iocbq = phba->sli.iocbq_lookup[i]; 7513 7514 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 7515 abort_cmd) != 0) 7516 continue; 7517 7518 /* issue ABTS for this IOCB based on iotag */ 7519 abtsiocb = lpfc_sli_get_iocbq(phba); 7520 if (abtsiocb == NULL) { 7521 errcnt++; 7522 continue; 7523 } 7524 7525 cmd = &iocbq->iocb; 7526 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 7527 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 7528 if (phba->sli_rev == LPFC_SLI_REV4) 7529 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag; 7530 else 7531 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 7532 abtsiocb->iocb.ulpLe = 1; 7533 abtsiocb->iocb.ulpClass = cmd->ulpClass; 7534 abtsiocb->vport = phba->pport; 7535 7536 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 7537 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; 7538 if (iocbq->iocb_flag & LPFC_IO_FCP) 7539 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; 7540 7541 if (lpfc_is_link_up(phba)) 7542 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 7543 else 7544 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 7545 7546 /* Setup callback routine and issue the command. */ 7547 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 7548 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, 7549 abtsiocb, 0); 7550 if (ret_val == IOCB_ERROR) { 7551 lpfc_sli_release_iocbq(phba, abtsiocb); 7552 errcnt++; 7553 continue; 7554 } 7555 } 7556 7557 return errcnt; 7558 } 7559 7560 /** 7561 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler 7562 * @phba: Pointer to HBA context object. 7563 * @cmdiocbq: Pointer to command iocb. 7564 * @rspiocbq: Pointer to response iocb. 7565 * 7566 * This function is the completion handler for iocbs issued using 7567 * lpfc_sli_issue_iocb_wait function. This function is called by the 7568 * ring event handler function without any lock held. This function 7569 * can be called from both worker thread context and interrupt 7570 * context. This function also can be called from other thread which 7571 * cleans up the SLI layer objects. 7572 * This function copy the contents of the response iocb to the 7573 * response iocb memory object provided by the caller of 7574 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 7575 * sleeps for the iocb completion. 
7576 **/ 7577 static void 7578 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, 7579 struct lpfc_iocbq *cmdiocbq, 7580 struct lpfc_iocbq *rspiocbq) 7581 { 7582 wait_queue_head_t *pdone_q; 7583 unsigned long iflags; 7584 struct lpfc_scsi_buf *lpfc_cmd; 7585 7586 spin_lock_irqsave(&phba->hbalock, iflags); 7587 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 7588 if (cmdiocbq->context2 && rspiocbq) 7589 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 7590 &rspiocbq->iocb, sizeof(IOCB_t)); 7591 7592 /* Set the exchange busy flag for task management commands */ 7593 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && 7594 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { 7595 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf, 7596 cur_iocbq); 7597 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; 7598 } 7599 7600 pdone_q = cmdiocbq->context_un.wait_queue; 7601 if (pdone_q) 7602 wake_up(pdone_q); 7603 spin_unlock_irqrestore(&phba->hbalock, iflags); 7604 return; 7605 } 7606 7607 /** 7608 * lpfc_chk_iocb_flg - Test IOCB flag with lock held. 7609 * @phba: Pointer to HBA context object. 7610 * @piocbq: Pointer to command iocb. 7611 * @flag: Flag to test. 7612 * 7613 * This routine grabs the hbalock and then tests the iocb_flag to 7614 * see if the passed-in flag is set. 7615 * Returns: 7616 * 1 if flag is set. 7617 * 0 if flag is not set. 7618 **/ 7619 static int 7620 lpfc_chk_iocb_flg(struct lpfc_hba *phba, 7621 struct lpfc_iocbq *piocbq, uint32_t flag) 7622 { 7623 unsigned long iflags; 7624 int ret; 7625 7626 spin_lock_irqsave(&phba->hbalock, iflags); 7627 ret = piocbq->iocb_flag & flag; 7628 spin_unlock_irqrestore(&phba->hbalock, iflags); 7629 return ret; 7630 7631 } 7632 7633 /** 7634 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands 7635 * @phba: Pointer to HBA context object. 7636 * @ring_number: SLI ring number on which to issue the iocb. 7637 * @piocb: Pointer to command iocb. 7638 * @prspiocbq: Pointer to response iocb. 7639 * @timeout: Timeout in number of seconds. 7640 * 7641 * This function issues the iocb to the firmware and waits for the 7642 * iocb to complete. If the iocb command is not 7643 * completed within timeout seconds, it returns IOCB_TIMEDOUT. 7644 * The caller should not free the iocb resources if this function 7645 * returns IOCB_TIMEDOUT. 7646 * The function waits for the iocb completion using a 7647 * non-interruptible wait. 7648 * This function will sleep while waiting for iocb completion. 7649 * So, this function should not be called from any context which 7650 * does not allow sleeping. Due to the same reason, this function 7651 * cannot be called with interrupts disabled. 7652 * This function assumes that the iocb completions occur while 7653 * this function sleeps. So, this function cannot be called from 7654 * the thread which processes iocb completion for this ring. 7655 * This function clears the iocb_flag of the iocb object before 7656 * issuing the iocb and the iocb completion handler sets this 7657 * flag and wakes this thread when the iocb completes. 7658 * The contents of the response iocb will be copied to prspiocbq 7659 * by the completion handler when the command completes. 7660 * This function returns IOCB_SUCCESS on success. 7661 * This function is called with no lock held.
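 *
 * Call pattern sketch (illustrative only, not part of the original source;
 * "cmdiocbq" and "rspiocbq" are assumed to be fully initialized iocbs
 * owned by the caller):
 *
 *	retval = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
 *					  rspiocbq, timeout);
 *
 * On IOCB_SUCCESS the response contents are available in rspiocbq; on
 * IOCB_TIMEDOUT the caller must not free the iocb, as noted above.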
7662 **/ 7663 int 7664 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, 7665 uint32_t ring_number, 7666 struct lpfc_iocbq *piocb, 7667 struct lpfc_iocbq *prspiocbq, 7668 uint32_t timeout) 7669 { 7670 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 7671 long timeleft, timeout_req = 0; 7672 int retval = IOCB_SUCCESS; 7673 uint32_t creg_val; 7674 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 7675 /* 7676 * If the caller has provided a response iocbq buffer, then context2 7677 * is NULL or its an error. 7678 */ 7679 if (prspiocbq) { 7680 if (piocb->context2) 7681 return IOCB_ERROR; 7682 piocb->context2 = prspiocbq; 7683 } 7684 7685 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; 7686 piocb->context_un.wait_queue = &done_q; 7687 piocb->iocb_flag &= ~LPFC_IO_WAKE; 7688 7689 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 7690 creg_val = readl(phba->HCregaddr); 7691 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 7692 writel(creg_val, phba->HCregaddr); 7693 readl(phba->HCregaddr); /* flush */ 7694 } 7695 7696 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 7697 SLI_IOCB_RET_IOCB); 7698 if (retval == IOCB_SUCCESS) { 7699 timeout_req = timeout * HZ; 7700 timeleft = wait_event_timeout(done_q, 7701 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), 7702 timeout_req); 7703 7704 if (piocb->iocb_flag & LPFC_IO_WAKE) { 7705 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 7706 "0331 IOCB wake signaled\n"); 7707 } else if (timeleft == 0) { 7708 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7709 "0338 IOCB wait timeout error - no " 7710 "wake response Data x%x\n", timeout); 7711 retval = IOCB_TIMEDOUT; 7712 } else { 7713 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7714 "0330 IOCB wake NOT set, " 7715 "Data x%x x%lx\n", 7716 timeout, (timeleft / jiffies)); 7717 retval = IOCB_TIMEDOUT; 7718 } 7719 } else if (retval == IOCB_BUSY) { 7720 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 7721 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", 7722 phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt); 7723 return retval; 7724 } else { 7725 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 7726 "0332 IOCB wait issue failed, Data x%x\n", 7727 retval); 7728 retval = IOCB_ERROR; 7729 } 7730 7731 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 7732 creg_val = readl(phba->HCregaddr); 7733 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 7734 writel(creg_val, phba->HCregaddr); 7735 readl(phba->HCregaddr); /* flush */ 7736 } 7737 7738 if (prspiocbq) 7739 piocb->context2 = NULL; 7740 7741 piocb->context_un.wait_queue = NULL; 7742 piocb->iocb_cmpl = NULL; 7743 return retval; 7744 } 7745 7746 /** 7747 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox 7748 * @phba: Pointer to HBA context object. 7749 * @pmboxq: Pointer to driver mailbox object. 7750 * @timeout: Timeout in number of seconds. 7751 * 7752 * This function issues the mailbox to firmware and waits for the 7753 * mailbox command to complete. If the mailbox command is not 7754 * completed within timeout seconds, it returns MBX_TIMEOUT. 7755 * The function waits for the mailbox completion using an 7756 * interruptible wait. If the thread is woken up due to a 7757 * signal, MBX_TIMEOUT error is returned to the caller. Caller 7758 * should not free the mailbox resources, if this function returns 7759 * MBX_TIMEOUT. 7760 * This function will sleep while waiting for mailbox completion. 7761 * So, this function should not be called from any context which 7762 * does not allow sleeping. Due to the same reason, this function 7763 * cannot be called with interrupt disabled. 
7764 * This function assumes that the mailbox completion occurs while 7765 * this function sleep. So, this function cannot be called from 7766 * the worker thread which processes mailbox completion. 7767 * This function is called in the context of HBA management 7768 * applications. 7769 * This function returns MBX_SUCCESS when successful. 7770 * This function is called with no lock held. 7771 **/ 7772 int 7773 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, 7774 uint32_t timeout) 7775 { 7776 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 7777 int retval; 7778 unsigned long flag; 7779 7780 /* The caller must leave context1 empty. */ 7781 if (pmboxq->context1) 7782 return MBX_NOT_FINISHED; 7783 7784 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE; 7785 /* setup wake call as IOCB callback */ 7786 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; 7787 /* setup context field to pass wait_queue pointer to wake function */ 7788 pmboxq->context1 = &done_q; 7789 7790 /* now issue the command */ 7791 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 7792 7793 if (retval == MBX_BUSY || retval == MBX_SUCCESS) { 7794 wait_event_interruptible_timeout(done_q, 7795 pmboxq->mbox_flag & LPFC_MBX_WAKE, 7796 timeout * HZ); 7797 7798 spin_lock_irqsave(&phba->hbalock, flag); 7799 pmboxq->context1 = NULL; 7800 /* 7801 * if LPFC_MBX_WAKE flag is set the mailbox is completed 7802 * else do not free the resources. 7803 */ 7804 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) { 7805 retval = MBX_SUCCESS; 7806 lpfc_sli4_swap_str(phba, pmboxq); 7807 } else { 7808 retval = MBX_TIMEOUT; 7809 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 7810 } 7811 spin_unlock_irqrestore(&phba->hbalock, flag); 7812 } 7813 7814 return retval; 7815 } 7816 7817 /** 7818 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system 7819 * @phba: Pointer to HBA context. 7820 * 7821 * This function is called to shutdown the driver's mailbox sub-system. 7822 * It first marks the mailbox sub-system is in a block state to prevent 7823 * the asynchronous mailbox command from issued off the pending mailbox 7824 * command queue. If the mailbox command sub-system shutdown is due to 7825 * HBA error conditions such as EEH or ERATT, this routine shall invoke 7826 * the mailbox sub-system flush routine to forcefully bring down the 7827 * mailbox sub-system. Otherwise, if it is due to normal condition (such 7828 * as with offline or HBA function reset), this routine will wait for the 7829 * outstanding mailbox command to complete before invoking the mailbox 7830 * sub-system flush routine to gracefully bring down mailbox sub-system. 7831 **/ 7832 void 7833 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba) 7834 { 7835 struct lpfc_sli *psli = &phba->sli; 7836 uint8_t actcmd = MBX_HEARTBEAT; 7837 unsigned long timeout; 7838 7839 spin_lock_irq(&phba->hbalock); 7840 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 7841 spin_unlock_irq(&phba->hbalock); 7842 7843 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 7844 spin_lock_irq(&phba->hbalock); 7845 if (phba->sli.mbox_active) 7846 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 7847 spin_unlock_irq(&phba->hbalock); 7848 /* Determine how long we might wait for the active mailbox 7849 * command to be gracefully completed by firmware. 
7850 */ 7851 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 7852 1000) + jiffies; 7853 while (phba->sli.mbox_active) { 7854 /* Check active mailbox complete status every 2ms */ 7855 msleep(2); 7856 if (time_after(jiffies, timeout)) 7857 /* Timeout, let the mailbox flush routine to 7858 * forcefully release active mailbox command 7859 */ 7860 break; 7861 } 7862 } 7863 lpfc_sli_mbox_sys_flush(phba); 7864 } 7865 7866 /** 7867 * lpfc_sli_eratt_read - read sli-3 error attention events 7868 * @phba: Pointer to HBA context. 7869 * 7870 * This function is called to read the SLI3 device error attention registers 7871 * for possible error attention events. The caller must hold the hostlock 7872 * with spin_lock_irq(). 7873 * 7874 * This fucntion returns 1 when there is Error Attention in the Host Attention 7875 * Register and returns 0 otherwise. 7876 **/ 7877 static int 7878 lpfc_sli_eratt_read(struct lpfc_hba *phba) 7879 { 7880 uint32_t ha_copy; 7881 7882 /* Read chip Host Attention (HA) register */ 7883 ha_copy = readl(phba->HAregaddr); 7884 if (ha_copy & HA_ERATT) { 7885 /* Read host status register to retrieve error event */ 7886 lpfc_sli_read_hs(phba); 7887 7888 /* Check if there is a deferred error condition is active */ 7889 if ((HS_FFER1 & phba->work_hs) && 7890 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 7891 HS_FFER6 | HS_FFER7) & phba->work_hs)) { 7892 phba->hba_flag |= DEFER_ERATT; 7893 /* Clear all interrupt enable conditions */ 7894 writel(0, phba->HCregaddr); 7895 readl(phba->HCregaddr); 7896 } 7897 7898 /* Set the driver HA work bitmap */ 7899 phba->work_ha |= HA_ERATT; 7900 /* Indicate polling handles this ERATT */ 7901 phba->hba_flag |= HBA_ERATT_HANDLED; 7902 return 1; 7903 } 7904 return 0; 7905 } 7906 7907 /** 7908 * lpfc_sli4_eratt_read - read sli-4 error attention events 7909 * @phba: Pointer to HBA context. 7910 * 7911 * This function is called to read the SLI4 device error attention registers 7912 * for possible error attention events. The caller must hold the hostlock 7913 * with spin_lock_irq(). 7914 * 7915 * This fucntion returns 1 when there is Error Attention in the Host Attention 7916 * Register and returns 0 otherwise. 7917 **/ 7918 static int 7919 lpfc_sli4_eratt_read(struct lpfc_hba *phba) 7920 { 7921 uint32_t uerr_sta_hi, uerr_sta_lo; 7922 7923 /* For now, use the SLI4 device internal unrecoverable error 7924 * registers for error attention. This can be changed later. 7925 */ 7926 uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr); 7927 uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr); 7928 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 7929 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 7930 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7931 "1423 HBA Unrecoverable error: " 7932 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 7933 "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n", 7934 uerr_sta_lo, uerr_sta_hi, 7935 phba->sli4_hba.ue_mask_lo, 7936 phba->sli4_hba.ue_mask_hi); 7937 phba->work_status[0] = uerr_sta_lo; 7938 phba->work_status[1] = uerr_sta_hi; 7939 /* Set the driver HA work bitmap */ 7940 phba->work_ha |= HA_ERATT; 7941 /* Indicate polling handles this ERATT */ 7942 phba->hba_flag |= HBA_ERATT_HANDLED; 7943 return 1; 7944 } 7945 return 0; 7946 } 7947 7948 /** 7949 * lpfc_sli_check_eratt - check error attention events 7950 * @phba: Pointer to HBA context. 7951 * 7952 * This function is called from timer soft interrupt context to check HBA's 7953 * error attention register bit for error attention events. 
7954 * 7955 * This fucntion returns 1 when there is Error Attention in the Host Attention 7956 * Register and returns 0 otherwise. 7957 **/ 7958 int 7959 lpfc_sli_check_eratt(struct lpfc_hba *phba) 7960 { 7961 uint32_t ha_copy; 7962 7963 /* If somebody is waiting to handle an eratt, don't process it 7964 * here. The brdkill function will do this. 7965 */ 7966 if (phba->link_flag & LS_IGNORE_ERATT) 7967 return 0; 7968 7969 /* Check if interrupt handler handles this ERATT */ 7970 spin_lock_irq(&phba->hbalock); 7971 if (phba->hba_flag & HBA_ERATT_HANDLED) { 7972 /* Interrupt handler has handled ERATT */ 7973 spin_unlock_irq(&phba->hbalock); 7974 return 0; 7975 } 7976 7977 /* 7978 * If there is deferred error attention, do not check for error 7979 * attention 7980 */ 7981 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 7982 spin_unlock_irq(&phba->hbalock); 7983 return 0; 7984 } 7985 7986 /* If PCI channel is offline, don't process it */ 7987 if (unlikely(pci_channel_offline(phba->pcidev))) { 7988 spin_unlock_irq(&phba->hbalock); 7989 return 0; 7990 } 7991 7992 switch (phba->sli_rev) { 7993 case LPFC_SLI_REV2: 7994 case LPFC_SLI_REV3: 7995 /* Read chip Host Attention (HA) register */ 7996 ha_copy = lpfc_sli_eratt_read(phba); 7997 break; 7998 case LPFC_SLI_REV4: 7999 /* Read devcie Uncoverable Error (UERR) registers */ 8000 ha_copy = lpfc_sli4_eratt_read(phba); 8001 break; 8002 default: 8003 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8004 "0299 Invalid SLI revision (%d)\n", 8005 phba->sli_rev); 8006 ha_copy = 0; 8007 break; 8008 } 8009 spin_unlock_irq(&phba->hbalock); 8010 8011 return ha_copy; 8012 } 8013 8014 /** 8015 * lpfc_intr_state_check - Check device state for interrupt handling 8016 * @phba: Pointer to HBA context. 8017 * 8018 * This inline routine checks whether a device or its PCI slot is in a state 8019 * that the interrupt should be handled. 8020 * 8021 * This function returns 0 if the device or the PCI slot is in a state that 8022 * interrupt should be handled, otherwise -EIO. 8023 */ 8024 static inline int 8025 lpfc_intr_state_check(struct lpfc_hba *phba) 8026 { 8027 /* If the pci channel is offline, ignore all the interrupts */ 8028 if (unlikely(pci_channel_offline(phba->pcidev))) 8029 return -EIO; 8030 8031 /* Update device level interrupt statistics */ 8032 phba->sli.slistat.sli_intr++; 8033 8034 /* Ignore all interrupts during initialization. */ 8035 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 8036 return -EIO; 8037 8038 return 0; 8039 } 8040 8041 /** 8042 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device 8043 * @irq: Interrupt number. 8044 * @dev_id: The device context pointer. 8045 * 8046 * This function is directly called from the PCI layer as an interrupt 8047 * service routine when device with SLI-3 interface spec is enabled with 8048 * MSI-X multi-message interrupt mode and there are slow-path events in 8049 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 8050 * interrupt mode, this function is called as part of the device-level 8051 * interrupt handler. When the PCI slot is in error recovery or the HBA 8052 * is undergoing initialization, the interrupt handler will not process 8053 * the interrupt. The link attention and ELS ring attention events are 8054 * handled by the worker thread. The interrupt handler signals the worker 8055 * thread and returns for these events. This function is called without 8056 * any lock held. It gets the hbalock to access and update SLI data 8057 * structures. 
8058 * 8059 * This function returns IRQ_HANDLED when interrupt is handled else it 8060 * returns IRQ_NONE. 8061 **/ 8062 irqreturn_t 8063 lpfc_sli_sp_intr_handler(int irq, void *dev_id) 8064 { 8065 struct lpfc_hba *phba; 8066 uint32_t ha_copy, hc_copy; 8067 uint32_t work_ha_copy; 8068 unsigned long status; 8069 unsigned long iflag; 8070 uint32_t control; 8071 8072 MAILBOX_t *mbox, *pmbox; 8073 struct lpfc_vport *vport; 8074 struct lpfc_nodelist *ndlp; 8075 struct lpfc_dmabuf *mp; 8076 LPFC_MBOXQ_t *pmb; 8077 int rc; 8078 8079 /* 8080 * Get the driver's phba structure from the dev_id and 8081 * assume the HBA is not interrupting. 8082 */ 8083 phba = (struct lpfc_hba *)dev_id; 8084 8085 if (unlikely(!phba)) 8086 return IRQ_NONE; 8087 8088 /* 8089 * Stuff needs to be attented to when this function is invoked as an 8090 * individual interrupt handler in MSI-X multi-message interrupt mode 8091 */ 8092 if (phba->intr_type == MSIX) { 8093 /* Check device state for handling interrupt */ 8094 if (lpfc_intr_state_check(phba)) 8095 return IRQ_NONE; 8096 /* Need to read HA REG for slow-path events */ 8097 spin_lock_irqsave(&phba->hbalock, iflag); 8098 ha_copy = readl(phba->HAregaddr); 8099 /* If somebody is waiting to handle an eratt don't process it 8100 * here. The brdkill function will do this. 8101 */ 8102 if (phba->link_flag & LS_IGNORE_ERATT) 8103 ha_copy &= ~HA_ERATT; 8104 /* Check the need for handling ERATT in interrupt handler */ 8105 if (ha_copy & HA_ERATT) { 8106 if (phba->hba_flag & HBA_ERATT_HANDLED) 8107 /* ERATT polling has handled ERATT */ 8108 ha_copy &= ~HA_ERATT; 8109 else 8110 /* Indicate interrupt handler handles ERATT */ 8111 phba->hba_flag |= HBA_ERATT_HANDLED; 8112 } 8113 8114 /* 8115 * If there is deferred error attention, do not check for any 8116 * interrupt. 8117 */ 8118 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 8119 spin_unlock_irqrestore(&phba->hbalock, iflag); 8120 return IRQ_NONE; 8121 } 8122 8123 /* Clear up only attention source related to slow-path */ 8124 hc_copy = readl(phba->HCregaddr); 8125 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | 8126 HC_LAINT_ENA | HC_ERINT_ENA), 8127 phba->HCregaddr); 8128 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 8129 phba->HAregaddr); 8130 writel(hc_copy, phba->HCregaddr); 8131 readl(phba->HAregaddr); /* flush */ 8132 spin_unlock_irqrestore(&phba->hbalock, iflag); 8133 } else 8134 ha_copy = phba->ha_copy; 8135 8136 work_ha_copy = ha_copy & phba->work_ha_mask; 8137 8138 if (work_ha_copy) { 8139 if (work_ha_copy & HA_LATT) { 8140 if (phba->sli.sli_flag & LPFC_PROCESS_LA) { 8141 /* 8142 * Turn off Link Attention interrupts 8143 * until CLEAR_LA done 8144 */ 8145 spin_lock_irqsave(&phba->hbalock, iflag); 8146 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 8147 control = readl(phba->HCregaddr); 8148 control &= ~HC_LAINT_ENA; 8149 writel(control, phba->HCregaddr); 8150 readl(phba->HCregaddr); /* flush */ 8151 spin_unlock_irqrestore(&phba->hbalock, iflag); 8152 } 8153 else 8154 work_ha_copy &= ~HA_LATT; 8155 } 8156 8157 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) { 8158 /* 8159 * Turn off Slow Rings interrupts, LPFC_ELS_RING is 8160 * the only slow ring. 
8161 */ 8162 status = (work_ha_copy & 8163 (HA_RXMASK << (4*LPFC_ELS_RING))); 8164 status >>= (4*LPFC_ELS_RING); 8165 if (status & HA_RXMASK) { 8166 spin_lock_irqsave(&phba->hbalock, iflag); 8167 control = readl(phba->HCregaddr); 8168 8169 lpfc_debugfs_slow_ring_trc(phba, 8170 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", 8171 control, status, 8172 (uint32_t)phba->sli.slistat.sli_intr); 8173 8174 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { 8175 lpfc_debugfs_slow_ring_trc(phba, 8176 "ISR Disable ring:" 8177 "pwork:x%x hawork:x%x wait:x%x", 8178 phba->work_ha, work_ha_copy, 8179 (uint32_t)((unsigned long) 8180 &phba->work_waitq)); 8181 8182 control &= 8183 ~(HC_R0INT_ENA << LPFC_ELS_RING); 8184 writel(control, phba->HCregaddr); 8185 readl(phba->HCregaddr); /* flush */ 8186 } 8187 else { 8188 lpfc_debugfs_slow_ring_trc(phba, 8189 "ISR slow ring: pwork:" 8190 "x%x hawork:x%x wait:x%x", 8191 phba->work_ha, work_ha_copy, 8192 (uint32_t)((unsigned long) 8193 &phba->work_waitq)); 8194 } 8195 spin_unlock_irqrestore(&phba->hbalock, iflag); 8196 } 8197 } 8198 spin_lock_irqsave(&phba->hbalock, iflag); 8199 if (work_ha_copy & HA_ERATT) { 8200 lpfc_sli_read_hs(phba); 8201 /* 8202 * Check if there is a deferred error condition 8203 * is active 8204 */ 8205 if ((HS_FFER1 & phba->work_hs) && 8206 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 8207 HS_FFER6 | HS_FFER7) & phba->work_hs)) { 8208 phba->hba_flag |= DEFER_ERATT; 8209 /* Clear all interrupt enable conditions */ 8210 writel(0, phba->HCregaddr); 8211 readl(phba->HCregaddr); 8212 } 8213 } 8214 8215 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 8216 pmb = phba->sli.mbox_active; 8217 pmbox = &pmb->u.mb; 8218 mbox = phba->mbox; 8219 vport = pmb->vport; 8220 8221 /* First check out the status word */ 8222 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); 8223 if (pmbox->mbxOwner != OWN_HOST) { 8224 spin_unlock_irqrestore(&phba->hbalock, iflag); 8225 /* 8226 * Stray Mailbox Interrupt, mbxCommand <cmd> 8227 * mbxStatus <status> 8228 */ 8229 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 8230 LOG_SLI, 8231 "(%d):0304 Stray Mailbox " 8232 "Interrupt mbxCommand x%x " 8233 "mbxStatus x%x\n", 8234 (vport ? vport->vpi : 0), 8235 pmbox->mbxCommand, 8236 pmbox->mbxStatus); 8237 /* clear mailbox attention bit */ 8238 work_ha_copy &= ~HA_MBATT; 8239 } else { 8240 phba->sli.mbox_active = NULL; 8241 spin_unlock_irqrestore(&phba->hbalock, iflag); 8242 phba->last_completion_time = jiffies; 8243 del_timer(&phba->sli.mbox_tmo); 8244 if (pmb->mbox_cmpl) { 8245 lpfc_sli_pcimem_bcopy(mbox, pmbox, 8246 MAILBOX_CMD_SIZE); 8247 if (pmb->out_ext_byte_len && 8248 pmb->context2) 8249 lpfc_sli_pcimem_bcopy( 8250 phba->mbox_ext, 8251 pmb->context2, 8252 pmb->out_ext_byte_len); 8253 } 8254 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 8255 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 8256 8257 lpfc_debugfs_disc_trc(vport, 8258 LPFC_DISC_TRC_MBOX_VPORT, 8259 "MBOX dflt rpi: : " 8260 "status:x%x rpi:x%x", 8261 (uint32_t)pmbox->mbxStatus, 8262 pmbox->un.varWords[0], 0); 8263 8264 if (!pmbox->mbxStatus) { 8265 mp = (struct lpfc_dmabuf *) 8266 (pmb->context1); 8267 ndlp = (struct lpfc_nodelist *) 8268 pmb->context2; 8269 8270 /* Reg_LOGIN of dflt RPI was 8271 * successful. new lets get 8272 * rid of the RPI using the 8273 * same mbox buffer. 
8274 */ 8275 lpfc_unreg_login(phba, 8276 vport->vpi, 8277 pmbox->un.varWords[0], 8278 pmb); 8279 pmb->mbox_cmpl = 8280 lpfc_mbx_cmpl_dflt_rpi; 8281 pmb->context1 = mp; 8282 pmb->context2 = ndlp; 8283 pmb->vport = vport; 8284 rc = lpfc_sli_issue_mbox(phba, 8285 pmb, 8286 MBX_NOWAIT); 8287 if (rc != MBX_BUSY) 8288 lpfc_printf_log(phba, 8289 KERN_ERR, 8290 LOG_MBOX | LOG_SLI, 8291 "0350 rc should have" 8292 "been MBX_BUSY\n"); 8293 if (rc != MBX_NOT_FINISHED) 8294 goto send_current_mbox; 8295 } 8296 } 8297 spin_lock_irqsave( 8298 &phba->pport->work_port_lock, 8299 iflag); 8300 phba->pport->work_port_events &= 8301 ~WORKER_MBOX_TMO; 8302 spin_unlock_irqrestore( 8303 &phba->pport->work_port_lock, 8304 iflag); 8305 lpfc_mbox_cmpl_put(phba, pmb); 8306 } 8307 } else 8308 spin_unlock_irqrestore(&phba->hbalock, iflag); 8309 8310 if ((work_ha_copy & HA_MBATT) && 8311 (phba->sli.mbox_active == NULL)) { 8312 send_current_mbox: 8313 /* Process next mailbox command if there is one */ 8314 do { 8315 rc = lpfc_sli_issue_mbox(phba, NULL, 8316 MBX_NOWAIT); 8317 } while (rc == MBX_NOT_FINISHED); 8318 if (rc != MBX_SUCCESS) 8319 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 8320 LOG_SLI, "0349 rc should be " 8321 "MBX_SUCCESS\n"); 8322 } 8323 8324 spin_lock_irqsave(&phba->hbalock, iflag); 8325 phba->work_ha |= work_ha_copy; 8326 spin_unlock_irqrestore(&phba->hbalock, iflag); 8327 lpfc_worker_wake_up(phba); 8328 } 8329 return IRQ_HANDLED; 8330 8331 } /* lpfc_sli_sp_intr_handler */ 8332 8333 /** 8334 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device. 8335 * @irq: Interrupt number. 8336 * @dev_id: The device context pointer. 8337 * 8338 * This function is directly called from the PCI layer as an interrupt 8339 * service routine when device with SLI-3 interface spec is enabled with 8340 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 8341 * ring event in the HBA. However, when the device is enabled with either 8342 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 8343 * device-level interrupt handler. When the PCI slot is in error recovery 8344 * or the HBA is undergoing initialization, the interrupt handler will not 8345 * process the interrupt. The SCSI FCP fast-path ring event are handled in 8346 * the intrrupt context. This function is called without any lock held. 8347 * It gets the hbalock to access and update SLI data structures. 8348 * 8349 * This function returns IRQ_HANDLED when interrupt is handled else it 8350 * returns IRQ_NONE. 8351 **/ 8352 irqreturn_t 8353 lpfc_sli_fp_intr_handler(int irq, void *dev_id) 8354 { 8355 struct lpfc_hba *phba; 8356 uint32_t ha_copy; 8357 unsigned long status; 8358 unsigned long iflag; 8359 8360 /* Get the driver's phba structure from the dev_id and 8361 * assume the HBA is not interrupting. 8362 */ 8363 phba = (struct lpfc_hba *) dev_id; 8364 8365 if (unlikely(!phba)) 8366 return IRQ_NONE; 8367 8368 /* 8369 * Stuff needs to be attented to when this function is invoked as an 8370 * individual interrupt handler in MSI-X multi-message interrupt mode 8371 */ 8372 if (phba->intr_type == MSIX) { 8373 /* Check device state for handling interrupt */ 8374 if (lpfc_intr_state_check(phba)) 8375 return IRQ_NONE; 8376 /* Need to read HA REG for FCP ring and other ring events */ 8377 ha_copy = readl(phba->HAregaddr); 8378 /* Clear up only attention source related to fast-path */ 8379 spin_lock_irqsave(&phba->hbalock, iflag); 8380 /* 8381 * If there is deferred error attention, do not check for 8382 * any interrupt. 
8383 */ 8384 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 8385 spin_unlock_irqrestore(&phba->hbalock, iflag); 8386 return IRQ_NONE; 8387 } 8388 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 8389 phba->HAregaddr); 8390 readl(phba->HAregaddr); /* flush */ 8391 spin_unlock_irqrestore(&phba->hbalock, iflag); 8392 } else 8393 ha_copy = phba->ha_copy; 8394 8395 /* 8396 * Process all events on FCP ring. Take the optimized path for FCP IO. 8397 */ 8398 ha_copy &= ~(phba->work_ha_mask); 8399 8400 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 8401 status >>= (4*LPFC_FCP_RING); 8402 if (status & HA_RXMASK) 8403 lpfc_sli_handle_fast_ring_event(phba, 8404 &phba->sli.ring[LPFC_FCP_RING], 8405 status); 8406 8407 if (phba->cfg_multi_ring_support == 2) { 8408 /* 8409 * Process all events on extra ring. Take the optimized path 8410 * for extra ring IO. 8411 */ 8412 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 8413 status >>= (4*LPFC_EXTRA_RING); 8414 if (status & HA_RXMASK) { 8415 lpfc_sli_handle_fast_ring_event(phba, 8416 &phba->sli.ring[LPFC_EXTRA_RING], 8417 status); 8418 } 8419 } 8420 return IRQ_HANDLED; 8421 } /* lpfc_sli_fp_intr_handler */ 8422 8423 /** 8424 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device 8425 * @irq: Interrupt number. 8426 * @dev_id: The device context pointer. 8427 * 8428 * This function is the HBA device-level interrupt handler to device with 8429 * SLI-3 interface spec, called from the PCI layer when either MSI or 8430 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which 8431 * requires driver attention. This function invokes the slow-path interrupt 8432 * attention handling function and fast-path interrupt attention handling 8433 * function in turn to process the relevant HBA attention events. This 8434 * function is called without any lock held. It gets the hbalock to access 8435 * and update SLI data structures. 8436 * 8437 * This function returns IRQ_HANDLED when interrupt is handled, else it 8438 * returns IRQ_NONE. 8439 **/ 8440 irqreturn_t 8441 lpfc_sli_intr_handler(int irq, void *dev_id) 8442 { 8443 struct lpfc_hba *phba; 8444 irqreturn_t sp_irq_rc, fp_irq_rc; 8445 unsigned long status1, status2; 8446 uint32_t hc_copy; 8447 8448 /* 8449 * Get the driver's phba structure from the dev_id and 8450 * assume the HBA is not interrupting. 8451 */ 8452 phba = (struct lpfc_hba *) dev_id; 8453 8454 if (unlikely(!phba)) 8455 return IRQ_NONE; 8456 8457 /* Check device state for handling interrupt */ 8458 if (lpfc_intr_state_check(phba)) 8459 return IRQ_NONE; 8460 8461 spin_lock(&phba->hbalock); 8462 phba->ha_copy = readl(phba->HAregaddr); 8463 if (unlikely(!phba->ha_copy)) { 8464 spin_unlock(&phba->hbalock); 8465 return IRQ_NONE; 8466 } else if (phba->ha_copy & HA_ERATT) { 8467 if (phba->hba_flag & HBA_ERATT_HANDLED) 8468 /* ERATT polling has handled ERATT */ 8469 phba->ha_copy &= ~HA_ERATT; 8470 else 8471 /* Indicate interrupt handler handles ERATT */ 8472 phba->hba_flag |= HBA_ERATT_HANDLED; 8473 } 8474 8475 /* 8476 * If there is deferred error attention, do not check for any interrupt. 
8477 */ 8478 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 8479 spin_unlock(&phba->hbalock); 8480 return IRQ_NONE; 8481 } 8482 8483 /* Clear attention sources except link and error attentions */ 8484 hc_copy = readl(phba->HCregaddr); 8485 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA 8486 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), 8487 phba->HCregaddr); 8488 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 8489 writel(hc_copy, phba->HCregaddr); 8490 readl(phba->HAregaddr); /* flush */ 8491 spin_unlock(&phba->hbalock); 8492 8493 /* 8494 * Invoke slow-path host attention interrupt handling as appropriate. 8495 */ 8496 8497 /* status of events with mailbox and link attention */ 8498 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); 8499 8500 /* status of events with ELS ring */ 8501 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 8502 status2 >>= (4*LPFC_ELS_RING); 8503 8504 if (status1 || (status2 & HA_RXMASK)) 8505 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); 8506 else 8507 sp_irq_rc = IRQ_NONE; 8508 8509 /* 8510 * Invoke fast-path host attention interrupt handling as appropriate. 8511 */ 8512 8513 /* status of events with FCP ring */ 8514 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 8515 status1 >>= (4*LPFC_FCP_RING); 8516 8517 /* status of events with extra ring */ 8518 if (phba->cfg_multi_ring_support == 2) { 8519 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 8520 status2 >>= (4*LPFC_EXTRA_RING); 8521 } else 8522 status2 = 0; 8523 8524 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 8525 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); 8526 else 8527 fp_irq_rc = IRQ_NONE; 8528 8529 /* Return device-level interrupt handling status */ 8530 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 8531 } /* lpfc_sli_intr_handler */ 8532 8533 /** 8534 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event 8535 * @phba: pointer to lpfc hba data structure. 8536 * 8537 * This routine is invoked by the worker thread to process all the pending 8538 * SLI4 FCP abort XRI events. 8539 **/ 8540 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba) 8541 { 8542 struct lpfc_cq_event *cq_event; 8543 8544 /* First, declare the fcp xri abort event has been handled */ 8545 spin_lock_irq(&phba->hbalock); 8546 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT; 8547 spin_unlock_irq(&phba->hbalock); 8548 /* Now, handle all the fcp xri abort events */ 8549 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) { 8550 /* Get the first event from the head of the event queue */ 8551 spin_lock_irq(&phba->hbalock); 8552 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 8553 cq_event, struct lpfc_cq_event, list); 8554 spin_unlock_irq(&phba->hbalock); 8555 /* Notify aborted XRI for FCP work queue */ 8556 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 8557 /* Free the processed event back to the free pool */ 8558 lpfc_sli4_cq_event_release(phba, cq_event); 8559 } 8560 } 8561 8562 /** 8563 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event 8564 * @phba: pointer to lpfc hba data structure. 8565 * 8566 * This routine is invoked by the worker thread to process all the pending 8567 * SLI4 ELS abort XRI events.
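 *
 * Typical invocation is from the worker thread, roughly as sketched below
 * (illustrative only; the actual call site is outside this file section):
 *
 *	if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
 *		lpfc_sli4_els_xri_abort_event_proc(phba);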
8568 **/ 8569 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) 8570 { 8571 struct lpfc_cq_event *cq_event; 8572 8573 /* First, declare the els xri abort event has been handled */ 8574 spin_lock_irq(&phba->hbalock); 8575 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; 8576 spin_unlock_irq(&phba->hbalock); 8577 /* Now, handle all the els xri abort events */ 8578 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { 8579 /* Get the first event from the head of the event queue */ 8580 spin_lock_irq(&phba->hbalock); 8581 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 8582 cq_event, struct lpfc_cq_event, list); 8583 spin_unlock_irq(&phba->hbalock); 8584 /* Notify aborted XRI for ELS work queue */ 8585 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 8586 /* Free the event processed back to the free pool */ 8587 lpfc_sli4_cq_event_release(phba, cq_event); 8588 } 8589 } 8590 8591 /** 8592 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn 8593 * @phba: pointer to lpfc hba data structure 8594 * @pIocbIn: pointer to the rspiocbq 8595 * @pIocbOut: pointer to the cmdiocbq 8596 * @wcqe: pointer to the complete wcqe 8597 * 8598 * This routine transfers the fields of a command iocbq to a response iocbq 8599 * by copying all the IOCB fields from command iocbq and transferring the 8600 * completion status information from the complete wcqe. 8601 **/ 8602 static void 8603 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, 8604 struct lpfc_iocbq *pIocbIn, 8605 struct lpfc_iocbq *pIocbOut, 8606 struct lpfc_wcqe_complete *wcqe) 8607 { 8608 unsigned long iflags; 8609 size_t offset = offsetof(struct lpfc_iocbq, iocb); 8610 8611 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, 8612 sizeof(struct lpfc_iocbq) - offset); 8613 /* Map WCQE parameters into irspiocb parameters */ 8614 pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe); 8615 if (pIocbOut->iocb_flag & LPFC_IO_FCP) 8616 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 8617 pIocbIn->iocb.un.fcpi.fcpi_parm = 8618 pIocbOut->iocb.un.fcpi.fcpi_parm - 8619 wcqe->total_data_placed; 8620 else 8621 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 8622 else { 8623 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 8624 pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed; 8625 } 8626 8627 /* Pick up HBA exchange busy condition */ 8628 if (bf_get(lpfc_wcqe_c_xb, wcqe)) { 8629 spin_lock_irqsave(&phba->hbalock, iflags); 8630 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY; 8631 spin_unlock_irqrestore(&phba->hbalock, iflags); 8632 } 8633 } 8634 8635 /** 8636 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe 8637 * @phba: Pointer to HBA context object. 8638 * @wcqe: Pointer to work-queue completion queue entry. 8639 * 8640 * This routine handles an ELS work-queue completion event and construct 8641 * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common 8642 * discovery engine to handle. 8643 * 8644 * Return: Pointer to the receive IOCBQ, NULL otherwise. 
8645 **/ 8646 static struct lpfc_iocbq * 8647 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, 8648 struct lpfc_iocbq *irspiocbq) 8649 { 8650 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 8651 struct lpfc_iocbq *cmdiocbq; 8652 struct lpfc_wcqe_complete *wcqe; 8653 unsigned long iflags; 8654 8655 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; 8656 spin_lock_irqsave(&phba->hbalock, iflags); 8657 pring->stats.iocb_event++; 8658 /* Look up the ELS command IOCB and create pseudo response IOCB */ 8659 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 8660 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 8661 spin_unlock_irqrestore(&phba->hbalock, iflags); 8662 8663 if (unlikely(!cmdiocbq)) { 8664 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8665 "0386 ELS complete with no corresponding " 8666 "cmdiocb: iotag (%d)\n", 8667 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 8668 lpfc_sli_release_iocbq(phba, irspiocbq); 8669 return NULL; 8670 } 8671 8672 /* Fake the irspiocbq and copy necessary response information */ 8673 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe); 8674 8675 return irspiocbq; 8676 } 8677 8678 /** 8679 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event 8680 * @phba: Pointer to HBA context object. 8681 * @mcqe: Pointer to mailbox completion queue entry. 8682 * 8683 * This routine processes a mailbox completion queue entry with an 8684 * asynchronous event. 8685 * 8686 * Return: true if work posted to worker thread, otherwise false. 8687 **/ 8688 static bool 8689 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 8690 { 8691 struct lpfc_cq_event *cq_event; 8692 unsigned long iflags; 8693 8694 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8695 "0392 Async Event: word0:x%x, word1:x%x, " 8696 "word2:x%x, word3:x%x\n", mcqe->word0, 8697 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); 8698 8699 /* Allocate a new internal CQ_EVENT entry */ 8700 cq_event = lpfc_sli4_cq_event_alloc(phba); 8701 if (!cq_event) { 8702 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8703 "0394 Failed to allocate CQ_EVENT entry\n"); 8704 return false; 8705 } 8706 8707 /* Move the CQE into an asynchronous event entry */ 8708 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe)); 8709 spin_lock_irqsave(&phba->hbalock, iflags); 8710 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); 8711 /* Set the async event flag */ 8712 phba->hba_flag |= ASYNC_EVENT; 8713 spin_unlock_irqrestore(&phba->hbalock, iflags); 8714 8715 return true; 8716 } 8717 8718 /** 8719 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event 8720 * @phba: Pointer to HBA context object. 8721 * @mcqe: Pointer to mailbox completion queue entry. 8722 * 8723 * This routine processes a mailbox completion queue entry with a mailbox 8724 * completion event. 8725 * 8726 * Return: true if work posted to worker thread, otherwise false.
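 *
 * Dispatch sketch (illustrative; mirrors lpfc_sli4_sp_handle_mcqe() below,
 * which routes each mailbox CQE here when its async bit is clear):
 *
 *	if (!bf_get(lpfc_trailer_async, &mcqe))
 *		workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);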
8727 **/ 8728 static bool 8729 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 8730 { 8731 uint32_t mcqe_status; 8732 MAILBOX_t *mbox, *pmbox; 8733 struct lpfc_mqe *mqe; 8734 struct lpfc_vport *vport; 8735 struct lpfc_nodelist *ndlp; 8736 struct lpfc_dmabuf *mp; 8737 unsigned long iflags; 8738 LPFC_MBOXQ_t *pmb; 8739 bool workposted = false; 8740 int rc; 8741 8742 /* If not a mailbox complete MCQE, out by checking mailbox consume */ 8743 if (!bf_get(lpfc_trailer_completed, mcqe)) 8744 goto out_no_mqe_complete; 8745 8746 /* Get the reference to the active mbox command */ 8747 spin_lock_irqsave(&phba->hbalock, iflags); 8748 pmb = phba->sli.mbox_active; 8749 if (unlikely(!pmb)) { 8750 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 8751 "1832 No pending MBOX command to handle\n"); 8752 spin_unlock_irqrestore(&phba->hbalock, iflags); 8753 goto out_no_mqe_complete; 8754 } 8755 spin_unlock_irqrestore(&phba->hbalock, iflags); 8756 mqe = &pmb->u.mqe; 8757 pmbox = (MAILBOX_t *)&pmb->u.mqe; 8758 mbox = phba->mbox; 8759 vport = pmb->vport; 8760 8761 /* Reset heartbeat timer */ 8762 phba->last_completion_time = jiffies; 8763 del_timer(&phba->sli.mbox_tmo); 8764 8765 /* Move mbox data to caller's mailbox region, do endian swapping */ 8766 if (pmb->mbox_cmpl && mbox) 8767 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe)); 8768 /* Set the mailbox status with SLI4 range 0x4000 */ 8769 mcqe_status = bf_get(lpfc_mcqe_status, mcqe); 8770 if (mcqe_status != MB_CQE_STATUS_SUCCESS) 8771 bf_set(lpfc_mqe_status, mqe, 8772 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 8773 8774 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 8775 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 8776 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT, 8777 "MBOX dflt rpi: status:x%x rpi:x%x", 8778 mcqe_status, 8779 pmbox->un.varWords[0], 0); 8780 if (mcqe_status == MB_CQE_STATUS_SUCCESS) { 8781 mp = (struct lpfc_dmabuf *)(pmb->context1); 8782 ndlp = (struct lpfc_nodelist *)pmb->context2; 8783 /* Reg_LOGIN of dflt RPI was successful. Now lets get 8784 * RID of the PPI using the same mbox buffer. 
8785 */ 8786 lpfc_unreg_login(phba, vport->vpi, 8787 pmbox->un.varWords[0], pmb); 8788 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 8789 pmb->context1 = mp; 8790 pmb->context2 = ndlp; 8791 pmb->vport = vport; 8792 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 8793 if (rc != MBX_BUSY) 8794 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 8795 LOG_SLI, "0385 rc should " 8796 "have been MBX_BUSY\n"); 8797 if (rc != MBX_NOT_FINISHED) 8798 goto send_current_mbox; 8799 } 8800 } 8801 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 8802 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 8803 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 8804 8805 /* There is mailbox completion work to do */ 8806 spin_lock_irqsave(&phba->hbalock, iflags); 8807 __lpfc_mbox_cmpl_put(phba, pmb); 8808 phba->work_ha |= HA_MBATT; 8809 spin_unlock_irqrestore(&phba->hbalock, iflags); 8810 workposted = true; 8811 8812 send_current_mbox: 8813 spin_lock_irqsave(&phba->hbalock, iflags); 8814 /* Release the mailbox command posting token */ 8815 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8816 /* Setting active mailbox pointer need to be in sync to flag clear */ 8817 phba->sli.mbox_active = NULL; 8818 spin_unlock_irqrestore(&phba->hbalock, iflags); 8819 /* Wake up worker thread to post the next pending mailbox command */ 8820 lpfc_worker_wake_up(phba); 8821 out_no_mqe_complete: 8822 if (bf_get(lpfc_trailer_consumed, mcqe)) 8823 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); 8824 return workposted; 8825 } 8826 8827 /** 8828 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry 8829 * @phba: Pointer to HBA context object. 8830 * @cqe: Pointer to mailbox completion queue entry. 8831 * 8832 * This routine process a mailbox completion queue entry, it invokes the 8833 * proper mailbox complete handling or asynchrous event handling routine 8834 * according to the MCQE's async bit. 8835 * 8836 * Return: true if work posted to worker thread, otherwise false. 8837 **/ 8838 static bool 8839 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) 8840 { 8841 struct lpfc_mcqe mcqe; 8842 bool workposted; 8843 8844 /* Copy the mailbox MCQE and convert endian order as needed */ 8845 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); 8846 8847 /* Invoke the proper event handling routine */ 8848 if (!bf_get(lpfc_trailer_async, &mcqe)) 8849 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe); 8850 else 8851 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe); 8852 return workposted; 8853 } 8854 8855 /** 8856 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event 8857 * @phba: Pointer to HBA context object. 8858 * @wcqe: Pointer to work-queue completion queue entry. 8859 * 8860 * This routine handles an ELS work-queue completion event. 8861 * 8862 * Return: true if work posted to worker thread, otherwise false. 
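 *
 * Note (descriptive only): the WCQE is copied into a borrowed iocbq, queued
 * on the sp_queue_event list, and HBA_SP_QUEUE_EVT is set so that the worker
 * thread can later turn it into a pseudo response iocb (see
 * lpfc_sli4_els_wcqe_to_rspiocbq() above).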
8863 **/
8864 static bool
8865 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
8866 struct lpfc_wcqe_complete *wcqe)
8867 {
8868 struct lpfc_iocbq *irspiocbq;
8869 unsigned long iflags;
8870 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
8871
8872 /* Get an irspiocbq for later ELS response processing use */
8873 irspiocbq = lpfc_sli_get_iocbq(phba);
8874 if (!irspiocbq) {
8875 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8876 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
8877 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
8878 pring->txq_cnt, phba->iocb_cnt,
8879 phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt,
8880 phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt);
8881 return false;
8882 }
8883
8884 /* Save off the slow-path queue event for work thread to process */
8885 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
8886 spin_lock_irqsave(&phba->hbalock, iflags);
8887 list_add_tail(&irspiocbq->cq_event.list,
8888 &phba->sli4_hba.sp_queue_event);
8889 phba->hba_flag |= HBA_SP_QUEUE_EVT;
8890 spin_unlock_irqrestore(&phba->hbalock, iflags);
8891
8892 return true;
8893 }
8894
8895 /**
8896 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
8897 * @phba: Pointer to HBA context object.
8898 * @wcqe: Pointer to work-queue completion queue entry.
8899 *
8900 * This routine handles a slow-path WQ entry consumed event by invoking the
8901 * proper WQ release routine on the slow-path WQ.
8902 **/
8903 static void
8904 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
8905 struct lpfc_wcqe_release *wcqe)
8906 {
8907 /* Check for the slow-path ELS work queue */
8908 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
8909 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
8910 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
8911 else
8912 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8913 "2579 Slow-path wqe consume event carries "
8914 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
8915 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
8916 phba->sli4_hba.els_wq->queue_id);
8917 }
8918
8919 /**
8920 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an xri abort event
8921 * @phba: Pointer to HBA context object.
8922 * @cq: Pointer to a WQ completion queue.
8923 * @wcqe: Pointer to work-queue completion queue entry.
8924 *
8925 * This routine handles an XRI abort event.
8926 *
8927 * Return: true if work posted to worker thread, otherwise false.
8928 **/ 8929 static bool 8930 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, 8931 struct lpfc_queue *cq, 8932 struct sli4_wcqe_xri_aborted *wcqe) 8933 { 8934 bool workposted = false; 8935 struct lpfc_cq_event *cq_event; 8936 unsigned long iflags; 8937 8938 /* Allocate a new internal CQ_EVENT entry */ 8939 cq_event = lpfc_sli4_cq_event_alloc(phba); 8940 if (!cq_event) { 8941 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8942 "0602 Failed to allocate CQ_EVENT entry\n"); 8943 return false; 8944 } 8945 8946 /* Move the CQE into the proper xri abort event list */ 8947 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); 8948 switch (cq->subtype) { 8949 case LPFC_FCP: 8950 spin_lock_irqsave(&phba->hbalock, iflags); 8951 list_add_tail(&cq_event->list, 8952 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 8953 /* Set the fcp xri abort event flag */ 8954 phba->hba_flag |= FCP_XRI_ABORT_EVENT; 8955 spin_unlock_irqrestore(&phba->hbalock, iflags); 8956 workposted = true; 8957 break; 8958 case LPFC_ELS: 8959 spin_lock_irqsave(&phba->hbalock, iflags); 8960 list_add_tail(&cq_event->list, 8961 &phba->sli4_hba.sp_els_xri_aborted_work_queue); 8962 /* Set the els xri abort event flag */ 8963 phba->hba_flag |= ELS_XRI_ABORT_EVENT; 8964 spin_unlock_irqrestore(&phba->hbalock, iflags); 8965 workposted = true; 8966 break; 8967 default: 8968 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8969 "0603 Invalid work queue CQE subtype (x%x)\n", 8970 cq->subtype); 8971 workposted = false; 8972 break; 8973 } 8974 return workposted; 8975 } 8976 8977 /** 8978 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry 8979 * @phba: Pointer to HBA context object. 8980 * @rcqe: Pointer to receive-queue completion queue entry. 8981 * 8982 * This routine process a receive-queue completion queue entry. 8983 * 8984 * Return: true if work posted to worker thread, otherwise false. 
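 *
 * Note (descriptive only): on a successfully received frame the header/data
 * RQ pair is released, the frame buffer is queued on sp_queue_event and
 * HBA_SP_QUEUE_EVT is set; on insufficient-buffer status only
 * HBA_POST_RECEIVE_BUFFER is set so the worker thread reposts buffers.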
8985 **/
8986 static bool
8987 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
8988 {
8989 bool workposted = false;
8990 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
8991 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
8992 struct hbq_dmabuf *dma_buf;
8993 uint32_t status;
8994 unsigned long iflags;
8995
8996 if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id)
8997 goto out;
8998
8999 status = bf_get(lpfc_rcqe_status, rcqe);
9000 switch (status) {
9001 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
9002 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9003 "2537 Receive Frame Truncated!!\n");
9004 case FC_STATUS_RQ_SUCCESS:
9005 lpfc_sli4_rq_release(hrq, drq);
9006 spin_lock_irqsave(&phba->hbalock, iflags);
9007 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
9008 if (!dma_buf) {
9009 spin_unlock_irqrestore(&phba->hbalock, iflags);
9010 goto out;
9011 }
9012 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
9013 /* save off the frame for the worker thread to process */
9014 list_add_tail(&dma_buf->cq_event.list,
9015 &phba->sli4_hba.sp_queue_event);
9016 /* Frame received */
9017 phba->hba_flag |= HBA_SP_QUEUE_EVT;
9018 spin_unlock_irqrestore(&phba->hbalock, iflags);
9019 workposted = true;
9020 break;
9021 case FC_STATUS_INSUFF_BUF_NEED_BUF:
9022 case FC_STATUS_INSUFF_BUF_FRM_DISC:
9023 /* Post more buffers if possible */
9024 spin_lock_irqsave(&phba->hbalock, iflags);
9025 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
9026 spin_unlock_irqrestore(&phba->hbalock, iflags);
9027 workposted = true;
9028 break;
9029 }
9030 out:
9031 return workposted;
9032 }
9033
9034 /**
9035 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
9036 * @phba: Pointer to HBA context object.
9037 * @cq: Pointer to the completion queue.
9038 * @cqe: Pointer to a completion queue entry.
9039 *
9040 * This routine processes a slow-path work-queue or receive-queue completion
9041 * queue entry.
9042 *
9043 * Return: true if work posted to worker thread, otherwise false.
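 *
 * Dispatch summary (descriptive only):
 *   CQE_CODE_COMPL_WQE   -> lpfc_sli4_sp_handle_els_wcqe()
 *   CQE_CODE_RELEASE_WQE -> lpfc_sli4_sp_handle_rel_wcqe()
 *   CQE_CODE_XRI_ABORTED -> lpfc_sli4_sp_handle_abort_xri_wcqe()
 *   CQE_CODE_RECEIVE     -> lpfc_sli4_sp_handle_rcqe()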
9044 **/
9045 static bool
9046 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
9047 struct lpfc_cqe *cqe)
9048 {
9049 struct lpfc_cqe cqevt;
9050 bool workposted = false;
9051
9052 /* Copy the work queue CQE and convert endian order if needed */
9053 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
9054
9055 /* Check and process for different type of WCQE and dispatch */
9056 switch (bf_get(lpfc_cqe_code, &cqevt)) {
9057 case CQE_CODE_COMPL_WQE:
9058 /* Process the WQ/RQ complete event */
9059 phba->last_completion_time = jiffies;
9060 workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
9061 (struct lpfc_wcqe_complete *)&cqevt);
9062 break;
9063 case CQE_CODE_RELEASE_WQE:
9064 /* Process the WQ release event */
9065 lpfc_sli4_sp_handle_rel_wcqe(phba,
9066 (struct lpfc_wcqe_release *)&cqevt);
9067 break;
9068 case CQE_CODE_XRI_ABORTED:
9069 /* Process the WQ XRI abort event */
9070 phba->last_completion_time = jiffies;
9071 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
9072 (struct sli4_wcqe_xri_aborted *)&cqevt);
9073 break;
9074 case CQE_CODE_RECEIVE:
9075 /* Process the RQ event */
9076 phba->last_completion_time = jiffies;
9077 workposted = lpfc_sli4_sp_handle_rcqe(phba,
9078 (struct lpfc_rcqe *)&cqevt);
9079 break;
9080 default:
9081 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9082 "0388 Not a valid WCQE code: x%x\n",
9083 bf_get(lpfc_cqe_code, &cqevt));
9084 break;
9085 }
9086 return workposted;
9087 }
9088
9089 /**
9090 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
9091 * @phba: Pointer to HBA context object.
9092 * @eqe: Pointer to slow-path event queue entry.
9093 *
9094 * This routine processes an event queue entry from the slow-path event queue.
9095 * It checks the MajorCode and MinorCode to determine whether this is a
9096 * completion event on a completion queue; if not, an error is logged and the
9097 * routine returns. Otherwise, it looks up the corresponding completion
9098 * queue, processes all the entries on that completion queue, rearms the
9099 * completion queue, and then returns.
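 * While a large batch is drained, consumed entries are handed back to the HBA
 * every LPFC_GET_QE_REL_INT entries without re-arming, and the CQ is re-armed
 * once at the end (see the LPFC_QUEUE_NOARM/LPFC_QUEUE_REARM calls below).
 * Descriptive note only.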
9100 * 9101 **/ 9102 static void 9103 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) 9104 { 9105 struct lpfc_queue *cq = NULL, *childq, *speq; 9106 struct lpfc_cqe *cqe; 9107 bool workposted = false; 9108 int ecount = 0; 9109 uint16_t cqid; 9110 9111 if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) { 9112 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9113 "0359 Not a valid slow-path completion " 9114 "event: majorcode=x%x, minorcode=x%x\n", 9115 bf_get_le32(lpfc_eqe_major_code, eqe), 9116 bf_get_le32(lpfc_eqe_minor_code, eqe)); 9117 return; 9118 } 9119 9120 /* Get the reference to the corresponding CQ */ 9121 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 9122 9123 /* Search for completion queue pointer matching this cqid */ 9124 speq = phba->sli4_hba.sp_eq; 9125 list_for_each_entry(childq, &speq->child_list, list) { 9126 if (childq->queue_id == cqid) { 9127 cq = childq; 9128 break; 9129 } 9130 } 9131 if (unlikely(!cq)) { 9132 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 9133 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9134 "0365 Slow-path CQ identifier " 9135 "(%d) does not exist\n", cqid); 9136 return; 9137 } 9138 9139 /* Process all the entries to the CQ */ 9140 switch (cq->type) { 9141 case LPFC_MCQ: 9142 while ((cqe = lpfc_sli4_cq_get(cq))) { 9143 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); 9144 if (!(++ecount % LPFC_GET_QE_REL_INT)) 9145 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 9146 } 9147 break; 9148 case LPFC_WCQ: 9149 while ((cqe = lpfc_sli4_cq_get(cq))) { 9150 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe); 9151 if (!(++ecount % LPFC_GET_QE_REL_INT)) 9152 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 9153 } 9154 break; 9155 default: 9156 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9157 "0370 Invalid completion queue type (%d)\n", 9158 cq->type); 9159 return; 9160 } 9161 9162 /* Catch the no cq entry condition, log an error */ 9163 if (unlikely(ecount == 0)) 9164 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9165 "0371 No entry from the CQ: identifier " 9166 "(x%x), type (%d)\n", cq->queue_id, cq->type); 9167 9168 /* In any case, flash and re-arm the RCQ */ 9169 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 9170 9171 /* wake up worker thread if there are works to be done */ 9172 if (workposted) 9173 lpfc_worker_wake_up(phba); 9174 } 9175 9176 /** 9177 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry 9178 * @eqe: Pointer to fast-path completion queue entry. 9179 * 9180 * This routine process a fast-path work queue completion entry from fast-path 9181 * event queue for FCP command response completion. 9182 **/ 9183 static void 9184 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, 9185 struct lpfc_wcqe_complete *wcqe) 9186 { 9187 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING]; 9188 struct lpfc_iocbq *cmdiocbq; 9189 struct lpfc_iocbq irspiocbq; 9190 unsigned long iflags; 9191 9192 spin_lock_irqsave(&phba->hbalock, iflags); 9193 pring->stats.iocb_event++; 9194 spin_unlock_irqrestore(&phba->hbalock, iflags); 9195 9196 /* Check for response status */ 9197 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 9198 /* If resource errors reported from HBA, reduce queue 9199 * depth of the SCSI device. 
9200 */ 9201 if ((bf_get(lpfc_wcqe_c_status, wcqe) == 9202 IOSTAT_LOCAL_REJECT) && 9203 (wcqe->parameter == IOERR_NO_RESOURCES)) { 9204 phba->lpfc_rampdown_queue_depth(phba); 9205 } 9206 /* Log the error status */ 9207 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9208 "0373 FCP complete error: status=x%x, " 9209 "hw_status=x%x, total_data_specified=%d, " 9210 "parameter=x%x, word3=x%x\n", 9211 bf_get(lpfc_wcqe_c_status, wcqe), 9212 bf_get(lpfc_wcqe_c_hw_status, wcqe), 9213 wcqe->total_data_placed, wcqe->parameter, 9214 wcqe->word3); 9215 } 9216 9217 /* Look up the FCP command IOCB and create pseudo response IOCB */ 9218 spin_lock_irqsave(&phba->hbalock, iflags); 9219 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 9220 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 9221 spin_unlock_irqrestore(&phba->hbalock, iflags); 9222 if (unlikely(!cmdiocbq)) { 9223 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9224 "0374 FCP complete with no corresponding " 9225 "cmdiocb: iotag (%d)\n", 9226 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 9227 return; 9228 } 9229 if (unlikely(!cmdiocbq->iocb_cmpl)) { 9230 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9231 "0375 FCP cmdiocb not callback function " 9232 "iotag: (%d)\n", 9233 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 9234 return; 9235 } 9236 9237 /* Fake the irspiocb and copy necessary response information */ 9238 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); 9239 9240 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 9241 spin_lock_irqsave(&phba->hbalock, iflags); 9242 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 9243 spin_unlock_irqrestore(&phba->hbalock, iflags); 9244 } 9245 9246 /* Pass the cmd_iocb and the rsp state to the upper layer */ 9247 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); 9248 } 9249 9250 /** 9251 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event 9252 * @phba: Pointer to HBA context object. 9253 * @cq: Pointer to completion queue. 9254 * @wcqe: Pointer to work-queue completion queue entry. 9255 * 9256 * This routine handles an fast-path WQ entry comsumed event by invoking the 9257 * proper WQ release routine to the slow-path WQ. 9258 **/ 9259 static void 9260 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 9261 struct lpfc_wcqe_release *wcqe) 9262 { 9263 struct lpfc_queue *childwq; 9264 bool wqid_matched = false; 9265 uint16_t fcp_wqid; 9266 9267 /* Check for fast-path FCP work queue release */ 9268 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); 9269 list_for_each_entry(childwq, &cq->child_list, list) { 9270 if (childwq->queue_id == fcp_wqid) { 9271 lpfc_sli4_wq_release(childwq, 9272 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 9273 wqid_matched = true; 9274 break; 9275 } 9276 } 9277 /* Report warning log message if no match found */ 9278 if (wqid_matched != true) 9279 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9280 "2580 Fast-path wqe consume event carries " 9281 "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid); 9282 } 9283 9284 /** 9285 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry 9286 * @cq: Pointer to the completion queue. 9287 * @eqe: Pointer to fast-path completion queue entry. 9288 * 9289 * This routine process a fast-path work queue completion entry from fast-path 9290 * event queue for FCP command response completion. 
9291 **/ 9292 static int 9293 lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 9294 struct lpfc_cqe *cqe) 9295 { 9296 struct lpfc_wcqe_release wcqe; 9297 bool workposted = false; 9298 9299 /* Copy the work queue CQE and convert endian order if needed */ 9300 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); 9301 9302 /* Check and process for different type of WCQE and dispatch */ 9303 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { 9304 case CQE_CODE_COMPL_WQE: 9305 /* Process the WQ complete event */ 9306 phba->last_completion_time = jiffies; 9307 lpfc_sli4_fp_handle_fcp_wcqe(phba, 9308 (struct lpfc_wcqe_complete *)&wcqe); 9309 break; 9310 case CQE_CODE_RELEASE_WQE: 9311 /* Process the WQ release event */ 9312 lpfc_sli4_fp_handle_rel_wcqe(phba, cq, 9313 (struct lpfc_wcqe_release *)&wcqe); 9314 break; 9315 case CQE_CODE_XRI_ABORTED: 9316 /* Process the WQ XRI abort event */ 9317 phba->last_completion_time = jiffies; 9318 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 9319 (struct sli4_wcqe_xri_aborted *)&wcqe); 9320 break; 9321 default: 9322 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9323 "0144 Not a valid WCQE code: x%x\n", 9324 bf_get(lpfc_wcqe_c_code, &wcqe)); 9325 break; 9326 } 9327 return workposted; 9328 } 9329 9330 /** 9331 * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry 9332 * @phba: Pointer to HBA context object. 9333 * @eqe: Pointer to fast-path event queue entry. 9334 * 9335 * This routine process a event queue entry from the fast-path event queue. 9336 * It will check the MajorCode and MinorCode to determine this is for a 9337 * completion event on a completion queue, if not, an error shall be logged 9338 * and just return. Otherwise, it will get to the corresponding completion 9339 * queue and process all the entries on the completion queue, rearm the 9340 * completion queue, and then return. 
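 *
 * Note (descriptive only): the FCP EQs and CQs are mapped one-to-one, so the
 * fcp_cqidx passed in by the interrupt handler selects
 * phba->sli4_hba.fcp_cq[fcp_cqidx] directly; the EQE's resource id is only
 * used to sanity-check that mapping.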
9341 **/ 9342 static void 9343 lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 9344 uint32_t fcp_cqidx) 9345 { 9346 struct lpfc_queue *cq; 9347 struct lpfc_cqe *cqe; 9348 bool workposted = false; 9349 uint16_t cqid; 9350 int ecount = 0; 9351 9352 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 9353 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9354 "0366 Not a valid fast-path completion " 9355 "event: majorcode=x%x, minorcode=x%x\n", 9356 bf_get_le32(lpfc_eqe_major_code, eqe), 9357 bf_get_le32(lpfc_eqe_minor_code, eqe)); 9358 return; 9359 } 9360 9361 cq = phba->sli4_hba.fcp_cq[fcp_cqidx]; 9362 if (unlikely(!cq)) { 9363 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 9364 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9365 "0367 Fast-path completion queue " 9366 "does not exist\n"); 9367 return; 9368 } 9369 9370 /* Get the reference to the corresponding CQ */ 9371 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 9372 if (unlikely(cqid != cq->queue_id)) { 9373 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9374 "0368 Miss-matched fast-path completion " 9375 "queue identifier: eqcqid=%d, fcpcqid=%d\n", 9376 cqid, cq->queue_id); 9377 return; 9378 } 9379 9380 /* Process all the entries to the CQ */ 9381 while ((cqe = lpfc_sli4_cq_get(cq))) { 9382 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe); 9383 if (!(++ecount % LPFC_GET_QE_REL_INT)) 9384 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 9385 } 9386 9387 /* Catch the no cq entry condition */ 9388 if (unlikely(ecount == 0)) 9389 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9390 "0369 No entry from fast-path completion " 9391 "queue fcpcqid=%d\n", cq->queue_id); 9392 9393 /* In any case, flash and re-arm the CQ */ 9394 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 9395 9396 /* wake up worker thread if there are works to be done */ 9397 if (workposted) 9398 lpfc_worker_wake_up(phba); 9399 } 9400 9401 static void 9402 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) 9403 { 9404 struct lpfc_eqe *eqe; 9405 9406 /* walk all the EQ entries and drop on the floor */ 9407 while ((eqe = lpfc_sli4_eq_get(eq))) 9408 ; 9409 9410 /* Clear and re-arm the EQ */ 9411 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM); 9412 } 9413 9414 /** 9415 * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device 9416 * @irq: Interrupt number. 9417 * @dev_id: The device context pointer. 9418 * 9419 * This function is directly called from the PCI layer as an interrupt 9420 * service routine when device with SLI-4 interface spec is enabled with 9421 * MSI-X multi-message interrupt mode and there are slow-path events in 9422 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 9423 * interrupt mode, this function is called as part of the device-level 9424 * interrupt handler. When the PCI slot is in error recovery or the HBA is 9425 * undergoing initialization, the interrupt handler will not process the 9426 * interrupt. The link attention and ELS ring attention events are handled 9427 * by the worker thread. The interrupt handler signals the worker thread 9428 * and returns for these events. This function is called without any lock 9429 * held. It gets the hbalock to access and update SLI data structures. 9430 * 9431 * This function returns IRQ_HANDLED when interrupt is handled else it 9432 * returns IRQ_NONE. 
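 *
 * Registration sketch (illustrative only; the vector and irq name string are
 * assumptions, the actual setup lives elsewhere in the driver): in MSI-X mode
 * this handler is attached to the slow-path vector with the phba as its
 * device context, e.g.
 *
 *   rc = request_irq(msix_vector, lpfc_sli4_sp_intr_handler, 0,
 *                    "lpfc-sp", phba);
 *
 * The phba pointer passed as dev_id is what this handler recovers below.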
9433 **/ 9434 irqreturn_t 9435 lpfc_sli4_sp_intr_handler(int irq, void *dev_id) 9436 { 9437 struct lpfc_hba *phba; 9438 struct lpfc_queue *speq; 9439 struct lpfc_eqe *eqe; 9440 unsigned long iflag; 9441 int ecount = 0; 9442 9443 /* 9444 * Get the driver's phba structure from the dev_id 9445 */ 9446 phba = (struct lpfc_hba *)dev_id; 9447 9448 if (unlikely(!phba)) 9449 return IRQ_NONE; 9450 9451 /* Get to the EQ struct associated with this vector */ 9452 speq = phba->sli4_hba.sp_eq; 9453 9454 /* Check device state for handling interrupt */ 9455 if (unlikely(lpfc_intr_state_check(phba))) { 9456 /* Check again for link_state with lock held */ 9457 spin_lock_irqsave(&phba->hbalock, iflag); 9458 if (phba->link_state < LPFC_LINK_DOWN) 9459 /* Flush, clear interrupt, and rearm the EQ */ 9460 lpfc_sli4_eq_flush(phba, speq); 9461 spin_unlock_irqrestore(&phba->hbalock, iflag); 9462 return IRQ_NONE; 9463 } 9464 9465 /* 9466 * Process all the event on FCP slow-path EQ 9467 */ 9468 while ((eqe = lpfc_sli4_eq_get(speq))) { 9469 lpfc_sli4_sp_handle_eqe(phba, eqe); 9470 if (!(++ecount % LPFC_GET_QE_REL_INT)) 9471 lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM); 9472 } 9473 9474 /* Always clear and re-arm the slow-path EQ */ 9475 lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM); 9476 9477 /* Catch the no cq entry condition */ 9478 if (unlikely(ecount == 0)) { 9479 if (phba->intr_type == MSIX) 9480 /* MSI-X treated interrupt served as no EQ share INT */ 9481 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9482 "0357 MSI-X interrupt with no EQE\n"); 9483 else 9484 /* Non MSI-X treated on interrupt as EQ share INT */ 9485 return IRQ_NONE; 9486 } 9487 9488 return IRQ_HANDLED; 9489 } /* lpfc_sli4_sp_intr_handler */ 9490 9491 /** 9492 * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device 9493 * @irq: Interrupt number. 9494 * @dev_id: The device context pointer. 9495 * 9496 * This function is directly called from the PCI layer as an interrupt 9497 * service routine when device with SLI-4 interface spec is enabled with 9498 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 9499 * ring event in the HBA. However, when the device is enabled with either 9500 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 9501 * device-level interrupt handler. When the PCI slot is in error recovery 9502 * or the HBA is undergoing initialization, the interrupt handler will not 9503 * process the interrupt. The SCSI FCP fast-path ring event are handled in 9504 * the intrrupt context. This function is called without any lock held. 9505 * It gets the hbalock to access and update SLI data structures. Note that, 9506 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is 9507 * equal to that of FCP CQ index. 9508 * 9509 * This function returns IRQ_HANDLED when interrupt is handled else it 9510 * returns IRQ_NONE. 
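 *
 * Note (descriptive only): unlike the slow-path handler, @dev_id here is a
 * per-vector struct lpfc_fcp_eq_hdl, from which both the phba and the FCP
 * EQ index are recovered before the matching fast-path EQ is drained.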
9511 **/ 9512 irqreturn_t 9513 lpfc_sli4_fp_intr_handler(int irq, void *dev_id) 9514 { 9515 struct lpfc_hba *phba; 9516 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; 9517 struct lpfc_queue *fpeq; 9518 struct lpfc_eqe *eqe; 9519 unsigned long iflag; 9520 int ecount = 0; 9521 uint32_t fcp_eqidx; 9522 9523 /* Get the driver's phba structure from the dev_id */ 9524 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id; 9525 phba = fcp_eq_hdl->phba; 9526 fcp_eqidx = fcp_eq_hdl->idx; 9527 9528 if (unlikely(!phba)) 9529 return IRQ_NONE; 9530 9531 /* Get to the EQ struct associated with this vector */ 9532 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx]; 9533 9534 /* Check device state for handling interrupt */ 9535 if (unlikely(lpfc_intr_state_check(phba))) { 9536 /* Check again for link_state with lock held */ 9537 spin_lock_irqsave(&phba->hbalock, iflag); 9538 if (phba->link_state < LPFC_LINK_DOWN) 9539 /* Flush, clear interrupt, and rearm the EQ */ 9540 lpfc_sli4_eq_flush(phba, fpeq); 9541 spin_unlock_irqrestore(&phba->hbalock, iflag); 9542 return IRQ_NONE; 9543 } 9544 9545 /* 9546 * Process all the event on FCP fast-path EQ 9547 */ 9548 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 9549 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx); 9550 if (!(++ecount % LPFC_GET_QE_REL_INT)) 9551 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); 9552 } 9553 9554 /* Always clear and re-arm the fast-path EQ */ 9555 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); 9556 9557 if (unlikely(ecount == 0)) { 9558 if (phba->intr_type == MSIX) 9559 /* MSI-X treated interrupt served as no EQ share INT */ 9560 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9561 "0358 MSI-X interrupt with no EQE\n"); 9562 else 9563 /* Non MSI-X treated on interrupt as EQ share INT */ 9564 return IRQ_NONE; 9565 } 9566 9567 return IRQ_HANDLED; 9568 } /* lpfc_sli4_fp_intr_handler */ 9569 9570 /** 9571 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device 9572 * @irq: Interrupt number. 9573 * @dev_id: The device context pointer. 9574 * 9575 * This function is the device-level interrupt handler to device with SLI-4 9576 * interface spec, called from the PCI layer when either MSI or Pin-IRQ 9577 * interrupt mode is enabled and there is an event in the HBA which requires 9578 * driver attention. This function invokes the slow-path interrupt attention 9579 * handling function and fast-path interrupt attention handling function in 9580 * turn to process the relevant HBA attention events. This function is called 9581 * without any lock held. It gets the hbalock to access and update SLI data 9582 * structures. 9583 * 9584 * This function returns IRQ_HANDLED when interrupt is handled, else it 9585 * returns IRQ_NONE. 9586 **/ 9587 irqreturn_t 9588 lpfc_sli4_intr_handler(int irq, void *dev_id) 9589 { 9590 struct lpfc_hba *phba; 9591 irqreturn_t sp_irq_rc, fp_irq_rc; 9592 bool fp_handled = false; 9593 uint32_t fcp_eqidx; 9594 9595 /* Get the driver's phba structure from the dev_id */ 9596 phba = (struct lpfc_hba *)dev_id; 9597 9598 if (unlikely(!phba)) 9599 return IRQ_NONE; 9600 9601 /* 9602 * Invokes slow-path host attention interrupt handling as appropriate. 9603 */ 9604 sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id); 9605 9606 /* 9607 * Invoke fast-path host attention interrupt handling as appropriate. 
9608 */ 9609 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 9610 fp_irq_rc = lpfc_sli4_fp_intr_handler(irq, 9611 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]); 9612 if (fp_irq_rc == IRQ_HANDLED) 9613 fp_handled |= true; 9614 } 9615 9616 return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc; 9617 } /* lpfc_sli4_intr_handler */ 9618 9619 /** 9620 * lpfc_sli4_queue_free - free a queue structure and associated memory 9621 * @queue: The queue structure to free. 9622 * 9623 * This function frees a queue structure and the DMAable memeory used for 9624 * the host resident queue. This function must be called after destroying the 9625 * queue on the HBA. 9626 **/ 9627 void 9628 lpfc_sli4_queue_free(struct lpfc_queue *queue) 9629 { 9630 struct lpfc_dmabuf *dmabuf; 9631 9632 if (!queue) 9633 return; 9634 9635 while (!list_empty(&queue->page_list)) { 9636 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, 9637 list); 9638 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE, 9639 dmabuf->virt, dmabuf->phys); 9640 kfree(dmabuf); 9641 } 9642 kfree(queue); 9643 return; 9644 } 9645 9646 /** 9647 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure 9648 * @phba: The HBA that this queue is being created on. 9649 * @entry_size: The size of each queue entry for this queue. 9650 * @entry count: The number of entries that this queue will handle. 9651 * 9652 * This function allocates a queue structure and the DMAable memory used for 9653 * the host resident queue. This function must be called before creating the 9654 * queue on the HBA. 9655 **/ 9656 struct lpfc_queue * 9657 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, 9658 uint32_t entry_count) 9659 { 9660 struct lpfc_queue *queue; 9661 struct lpfc_dmabuf *dmabuf; 9662 int x, total_qe_count; 9663 void *dma_pointer; 9664 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 9665 9666 if (!phba->sli4_hba.pc_sli4_params.supported) 9667 hw_page_size = SLI4_PAGE_SIZE; 9668 9669 queue = kzalloc(sizeof(struct lpfc_queue) + 9670 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL); 9671 if (!queue) 9672 return NULL; 9673 queue->page_count = (ALIGN(entry_size * entry_count, 9674 hw_page_size))/hw_page_size; 9675 INIT_LIST_HEAD(&queue->list); 9676 INIT_LIST_HEAD(&queue->page_list); 9677 INIT_LIST_HEAD(&queue->child_list); 9678 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) { 9679 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 9680 if (!dmabuf) 9681 goto out_fail; 9682 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 9683 hw_page_size, &dmabuf->phys, 9684 GFP_KERNEL); 9685 if (!dmabuf->virt) { 9686 kfree(dmabuf); 9687 goto out_fail; 9688 } 9689 memset(dmabuf->virt, 0, hw_page_size); 9690 dmabuf->buffer_tag = x; 9691 list_add_tail(&dmabuf->list, &queue->page_list); 9692 /* initialize queue's entry array */ 9693 dma_pointer = dmabuf->virt; 9694 for (; total_qe_count < entry_count && 9695 dma_pointer < (hw_page_size + dmabuf->virt); 9696 total_qe_count++, dma_pointer += entry_size) { 9697 queue->qe[total_qe_count].address = dma_pointer; 9698 } 9699 } 9700 queue->entry_size = entry_size; 9701 queue->entry_count = entry_count; 9702 queue->phba = phba; 9703 9704 return queue; 9705 out_fail: 9706 lpfc_sli4_queue_free(queue); 9707 return NULL; 9708 } 9709 9710 /** 9711 * lpfc_eq_create - Create an Event Queue on the HBA 9712 * @phba: HBA structure that indicates port to create a queue on. 9713 * @eq: The queue structure to use to create the event queue. 
9714 * @imax: The maximum interrupt per second limit. 9715 * 9716 * This function creates an event queue, as detailed in @eq, on a port, 9717 * described by @phba by sending an EQ_CREATE mailbox command to the HBA. 9718 * 9719 * The @phba struct is used to send mailbox command to HBA. The @eq struct 9720 * is used to get the entry count and entry size that are necessary to 9721 * determine the number of pages to allocate and use for this queue. This 9722 * function will send the EQ_CREATE mailbox command to the HBA to setup the 9723 * event queue. This function is asynchronous and will wait for the mailbox 9724 * command to finish before continuing. 9725 * 9726 * On success this function will return a zero. If unable to allocate enough 9727 * memory this function will return ENOMEM. If the queue create mailbox command 9728 * fails this function will return ENXIO. 9729 **/ 9730 uint32_t 9731 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax) 9732 { 9733 struct lpfc_mbx_eq_create *eq_create; 9734 LPFC_MBOXQ_t *mbox; 9735 int rc, length, status = 0; 9736 struct lpfc_dmabuf *dmabuf; 9737 uint32_t shdr_status, shdr_add_status; 9738 union lpfc_sli4_cfg_shdr *shdr; 9739 uint16_t dmult; 9740 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 9741 9742 if (!phba->sli4_hba.pc_sli4_params.supported) 9743 hw_page_size = SLI4_PAGE_SIZE; 9744 9745 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9746 if (!mbox) 9747 return -ENOMEM; 9748 length = (sizeof(struct lpfc_mbx_eq_create) - 9749 sizeof(struct lpfc_sli4_cfg_mhdr)); 9750 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 9751 LPFC_MBOX_OPCODE_EQ_CREATE, 9752 length, LPFC_SLI4_MBX_EMBED); 9753 eq_create = &mbox->u.mqe.un.eq_create; 9754 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, 9755 eq->page_count); 9756 bf_set(lpfc_eq_context_size, &eq_create->u.request.context, 9757 LPFC_EQE_SIZE); 9758 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); 9759 /* Calculate delay multiper from maximum interrupt per second */ 9760 dmult = LPFC_DMULT_CONST/imax - 1; 9761 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, 9762 dmult); 9763 switch (eq->entry_count) { 9764 default: 9765 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9766 "0360 Unsupported EQ count. 
(%d)\n", 9767 eq->entry_count); 9768 if (eq->entry_count < 256) 9769 return -EINVAL; 9770 /* otherwise default to smallest count (drop through) */ 9771 case 256: 9772 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 9773 LPFC_EQ_CNT_256); 9774 break; 9775 case 512: 9776 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 9777 LPFC_EQ_CNT_512); 9778 break; 9779 case 1024: 9780 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 9781 LPFC_EQ_CNT_1024); 9782 break; 9783 case 2048: 9784 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 9785 LPFC_EQ_CNT_2048); 9786 break; 9787 case 4096: 9788 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 9789 LPFC_EQ_CNT_4096); 9790 break; 9791 } 9792 list_for_each_entry(dmabuf, &eq->page_list, list) { 9793 memset(dmabuf->virt, 0, hw_page_size); 9794 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 9795 putPaddrLow(dmabuf->phys); 9796 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 9797 putPaddrHigh(dmabuf->phys); 9798 } 9799 mbox->vport = phba->pport; 9800 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 9801 mbox->context1 = NULL; 9802 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 9803 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; 9804 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 9805 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 9806 if (shdr_status || shdr_add_status || rc) { 9807 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9808 "2500 EQ_CREATE mailbox failed with " 9809 "status x%x add_status x%x, mbx status x%x\n", 9810 shdr_status, shdr_add_status, rc); 9811 status = -ENXIO; 9812 } 9813 eq->type = LPFC_EQ; 9814 eq->subtype = LPFC_NONE; 9815 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); 9816 if (eq->queue_id == 0xFFFF) 9817 status = -ENXIO; 9818 eq->host_index = 0; 9819 eq->hba_index = 0; 9820 9821 mempool_free(mbox, phba->mbox_mem_pool); 9822 return status; 9823 } 9824 9825 /** 9826 * lpfc_cq_create - Create a Completion Queue on the HBA 9827 * @phba: HBA structure that indicates port to create a queue on. 9828 * @cq: The queue structure to use to create the completion queue. 9829 * @eq: The event queue to bind this completion queue to. 9830 * 9831 * This function creates a completion queue, as detailed in @wq, on a port, 9832 * described by @phba by sending a CQ_CREATE mailbox command to the HBA. 9833 * 9834 * The @phba struct is used to send mailbox command to HBA. The @cq struct 9835 * is used to get the entry count and entry size that are necessary to 9836 * determine the number of pages to allocate and use for this queue. The @eq 9837 * is used to indicate which event queue to bind this completion queue to. This 9838 * function will send the CQ_CREATE mailbox command to the HBA to setup the 9839 * completion queue. This function is asynchronous and will wait for the mailbox 9840 * command to finish before continuing. 9841 * 9842 * On success this function will return a zero. If unable to allocate enough 9843 * memory this function will return ENOMEM. If the queue create mailbox command 9844 * fails this function will return ENXIO. 
9845 **/ 9846 uint32_t 9847 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, 9848 struct lpfc_queue *eq, uint32_t type, uint32_t subtype) 9849 { 9850 struct lpfc_mbx_cq_create *cq_create; 9851 struct lpfc_dmabuf *dmabuf; 9852 LPFC_MBOXQ_t *mbox; 9853 int rc, length, status = 0; 9854 uint32_t shdr_status, shdr_add_status; 9855 union lpfc_sli4_cfg_shdr *shdr; 9856 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 9857 9858 if (!phba->sli4_hba.pc_sli4_params.supported) 9859 hw_page_size = SLI4_PAGE_SIZE; 9860 9861 9862 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9863 if (!mbox) 9864 return -ENOMEM; 9865 length = (sizeof(struct lpfc_mbx_cq_create) - 9866 sizeof(struct lpfc_sli4_cfg_mhdr)); 9867 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 9868 LPFC_MBOX_OPCODE_CQ_CREATE, 9869 length, LPFC_SLI4_MBX_EMBED); 9870 cq_create = &mbox->u.mqe.un.cq_create; 9871 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, 9872 cq->page_count); 9873 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); 9874 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); 9875 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id); 9876 switch (cq->entry_count) { 9877 default: 9878 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9879 "0361 Unsupported CQ count. (%d)\n", 9880 cq->entry_count); 9881 if (cq->entry_count < 256) 9882 return -EINVAL; 9883 /* otherwise default to smallest count (drop through) */ 9884 case 256: 9885 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 9886 LPFC_CQ_CNT_256); 9887 break; 9888 case 512: 9889 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 9890 LPFC_CQ_CNT_512); 9891 break; 9892 case 1024: 9893 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 9894 LPFC_CQ_CNT_1024); 9895 break; 9896 } 9897 list_for_each_entry(dmabuf, &cq->page_list, list) { 9898 memset(dmabuf->virt, 0, hw_page_size); 9899 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 9900 putPaddrLow(dmabuf->phys); 9901 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 9902 putPaddrHigh(dmabuf->phys); 9903 } 9904 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 9905 9906 /* The IOCTL status is embedded in the mailbox subheader. */ 9907 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; 9908 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 9909 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 9910 if (shdr_status || shdr_add_status || rc) { 9911 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9912 "2501 CQ_CREATE mailbox failed with " 9913 "status x%x add_status x%x, mbx status x%x\n", 9914 shdr_status, shdr_add_status, rc); 9915 status = -ENXIO; 9916 goto out; 9917 } 9918 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 9919 if (cq->queue_id == 0xFFFF) { 9920 status = -ENXIO; 9921 goto out; 9922 } 9923 /* link the cq onto the parent eq child list */ 9924 list_add_tail(&cq->list, &eq->child_list); 9925 /* Set up completion queue's type and subtype */ 9926 cq->type = type; 9927 cq->subtype = subtype; 9928 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 9929 cq->host_index = 0; 9930 cq->hba_index = 0; 9931 9932 out: 9933 mempool_free(mbox, phba->mbox_mem_pool); 9934 return status; 9935 } 9936 9937 /** 9938 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration 9939 * @phba: HBA structure that indicates port to create a queue on. 9940 * @mq: The queue structure to use to create the mailbox queue. 
9941 * @mbox: An allocated pointer to type LPFC_MBOXQ_t 9942 * @cq: The completion queue to associate with this cq. 9943 * 9944 * This function provides failback (fb) functionality when the 9945 * mq_create_ext fails on older FW generations. It's purpose is identical 9946 * to mq_create_ext otherwise. 9947 * 9948 * This routine cannot fail as all attributes were previously accessed and 9949 * initialized in mq_create_ext. 9950 **/ 9951 static void 9952 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, 9953 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq) 9954 { 9955 struct lpfc_mbx_mq_create *mq_create; 9956 struct lpfc_dmabuf *dmabuf; 9957 int length; 9958 9959 length = (sizeof(struct lpfc_mbx_mq_create) - 9960 sizeof(struct lpfc_sli4_cfg_mhdr)); 9961 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 9962 LPFC_MBOX_OPCODE_MQ_CREATE, 9963 length, LPFC_SLI4_MBX_EMBED); 9964 mq_create = &mbox->u.mqe.un.mq_create; 9965 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, 9966 mq->page_count); 9967 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, 9968 cq->queue_id); 9969 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); 9970 switch (mq->entry_count) { 9971 case 16: 9972 bf_set(lpfc_mq_context_count, &mq_create->u.request.context, 9973 LPFC_MQ_CNT_16); 9974 break; 9975 case 32: 9976 bf_set(lpfc_mq_context_count, &mq_create->u.request.context, 9977 LPFC_MQ_CNT_32); 9978 break; 9979 case 64: 9980 bf_set(lpfc_mq_context_count, &mq_create->u.request.context, 9981 LPFC_MQ_CNT_64); 9982 break; 9983 case 128: 9984 bf_set(lpfc_mq_context_count, &mq_create->u.request.context, 9985 LPFC_MQ_CNT_128); 9986 break; 9987 } 9988 list_for_each_entry(dmabuf, &mq->page_list, list) { 9989 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 9990 putPaddrLow(dmabuf->phys); 9991 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 9992 putPaddrHigh(dmabuf->phys); 9993 } 9994 } 9995 9996 /** 9997 * lpfc_mq_create - Create a mailbox Queue on the HBA 9998 * @phba: HBA structure that indicates port to create a queue on. 9999 * @mq: The queue structure to use to create the mailbox queue. 10000 * @cq: The completion queue to associate with this cq. 10001 * @subtype: The queue's subtype. 10002 * 10003 * This function creates a mailbox queue, as detailed in @mq, on a port, 10004 * described by @phba by sending a MQ_CREATE mailbox command to the HBA. 10005 * 10006 * The @phba struct is used to send mailbox command to HBA. The @cq struct 10007 * is used to get the entry count and entry size that are necessary to 10008 * determine the number of pages to allocate and use for this queue. This 10009 * function will send the MQ_CREATE mailbox command to the HBA to setup the 10010 * mailbox queue. This function is asynchronous and will wait for the mailbox 10011 * command to finish before continuing. 10012 * 10013 * On success this function will return a zero. If unable to allocate enough 10014 * memory this function will return ENOMEM. If the queue create mailbox command 10015 * fails this function will return ENXIO. 
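 *
 * Note (descriptive only): the routine first tries MQ_CREATE_EXT to register
 * for async link/FCF/group5 events; if that mailbox command fails it falls
 * back to a plain MQ_CREATE built by lpfc_mq_create_fb_init().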
10016 **/ 10017 int32_t 10018 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, 10019 struct lpfc_queue *cq, uint32_t subtype) 10020 { 10021 struct lpfc_mbx_mq_create *mq_create; 10022 struct lpfc_mbx_mq_create_ext *mq_create_ext; 10023 struct lpfc_dmabuf *dmabuf; 10024 LPFC_MBOXQ_t *mbox; 10025 int rc, length, status = 0; 10026 uint32_t shdr_status, shdr_add_status; 10027 union lpfc_sli4_cfg_shdr *shdr; 10028 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 10029 10030 if (!phba->sli4_hba.pc_sli4_params.supported) 10031 hw_page_size = SLI4_PAGE_SIZE; 10032 10033 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10034 if (!mbox) 10035 return -ENOMEM; 10036 length = (sizeof(struct lpfc_mbx_mq_create_ext) - 10037 sizeof(struct lpfc_sli4_cfg_mhdr)); 10038 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 10039 LPFC_MBOX_OPCODE_MQ_CREATE_EXT, 10040 length, LPFC_SLI4_MBX_EMBED); 10041 10042 mq_create_ext = &mbox->u.mqe.un.mq_create_ext; 10043 bf_set(lpfc_mbx_mq_create_ext_num_pages, &mq_create_ext->u.request, 10044 mq->page_count); 10045 bf_set(lpfc_mbx_mq_create_ext_async_evt_link, &mq_create_ext->u.request, 10046 1); 10047 bf_set(lpfc_mbx_mq_create_ext_async_evt_fcfste, 10048 &mq_create_ext->u.request, 1); 10049 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, 10050 &mq_create_ext->u.request, 1); 10051 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, 10052 cq->queue_id); 10053 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); 10054 switch (mq->entry_count) { 10055 default: 10056 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10057 "0362 Unsupported MQ count. (%d)\n", 10058 mq->entry_count); 10059 if (mq->entry_count < 16) 10060 return -EINVAL; 10061 /* otherwise default to smallest count (drop through) */ 10062 case 16: 10063 bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, 10064 LPFC_MQ_CNT_16); 10065 break; 10066 case 32: 10067 bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, 10068 LPFC_MQ_CNT_32); 10069 break; 10070 case 64: 10071 bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, 10072 LPFC_MQ_CNT_64); 10073 break; 10074 case 128: 10075 bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, 10076 LPFC_MQ_CNT_128); 10077 break; 10078 } 10079 list_for_each_entry(dmabuf, &mq->page_list, list) { 10080 memset(dmabuf->virt, 0, hw_page_size); 10081 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = 10082 putPaddrLow(dmabuf->phys); 10083 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = 10084 putPaddrHigh(dmabuf->phys); 10085 } 10086 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10087 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; 10088 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 10089 &mq_create_ext->u.response); 10090 if (rc != MBX_SUCCESS) { 10091 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10092 "2795 MQ_CREATE_EXT failed with " 10093 "status x%x. Failback to MQ_CREATE.\n", 10094 rc); 10095 lpfc_mq_create_fb_init(phba, mq, mbox, cq); 10096 mq_create = &mbox->u.mqe.un.mq_create; 10097 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10098 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; 10099 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 10100 &mq_create->u.response); 10101 } 10102 10103 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 10104 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10105 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10106 if (shdr_status || shdr_add_status || rc) { 10107 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10108 "2502 MQ_CREATE mailbox failed with " 10109 "status x%x add_status x%x, mbx status x%x\n", 10110 shdr_status, shdr_add_status, rc); 10111 status = -ENXIO; 10112 goto out; 10113 } 10114 if (mq->queue_id == 0xFFFF) { 10115 status = -ENXIO; 10116 goto out; 10117 } 10118 mq->type = LPFC_MQ; 10119 mq->subtype = subtype; 10120 mq->host_index = 0; 10121 mq->hba_index = 0; 10122 10123 /* link the mq onto the parent cq child list */ 10124 list_add_tail(&mq->list, &cq->child_list); 10125 out: 10126 mempool_free(mbox, phba->mbox_mem_pool); 10127 return status; 10128 } 10129 10130 /** 10131 * lpfc_wq_create - Create a Work Queue on the HBA 10132 * @phba: HBA structure that indicates port to create a queue on. 10133 * @wq: The queue structure to use to create the work queue. 10134 * @cq: The completion queue to bind this work queue to. 10135 * @subtype: The subtype of the work queue indicating its functionality. 10136 * 10137 * This function creates a work queue, as detailed in @wq, on a port, described 10138 * by @phba by sending a WQ_CREATE mailbox command to the HBA. 10139 * 10140 * The @phba struct is used to send mailbox command to HBA. The @wq struct 10141 * is used to get the entry count and entry size that are necessary to 10142 * determine the number of pages to allocate and use for this queue. The @cq 10143 * is used to indicate which completion queue to bind this work queue to. This 10144 * function will send the WQ_CREATE mailbox command to the HBA to setup the 10145 * work queue. This function is asynchronous and will wait for the mailbox 10146 * command to finish before continuing. 10147 * 10148 * On success this function will return a zero. If unable to allocate enough 10149 * memory this function will return ENOMEM. If the queue create mailbox command 10150 * fails this function will return ENXIO. 
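 *
 * Illustrative call (values are examples only), binding a new work queue to
 * an existing completion queue:
 *
 *   rc = lpfc_wq_create(phba, wq, cq, LPFC_FCP);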
10151 **/ 10152 uint32_t 10153 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, 10154 struct lpfc_queue *cq, uint32_t subtype) 10155 { 10156 struct lpfc_mbx_wq_create *wq_create; 10157 struct lpfc_dmabuf *dmabuf; 10158 LPFC_MBOXQ_t *mbox; 10159 int rc, length, status = 0; 10160 uint32_t shdr_status, shdr_add_status; 10161 union lpfc_sli4_cfg_shdr *shdr; 10162 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 10163 10164 if (!phba->sli4_hba.pc_sli4_params.supported) 10165 hw_page_size = SLI4_PAGE_SIZE; 10166 10167 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10168 if (!mbox) 10169 return -ENOMEM; 10170 length = (sizeof(struct lpfc_mbx_wq_create) - 10171 sizeof(struct lpfc_sli4_cfg_mhdr)); 10172 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 10173 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, 10174 length, LPFC_SLI4_MBX_EMBED); 10175 wq_create = &mbox->u.mqe.un.wq_create; 10176 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, 10177 wq->page_count); 10178 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, 10179 cq->queue_id); 10180 list_for_each_entry(dmabuf, &wq->page_list, list) { 10181 memset(dmabuf->virt, 0, hw_page_size); 10182 wq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 10183 putPaddrLow(dmabuf->phys); 10184 wq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 10185 putPaddrHigh(dmabuf->phys); 10186 } 10187 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10188 /* The IOCTL status is embedded in the mailbox subheader. */ 10189 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; 10190 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10191 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10192 if (shdr_status || shdr_add_status || rc) { 10193 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10194 "2503 WQ_CREATE mailbox failed with " 10195 "status x%x add_status x%x, mbx status x%x\n", 10196 shdr_status, shdr_add_status, rc); 10197 status = -ENXIO; 10198 goto out; 10199 } 10200 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response); 10201 if (wq->queue_id == 0xFFFF) { 10202 status = -ENXIO; 10203 goto out; 10204 } 10205 wq->type = LPFC_WQ; 10206 wq->subtype = subtype; 10207 wq->host_index = 0; 10208 wq->hba_index = 0; 10209 10210 /* link the wq onto the parent cq child list */ 10211 list_add_tail(&wq->list, &cq->child_list); 10212 out: 10213 mempool_free(mbox, phba->mbox_mem_pool); 10214 return status; 10215 } 10216 10217 /** 10218 * lpfc_rq_create - Create a Receive Queue on the HBA 10219 * @phba: HBA structure that indicates port to create a queue on. 10220 * @hrq: The queue structure to use to create the header receive queue. 10221 * @drq: The queue structure to use to create the data receive queue. 10222 * @cq: The completion queue to bind this work queue to. 10223 * 10224 * This function creates a receive buffer queue pair , as detailed in @hrq and 10225 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 10226 * to the HBA. 10227 * 10228 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 10229 * struct is used to get the entry count that is necessary to determine the 10230 * number of pages to use for this queue. The @cq is used to indicate which 10231 * completion queue to bind received buffers that are posted to these queues to. 10232 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 10233 * receive queue pair. 
This function is asynchronous and will wait for the 10234 * mailbox command to finish before continuing. 10235 * 10236 * On success this function will return a zero. If unable to allocate enough 10237 * memory this function will return ENOMEM. If the queue create mailbox command 10238 * fails this function will return ENXIO. 10239 **/ 10240 uint32_t 10241 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, 10242 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) 10243 { 10244 struct lpfc_mbx_rq_create *rq_create; 10245 struct lpfc_dmabuf *dmabuf; 10246 LPFC_MBOXQ_t *mbox; 10247 int rc, length, status = 0; 10248 uint32_t shdr_status, shdr_add_status; 10249 union lpfc_sli4_cfg_shdr *shdr; 10250 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 10251 10252 if (!phba->sli4_hba.pc_sli4_params.supported) 10253 hw_page_size = SLI4_PAGE_SIZE; 10254 10255 if (hrq->entry_count != drq->entry_count) 10256 return -EINVAL; 10257 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10258 if (!mbox) 10259 return -ENOMEM; 10260 length = (sizeof(struct lpfc_mbx_rq_create) - 10261 sizeof(struct lpfc_sli4_cfg_mhdr)); 10262 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 10263 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 10264 length, LPFC_SLI4_MBX_EMBED); 10265 rq_create = &mbox->u.mqe.un.rq_create; 10266 switch (hrq->entry_count) { 10267 default: 10268 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10269 "2535 Unsupported RQ count. (%d)\n", 10270 hrq->entry_count); 10271 if (hrq->entry_count < 512) 10272 return -EINVAL; 10273 /* otherwise default to smallest count (drop through) */ 10274 case 512: 10275 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10276 LPFC_RQ_RING_SIZE_512); 10277 break; 10278 case 1024: 10279 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10280 LPFC_RQ_RING_SIZE_1024); 10281 break; 10282 case 2048: 10283 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10284 LPFC_RQ_RING_SIZE_2048); 10285 break; 10286 case 4096: 10287 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10288 LPFC_RQ_RING_SIZE_4096); 10289 break; 10290 } 10291 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 10292 cq->queue_id); 10293 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 10294 hrq->page_count); 10295 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 10296 LPFC_HDR_BUF_SIZE); 10297 list_for_each_entry(dmabuf, &hrq->page_list, list) { 10298 memset(dmabuf->virt, 0, hw_page_size); 10299 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 10300 putPaddrLow(dmabuf->phys); 10301 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 10302 putPaddrHigh(dmabuf->phys); 10303 } 10304 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10305 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 10306 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 10307 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10308 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10309 if (shdr_status || shdr_add_status || rc) { 10310 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10311 "2504 RQ_CREATE mailbox failed with " 10312 "status x%x add_status x%x, mbx status x%x\n", 10313 shdr_status, shdr_add_status, rc); 10314 status = -ENXIO; 10315 goto out; 10316 } 10317 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 10318 if (hrq->queue_id == 0xFFFF) { 10319 status = -ENXIO; 10320 goto out; 10321 } 10322 hrq->type = LPFC_HRQ; 10323 hrq->subtype = subtype; 10324 hrq->host_index = 0; 10325 hrq->hba_index = 0; 10326 10327 /* now create the data queue */ 10328 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 10329 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 10330 length, LPFC_SLI4_MBX_EMBED); 10331 switch (drq->entry_count) { 10332 default: 10333 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10334 "2536 Unsupported RQ count. (%d)\n", 10335 drq->entry_count); 10336 if (drq->entry_count < 512) 10337 return -EINVAL; 10338 /* otherwise default to smallest count (drop through) */ 10339 case 512: 10340 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10341 LPFC_RQ_RING_SIZE_512); 10342 break; 10343 case 1024: 10344 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10345 LPFC_RQ_RING_SIZE_1024); 10346 break; 10347 case 2048: 10348 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10349 LPFC_RQ_RING_SIZE_2048); 10350 break; 10351 case 4096: 10352 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10353 LPFC_RQ_RING_SIZE_4096); 10354 break; 10355 } 10356 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 10357 cq->queue_id); 10358 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 10359 drq->page_count); 10360 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 10361 LPFC_DATA_BUF_SIZE); 10362 list_for_each_entry(dmabuf, &drq->page_list, list) { 10363 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 10364 putPaddrLow(dmabuf->phys); 10365 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 10366 putPaddrHigh(dmabuf->phys); 10367 } 10368 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10369 /* The IOCTL status is embedded in the mailbox subheader. */ 10370 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 10371 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10372 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10373 if (shdr_status || shdr_add_status || rc) { 10374 status = -ENXIO; 10375 goto out; 10376 } 10377 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 10378 if (drq->queue_id == 0xFFFF) { 10379 status = -ENXIO; 10380 goto out; 10381 } 10382 drq->type = LPFC_DRQ; 10383 drq->subtype = subtype; 10384 drq->host_index = 0; 10385 drq->hba_index = 0; 10386 10387 /* link the header and data RQs onto the parent cq child list */ 10388 list_add_tail(&hrq->list, &cq->child_list); 10389 list_add_tail(&drq->list, &cq->child_list); 10390 10391 out: 10392 mempool_free(mbox, phba->mbox_mem_pool); 10393 return status; 10394 } 10395 10396 /** 10397 * lpfc_eq_destroy - Destroy an event Queue on the HBA 10398 * @eq: The queue structure associated with the queue to destroy. 
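 * @phba: HBA structure that indicates port to destroy the queue on.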
10399 * 10400 * This function destroys a queue, as detailed in @eq by sending an mailbox 10401 * command, specific to the type of queue, to the HBA. 10402 * 10403 * The @eq struct is used to get the queue ID of the queue to destroy. 10404 * 10405 * On success this function will return a zero. If the queue destroy mailbox 10406 * command fails this function will return ENXIO. 10407 **/ 10408 uint32_t 10409 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) 10410 { 10411 LPFC_MBOXQ_t *mbox; 10412 int rc, length, status = 0; 10413 uint32_t shdr_status, shdr_add_status; 10414 union lpfc_sli4_cfg_shdr *shdr; 10415 10416 if (!eq) 10417 return -ENODEV; 10418 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); 10419 if (!mbox) 10420 return -ENOMEM; 10421 length = (sizeof(struct lpfc_mbx_eq_destroy) - 10422 sizeof(struct lpfc_sli4_cfg_mhdr)); 10423 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 10424 LPFC_MBOX_OPCODE_EQ_DESTROY, 10425 length, LPFC_SLI4_MBX_EMBED); 10426 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, 10427 eq->queue_id); 10428 mbox->vport = eq->phba->pport; 10429 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10430 10431 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); 10432 /* The IOCTL status is embedded in the mailbox subheader. */ 10433 shdr = (union lpfc_sli4_cfg_shdr *) 10434 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; 10435 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10436 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10437 if (shdr_status || shdr_add_status || rc) { 10438 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10439 "2505 EQ_DESTROY mailbox failed with " 10440 "status x%x add_status x%x, mbx status x%x\n", 10441 shdr_status, shdr_add_status, rc); 10442 status = -ENXIO; 10443 } 10444 10445 /* Remove eq from any list */ 10446 list_del_init(&eq->list); 10447 mempool_free(mbox, eq->phba->mbox_mem_pool); 10448 return status; 10449 } 10450 10451 /** 10452 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA 10453 * @cq: The queue structure associated with the queue to destroy. 10454 * 10455 * This function destroys a queue, as detailed in @cq by sending an mailbox 10456 * command, specific to the type of queue, to the HBA. 10457 * 10458 * The @cq struct is used to get the queue ID of the queue to destroy. 10459 * 10460 * On success this function will return a zero. If the queue destroy mailbox 10461 * command fails this function will return ENXIO. 10462 **/ 10463 uint32_t 10464 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) 10465 { 10466 LPFC_MBOXQ_t *mbox; 10467 int rc, length, status = 0; 10468 uint32_t shdr_status, shdr_add_status; 10469 union lpfc_sli4_cfg_shdr *shdr; 10470 10471 if (!cq) 10472 return -ENODEV; 10473 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); 10474 if (!mbox) 10475 return -ENOMEM; 10476 length = (sizeof(struct lpfc_mbx_cq_destroy) - 10477 sizeof(struct lpfc_sli4_cfg_mhdr)); 10478 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 10479 LPFC_MBOX_OPCODE_CQ_DESTROY, 10480 length, LPFC_SLI4_MBX_EMBED); 10481 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, 10482 cq->queue_id); 10483 mbox->vport = cq->phba->pport; 10484 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10485 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); 10486 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 10487 shdr = (union lpfc_sli4_cfg_shdr *) 10488 &mbox->u.mqe.un.wq_create.header.cfg_shdr; 10489 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10490 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10491 if (shdr_status || shdr_add_status || rc) { 10492 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10493 "2506 CQ_DESTROY mailbox failed with " 10494 "status x%x add_status x%x, mbx status x%x\n", 10495 shdr_status, shdr_add_status, rc); 10496 status = -ENXIO; 10497 } 10498 /* Remove cq from any list */ 10499 list_del_init(&cq->list); 10500 mempool_free(mbox, cq->phba->mbox_mem_pool); 10501 return status; 10502 } 10503 10504 /** 10505 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA 10506 * @qm: The queue structure associated with the queue to destroy. 10507 * 10508 * This function destroys a queue, as detailed in @mq by sending an mailbox 10509 * command, specific to the type of queue, to the HBA. 10510 * 10511 * The @mq struct is used to get the queue ID of the queue to destroy. 10512 * 10513 * On success this function will return a zero. If the queue destroy mailbox 10514 * command fails this function will return ENXIO. 10515 **/ 10516 uint32_t 10517 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) 10518 { 10519 LPFC_MBOXQ_t *mbox; 10520 int rc, length, status = 0; 10521 uint32_t shdr_status, shdr_add_status; 10522 union lpfc_sli4_cfg_shdr *shdr; 10523 10524 if (!mq) 10525 return -ENODEV; 10526 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); 10527 if (!mbox) 10528 return -ENOMEM; 10529 length = (sizeof(struct lpfc_mbx_mq_destroy) - 10530 sizeof(struct lpfc_sli4_cfg_mhdr)); 10531 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 10532 LPFC_MBOX_OPCODE_MQ_DESTROY, 10533 length, LPFC_SLI4_MBX_EMBED); 10534 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, 10535 mq->queue_id); 10536 mbox->vport = mq->phba->pport; 10537 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10538 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL); 10539 /* The IOCTL status is embedded in the mailbox subheader. */ 10540 shdr = (union lpfc_sli4_cfg_shdr *) 10541 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; 10542 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10543 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10544 if (shdr_status || shdr_add_status || rc) { 10545 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10546 "2507 MQ_DESTROY mailbox failed with " 10547 "status x%x add_status x%x, mbx status x%x\n", 10548 shdr_status, shdr_add_status, rc); 10549 status = -ENXIO; 10550 } 10551 /* Remove mq from any list */ 10552 list_del_init(&mq->list); 10553 mempool_free(mbox, mq->phba->mbox_mem_pool); 10554 return status; 10555 } 10556 10557 /** 10558 * lpfc_wq_destroy - Destroy a Work Queue on the HBA 10559 * @wq: The queue structure associated with the queue to destroy. 10560 * 10561 * This function destroys a queue, as detailed in @wq by sending an mailbox 10562 * command, specific to the type of queue, to the HBA. 10563 * 10564 * The @wq struct is used to get the queue ID of the queue to destroy. 10565 * 10566 * On success this function will return a zero. If the queue destroy mailbox 10567 * command fails this function will return ENXIO. 
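 *
 * A minimal teardown sketch (illustrative only; it assumes the work queue
 * was created earlier with lpfc_wq_create(), for example the driver's
 * phba->sli4_hba.els_wq pointer, and leaves the error handling policy to
 * the caller):
 *
 *	rc = lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
 *	if (rc)
 *		return rc;
 *
 * Note that the queue is removed from its parent CQ child list even when
 * the destroy mailbox command fails.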
10568 **/ 10569 uint32_t 10570 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) 10571 { 10572 LPFC_MBOXQ_t *mbox; 10573 int rc, length, status = 0; 10574 uint32_t shdr_status, shdr_add_status; 10575 union lpfc_sli4_cfg_shdr *shdr; 10576 10577 if (!wq) 10578 return -ENODEV; 10579 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); 10580 if (!mbox) 10581 return -ENOMEM; 10582 length = (sizeof(struct lpfc_mbx_wq_destroy) - 10583 sizeof(struct lpfc_sli4_cfg_mhdr)); 10584 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 10585 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, 10586 length, LPFC_SLI4_MBX_EMBED); 10587 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, 10588 wq->queue_id); 10589 mbox->vport = wq->phba->pport; 10590 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10591 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); 10592 shdr = (union lpfc_sli4_cfg_shdr *) 10593 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; 10594 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10595 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10596 if (shdr_status || shdr_add_status || rc) { 10597 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10598 "2508 WQ_DESTROY mailbox failed with " 10599 "status x%x add_status x%x, mbx status x%x\n", 10600 shdr_status, shdr_add_status, rc); 10601 status = -ENXIO; 10602 } 10603 /* Remove wq from any list */ 10604 list_del_init(&wq->list); 10605 mempool_free(mbox, wq->phba->mbox_mem_pool); 10606 return status; 10607 } 10608 10609 /** 10610 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA 10611 * @rq: The queue structure associated with the queue to destroy. 10612 * 10613 * This function destroys a queue, as detailed in @rq by sending an mailbox 10614 * command, specific to the type of queue, to the HBA. 10615 * 10616 * The @rq struct is used to get the queue ID of the queue to destroy. 10617 * 10618 * On success this function will return a zero. If the queue destroy mailbox 10619 * command fails this function will return ENXIO. 10620 **/ 10621 uint32_t 10622 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, 10623 struct lpfc_queue *drq) 10624 { 10625 LPFC_MBOXQ_t *mbox; 10626 int rc, length, status = 0; 10627 uint32_t shdr_status, shdr_add_status; 10628 union lpfc_sli4_cfg_shdr *shdr; 10629 10630 if (!hrq || !drq) 10631 return -ENODEV; 10632 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); 10633 if (!mbox) 10634 return -ENOMEM; 10635 length = (sizeof(struct lpfc_mbx_rq_destroy) - 10636 sizeof(struct mbox_header)); 10637 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 10638 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, 10639 length, LPFC_SLI4_MBX_EMBED); 10640 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 10641 hrq->queue_id); 10642 mbox->vport = hrq->phba->pport; 10643 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10644 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); 10645 /* The IOCTL status is embedded in the mailbox subheader. 
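 * Both the generic status and the additional status returned in the
 * subheader must be zero; a non-zero mailbox return code is treated as a
 * failure as well.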
*/ 10646 shdr = (union lpfc_sli4_cfg_shdr *) 10647 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 10648 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10649 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10650 if (shdr_status || shdr_add_status || rc) { 10651 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10652 "2509 RQ_DESTROY mailbox failed with " 10653 "status x%x add_status x%x, mbx status x%x\n", 10654 shdr_status, shdr_add_status, rc); 10655 if (rc != MBX_TIMEOUT) 10656 mempool_free(mbox, hrq->phba->mbox_mem_pool); 10657 return -ENXIO; 10658 } 10659 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 10660 drq->queue_id); 10661 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL); 10662 shdr = (union lpfc_sli4_cfg_shdr *) 10663 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 10664 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10665 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10666 if (shdr_status || shdr_add_status || rc) { 10667 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10668 "2510 RQ_DESTROY mailbox failed with " 10669 "status x%x add_status x%x, mbx status x%x\n", 10670 shdr_status, shdr_add_status, rc); 10671 status = -ENXIO; 10672 } 10673 list_del_init(&hrq->list); 10674 list_del_init(&drq->list); 10675 mempool_free(mbox, hrq->phba->mbox_mem_pool); 10676 return status; 10677 } 10678 10679 /** 10680 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA 10681 * @phba: The virtual port for which this call being executed. 10682 * @pdma_phys_addr0: Physical address of the 1st SGL page. 10683 * @pdma_phys_addr1: Physical address of the 2nd SGL page. 10684 * @xritag: the xritag that ties this io to the SGL pages. 10685 * 10686 * This routine will post the sgl pages for the IO that has the xritag 10687 * that is in the iocbq structure. The xritag is assigned during iocbq 10688 * creation and persists for as long as the driver is loaded. 10689 * if the caller has fewer than 256 scatter gather segments to map then 10690 * pdma_phys_addr1 should be 0. 10691 * If the caller needs to map more than 256 scatter gather segment then 10692 * pdma_phys_addr1 should be a valid physical address. 10693 * physical address for SGLs must be 64 byte aligned. 10694 * If you are going to map 2 SGL's then the first one must have 256 entries 10695 * the second sgl can have between 1 and 256 entries. 
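 *
 * For example, posting a single-page SGL for an xritag xri (a sketch; the
 * unused second page address is simply passed as 0):
 *
 *	rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, xri);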
 *
 * Return codes:
 *	0 - Success
 *	-ENXIO, -ENOMEM - Failure
 **/
int
lpfc_sli4_post_sgl(struct lpfc_hba *phba,
		   dma_addr_t pdma_phys_addr0,
		   dma_addr_t pdma_phys_addr1,
		   uint16_t xritag)
{
	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
	LPFC_MBOXQ_t *mbox;
	int rc;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	if (xritag == NO_XRI) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0364 Invalid param:\n");
		return -EINVAL;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
			sizeof(struct lpfc_mbx_post_sgl_pages) -
			sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);

	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
				&mbox->u.mqe.un.post_sgl_pages;
	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2511 POST_SGL mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
/**
 * lpfc_sli4_remove_all_sgl_pages - Remove all scatter gather lists from the HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will remove all of the sgl pages registered with the hba.
 *
 * Return codes:
 *	0 - Success
 *	-ENXIO, -ENOMEM - Failure
 **/
int
lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES, 0,
			LPFC_SLI4_MBX_EMBED);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2512 REMOVE_ALL_SGL_PAGES mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}

/**
 * lpfc_sli4_next_xritag - Get an xritag for the io
 * @phba: Pointer to HBA context object.
 *
 * This function allocates an xritag for the iocb. If no unused xritag is
 * available it will return NO_XRI (0xffff), which is not a valid xritag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
	uint16_t xritag;

	spin_lock_irq(&phba->hbalock);
	xritag = phba->sli4_hba.next_xri;
	if ((xritag != (uint16_t) -1) && xritag <
		(phba->sli4_hba.max_cfg_param.max_xri
			+ phba->sli4_hba.max_cfg_param.xri_base)) {
		phba->sli4_hba.next_xri++;
		phba->sli4_hba.max_cfg_param.xri_used++;
		spin_unlock_irq(&phba->hbalock);
		return xritag;
	}
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"2004 Failed to allocate XRI. Last XRITAG is %d"
			" Max XRI is %d, Used XRI is %d\n",
			phba->sli4_hba.next_xri,
			phba->sli4_hba.max_cfg_param.max_xri,
			phba->sli4_hba.max_cfg_param.xri_used);
	return NO_XRI;
}

/**
 * lpfc_sli4_post_sgl_list - post a block of driver sgl pages to the firmware.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post a block of driver's sgl pages to the
 * HBA using non-embedded mailbox command. No Lock is held. This routine
 * is only called when the driver is loading and after all IO has been
 * stopped.
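 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - failed to allocate the mailbox or the non-embedded DMA memory
 *	-ENXIO - the POST_SGL_PAGES mailbox command was rejected by the port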
10850 **/ 10851 int 10852 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba) 10853 { 10854 struct lpfc_sglq *sglq_entry; 10855 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 10856 struct sgl_page_pairs *sgl_pg_pairs; 10857 void *viraddr; 10858 LPFC_MBOXQ_t *mbox; 10859 uint32_t reqlen, alloclen, pg_pairs; 10860 uint32_t mbox_tmo; 10861 uint16_t xritag_start = 0; 10862 int els_xri_cnt, rc = 0; 10863 uint32_t shdr_status, shdr_add_status; 10864 union lpfc_sli4_cfg_shdr *shdr; 10865 10866 /* The number of sgls to be posted */ 10867 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 10868 10869 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) + 10870 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 10871 if (reqlen > SLI4_PAGE_SIZE) { 10872 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10873 "2559 Block sgl registration required DMA " 10874 "size (%d) great than a page\n", reqlen); 10875 return -ENOMEM; 10876 } 10877 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10878 if (!mbox) { 10879 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10880 "2560 Failed to allocate mbox cmd memory\n"); 10881 return -ENOMEM; 10882 } 10883 10884 /* Allocate DMA memory and set up the non-embedded mailbox command */ 10885 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 10886 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 10887 LPFC_SLI4_MBX_NEMBED); 10888 10889 if (alloclen < reqlen) { 10890 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10891 "0285 Allocated DMA memory size (%d) is " 10892 "less than the requested DMA memory " 10893 "size (%d)\n", alloclen, reqlen); 10894 lpfc_sli4_mbox_cmd_free(phba, mbox); 10895 return -ENOMEM; 10896 } 10897 /* Get the first SGE entry from the non-embedded DMA memory */ 10898 viraddr = mbox->sge_array->addr[0]; 10899 10900 /* Set up the SGL pages in the non-embedded DMA pages */ 10901 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 10902 sgl_pg_pairs = &sgl->sgl_pg_pairs; 10903 10904 for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) { 10905 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs]; 10906 /* Set up the sge entry */ 10907 sgl_pg_pairs->sgl_pg0_addr_lo = 10908 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 10909 sgl_pg_pairs->sgl_pg0_addr_hi = 10910 cpu_to_le32(putPaddrHigh(sglq_entry->phys)); 10911 sgl_pg_pairs->sgl_pg1_addr_lo = 10912 cpu_to_le32(putPaddrLow(0)); 10913 sgl_pg_pairs->sgl_pg1_addr_hi = 10914 cpu_to_le32(putPaddrHigh(0)); 10915 /* Keep the first xritag on the list */ 10916 if (pg_pairs == 0) 10917 xritag_start = sglq_entry->sli4_xritag; 10918 sgl_pg_pairs++; 10919 } 10920 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 10921 bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt); 10922 /* Perform endian conversion if necessary */ 10923 sgl->word0 = cpu_to_le32(sgl->word0); 10924 10925 if (!phba->sli4_hba.intr_enable) 10926 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10927 else { 10928 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 10929 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 10930 } 10931 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 10932 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10933 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10934 if (rc != MBX_TIMEOUT) 10935 lpfc_sli4_mbox_cmd_free(phba, mbox); 10936 if (shdr_status || shdr_add_status || rc) { 10937 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10938 "2513 POST_SGL_BLOCK mailbox command failed " 10939 "status x%x add_status x%x mbx status x%x\n", 10940 shdr_status, shdr_add_status, rc); 10941 rc = 
-ENXIO; 10942 } 10943 return rc; 10944 } 10945 10946 /** 10947 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware 10948 * @phba: pointer to lpfc hba data structure. 10949 * @sblist: pointer to scsi buffer list. 10950 * @count: number of scsi buffers on the list. 10951 * 10952 * This routine is invoked to post a block of @count scsi sgl pages from a 10953 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command. 10954 * No Lock is held. 10955 * 10956 **/ 10957 int 10958 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist, 10959 int cnt) 10960 { 10961 struct lpfc_scsi_buf *psb; 10962 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 10963 struct sgl_page_pairs *sgl_pg_pairs; 10964 void *viraddr; 10965 LPFC_MBOXQ_t *mbox; 10966 uint32_t reqlen, alloclen, pg_pairs; 10967 uint32_t mbox_tmo; 10968 uint16_t xritag_start = 0; 10969 int rc = 0; 10970 uint32_t shdr_status, shdr_add_status; 10971 dma_addr_t pdma_phys_bpl1; 10972 union lpfc_sli4_cfg_shdr *shdr; 10973 10974 /* Calculate the requested length of the dma memory */ 10975 reqlen = cnt * sizeof(struct sgl_page_pairs) + 10976 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 10977 if (reqlen > SLI4_PAGE_SIZE) { 10978 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10979 "0217 Block sgl registration required DMA " 10980 "size (%d) great than a page\n", reqlen); 10981 return -ENOMEM; 10982 } 10983 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10984 if (!mbox) { 10985 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10986 "0283 Failed to allocate mbox cmd memory\n"); 10987 return -ENOMEM; 10988 } 10989 10990 /* Allocate DMA memory and set up the non-embedded mailbox command */ 10991 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 10992 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 10993 LPFC_SLI4_MBX_NEMBED); 10994 10995 if (alloclen < reqlen) { 10996 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10997 "2561 Allocated DMA memory size (%d) is " 10998 "less than the requested DMA memory " 10999 "size (%d)\n", alloclen, reqlen); 11000 lpfc_sli4_mbox_cmd_free(phba, mbox); 11001 return -ENOMEM; 11002 } 11003 /* Get the first SGE entry from the non-embedded DMA memory */ 11004 viraddr = mbox->sge_array->addr[0]; 11005 11006 /* Set up the SGL pages in the non-embedded DMA pages */ 11007 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 11008 sgl_pg_pairs = &sgl->sgl_pg_pairs; 11009 11010 pg_pairs = 0; 11011 list_for_each_entry(psb, sblist, list) { 11012 /* Set up the sge entry */ 11013 sgl_pg_pairs->sgl_pg0_addr_lo = 11014 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl)); 11015 sgl_pg_pairs->sgl_pg0_addr_hi = 11016 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl)); 11017 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 11018 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE; 11019 else 11020 pdma_phys_bpl1 = 0; 11021 sgl_pg_pairs->sgl_pg1_addr_lo = 11022 cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 11023 sgl_pg_pairs->sgl_pg1_addr_hi = 11024 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 11025 /* Keep the first xritag on the list */ 11026 if (pg_pairs == 0) 11027 xritag_start = psb->cur_iocbq.sli4_xritag; 11028 sgl_pg_pairs++; 11029 pg_pairs++; 11030 } 11031 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 11032 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 11033 /* Perform endian conversion if necessary */ 11034 sgl->word0 = cpu_to_le32(sgl->word0); 11035 11036 if (!phba->sli4_hba.intr_enable) 11037 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 11038 else { 11039 mbox_tmo = 
lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 11040 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 11041 } 11042 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 11043 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11044 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 11045 if (rc != MBX_TIMEOUT) 11046 lpfc_sli4_mbox_cmd_free(phba, mbox); 11047 if (shdr_status || shdr_add_status || rc) { 11048 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11049 "2564 POST_SGL_BLOCK mailbox command failed " 11050 "status x%x add_status x%x mbx status x%x\n", 11051 shdr_status, shdr_add_status, rc); 11052 rc = -ENXIO; 11053 } 11054 return rc; 11055 } 11056 11057 /** 11058 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 11059 * @phba: pointer to lpfc_hba struct that the frame was received on 11060 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 11061 * 11062 * This function checks the fields in the @fc_hdr to see if the FC frame is a 11063 * valid type of frame that the LPFC driver will handle. This function will 11064 * return a zero if the frame is a valid frame or a non zero value when the 11065 * frame does not pass the check. 11066 **/ 11067 static int 11068 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) 11069 { 11070 char *rctl_names[] = FC_RCTL_NAMES_INIT; 11071 char *type_names[] = FC_TYPE_NAMES_INIT; 11072 struct fc_vft_header *fc_vft_hdr; 11073 11074 switch (fc_hdr->fh_r_ctl) { 11075 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 11076 case FC_RCTL_DD_SOL_DATA: /* solicited data */ 11077 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ 11078 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ 11079 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ 11080 case FC_RCTL_DD_DATA_DESC: /* data descriptor */ 11081 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ 11082 case FC_RCTL_DD_CMD_STATUS: /* command status */ 11083 case FC_RCTL_ELS_REQ: /* extended link services request */ 11084 case FC_RCTL_ELS_REP: /* extended link services reply */ 11085 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ 11086 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ 11087 case FC_RCTL_BA_NOP: /* basic link service NOP */ 11088 case FC_RCTL_BA_ABTS: /* basic link service abort */ 11089 case FC_RCTL_BA_RMC: /* remove connection */ 11090 case FC_RCTL_BA_ACC: /* basic accept */ 11091 case FC_RCTL_BA_RJT: /* basic reject */ 11092 case FC_RCTL_BA_PRMT: 11093 case FC_RCTL_ACK_1: /* acknowledge_1 */ 11094 case FC_RCTL_ACK_0: /* acknowledge_0 */ 11095 case FC_RCTL_P_RJT: /* port reject */ 11096 case FC_RCTL_F_RJT: /* fabric reject */ 11097 case FC_RCTL_P_BSY: /* port busy */ 11098 case FC_RCTL_F_BSY: /* fabric busy to data frame */ 11099 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ 11100 case FC_RCTL_LCR: /* link credit reset */ 11101 case FC_RCTL_END: /* end */ 11102 break; 11103 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ 11104 fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 11105 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; 11106 return lpfc_fc_frame_check(phba, fc_hdr); 11107 default: 11108 goto drop; 11109 } 11110 switch (fc_hdr->fh_type) { 11111 case FC_TYPE_BLS: 11112 case FC_TYPE_ELS: 11113 case FC_TYPE_FCP: 11114 case FC_TYPE_CT: 11115 break; 11116 case FC_TYPE_IP: 11117 case FC_TYPE_ILS: 11118 default: 11119 goto drop; 11120 } 11121 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 11122 "2538 Received frame rctl:%s type:%s\n", 11123 rctl_names[fc_hdr->fh_r_ctl], 11124 
type_names[fc_hdr->fh_type]); 11125 return 0; 11126 drop: 11127 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 11128 "2539 Dropped frame rctl:%s type:%s\n", 11129 rctl_names[fc_hdr->fh_r_ctl], 11130 type_names[fc_hdr->fh_type]); 11131 return 1; 11132 } 11133 11134 /** 11135 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame 11136 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 11137 * 11138 * This function processes the FC header to retrieve the VFI from the VF 11139 * header, if one exists. This function will return the VFI if one exists 11140 * or 0 if no VSAN Header exists. 11141 **/ 11142 static uint32_t 11143 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) 11144 { 11145 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 11146 11147 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) 11148 return 0; 11149 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); 11150 } 11151 11152 /** 11153 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to 11154 * @phba: Pointer to the HBA structure to search for the vport on 11155 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 11156 * @fcfi: The FC Fabric ID that the frame came from 11157 * 11158 * This function searches the @phba for a vport that matches the content of the 11159 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the 11160 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function 11161 * returns the matching vport pointer or NULL if unable to match frame to a 11162 * vport. 11163 **/ 11164 static struct lpfc_vport * 11165 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, 11166 uint16_t fcfi) 11167 { 11168 struct lpfc_vport **vports; 11169 struct lpfc_vport *vport = NULL; 11170 int i; 11171 uint32_t did = (fc_hdr->fh_d_id[0] << 16 | 11172 fc_hdr->fh_d_id[1] << 8 | 11173 fc_hdr->fh_d_id[2]); 11174 11175 vports = lpfc_create_vport_work_array(phba); 11176 if (vports != NULL) 11177 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 11178 if (phba->fcf.fcfi == fcfi && 11179 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && 11180 vports[i]->fc_myDID == did) { 11181 vport = vports[i]; 11182 break; 11183 } 11184 } 11185 lpfc_destroy_vport_work_array(phba, vports); 11186 return vport; 11187 } 11188 11189 /** 11190 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp 11191 * @vport: The vport to work on. 11192 * 11193 * This function updates the receive sequence time stamp for this vport. The 11194 * receive sequence time stamp indicates the time that the last frame of the 11195 * the sequence that has been idle for the longest amount of time was received. 11196 * the driver uses this time stamp to indicate if any received sequences have 11197 * timed out. 11198 **/ 11199 void 11200 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) 11201 { 11202 struct lpfc_dmabuf *h_buf; 11203 struct hbq_dmabuf *dmabuf = NULL; 11204 11205 /* get the oldest sequence on the rcv list */ 11206 h_buf = list_get_first(&vport->rcv_buffer_list, 11207 struct lpfc_dmabuf, list); 11208 if (!h_buf) 11209 return; 11210 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 11211 vport->rcv_buffer_time_stamp = dmabuf->time_stamp; 11212 } 11213 11214 /** 11215 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. 11216 * @vport: The vport that the received sequences were sent to. 11217 * 11218 * This function cleans up all outstanding received sequences. 
This is called 11219 * by the driver when a link event or user action invalidates all the received 11220 * sequences. 11221 **/ 11222 void 11223 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) 11224 { 11225 struct lpfc_dmabuf *h_buf, *hnext; 11226 struct lpfc_dmabuf *d_buf, *dnext; 11227 struct hbq_dmabuf *dmabuf = NULL; 11228 11229 /* start with the oldest sequence on the rcv list */ 11230 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 11231 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 11232 list_del_init(&dmabuf->hbuf.list); 11233 list_for_each_entry_safe(d_buf, dnext, 11234 &dmabuf->dbuf.list, list) { 11235 list_del_init(&d_buf->list); 11236 lpfc_in_buf_free(vport->phba, d_buf); 11237 } 11238 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 11239 } 11240 } 11241 11242 /** 11243 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. 11244 * @vport: The vport that the received sequences were sent to. 11245 * 11246 * This function determines whether any received sequences have timed out by 11247 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp 11248 * indicates that there is at least one timed out sequence this routine will 11249 * go through the received sequences one at a time from most inactive to most 11250 * active to determine which ones need to be cleaned up. Once it has determined 11251 * that a sequence needs to be cleaned up it will simply free up the resources 11252 * without sending an abort. 11253 **/ 11254 void 11255 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) 11256 { 11257 struct lpfc_dmabuf *h_buf, *hnext; 11258 struct lpfc_dmabuf *d_buf, *dnext; 11259 struct hbq_dmabuf *dmabuf = NULL; 11260 unsigned long timeout; 11261 int abort_count = 0; 11262 11263 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 11264 vport->rcv_buffer_time_stamp); 11265 if (list_empty(&vport->rcv_buffer_list) || 11266 time_before(jiffies, timeout)) 11267 return; 11268 /* start with the oldest sequence on the rcv list */ 11269 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 11270 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 11271 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 11272 dmabuf->time_stamp); 11273 if (time_before(jiffies, timeout)) 11274 break; 11275 abort_count++; 11276 list_del_init(&dmabuf->hbuf.list); 11277 list_for_each_entry_safe(d_buf, dnext, 11278 &dmabuf->dbuf.list, list) { 11279 list_del_init(&d_buf->list); 11280 lpfc_in_buf_free(vport->phba, d_buf); 11281 } 11282 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 11283 } 11284 if (abort_count) 11285 lpfc_update_rcv_time_stamp(vport); 11286 } 11287 11288 /** 11289 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences 11290 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame 11291 * 11292 * This function searches through the existing incomplete sequences that have 11293 * been sent to this @vport. If the frame matches one of the incomplete 11294 * sequences then the dbuf in the @dmabuf is added to the list of frames that 11295 * make up that sequence. If no sequence is found that matches this frame then 11296 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list 11297 * This function returns a pointer to the first dmabuf in the sequence list that 11298 * the frame was linked to. 
11299 **/ 11300 static struct hbq_dmabuf * 11301 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 11302 { 11303 struct fc_frame_header *new_hdr; 11304 struct fc_frame_header *temp_hdr; 11305 struct lpfc_dmabuf *d_buf; 11306 struct lpfc_dmabuf *h_buf; 11307 struct hbq_dmabuf *seq_dmabuf = NULL; 11308 struct hbq_dmabuf *temp_dmabuf = NULL; 11309 11310 INIT_LIST_HEAD(&dmabuf->dbuf.list); 11311 dmabuf->time_stamp = jiffies; 11312 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 11313 /* Use the hdr_buf to find the sequence that this frame belongs to */ 11314 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 11315 temp_hdr = (struct fc_frame_header *)h_buf->virt; 11316 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 11317 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 11318 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 11319 continue; 11320 /* found a pending sequence that matches this frame */ 11321 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 11322 break; 11323 } 11324 if (!seq_dmabuf) { 11325 /* 11326 * This indicates first frame received for this sequence. 11327 * Queue the buffer on the vport's rcv_buffer_list. 11328 */ 11329 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 11330 lpfc_update_rcv_time_stamp(vport); 11331 return dmabuf; 11332 } 11333 temp_hdr = seq_dmabuf->hbuf.virt; 11334 if (be16_to_cpu(new_hdr->fh_seq_cnt) < 11335 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 11336 list_del_init(&seq_dmabuf->hbuf.list); 11337 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 11338 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 11339 lpfc_update_rcv_time_stamp(vport); 11340 return dmabuf; 11341 } 11342 /* move this sequence to the tail to indicate a young sequence */ 11343 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list); 11344 seq_dmabuf->time_stamp = jiffies; 11345 lpfc_update_rcv_time_stamp(vport); 11346 if (list_empty(&seq_dmabuf->dbuf.list)) { 11347 temp_hdr = dmabuf->hbuf.virt; 11348 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 11349 return seq_dmabuf; 11350 } 11351 /* find the correct place in the sequence to insert this frame */ 11352 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { 11353 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 11354 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt; 11355 /* 11356 * If the frame's sequence count is greater than the frame on 11357 * the list then insert the frame right after this frame 11358 */ 11359 if (be16_to_cpu(new_hdr->fh_seq_cnt) > 11360 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 11361 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); 11362 return seq_dmabuf; 11363 } 11364 } 11365 return NULL; 11366 } 11367 11368 /** 11369 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence 11370 * @vport: pointer to a vitural port 11371 * @dmabuf: pointer to a dmabuf that describes the FC sequence 11372 * 11373 * This function tries to abort from the partially assembed sequence, described 11374 * by the information from basic abbort @dmabuf. It checks to see whether such 11375 * partially assembled sequence held by the driver. If so, it shall free up all 11376 * the frames from the partially assembled sequence. 
11377 * 11378 * Return 11379 * true -- if there is matching partially assembled sequence present and all 11380 * the frames freed with the sequence; 11381 * false -- if there is no matching partially assembled sequence present so 11382 * nothing got aborted in the lower layer driver 11383 **/ 11384 static bool 11385 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, 11386 struct hbq_dmabuf *dmabuf) 11387 { 11388 struct fc_frame_header *new_hdr; 11389 struct fc_frame_header *temp_hdr; 11390 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf; 11391 struct hbq_dmabuf *seq_dmabuf = NULL; 11392 11393 /* Use the hdr_buf to find the sequence that matches this frame */ 11394 INIT_LIST_HEAD(&dmabuf->dbuf.list); 11395 INIT_LIST_HEAD(&dmabuf->hbuf.list); 11396 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 11397 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 11398 temp_hdr = (struct fc_frame_header *)h_buf->virt; 11399 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 11400 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 11401 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 11402 continue; 11403 /* found a pending sequence that matches this frame */ 11404 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 11405 break; 11406 } 11407 11408 /* Free up all the frames from the partially assembled sequence */ 11409 if (seq_dmabuf) { 11410 list_for_each_entry_safe(d_buf, n_buf, 11411 &seq_dmabuf->dbuf.list, list) { 11412 list_del_init(&d_buf->list); 11413 lpfc_in_buf_free(vport->phba, d_buf); 11414 } 11415 return true; 11416 } 11417 return false; 11418 } 11419 11420 /** 11421 * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler 11422 * @phba: Pointer to HBA context object. 11423 * @cmd_iocbq: pointer to the command iocbq structure. 11424 * @rsp_iocbq: pointer to the response iocbq structure. 11425 * 11426 * This function handles the sequence abort accept iocb command complete 11427 * event. It properly releases the memory allocated to the sequence abort 11428 * accept iocb. 11429 **/ 11430 static void 11431 lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba, 11432 struct lpfc_iocbq *cmd_iocbq, 11433 struct lpfc_iocbq *rsp_iocbq) 11434 { 11435 if (cmd_iocbq) 11436 lpfc_sli_release_iocbq(phba, cmd_iocbq); 11437 } 11438 11439 /** 11440 * lpfc_sli4_seq_abort_acc - Accept sequence abort 11441 * @phba: Pointer to HBA context object. 11442 * @fc_hdr: pointer to a FC frame header. 11443 * 11444 * This function sends a basic accept to a previous unsol sequence abort 11445 * event after aborting the sequence handling. 
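 * The accept is built as a CMD_XMIT_BLS_RSP64_CX iocb; the OX_ID from the
 * ABTS frame is always echoed back, while the RX_ID is reused only when
 * the ABTS was sent by the exchange responder.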
11446 **/ 11447 static void 11448 lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, 11449 struct fc_frame_header *fc_hdr) 11450 { 11451 struct lpfc_iocbq *ctiocb = NULL; 11452 struct lpfc_nodelist *ndlp; 11453 uint16_t oxid, rxid; 11454 uint32_t sid, fctl; 11455 IOCB_t *icmd; 11456 11457 if (!lpfc_is_link_up(phba)) 11458 return; 11459 11460 sid = sli4_sid_from_fc_hdr(fc_hdr); 11461 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 11462 rxid = be16_to_cpu(fc_hdr->fh_rx_id); 11463 11464 ndlp = lpfc_findnode_did(phba->pport, sid); 11465 if (!ndlp) { 11466 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 11467 "1268 Find ndlp returned NULL for oxid:x%x " 11468 "SID:x%x\n", oxid, sid); 11469 return; 11470 } 11471 11472 /* Allocate buffer for acc iocb */ 11473 ctiocb = lpfc_sli_get_iocbq(phba); 11474 if (!ctiocb) 11475 return; 11476 11477 /* Extract the F_CTL field from FC_HDR */ 11478 fctl = sli4_fctl_from_fc_hdr(fc_hdr); 11479 11480 icmd = &ctiocb->iocb; 11481 icmd->un.xseq64.bdl.bdeSize = 0; 11482 icmd->un.xseq64.bdl.ulpIoTag32 = 0; 11483 icmd->un.xseq64.w5.hcsw.Dfctl = 0; 11484 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC; 11485 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS; 11486 11487 /* Fill in the rest of iocb fields */ 11488 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX; 11489 icmd->ulpBdeCount = 0; 11490 icmd->ulpLe = 1; 11491 icmd->ulpClass = CLASS3; 11492 icmd->ulpContext = ndlp->nlp_rpi; 11493 11494 ctiocb->iocb_cmpl = NULL; 11495 ctiocb->vport = phba->pport; 11496 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl; 11497 11498 if (fctl & FC_FC_EX_CTX) { 11499 /* ABTS sent by responder to CT exchange, construction 11500 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG 11501 * field and RX_ID from ABTS for RX_ID field. 11502 */ 11503 bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_RSP); 11504 bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, rxid); 11505 ctiocb->sli4_xritag = oxid; 11506 } else { 11507 /* ABTS sent by initiator to CT exchange, construction 11508 * of BA_ACC will need to allocate a new XRI as for the 11509 * XRI_TAG and RX_ID fields. 11510 */ 11511 bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_INT); 11512 bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, NO_XRI); 11513 ctiocb->sli4_xritag = NO_XRI; 11514 } 11515 bf_set(lpfc_abts_oxid, &icmd->un.bls_acc, oxid); 11516 11517 /* Xmit CT abts accept on exchange <xid> */ 11518 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 11519 "1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n", 11520 CMD_XMIT_BLS_RSP64_CX, phba->link_state); 11521 lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 11522 } 11523 11524 /** 11525 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event 11526 * @vport: Pointer to the vport on which this sequence was received 11527 * @dmabuf: pointer to a dmabuf that describes the FC sequence 11528 * 11529 * This function handles an SLI-4 unsolicited abort event. If the unsolicited 11530 * receive sequence is only partially assembed by the driver, it shall abort 11531 * the partially assembled frames for the sequence. Otherwise, if the 11532 * unsolicited receive sequence has been completely assembled and passed to 11533 * the Upper Layer Protocol (UPL), it then mark the per oxid status for the 11534 * unsolicited sequence has been aborted. After that, it will issue a basic 11535 * accept to accept the abort. 
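 * In both cases lpfc_sli4_seq_abort_acc() transmits the BA_ACC using a copy
 * of the frame header taken before the receive buffer can be freed.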
11536 **/ 11537 void 11538 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, 11539 struct hbq_dmabuf *dmabuf) 11540 { 11541 struct lpfc_hba *phba = vport->phba; 11542 struct fc_frame_header fc_hdr; 11543 uint32_t fctl; 11544 bool abts_par; 11545 11546 /* Make a copy of fc_hdr before the dmabuf being released */ 11547 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); 11548 fctl = sli4_fctl_from_fc_hdr(&fc_hdr); 11549 11550 if (fctl & FC_FC_EX_CTX) { 11551 /* 11552 * ABTS sent by responder to exchange, just free the buffer 11553 */ 11554 lpfc_in_buf_free(phba, &dmabuf->dbuf); 11555 } else { 11556 /* 11557 * ABTS sent by initiator to exchange, need to do cleanup 11558 */ 11559 /* Try to abort partially assembled seq */ 11560 abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf); 11561 11562 /* Send abort to ULP if partially seq abort failed */ 11563 if (abts_par == false) 11564 lpfc_sli4_send_seq_to_ulp(vport, dmabuf); 11565 else 11566 lpfc_in_buf_free(phba, &dmabuf->dbuf); 11567 } 11568 /* Send basic accept (BA_ACC) to the abort requester */ 11569 lpfc_sli4_seq_abort_acc(phba, &fc_hdr); 11570 } 11571 11572 /** 11573 * lpfc_seq_complete - Indicates if a sequence is complete 11574 * @dmabuf: pointer to a dmabuf that describes the FC sequence 11575 * 11576 * This function checks the sequence, starting with the frame described by 11577 * @dmabuf, to see if all the frames associated with this sequence are present. 11578 * the frames associated with this sequence are linked to the @dmabuf using the 11579 * dbuf list. This function looks for two major things. 1) That the first frame 11580 * has a sequence count of zero. 2) There is a frame with last frame of sequence 11581 * set. 3) That there are no holes in the sequence count. The function will 11582 * return 1 when the sequence is complete, otherwise it will return 0. 11583 **/ 11584 static int 11585 lpfc_seq_complete(struct hbq_dmabuf *dmabuf) 11586 { 11587 struct fc_frame_header *hdr; 11588 struct lpfc_dmabuf *d_buf; 11589 struct hbq_dmabuf *seq_dmabuf; 11590 uint32_t fctl; 11591 int seq_count = 0; 11592 11593 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 11594 /* make sure first fame of sequence has a sequence count of zero */ 11595 if (hdr->fh_seq_cnt != seq_count) 11596 return 0; 11597 fctl = (hdr->fh_f_ctl[0] << 16 | 11598 hdr->fh_f_ctl[1] << 8 | 11599 hdr->fh_f_ctl[2]); 11600 /* If last frame of sequence we can return success. */ 11601 if (fctl & FC_FC_END_SEQ) 11602 return 1; 11603 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) { 11604 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 11605 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 11606 /* If there is a hole in the sequence count then fail. */ 11607 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt)) 11608 return 0; 11609 fctl = (hdr->fh_f_ctl[0] << 16 | 11610 hdr->fh_f_ctl[1] << 8 | 11611 hdr->fh_f_ctl[2]); 11612 /* If last frame of sequence we can return success. */ 11613 if (fctl & FC_FC_END_SEQ) 11614 return 1; 11615 } 11616 return 0; 11617 } 11618 11619 /** 11620 * lpfc_prep_seq - Prep sequence for ULP processing 11621 * @vport: Pointer to the vport on which this sequence was received 11622 * @dmabuf: pointer to a dmabuf that describes the FC sequence 11623 * 11624 * This function takes a sequence, described by a list of frames, and creates 11625 * a list of iocbq structures to describe the sequence. This iocbq list will be 11626 * used to issue to the generic unsolicited sequence handler. 
This routine 11627 * returns a pointer to the first iocbq in the list. If the function is unable 11628 * to allocate an iocbq then it throw out the received frames that were not 11629 * able to be described and return a pointer to the first iocbq. If unable to 11630 * allocate any iocbqs (including the first) this function will return NULL. 11631 **/ 11632 static struct lpfc_iocbq * 11633 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) 11634 { 11635 struct lpfc_dmabuf *d_buf, *n_buf; 11636 struct lpfc_iocbq *first_iocbq, *iocbq; 11637 struct fc_frame_header *fc_hdr; 11638 uint32_t sid; 11639 struct ulp_bde64 *pbde; 11640 11641 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 11642 /* remove from receive buffer list */ 11643 list_del_init(&seq_dmabuf->hbuf.list); 11644 lpfc_update_rcv_time_stamp(vport); 11645 /* get the Remote Port's SID */ 11646 sid = sli4_sid_from_fc_hdr(fc_hdr); 11647 /* Get an iocbq struct to fill in. */ 11648 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 11649 if (first_iocbq) { 11650 /* Initialize the first IOCB. */ 11651 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; 11652 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 11653 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 11654 first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id); 11655 first_iocbq->iocb.unsli3.rcvsli3.vpi = 11656 vport->vpi + vport->phba->vpi_base; 11657 /* put the first buffer into the first IOCBq */ 11658 first_iocbq->context2 = &seq_dmabuf->dbuf; 11659 first_iocbq->context3 = NULL; 11660 first_iocbq->iocb.ulpBdeCount = 1; 11661 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 11662 LPFC_DATA_BUF_SIZE; 11663 first_iocbq->iocb.un.rcvels.remoteID = sid; 11664 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 11665 bf_get(lpfc_rcqe_length, 11666 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 11667 } 11668 iocbq = first_iocbq; 11669 /* 11670 * Each IOCBq can have two Buffers assigned, so go through the list 11671 * of buffers for this sequence and save two buffers in each IOCBq 11672 */ 11673 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { 11674 if (!iocbq) { 11675 lpfc_in_buf_free(vport->phba, d_buf); 11676 continue; 11677 } 11678 if (!iocbq->context3) { 11679 iocbq->context3 = d_buf; 11680 iocbq->iocb.ulpBdeCount++; 11681 pbde = (struct ulp_bde64 *) 11682 &iocbq->iocb.unsli3.sli3Words[4]; 11683 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; 11684 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 11685 bf_get(lpfc_rcqe_length, 11686 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 11687 } else { 11688 iocbq = lpfc_sli_get_iocbq(vport->phba); 11689 if (!iocbq) { 11690 if (first_iocbq) { 11691 first_iocbq->iocb.ulpStatus = 11692 IOSTAT_FCP_RSP_ERROR; 11693 first_iocbq->iocb.un.ulpWord[4] = 11694 IOERR_NO_RESOURCES; 11695 } 11696 lpfc_in_buf_free(vport->phba, d_buf); 11697 continue; 11698 } 11699 iocbq->context2 = d_buf; 11700 iocbq->context3 = NULL; 11701 iocbq->iocb.ulpBdeCount = 1; 11702 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 11703 LPFC_DATA_BUF_SIZE; 11704 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 11705 bf_get(lpfc_rcqe_length, 11706 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 11707 iocbq->iocb.un.rcvels.remoteID = sid; 11708 list_add_tail(&iocbq->list, &first_iocbq->list); 11709 } 11710 } 11711 return first_iocbq; 11712 } 11713 11714 static void 11715 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, 11716 struct hbq_dmabuf *seq_dmabuf) 11717 { 11718 struct fc_frame_header *fc_hdr; 11719 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb; 11720 struct lpfc_hba *phba = 
vport->phba; 11721 11722 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 11723 iocbq = lpfc_prep_seq(vport, seq_dmabuf); 11724 if (!iocbq) { 11725 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11726 "2707 Ring %d handler: Failed to allocate " 11727 "iocb Rctl x%x Type x%x received\n", 11728 LPFC_ELS_RING, 11729 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 11730 return; 11731 } 11732 if (!lpfc_complete_unsol_iocb(phba, 11733 &phba->sli.ring[LPFC_ELS_RING], 11734 iocbq, fc_hdr->fh_r_ctl, 11735 fc_hdr->fh_type)) 11736 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11737 "2540 Ring %d handler: unexpected Rctl " 11738 "x%x Type x%x received\n", 11739 LPFC_ELS_RING, 11740 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 11741 11742 /* Free iocb created in lpfc_prep_seq */ 11743 list_for_each_entry_safe(curr_iocb, next_iocb, 11744 &iocbq->list, list) { 11745 list_del_init(&curr_iocb->list); 11746 lpfc_sli_release_iocbq(phba, curr_iocb); 11747 } 11748 lpfc_sli_release_iocbq(phba, iocbq); 11749 } 11750 11751 /** 11752 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware 11753 * @phba: Pointer to HBA context object. 11754 * 11755 * This function is called with no lock held. This function processes all 11756 * the received buffers and gives it to upper layers when a received buffer 11757 * indicates that it is the final frame in the sequence. The interrupt 11758 * service routine processes received buffers at interrupt contexts and adds 11759 * received dma buffers to the rb_pend_list queue and signals the worker thread. 11760 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the 11761 * appropriate receive function when the final frame in a sequence is received. 11762 **/ 11763 void 11764 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, 11765 struct hbq_dmabuf *dmabuf) 11766 { 11767 struct hbq_dmabuf *seq_dmabuf; 11768 struct fc_frame_header *fc_hdr; 11769 struct lpfc_vport *vport; 11770 uint32_t fcfi; 11771 11772 /* Process each received buffer */ 11773 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 11774 /* check to see if this a valid type of frame */ 11775 if (lpfc_fc_frame_check(phba, fc_hdr)) { 11776 lpfc_in_buf_free(phba, &dmabuf->dbuf); 11777 return; 11778 } 11779 fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl); 11780 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); 11781 if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) { 11782 /* throw out the frame */ 11783 lpfc_in_buf_free(phba, &dmabuf->dbuf); 11784 return; 11785 } 11786 /* Handle the basic abort sequence (BA_ABTS) event */ 11787 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { 11788 lpfc_sli4_handle_unsol_abort(vport, dmabuf); 11789 return; 11790 } 11791 11792 /* Link this frame */ 11793 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); 11794 if (!seq_dmabuf) { 11795 /* unable to add frame to vport - throw it out */ 11796 lpfc_in_buf_free(phba, &dmabuf->dbuf); 11797 return; 11798 } 11799 /* If not last frame in sequence continue processing frames. */ 11800 if (!lpfc_seq_complete(seq_dmabuf)) 11801 return; 11802 11803 /* Send the complete sequence to the upper layer protocol */ 11804 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); 11805 } 11806 11807 /** 11808 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port 11809 * @phba: pointer to lpfc hba data structure. 11810 * 11811 * This routine is invoked to post rpi header templates to the 11812 * HBA consistent with the SLI-4 interface spec. 
This routine 11813 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 11814 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 11815 * 11816 * This routine does not require any locks. It's usage is expected 11817 * to be driver load or reset recovery when the driver is 11818 * sequential. 11819 * 11820 * Return codes 11821 * 0 - successful 11822 * EIO - The mailbox failed to complete successfully. 11823 * When this error occurs, the driver is not guaranteed 11824 * to have any rpi regions posted to the device and 11825 * must either attempt to repost the regions or take a 11826 * fatal error. 11827 **/ 11828 int 11829 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) 11830 { 11831 struct lpfc_rpi_hdr *rpi_page; 11832 uint32_t rc = 0; 11833 11834 /* Post all rpi memory regions to the port. */ 11835 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 11836 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); 11837 if (rc != MBX_SUCCESS) { 11838 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11839 "2008 Error %d posting all rpi " 11840 "headers\n", rc); 11841 rc = -EIO; 11842 break; 11843 } 11844 } 11845 11846 return rc; 11847 } 11848 11849 /** 11850 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port 11851 * @phba: pointer to lpfc hba data structure. 11852 * @rpi_page: pointer to the rpi memory region. 11853 * 11854 * This routine is invoked to post a single rpi header to the 11855 * HBA consistent with the SLI-4 interface spec. This memory region 11856 * maps up to 64 rpi context regions. 11857 * 11858 * Return codes 11859 * 0 - successful 11860 * ENOMEM - No available memory 11861 * EIO - The mailbox failed to complete successfully. 11862 **/ 11863 int 11864 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) 11865 { 11866 LPFC_MBOXQ_t *mboxq; 11867 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; 11868 uint32_t rc = 0; 11869 uint32_t mbox_tmo; 11870 uint32_t shdr_status, shdr_add_status; 11871 union lpfc_sli4_cfg_shdr *shdr; 11872 11873 /* The port is notified of the header region via a mailbox command. */ 11874 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11875 if (!mboxq) { 11876 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11877 "2001 Unable to allocate memory for issuing " 11878 "SLI_CONFIG_SPECIAL mailbox command\n"); 11879 return -ENOMEM; 11880 } 11881 11882 /* Post all rpi memory regions to the port. 
*/
11883 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
11884 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
11885 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11886 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
11887 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
11888 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
11889 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
11890 hdr_tmpl, rpi_page->page_count);
11891 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
11892 rpi_page->start_rpi);
11893 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
11894 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
11895 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11896 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
11897 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11898 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11899 if (rc != MBX_TIMEOUT)
11900 mempool_free(mboxq, phba->mbox_mem_pool);
11901 if (shdr_status || shdr_add_status || rc) {
11902 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11903 "2514 POST_RPI_HDR mailbox failed with "
11904 "status x%x add_status x%x, mbx status x%x\n",
11905 shdr_status, shdr_add_status, rc);
11906 rc = -ENXIO;
11907 }
11908 return rc;
11909 }
11910
11911 /**
11912 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
11913 * @phba: pointer to lpfc hba data structure.
11914 *
11915 * This routine is invoked to allocate the next available rpi from the
11916 * driver's rpi bitmask, consistent with the SLI-4 interface spec. If the
11917 * pool of remaining rpis runs low, it also posts another rpi header
11918 * region to the port so that additional rpi contexts become available.
11919 *
11920 * Returns
11921 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
11922 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
11923 **/
11924 int
11925 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
11926 {
11927 int rpi;
11928 uint16_t max_rpi, rpi_base, rpi_limit;
11929 uint16_t rpi_remaining;
11930 struct lpfc_rpi_hdr *rpi_hdr;
11931
11932 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
11933 rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
11934 rpi_limit = phba->sli4_hba.next_rpi;
11935
11936 /*
11937 * The valid rpi range is not guaranteed to be zero-based. Start
11938 * the search at the rpi_base as reported by the port.
11939 */
11940 spin_lock_irq(&phba->hbalock);
11941 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
11942 if (rpi >= rpi_limit || rpi < rpi_base)
11943 rpi = LPFC_RPI_ALLOC_ERROR;
11944 else {
11945 set_bit(rpi, phba->sli4_hba.rpi_bmask);
11946 phba->sli4_hba.max_cfg_param.rpi_used++;
11947 phba->sli4_hba.rpi_count++;
11948 }
11949
11950 /*
11951 * Don't try to allocate more rpi header regions if the device's limit
11952 * on available rpis has been exhausted.
11953 */
11954 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
11955 (phba->sli4_hba.rpi_count >= max_rpi)) {
11956 spin_unlock_irq(&phba->hbalock);
11957 return rpi;
11958 }
11959
11960 /*
11961 * If the driver is running low on rpi resources, allocate another
11962 * page now. Note that the next_rpi value is used because
11963 * it represents how many are actually in use, whereas max_rpi
11964 * notes the maximum number supported by the device.
11965 */
11966 rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
11967 phba->sli4_hba.rpi_count;
11968 spin_unlock_irq(&phba->hbalock);
11969 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
11970 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
11971 if (!rpi_hdr) {
11972 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11973 "2002 Error Could not grow rpi "
11974 "count\n");
11975 } else {
11976 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
11977 }
11978 }
11979
11980 return rpi;
11981 }
11982
11983 /**
11984 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
11985 * @phba: pointer to lpfc hba data structure.
11986 *
11987 * This routine is invoked to release an rpi to the pool of
11988 * available rpis maintained by the driver.
11989 **/
11990 void
11991 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
11992 {
11993 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
11994 phba->sli4_hba.rpi_count--;
11995 phba->sli4_hba.max_cfg_param.rpi_used--;
11996 }
11997 }
11998
11999 /**
12000 * lpfc_sli4_free_rpi - Release an rpi for reuse.
12001 * @phba: pointer to lpfc hba data structure.
12002 *
12003 * This routine is invoked to release an rpi to the pool of
12004 * available rpis maintained by the driver.
12005 **/
12006 void
12007 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
12008 {
12009 spin_lock_irq(&phba->hbalock);
12010 __lpfc_sli4_free_rpi(phba, rpi);
12011 spin_unlock_irq(&phba->hbalock);
12012 }
12013
12014 /**
12015 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
12016 * @phba: pointer to lpfc hba data structure.
12017 *
12018 * This routine is invoked to free the memory region that
12019 * backs the driver's rpi bitmask.
12020 **/
12021 void
12022 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
12023 {
12024 kfree(phba->sli4_hba.rpi_bmask);
12025 }
12026
12027 /**
12028 * lpfc_sli4_resume_rpi - Resume traffic on an rpi for a remote node
12029 * @ndlp: pointer to the lpfc nodelist entry whose rpi is to be resumed.
12030 *
12031 * This routine is invoked to issue a resume-rpi mailbox command so that
12032 * the port resumes processing on the rpi associated with @ndlp.
12033 **/
12034 int
12035 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
12036 {
12037 LPFC_MBOXQ_t *mboxq;
12038 struct lpfc_hba *phba = ndlp->phba;
12039 int rc;
12040
12041 /* The port is notified of the rpi resume via a mailbox command. */
12042 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12043 if (!mboxq)
12044 return -ENOMEM;
12045
12046 /* Construct the resume-rpi mailbox command for this node. */
12047 lpfc_resume_rpi(mboxq, ndlp);
12048 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
12049 if (rc == MBX_NOT_FINISHED) {
12050 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12051 "2010 Resume RPI Mailbox failed "
12052 "status %d, mbxStatus x%x\n", rc,
12053 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
12054 mempool_free(mboxq, phba->mbox_mem_pool);
12055 return -EIO;
12056 }
12057 return 0;
12058 }
12059
12060 /**
12061 * lpfc_sli4_init_vpi - Initialize a vpi with the port
12062 * @phba: pointer to lpfc hba data structure.
12063 * @vpi: vpi value to activate with the port.
12064 *
12065 * This routine is invoked to activate a vpi with the
12066 * port when the host intends to use vports with a
12067 * nonzero vpi.
12068 * 12069 * Returns: 12070 * 0 success 12071 * -Evalue otherwise 12072 **/ 12073 int 12074 lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi) 12075 { 12076 LPFC_MBOXQ_t *mboxq; 12077 int rc = 0; 12078 int retval = MBX_SUCCESS; 12079 uint32_t mbox_tmo; 12080 12081 if (vpi == 0) 12082 return -EINVAL; 12083 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12084 if (!mboxq) 12085 return -ENOMEM; 12086 lpfc_init_vpi(phba, mboxq, vpi); 12087 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI); 12088 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 12089 if (rc != MBX_SUCCESS) { 12090 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12091 "2022 INIT VPI Mailbox failed " 12092 "status %d, mbxStatus x%x\n", rc, 12093 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 12094 retval = -EIO; 12095 } 12096 if (rc != MBX_TIMEOUT) 12097 mempool_free(mboxq, phba->mbox_mem_pool); 12098 12099 return retval; 12100 } 12101 12102 /** 12103 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. 12104 * @phba: pointer to lpfc hba data structure. 12105 * @mboxq: Pointer to mailbox object. 12106 * 12107 * This routine is invoked to manually add a single FCF record. The caller 12108 * must pass a completely initialized FCF_Record. This routine takes 12109 * care of the nonembedded mailbox operations. 12110 **/ 12111 static void 12112 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 12113 { 12114 void *virt_addr; 12115 union lpfc_sli4_cfg_shdr *shdr; 12116 uint32_t shdr_status, shdr_add_status; 12117 12118 virt_addr = mboxq->sge_array->addr[0]; 12119 /* The IOCTL status is embedded in the mailbox subheader. */ 12120 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; 12121 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12122 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12123 12124 if ((shdr_status || shdr_add_status) && 12125 (shdr_status != STATUS_FCF_IN_USE)) 12126 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12127 "2558 ADD_FCF_RECORD mailbox failed with " 12128 "status x%x add_status x%x\n", 12129 shdr_status, shdr_add_status); 12130 12131 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12132 } 12133 12134 /** 12135 * lpfc_sli4_add_fcf_record - Manually add an FCF Record. 12136 * @phba: pointer to lpfc hba data structure. 12137 * @fcf_record: pointer to the initialized fcf record to add. 12138 * 12139 * This routine is invoked to manually add a single FCF record. The caller 12140 * must pass a completely initialized FCF_Record. This routine takes 12141 * care of the nonembedded mailbox operations. 
12142 **/ 12143 int 12144 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) 12145 { 12146 int rc = 0; 12147 LPFC_MBOXQ_t *mboxq; 12148 uint8_t *bytep; 12149 void *virt_addr; 12150 dma_addr_t phys_addr; 12151 struct lpfc_mbx_sge sge; 12152 uint32_t alloc_len, req_len; 12153 uint32_t fcfindex; 12154 12155 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12156 if (!mboxq) { 12157 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12158 "2009 Failed to allocate mbox for ADD_FCF cmd\n"); 12159 return -ENOMEM; 12160 } 12161 12162 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + 12163 sizeof(uint32_t); 12164 12165 /* Allocate DMA memory and set up the non-embedded mailbox command */ 12166 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 12167 LPFC_MBOX_OPCODE_FCOE_ADD_FCF, 12168 req_len, LPFC_SLI4_MBX_NEMBED); 12169 if (alloc_len < req_len) { 12170 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12171 "2523 Allocated DMA memory size (x%x) is " 12172 "less than the requested DMA memory " 12173 "size (x%x)\n", alloc_len, req_len); 12174 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12175 return -ENOMEM; 12176 } 12177 12178 /* 12179 * Get the first SGE entry from the non-embedded DMA memory. This 12180 * routine only uses a single SGE. 12181 */ 12182 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 12183 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); 12184 virt_addr = mboxq->sge_array->addr[0]; 12185 /* 12186 * Configure the FCF record for FCFI 0. This is the driver's 12187 * hardcoded default and gets used in nonFIP mode. 12188 */ 12189 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); 12190 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 12191 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); 12192 12193 /* 12194 * Copy the fcf_index and the FCF Record Data. The data starts after 12195 * the FCoE header plus word10. The data copy needs to be endian 12196 * correct. 12197 */ 12198 bytep += sizeof(uint32_t); 12199 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); 12200 mboxq->vport = phba->pport; 12201 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; 12202 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 12203 if (rc == MBX_NOT_FINISHED) { 12204 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12205 "2515 ADD_FCF_RECORD mailbox failed with " 12206 "status 0x%x\n", rc); 12207 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12208 rc = -EIO; 12209 } else 12210 rc = 0; 12211 12212 return rc; 12213 } 12214 12215 /** 12216 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. 12217 * @phba: pointer to lpfc hba data structure. 12218 * @fcf_record: pointer to the fcf record to write the default data. 12219 * @fcf_index: FCF table entry index. 12220 * 12221 * This routine is invoked to build the driver's default FCF record. The 12222 * values used are hardcoded. This routine handles memory initialization. 
12223 *
12224 **/
12225 void
12226 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
12227 struct fcf_record *fcf_record,
12228 uint16_t fcf_index)
12229 {
12230 memset(fcf_record, 0, sizeof(struct fcf_record));
12231 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
12232 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
12233 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
12234 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
12235 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
12236 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
12237 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
12238 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
12239 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
12240 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
12241 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
12242 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
12243 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
12244 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
12245 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
12246 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
12247 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
12248 /* Set the VLAN bit map */
12249 if (phba->valid_vlan) {
12250 fcf_record->vlan_bitmap[phba->vlan_id / 8]
12251 = 1 << (phba->vlan_id % 8);
12252 }
12253 }
12254
12255 /**
12256 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
12257 * @phba: pointer to lpfc hba data structure.
12258 * @fcf_index: FCF table entry offset.
12259 *
12260 * This routine is invoked to scan the entire FCF table by reading FCF
12261 * records and processing them one at a time, starting from the @fcf_index
12262 * for initial FCF discovery or fast FCF failover rediscovery.
12263 *
12264 * Return 0 if the mailbox command is submitted successfully, nonzero
12265 * otherwise.
12266 **/
12267 int
12268 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
12269 {
12270 int rc = 0, error;
12271 LPFC_MBOXQ_t *mboxq;
12272
12273 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
12274 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12275 if (!mboxq) {
12276 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12277 "2000 Failed to allocate mbox for "
12278 "READ_FCF cmd\n");
12279 error = -ENOMEM;
12280 goto fail_fcf_scan;
12281 }
12282 /* Construct the read FCF record mailbox command */
12283 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
12284 if (rc) {
12285 error = -EINVAL;
12286 goto fail_fcf_scan;
12287 }
12288 /* Issue the mailbox command asynchronously */
12289 mboxq->vport = phba->pport;
12290 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
12291 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
12292 if (rc == MBX_NOT_FINISHED)
12293 error = -EIO;
12294 else {
12295 spin_lock_irq(&phba->hbalock);
12296 phba->hba_flag |= FCF_DISC_INPROGRESS;
12297 spin_unlock_irq(&phba->hbalock);
12298 /* Reset eligible FCF count for new scan */
12299 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
12300 phba->fcf.eligible_fcf_cnt = 0;
12301 error = 0;
12302 }
12303 fail_fcf_scan:
12304 if (error) {
12305 if (mboxq)
12306 lpfc_sli4_mbox_cmd_free(phba, mboxq);
12307 /* FCF scan failed, clear FCF_DISC_INPROGRESS flag */
12308 spin_lock_irq(&phba->hbalock);
12309 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
12310 spin_unlock_irq(&phba->hbalock);
12311 }
12312 return error;
12313 }
12314
12315 /**
12316 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf.
12317 * @phba: pointer to lpfc hba data structure.
12318 * @fcf_index: FCF table entry offset.
12319 *
12320 * This routine is invoked to read an FCF record indicated by @fcf_index
12321 * and to use it for FLOGI round robin FCF failover.
12322 *
12323 * Return 0 if the mailbox command is submitted successfully, nonzero
12324 * otherwise.
12325 **/
12326 int
12327 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
12328 {
12329 int rc = 0, error;
12330 LPFC_MBOXQ_t *mboxq;
12331
12332 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12333 if (!mboxq) {
12334 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
12335 "2763 Failed to allocate mbox for "
12336 "READ_FCF cmd\n");
12337 error = -ENOMEM;
12338 goto fail_fcf_read;
12339 }
12340 /* Construct the read FCF record mailbox command */
12341 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
12342 if (rc) {
12343 error = -EINVAL;
12344 goto fail_fcf_read;
12345 }
12346 /* Issue the mailbox command asynchronously */
12347 mboxq->vport = phba->pport;
12348 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
12349 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
12350 if (rc == MBX_NOT_FINISHED)
12351 error = -EIO;
12352 else
12353 error = 0;
12354
12355 fail_fcf_read:
12356 if (error && mboxq)
12357 lpfc_sli4_mbox_cmd_free(phba, mboxq);
12358 return error;
12359 }
12360
12361 /**
12362 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
12363 * @phba: pointer to lpfc hba data structure.
12364 * @fcf_index: FCF table entry offset.
12365 *
12366 * This routine is invoked to read an FCF record indicated by @fcf_index to
12367 * determine whether it is eligible for the FLOGI round robin failover list.
12368 *
12369 * Return 0 if the mailbox command is submitted successfully, nonzero
12370 * otherwise.
12371 **/ 12372 int 12373 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 12374 { 12375 int rc = 0, error; 12376 LPFC_MBOXQ_t *mboxq; 12377 12378 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12379 if (!mboxq) { 12380 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 12381 "2758 Failed to allocate mbox for " 12382 "READ_FCF cmd\n"); 12383 error = -ENOMEM; 12384 goto fail_fcf_read; 12385 } 12386 /* Construct the read FCF record mailbox command */ 12387 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 12388 if (rc) { 12389 error = -EINVAL; 12390 goto fail_fcf_read; 12391 } 12392 /* Issue the mailbox command asynchronously */ 12393 mboxq->vport = phba->pport; 12394 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec; 12395 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 12396 if (rc == MBX_NOT_FINISHED) 12397 error = -EIO; 12398 else 12399 error = 0; 12400 12401 fail_fcf_read: 12402 if (error && mboxq) 12403 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12404 return error; 12405 } 12406 12407 /** 12408 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index 12409 * @phba: pointer to lpfc hba data structure. 12410 * 12411 * This routine is to get the next eligible FCF record index in a round 12412 * robin fashion. If the next eligible FCF record index equals to the 12413 * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 12414 * shall be returned, otherwise, the next eligible FCF record's index 12415 * shall be returned. 12416 **/ 12417 uint16_t 12418 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) 12419 { 12420 uint16_t next_fcf_index; 12421 12422 /* Search start from next bit of currently registered FCF index */ 12423 next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) % 12424 LPFC_SLI4_FCF_TBL_INDX_MAX; 12425 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 12426 LPFC_SLI4_FCF_TBL_INDX_MAX, 12427 next_fcf_index); 12428 12429 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 12430 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) 12431 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 12432 LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 12433 12434 /* Check roundrobin failover list empty condition */ 12435 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 12436 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 12437 "2844 No roundrobin failover FCF available\n"); 12438 return LPFC_FCOE_FCF_NEXT_NONE; 12439 } 12440 12441 /* Check roundrobin failover index bmask stop condition */ 12442 if (next_fcf_index == phba->fcf.fcf_rr_init_indx) { 12443 if (!(phba->fcf.fcf_flag & FCF_REDISC_RRU)) { 12444 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 12445 "2847 Round robin failover FCF index " 12446 "search hit stop condition:x%x\n", 12447 next_fcf_index); 12448 return LPFC_FCOE_FCF_NEXT_NONE; 12449 } 12450 /* The roundrobin failover index bmask updated, start over */ 12451 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12452 "2848 Round robin failover FCF index bmask " 12453 "updated, start over\n"); 12454 spin_lock_irq(&phba->hbalock); 12455 phba->fcf.fcf_flag &= ~FCF_REDISC_RRU; 12456 spin_unlock_irq(&phba->hbalock); 12457 return phba->fcf.fcf_rr_init_indx; 12458 } 12459 12460 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12461 "2845 Get next round robin failover " 12462 "FCF index x%x\n", next_fcf_index); 12463 return next_fcf_index; 12464 } 12465 12466 /** 12467 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index 12468 * @phba: pointer to lpfc hba data structure. 
12469 * 12470 * This routine sets the FCF record index in to the eligible bmask for 12471 * round robin failover search. It checks to make sure that the index 12472 * does not go beyond the range of the driver allocated bmask dimension 12473 * before setting the bit. 12474 * 12475 * Returns 0 if the index bit successfully set, otherwise, it returns 12476 * -EINVAL. 12477 **/ 12478 int 12479 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) 12480 { 12481 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 12482 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 12483 "2610 HBA FCF index reached driver's " 12484 "book keeping dimension: fcf_index:%d, " 12485 "driver_bmask_max:%d\n", 12486 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 12487 return -EINVAL; 12488 } 12489 /* Set the eligible FCF record index bmask */ 12490 set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 12491 12492 /* Set the roundrobin index bmask updated */ 12493 spin_lock_irq(&phba->hbalock); 12494 phba->fcf.fcf_flag |= FCF_REDISC_RRU; 12495 spin_unlock_irq(&phba->hbalock); 12496 12497 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12498 "2790 Set FCF index x%x to round robin failover " 12499 "bmask\n", fcf_index); 12500 12501 return 0; 12502 } 12503 12504 /** 12505 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index 12506 * @phba: pointer to lpfc hba data structure. 12507 * 12508 * This routine clears the FCF record index from the eligible bmask for 12509 * round robin failover search. It checks to make sure that the index 12510 * does not go beyond the range of the driver allocated bmask dimension 12511 * before clearing the bit. 12512 **/ 12513 void 12514 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 12515 { 12516 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 12517 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 12518 "2762 HBA FCF index goes beyond driver's " 12519 "book keeping dimension: fcf_index:%d, " 12520 "driver_bmask_max:%d\n", 12521 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 12522 return; 12523 } 12524 /* Clear the eligible FCF record index bmask */ 12525 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 12526 12527 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12528 "2791 Clear FCF index x%x from round robin failover " 12529 "bmask\n", fcf_index); 12530 } 12531 12532 /** 12533 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table 12534 * @phba: pointer to lpfc hba data structure. 12535 * 12536 * This routine is the completion routine for the rediscover FCF table mailbox 12537 * command. If the mailbox command returned failure, it will try to stop the 12538 * FCF rediscover wait timer. 
12539 **/ 12540 void 12541 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 12542 { 12543 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 12544 uint32_t shdr_status, shdr_add_status; 12545 12546 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 12547 12548 shdr_status = bf_get(lpfc_mbox_hdr_status, 12549 &redisc_fcf->header.cfg_shdr.response); 12550 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 12551 &redisc_fcf->header.cfg_shdr.response); 12552 if (shdr_status || shdr_add_status) { 12553 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 12554 "2746 Requesting for FCF rediscovery failed " 12555 "status x%x add_status x%x\n", 12556 shdr_status, shdr_add_status); 12557 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { 12558 spin_lock_irq(&phba->hbalock); 12559 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 12560 spin_unlock_irq(&phba->hbalock); 12561 /* 12562 * CVL event triggered FCF rediscover request failed, 12563 * last resort to re-try current registered FCF entry. 12564 */ 12565 lpfc_retry_pport_discovery(phba); 12566 } else { 12567 spin_lock_irq(&phba->hbalock); 12568 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 12569 spin_unlock_irq(&phba->hbalock); 12570 /* 12571 * DEAD FCF event triggered FCF rediscover request 12572 * failed, last resort to fail over as a link down 12573 * to FCF registration. 12574 */ 12575 lpfc_sli4_fcf_dead_failthrough(phba); 12576 } 12577 } else { 12578 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12579 "2775 Start FCF rediscovery quiescent period " 12580 "wait timer before scaning FCF table\n"); 12581 /* 12582 * Start FCF rediscovery wait timer for pending FCF 12583 * before rescan FCF record table. 12584 */ 12585 lpfc_fcf_redisc_wait_start_timer(phba); 12586 } 12587 12588 mempool_free(mbox, phba->mbox_mem_pool); 12589 } 12590 12591 /** 12592 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port. 12593 * @phba: pointer to lpfc hba data structure. 12594 * 12595 * This routine is invoked to request for rediscovery of the entire FCF table 12596 * by the port. 12597 **/ 12598 int 12599 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) 12600 { 12601 LPFC_MBOXQ_t *mbox; 12602 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 12603 int rc, length; 12604 12605 /* Cancel retry delay timers to all vports before FCF rediscover */ 12606 lpfc_cancel_all_vport_retry_delay_timer(phba); 12607 12608 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12609 if (!mbox) { 12610 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12611 "2745 Failed to allocate mbox for " 12612 "requesting FCF rediscover.\n"); 12613 return -ENOMEM; 12614 } 12615 12616 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) - 12617 sizeof(struct lpfc_sli4_cfg_mhdr)); 12618 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 12619 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF, 12620 length, LPFC_SLI4_MBX_EMBED); 12621 12622 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 12623 /* Set count to 0 for invalidating the entire FCF database */ 12624 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0); 12625 12626 /* Issue the mailbox command asynchronously */ 12627 mbox->vport = phba->pport; 12628 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table; 12629 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 12630 12631 if (rc == MBX_NOT_FINISHED) { 12632 mempool_free(mbox, phba->mbox_mem_pool); 12633 return -EIO; 12634 } 12635 return 0; 12636 } 12637 12638 /** 12639 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event 12640 * @phba: pointer to lpfc hba data structure. 
12641 *
12642 * This function is the last-resort failover routine for the FCF DEAD
12643 * event, used when the driver failed to perform a fast FCF failover.
12644 **/
12645 void
12646 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
12647 {
12648 uint32_t link_state;
12649
12650 /*
12651 * Last resort as FCF DEAD event failover will treat this as
12652 * a link down, but save the link state because we don't want
12653 * it to be changed to Link Down unless it is already down.
12654 */
12655 link_state = phba->link_state;
12656 lpfc_linkdown(phba);
12657 phba->link_state = link_state;
12658
12659 /* Unregister FCF if no devices connected to it */
12660 lpfc_unregister_unused_fcf(phba);
12661 }
12662
12663 /**
12664 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
12665 * @phba: pointer to lpfc hba data structure.
12666 *
12667 * This function reads region 23 and parses the TLV for port status to
12668 * decide if the user disabled the port. If the TLV indicates the
12669 * port is disabled, the hba_flag is set accordingly.
12670 **/
12671 void
12672 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
12673 {
12674 LPFC_MBOXQ_t *pmb = NULL;
12675 MAILBOX_t *mb;
12676 uint8_t *rgn23_data = NULL;
12677 uint32_t offset = 0, data_size, sub_tlv_len, tlv_offset;
12678 int rc;
12679
12680 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12681 if (!pmb) {
12682 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12683 "2600 lpfc_sli_read_link_ste failed to"
12684 " allocate mailbox memory\n");
12685 goto out;
12686 }
12687 mb = &pmb->u.mb;
12688
12689 /* Get adapter Region 23 data */
12690 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
12691 if (!rgn23_data)
12692 goto out;
12693
12694 do {
12695 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
12696 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
12697
12698 if (rc != MBX_SUCCESS) {
12699 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12700 "2601 lpfc_sli_read_link_ste failed to"
12701 " read config region 23 rc 0x%x Status 0x%x\n",
12702 rc, mb->mbxStatus);
12703 mb->un.varDmp.word_cnt = 0;
12704 }
12705 /*
12706 * dump mem may return a zero when finished or if we hit a
12707 * mailbox error; either way we are done.
12708 */
12709 if (mb->un.varDmp.word_cnt == 0)
12710 break;
12711 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
12712 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
12713
12714 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
12715 rgn23_data + offset,
12716 mb->un.varDmp.word_cnt);
12717 offset += mb->un.varDmp.word_cnt;
12718 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
12719
12720 data_size = offset;
12721 offset = 0;
12722
12723 if (!data_size)
12724 goto out;
12725
12726 /* Check the region signature first */
12727 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
12728 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12729 "2619 Config region 23 has bad signature\n");
12730 goto out;
12731 }
12732 offset += 4;
12733
12734 /* Check the data structure version */
12735 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
12736 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12737 "2620 Config region 23 has bad version\n");
12738 goto out;
12739 }
12740 offset += 4;
12741
12742 /* Parse TLV entries in the region */
12743 while (offset < data_size) {
12744 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
12745 break;
12746 /*
12747 * If the TLV is not a driver-specific TLV or the driver id is
12748 * not the Linux driver id, skip the record.
12749 */ 12750 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) || 12751 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) || 12752 (rgn23_data[offset + 3] != 0)) { 12753 offset += rgn23_data[offset + 1] * 4 + 4; 12754 continue; 12755 } 12756 12757 /* Driver found a driver specific TLV in the config region */ 12758 sub_tlv_len = rgn23_data[offset + 1] * 4; 12759 offset += 4; 12760 tlv_offset = 0; 12761 12762 /* 12763 * Search for configured port state sub-TLV. 12764 */ 12765 while ((offset < data_size) && 12766 (tlv_offset < sub_tlv_len)) { 12767 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) { 12768 offset += 4; 12769 tlv_offset += 4; 12770 break; 12771 } 12772 if (rgn23_data[offset] != PORT_STE_TYPE) { 12773 offset += rgn23_data[offset + 1] * 4 + 4; 12774 tlv_offset += rgn23_data[offset + 1] * 4 + 4; 12775 continue; 12776 } 12777 12778 /* This HBA contains PORT_STE configured */ 12779 if (!rgn23_data[offset + 2]) 12780 phba->hba_flag |= LINK_DISABLED; 12781 12782 goto out; 12783 } 12784 } 12785 out: 12786 if (pmb) 12787 mempool_free(pmb, phba->mbox_mem_pool); 12788 kfree(rgn23_data); 12789 return; 12790 } 12791 12792 /** 12793 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands. 12794 * @vport: pointer to vport data structure. 12795 * 12796 * This function iterate through the mailboxq and clean up all REG_LOGIN 12797 * and REG_VPI mailbox commands associated with the vport. This function 12798 * is called when driver want to restart discovery of the vport due to 12799 * a Clear Virtual Link event. 12800 **/ 12801 void 12802 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) 12803 { 12804 struct lpfc_hba *phba = vport->phba; 12805 LPFC_MBOXQ_t *mb, *nextmb; 12806 struct lpfc_dmabuf *mp; 12807 struct lpfc_nodelist *ndlp; 12808 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 12809 12810 spin_lock_irq(&phba->hbalock); 12811 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 12812 if (mb->vport != vport) 12813 continue; 12814 12815 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 12816 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 12817 continue; 12818 12819 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 12820 if (phba->sli_rev == LPFC_SLI_REV4) 12821 __lpfc_sli4_free_rpi(phba, 12822 mb->u.mb.un.varRegLogin.rpi); 12823 mp = (struct lpfc_dmabuf *) (mb->context1); 12824 if (mp) { 12825 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 12826 kfree(mp); 12827 } 12828 ndlp = (struct lpfc_nodelist *) mb->context2; 12829 if (ndlp) { 12830 spin_lock_irq(shost->host_lock); 12831 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 12832 spin_unlock_irq(shost->host_lock); 12833 lpfc_nlp_put(ndlp); 12834 mb->context2 = NULL; 12835 } 12836 } 12837 list_del(&mb->list); 12838 mempool_free(mb, phba->mbox_mem_pool); 12839 } 12840 mb = phba->sli.mbox_active; 12841 if (mb && (mb->vport == vport)) { 12842 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || 12843 (mb->u.mb.mbxCommand == MBX_REG_VPI)) 12844 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12845 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 12846 ndlp = (struct lpfc_nodelist *) mb->context2; 12847 if (ndlp) { 12848 spin_lock_irq(shost->host_lock); 12849 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 12850 spin_unlock_irq(shost->host_lock); 12851 lpfc_nlp_put(ndlp); 12852 mb->context2 = NULL; 12853 } 12854 /* Unregister the RPI when mailbox complete */ 12855 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 12856 } 12857 } 12858 spin_unlock_irq(&phba->hbalock); 12859 } 12860 12861 /** 12862 * lpfc_drain_txq - Drain the txq 12863 * @phba: Pointer to HBA context object. 
12864 *
12865 * This function attempts to submit IOCBs on the txq
12866 * to the adapter. For SLI4 adapters, the txq contains
12867 * ELS IOCBs that have been deferred because there
12868 * are no SGLs. This congestion can occur with large
12869 * vport counts during node discovery.
12870 **/
12871
12872 uint32_t
12873 lpfc_drain_txq(struct lpfc_hba *phba)
12874 {
12875 LIST_HEAD(completions);
12876 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
12877 struct lpfc_iocbq *piocbq = NULL;
12878 unsigned long iflags = 0;
12879 char *fail_msg = NULL;
12880 struct lpfc_sglq *sglq;
12881 union lpfc_wqe wqe;
12882
12883 spin_lock_irqsave(&phba->hbalock, iflags);
12884 if (pring->txq_cnt > pring->txq_max)
12885 pring->txq_max = pring->txq_cnt;
12886
12887 spin_unlock_irqrestore(&phba->hbalock, iflags);
12888
12889 while (pring->txq_cnt) {
12890 spin_lock_irqsave(&phba->hbalock, iflags);
12891
12892 sglq = __lpfc_sli_get_sglq(phba);
12893 if (!sglq) {
12894 spin_unlock_irqrestore(&phba->hbalock, iflags);
12895 break;
12896 } else {
12897 piocbq = lpfc_sli_ringtx_get(phba, pring);
12898 if (!piocbq) {
12899 /* The txq_cnt is out of sync. This should
12900 * never happen.
12901 */
12902 sglq = __lpfc_clear_active_sglq(phba,
12903 sglq->sli4_xritag);
12904 spin_unlock_irqrestore(&phba->hbalock, iflags);
12905 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12906 "2823 txq empty and txq_cnt is %d\n ",
12907 pring->txq_cnt);
12908 break;
12909 }
12910 }
12911
12912 /* The xri and iocb resources are secured,
12913 * attempt to issue the request.
12914 */
12915 piocbq->sli4_xritag = sglq->sli4_xritag;
12916 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
12917 fail_msg = "to convert bpl to sgl";
12918 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
12919 fail_msg = "to convert iocb to wqe";
12920 else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
12921 fail_msg = " - Wq is full";
12922 else
12923 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
12924
12925 if (fail_msg) {
12926 /* Failed means we can't issue and need to cancel */
12927 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12928 "2822 IOCB failed %s iotag 0x%x "
12929 "xri 0x%x\n",
12930 fail_msg,
12931 piocbq->iotag, piocbq->sli4_xritag);
12932 list_add_tail(&piocbq->list, &completions);
12933 }
12934 spin_unlock_irqrestore(&phba->hbalock, iflags);
12935 }
12936
12937 /* Cancel all the IOCBs that cannot be issued */
12938 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
12939 IOERR_SLI_ABORTED);
12940
12941 return pring->txq_cnt;
12942 }
12943
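/*
 * Editor's note: the sketch below is illustrative only and is not part of
 * the driver source.  It is a minimal, standalone model (plain C, compiled
 * out with #if 0) of the wraparound bitmap search performed above by
 * lpfc_sli4_fcf_rr_next_index_get(): start one past the currently
 * registered index, wrap to bit 0 at the end of the table, and report
 * "none" when the bitmask is empty.  The stop-condition and rescan
 * handling of the real routine are intentionally omitted.  The names
 * rr_next_index(), next_set_bit(), NO_INDEX and table_max are hypothetical
 * and exist only for this example.
 */
#if 0
#include <stdint.h>

#define NO_INDEX 0xFFFF
#define BITS_PER_WORD (8 * sizeof(unsigned long))

/* Return the first set bit at or after start, or table_max if none. */
static uint16_t next_set_bit(const unsigned long *bmask, uint16_t table_max,
			     uint16_t start)
{
	uint16_t i;

	for (i = start; i < table_max; i++)
		if (bmask[i / BITS_PER_WORD] & (1UL << (i % BITS_PER_WORD)))
			return i;
	return table_max;
}

/* Round-robin search: look after current_index, then wrap to bit 0 once. */
static uint16_t rr_next_index(const unsigned long *bmask, uint16_t table_max,
			      uint16_t current_index)
{
	uint16_t next = next_set_bit(bmask, table_max,
				     (current_index + 1) % table_max);

	if (next >= table_max)
		next = next_set_bit(bmask, table_max, 0);
	if (next >= table_max)
		return NO_INDEX;	/* bitmask empty: nothing eligible */
	return next;
}
#endif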