/*
 *  linux/drivers/scsi/esas2r/esas2r_init.c
 *      For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
 *
 *  Copyright (c) 2001-2013 ATTO Technology, Inc.
 *  (mailto:linuxdrivers@attotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
 * USA.
 */

#include "esas2r.h"

static bool esas2r_initmem_alloc(struct esas2r_adapter *a,
				 struct esas2r_mem_desc *mem_desc,
				 u32 align)
{
	mem_desc->esas2r_param = mem_desc->size + align;
	mem_desc->virt_addr = NULL;
	mem_desc->phys_addr = 0;
	mem_desc->esas2r_data = dma_alloc_coherent(&a->pcid->dev,
						   (size_t)mem_desc->esas2r_param,
						   (dma_addr_t *)&mem_desc->phys_addr,
						   GFP_KERNEL);

	if (mem_desc->esas2r_data == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "failed to allocate %lu bytes of consistent memory!",
			   (unsigned long)mem_desc->esas2r_param);
		return false;
	}

	mem_desc->virt_addr = PTR_ALIGN(mem_desc->esas2r_data, align);
	mem_desc->phys_addr = ALIGN(mem_desc->phys_addr, align);
	memset(mem_desc->virt_addr, 0, mem_desc->size);
	return true;
}

static void esas2r_initmem_free(struct esas2r_adapter *a,
				struct esas2r_mem_desc *mem_desc)
{
	if (mem_desc->virt_addr == NULL)
		return;

	/*
	 * Careful!  phys_addr and virt_addr may have been adjusted from the
	 * original allocation in order to return the desired alignment.  That
	 * means we have to use the original address (in esas2r_data) and size
	 * (esas2r_param) and calculate the original physical address based on
	 * the difference between the requested and actual allocation size.
	 */
	if (mem_desc->phys_addr) {
		int unalign = ((u8 *)mem_desc->virt_addr) -
			      ((u8 *)mem_desc->esas2r_data);

		dma_free_coherent(&a->pcid->dev,
				  (size_t)mem_desc->esas2r_param,
				  mem_desc->esas2r_data,
				  (dma_addr_t)(mem_desc->phys_addr - unalign));
	} else {
		kfree(mem_desc->esas2r_data);
	}

	mem_desc->virt_addr = NULL;
}
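/*
 * Worked example of the bookkeeping above (illustrative values only): for
 * size = 0x1f0 and align = 0x100, esas2r_initmem_alloc() requests 0x2f0
 * bytes.  If dma_alloc_coherent() returns virt/phys addresses ending in
 * 0x010, the aligned pair ends in 0x100, an adjustment ("unalign") of 0xf0
 * bytes.  esas2r_initmem_free() recovers the original physical address by
 * subtracting that same virtual-address delta, which is valid because
 * dma_alloc_coherent() returns a region that is contiguous in both the
 * virtual and DMA address spaces.
 */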
static bool alloc_vda_req(struct esas2r_adapter *a,
			  struct esas2r_request *rq)
{
	struct esas2r_mem_desc *memdesc = kzalloc(
		sizeof(struct esas2r_mem_desc), GFP_KERNEL);

	if (memdesc == NULL) {
		esas2r_hdebug("could not alloc mem for vda request memdesc\n");
		return false;
	}

	memdesc->size = sizeof(union atto_vda_req) +
			ESAS2R_DATA_BUF_LEN;

	if (!esas2r_initmem_alloc(a, memdesc, 256)) {
		esas2r_hdebug("could not alloc mem for vda request\n");
		kfree(memdesc);
		return false;
	}

	a->num_vrqs++;
	list_add(&memdesc->next_desc, &a->vrq_mds_head);

	rq->vrq_md = memdesc;
	rq->vrq = (union atto_vda_req *)memdesc->virt_addr;
	rq->vrq->scsi.handle = a->num_vrqs;

	return true;
}

static void esas2r_unmap_regions(struct esas2r_adapter *a)
{
	if (a->regs)
		iounmap((void __iomem *)a->regs);

	a->regs = NULL;

	pci_release_region(a->pcid, 2);

	if (a->data_window)
		iounmap((void __iomem *)a->data_window);

	a->data_window = NULL;

	pci_release_region(a->pcid, 0);
}

static int esas2r_map_regions(struct esas2r_adapter *a)
{
	int error;

	a->regs = NULL;
	a->data_window = NULL;

	error = pci_request_region(a->pcid, 2, a->name);
	if (error != 0) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "pci_request_region(2) failed, error %d",
			   error);

		return error;
	}

	a->regs = (void __force *)ioremap(pci_resource_start(a->pcid, 2),
					  pci_resource_len(a->pcid, 2));
	if (a->regs == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "ioremap failed for regs mem region\n");
		pci_release_region(a->pcid, 2);
		return -EFAULT;
	}

	error = pci_request_region(a->pcid, 0, a->name);
	if (error != 0) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "pci_request_region(0) failed, error %d",
			   error);
		esas2r_unmap_regions(a);
		return error;
	}

	a->data_window = (void __force *)ioremap(pci_resource_start(a->pcid, 0),
						 pci_resource_len(a->pcid, 0));
	if (a->data_window == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "ioremap failed for data_window mem region\n");
		esas2r_unmap_regions(a);
		return -EFAULT;
	}

	return 0;
}

static void esas2r_setup_interrupts(struct esas2r_adapter *a, int intr_mode)
{
	int i;

	/* Set up interrupt mode based on the requested value */
	switch (intr_mode) {
	case INTR_MODE_LEGACY:
use_legacy_interrupts:
		a->intr_mode = INTR_MODE_LEGACY;
		break;

	case INTR_MODE_MSI:
		i = pci_enable_msi(a->pcid);
		if (i != 0) {
			esas2r_log(ESAS2R_LOG_WARN,
				   "failed to enable MSI for adapter %d, "
				   "falling back to legacy interrupts "
				   "(err=%d)", a->index, i);
			goto use_legacy_interrupts;
		}
		a->intr_mode = INTR_MODE_MSI;
		esas2r_lock_set_flags(&a->flags2, AF2_MSI_ENABLED);
		break;

	default:
		esas2r_log(ESAS2R_LOG_WARN,
			   "unknown interrupt_mode %d requested, "
			   "falling back to legacy interrupt",
			   intr_mode);
		goto use_legacy_interrupts;
	}
}
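/*
 * A legacy INTx line can be shared with other devices, so the handler is
 * registered with IRQF_SHARED in that mode; an MSI vector is exclusive to
 * this function and needs no sharing flag.
 */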
static void esas2r_claim_interrupts(struct esas2r_adapter *a)
{
	unsigned long flags = IRQF_DISABLED;

	if (a->intr_mode == INTR_MODE_LEGACY)
		flags |= IRQF_SHARED;

	esas2r_log(ESAS2R_LOG_INFO,
		   "esas2r_claim_interrupts irq=%d (%p, %s, %lx)",
		   a->pcid->irq, a, a->name, flags);

	if (request_irq(a->pcid->irq,
			(a->intr_mode ==
			 INTR_MODE_LEGACY) ? esas2r_interrupt :
			esas2r_msi_interrupt,
			flags,
			a->name,
			a)) {
		esas2r_log(ESAS2R_LOG_CRIT, "unable to request IRQ %02X",
			   a->pcid->irq);
		return;
	}

	esas2r_lock_set_flags(&a->flags2, AF2_IRQ_CLAIMED);
	esas2r_log(ESAS2R_LOG_INFO,
		   "claimed IRQ %d flags: 0x%lx",
		   a->pcid->irq, flags);
}

int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
			int index)
{
	struct esas2r_adapter *a;
	u64 bus_addr = 0;
	int i;
	void *next_uncached;
	struct esas2r_request *first_request, *last_request;

	if (index >= MAX_ADAPTERS) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "tried to init invalid adapter index %u!",
			   index);
		return 0;
	}

	if (esas2r_adapters[index]) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "tried to init existing adapter index %u!",
			   index);
		return 0;
	}

	a = (struct esas2r_adapter *)host->hostdata;
	memset(a, 0, sizeof(struct esas2r_adapter));
	a->pcid = pcid;
	a->host = host;

	if (sizeof(dma_addr_t) > 4) {
		const uint64_t required_mask =
			dma_get_required_mask(&pcid->dev);

		if (required_mask > DMA_BIT_MASK(32)
		    && !pci_set_dma_mask(pcid, DMA_BIT_MASK(64))
		    && !pci_set_consistent_dma_mask(pcid,
						    DMA_BIT_MASK(64))) {
			esas2r_log_dev(ESAS2R_LOG_INFO,
				       &(a->pcid->dev),
				       "64-bit PCI addressing enabled\n");
		} else if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
			   && !pci_set_consistent_dma_mask(pcid,
							   DMA_BIT_MASK(32))) {
			esas2r_log_dev(ESAS2R_LOG_INFO,
				       &(a->pcid->dev),
				       "32-bit PCI addressing enabled\n");
		} else {
			esas2r_log(ESAS2R_LOG_CRIT,
				   "failed to set DMA mask");
			esas2r_kill_adapter(index);
			return 0;
		}
	} else {
		if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
		    && !pci_set_consistent_dma_mask(pcid,
						    DMA_BIT_MASK(32))) {
			esas2r_log_dev(ESAS2R_LOG_INFO,
				       &(a->pcid->dev),
				       "32-bit PCI addressing enabled\n");
		} else {
			esas2r_log(ESAS2R_LOG_CRIT,
				   "failed to set DMA mask");
			esas2r_kill_adapter(index);
			return 0;
		}
	}
	esas2r_adapters[index] = a;
	sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index);
	esas2r_debug("new adapter %p, name %s", a, a->name);
	spin_lock_init(&a->request_lock);
	spin_lock_init(&a->fw_event_lock);
	sema_init(&a->fm_api_semaphore, 1);
	sema_init(&a->fs_api_semaphore, 1);
	sema_init(&a->nvram_semaphore, 1);

	esas2r_fw_event_off(a);
	snprintf(a->fw_event_q_name, ESAS2R_KOBJ_NAME_LEN, "esas2r/%d",
		 index);
	a->fw_event_q = create_singlethread_workqueue(a->fw_event_q_name);

	init_waitqueue_head(&a->buffered_ioctl_waiter);
	init_waitqueue_head(&a->nvram_waiter);
	init_waitqueue_head(&a->fm_api_waiter);
	init_waitqueue_head(&a->fs_api_waiter);
	init_waitqueue_head(&a->vda_waiter);

	INIT_LIST_HEAD(&a->general_req.req_list);
	INIT_LIST_HEAD(&a->active_list);
	INIT_LIST_HEAD(&a->defer_list);
	INIT_LIST_HEAD(&a->free_sg_list_head);
	INIT_LIST_HEAD(&a->avail_request);
	INIT_LIST_HEAD(&a->vrq_mds_head);
	INIT_LIST_HEAD(&a->fw_event_list);

	first_request = (struct esas2r_request *)((u8 *)(a + 1));

	for (last_request = first_request, i = 1; i < num_requests;
	     last_request++, i++) {
		INIT_LIST_HEAD(&last_request->req_list);
		list_add_tail(&last_request->comp_list, &a->avail_request);
		if (!alloc_vda_req(a, last_request)) {
			esas2r_log(ESAS2R_LOG_CRIT,
				   "failed to allocate a VDA request!");
			esas2r_kill_adapter(index);
			return 0;
		}
	}

	esas2r_debug("requests:  %p to %p (%zu, %d)", first_request,
		     last_request,
		     sizeof(*first_request),
		     num_requests);

	if (esas2r_map_regions(a) != 0) {
		esas2r_log(ESAS2R_LOG_CRIT, "could not map PCI regions!");
		esas2r_kill_adapter(index);
		return 0;
	}

	a->index = index;

	/* interrupts will be disabled until we are done with init */
	atomic_inc(&a->dis_ints_cnt);
	atomic_inc(&a->disable_cnt);
	a->flags |= AF_CHPRST_PENDING
		    | AF_DISC_PENDING
		    | AF_FIRST_INIT
		    | AF_LEGACY_SGE_MODE;

	a->init_msg = ESAS2R_INIT_MSG_START;
	a->max_vdareq_size = 128;
	a->build_sgl = esas2r_build_sg_list_sge;

	esas2r_setup_interrupts(a, interrupt_mode);

	a->uncached_size = esas2r_get_uncached_size(a);
	a->uncached = dma_alloc_coherent(&pcid->dev,
					 (size_t)a->uncached_size,
					 (dma_addr_t *)&bus_addr,
					 GFP_KERNEL);
	if (a->uncached == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "failed to allocate %d bytes of consistent memory!",
			   a->uncached_size);
		esas2r_kill_adapter(index);
		return 0;
	}

	a->uncached_phys = bus_addr;

	esas2r_debug("%d bytes uncached memory allocated @ %p (%x:%x)",
		     a->uncached_size,
		     a->uncached,
		     upper_32_bits(bus_addr),
		     lower_32_bits(bus_addr));
	memset(a->uncached, 0, a->uncached_size);
	next_uncached = a->uncached;

	if (!esas2r_init_adapter_struct(a,
					&next_uncached)) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "failed to initialize adapter structure (2)!");
		esas2r_kill_adapter(index);
		return 0;
	}

	tasklet_init(&a->tasklet,
		     esas2r_adapter_tasklet,
		     (unsigned long)a);

	/*
	 * Disable chip interrupts to prevent spurious interrupts
	 * until we claim the IRQ.
	 */
	esas2r_disable_chip_interrupts(a);
	esas2r_check_adapter(a);

	if (!esas2r_init_adapter_hw(a, true))
		esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize hardware!");
	else
		esas2r_debug("esas2r_init_adapter ok");

	esas2r_claim_interrupts(a);

	if (a->flags2 & AF2_IRQ_CLAIMED)
		esas2r_enable_chip_interrupts(a);

	esas2r_lock_set_flags(&a->flags2, AF2_INIT_DONE);
	if (!(a->flags & AF_DEGRADED_MODE))
		esas2r_kickoff_timer(a);
	esas2r_debug("esas2r_init_adapter done for %p (%d)",
		     a, a->disable_cnt);

	return 1;
}
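/*
 * Tear down in roughly the reverse order of esas2r_init_adapter(): stop the
 * timer and tasklet, quiesce the chip, remove the sysfs binary files,
 * release the IRQ and MSI vector, and finally free the DMA allocations made
 * during initialization.
 */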
static void esas2r_adapter_power_down(struct esas2r_adapter *a,
				      int power_management)
{
	struct esas2r_mem_desc *memdesc, *next;

	if ((a->flags2 & AF2_INIT_DONE)
	    && (!(a->flags & AF_DEGRADED_MODE))) {
		if (!power_management) {
			del_timer_sync(&a->timer);
			tasklet_kill(&a->tasklet);
		}
		esas2r_power_down(a);

		/*
		 * There are versions of firmware that do not handle the sync
		 * cache command correctly.  Stall here to ensure that the
		 * cache is lazily flushed.
		 */
		mdelay(500);
		esas2r_debug("chip halted");
	}

	/* Remove sysfs binary files */
	if (a->sysfs_fw_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fw);
		a->sysfs_fw_created = 0;
	}

	if (a->sysfs_fs_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fs);
		a->sysfs_fs_created = 0;
	}

	if (a->sysfs_vda_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_vda);
		a->sysfs_vda_created = 0;
	}

	if (a->sysfs_hw_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_hw);
		a->sysfs_hw_created = 0;
	}

	if (a->sysfs_live_nvram_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj,
				      &bin_attr_live_nvram);
		a->sysfs_live_nvram_created = 0;
	}

	if (a->sysfs_default_nvram_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj,
				      &bin_attr_default_nvram);
		a->sysfs_default_nvram_created = 0;
	}

	/* Clean up interrupts */
	if (a->flags2 & AF2_IRQ_CLAIMED) {
		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "free_irq(%d) called", a->pcid->irq);

		free_irq(a->pcid->irq, a);
		esas2r_debug("IRQ released");
		esas2r_lock_clear_flags(&a->flags2, AF2_IRQ_CLAIMED);
	}

	if (a->flags2 & AF2_MSI_ENABLED) {
		pci_disable_msi(a->pcid);
		esas2r_lock_clear_flags(&a->flags2, AF2_MSI_ENABLED);
		esas2r_debug("MSI disabled");
	}

	if (a->inbound_list_md.virt_addr)
		esas2r_initmem_free(a, &a->inbound_list_md);

	if (a->outbound_list_md.virt_addr)
		esas2r_initmem_free(a, &a->outbound_list_md);

	list_for_each_entry_safe(memdesc, next, &a->free_sg_list_head,
				 next_desc) {
		esas2r_initmem_free(a, memdesc);
	}

	/* Following frees everything allocated via alloc_vda_req */
	list_for_each_entry_safe(memdesc, next, &a->vrq_mds_head, next_desc) {
		esas2r_initmem_free(a, memdesc);
		list_del(&memdesc->next_desc);
		kfree(memdesc);
	}

	kfree(a->first_ae_req);
	a->first_ae_req = NULL;

	kfree(a->sg_list_mds);
	a->sg_list_mds = NULL;

	kfree(a->req_table);
	a->req_table = NULL;

	if (a->regs) {
		esas2r_unmap_regions(a);
		a->regs = NULL;
		a->data_window = NULL;
		esas2r_debug("regions unmapped");
	}
}
/* Release/free allocated resources for specified adapters. */
void esas2r_kill_adapter(int i)
{
	struct esas2r_adapter *a = esas2r_adapters[i];

	if (a) {
		unsigned long flags;
		struct workqueue_struct *wq;
		esas2r_debug("killing adapter %p [%d] ", a, i);
		esas2r_fw_event_off(a);
		esas2r_adapter_power_down(a, 0);
		if (esas2r_buffered_ioctl &&
		    (a->pcid == esas2r_buffered_ioctl_pcid)) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)esas2r_buffered_ioctl_size,
					  esas2r_buffered_ioctl,
					  esas2r_buffered_ioctl_addr);
			esas2r_buffered_ioctl = NULL;
		}

		if (a->vda_buffer) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)VDA_MAX_BUFFER_SIZE,
					  a->vda_buffer,
					  (dma_addr_t)a->ppvda_buffer);
			a->vda_buffer = NULL;
		}
		if (a->fs_api_buffer) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)a->fs_api_buffer_size,
					  a->fs_api_buffer,
					  (dma_addr_t)a->ppfs_api_buffer);
			a->fs_api_buffer = NULL;
		}

		kfree(a->local_atto_ioctl);
		a->local_atto_ioctl = NULL;

		spin_lock_irqsave(&a->fw_event_lock, flags);
		wq = a->fw_event_q;
		a->fw_event_q = NULL;
		spin_unlock_irqrestore(&a->fw_event_lock, flags);
		if (wq)
			destroy_workqueue(wq);

		if (a->uncached) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)a->uncached_size,
					  a->uncached,
					  (dma_addr_t)a->uncached_phys);
			a->uncached = NULL;
			esas2r_debug("uncached area freed");
		}

		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "pci_disable_device() called.  msix_enabled: %d "
			       "msi_enabled: %d irq: %d pin: %d",
			       a->pcid->msix_enabled,
			       a->pcid->msi_enabled,
			       a->pcid->irq,
			       a->pcid->pin);

		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "before pci_disable_device() enable_cnt: %d",
			       a->pcid->enable_cnt.counter);

		pci_disable_device(a->pcid);
		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "after pci_disable_device() enable_cnt: %d",
			       a->pcid->enable_cnt.counter);

		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "pci_set_drv_data(%p, NULL) called",
			       a->pcid);

		pci_set_drvdata(a->pcid, NULL);
		esas2r_adapters[i] = NULL;

		if (a->flags2 & AF2_INIT_DONE) {
			esas2r_lock_clear_flags(&a->flags2,
						AF2_INIT_DONE);

			esas2r_lock_set_flags(&a->flags,
					      AF_DEGRADED_MODE);

			esas2r_log_dev(ESAS2R_LOG_INFO,
				       &(a->host->shost_gendev),
				       "scsi_remove_host() called");

			scsi_remove_host(a->host);

			esas2r_log_dev(ESAS2R_LOG_INFO,
				       &(a->host->shost_gendev),
				       "scsi_host_put() called");

			scsi_host_put(a->host);
		}
	}
}

int esas2r_cleanup(struct Scsi_Host *host)
{
	struct esas2r_adapter *a;
	int index;

	if (host == NULL) {
		int i;

		esas2r_debug("esas2r_cleanup everything");
		for (i = 0; i < MAX_ADAPTERS; i++)
			esas2r_kill_adapter(i);
		return -1;
	}

	esas2r_debug("esas2r_cleanup called for host %p", host);
	a = (struct esas2r_adapter *)host->hostdata;
	index = a->index;
	esas2r_kill_adapter(index);
	return index;
}
int esas2r_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	u32 device_state;
	struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;

	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "suspending adapter()");
	if (!a)
		return -ENODEV;

	esas2r_adapter_power_down(a, 1);
	device_state = pci_choose_state(pdev, state);
	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
		       "pci_save_state() called");
	pci_save_state(pdev);
	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
		       "pci_disable_device() called");
	pci_disable_device(pdev);
	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
		       "pci_set_power_state() called");
	pci_set_power_state(pdev, device_state);
	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "esas2r_suspend(): 0");
	return 0;
}

int esas2r_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
	int rez;

	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "resuming adapter()");
	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
		       "pci_set_power_state(PCI_D0) called");
	pci_set_power_state(pdev, PCI_D0);
	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
		       "pci_enable_wake(PCI_D0, 0) called");
	pci_enable_wake(pdev, PCI_D0, 0);
	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
		       "pci_restore_state() called");
	pci_restore_state(pdev);
	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
		       "pci_enable_device() called");
	rez = pci_enable_device(pdev);
	pci_set_master(pdev);

	if (!a) {
		rez = -ENODEV;
		goto error_exit;
	}

	if (esas2r_map_regions(a) != 0) {
		esas2r_log(ESAS2R_LOG_CRIT, "could not re-map PCI regions!");
		rez = -ENOMEM;
		goto error_exit;
	}

	/* Set up interrupt mode */
	esas2r_setup_interrupts(a, a->intr_mode);

	/*
	 * Disable chip interrupts to prevent spurious interrupts until we
	 * claim the IRQ.
	 */
	esas2r_disable_chip_interrupts(a);
	if (!esas2r_power_up(a, true)) {
		esas2r_debug("yikes, esas2r_power_up failed");
		rez = -ENOMEM;
		goto error_exit;
	}

	esas2r_claim_interrupts(a);

	if (a->flags2 & AF2_IRQ_CLAIMED) {
		/*
		 * Now that system interrupt(s) are claimed, we can enable
		 * chip interrupts.
		 */
		esas2r_enable_chip_interrupts(a);
		esas2r_kickoff_timer(a);
	} else {
		esas2r_debug("yikes, unable to claim IRQ");
		esas2r_log(ESAS2R_LOG_CRIT, "could not re-claim IRQ!");
		rez = -ENOMEM;
		goto error_exit;
	}

error_exit:
	esas2r_log_dev(ESAS2R_LOG_CRIT, &(pdev->dev), "esas2r_resume(): %d",
		       rez);
	return rez;
}

bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)
{
	esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE);
	esas2r_log(ESAS2R_LOG_CRIT,
		   "setting adapter to degraded mode: %s\n", error_str);
	return false;
}

u32 esas2r_get_uncached_size(struct esas2r_adapter *a)
{
	return sizeof(struct esas2r_sas_nvram)
	       + ALIGN(ESAS2R_DISC_BUF_LEN, 8)
	       + ALIGN(sizeof(u32), 8) /* outbound list copy pointer */
	       + 8
	       + (num_sg_lists * (u16)sgl_page_size)
	       + ALIGN((num_requests + num_ae_requests + 1 +
			ESAS2R_LIST_EXTRA) *
		       sizeof(struct esas2r_inbound_list_source_entry),
		       8)
	       + ALIGN((num_requests + num_ae_requests + 1 +
			ESAS2R_LIST_EXTRA) *
		       sizeof(struct atto_vda_ob_rsp), 8)
	       + 256; /* VDA request and buffer align */
}

static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
{
	int pcie_cap_reg;

	pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
	if (pcie_cap_reg) {
		u16 devcontrol;

		pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
				     &devcontrol);

		/*
		 * PCI_EXP_DEVCTL_READRQ occupies bits 14:12, so 0x2000
		 * encodes a 512-byte maximum read request size.
		 */
		if ((devcontrol & PCI_EXP_DEVCTL_READRQ) > 0x2000) {
			esas2r_log(ESAS2R_LOG_INFO,
				   "max read request size > 512B");

			devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
			devcontrol |= 0x2000;
			pci_write_config_word(a->pcid,
					      pcie_cap_reg + PCI_EXP_DEVCTL,
					      devcontrol);
		}
	}
}
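/*
 * Note: the sizing in esas2r_get_uncached_size() also reserves room for the
 * S/G list pages and the communication lists, but as written below,
 * esas2r_init_adapter_struct() allocates those separately through
 * esas2r_initmem_alloc(); only the NVRAM image, the discovery buffer, and
 * the outbound-list copy pointer are carved out of the uncached area itself.
 */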
/*
 * Determine the organization of the uncached data area and
 * finish initializing the adapter structure
 */
bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
				void **uncached_area)
{
	u32 i;
	u8 *high;
	struct esas2r_inbound_list_source_entry *element;
	struct esas2r_request *rq;
	struct esas2r_mem_desc *sgl;

	spin_lock_init(&a->sg_list_lock);
	spin_lock_init(&a->mem_lock);
	spin_lock_init(&a->queue_lock);

	a->targetdb_end = &a->targetdb[ESAS2R_MAX_TARGETS];

	if (!alloc_vda_req(a, &a->general_req)) {
		esas2r_hdebug(
			"failed to allocate a VDA request for the general req!");
		return false;
	}

	/* allocate requests for asynchronous events */
	a->first_ae_req =
		kzalloc(num_ae_requests * sizeof(struct esas2r_request),
			GFP_KERNEL);

	if (a->first_ae_req == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "failed to allocate memory for asynchronous events");
		return false;
	}

	/* allocate the S/G list memory descriptors */
	a->sg_list_mds = kzalloc(
		num_sg_lists * sizeof(struct esas2r_mem_desc), GFP_KERNEL);

	if (a->sg_list_mds == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "failed to allocate memory for s/g list descriptors");
		return false;
	}

	/* allocate the request table */
	a->req_table =
		kzalloc((num_requests + num_ae_requests +
			 1) * sizeof(struct esas2r_request *), GFP_KERNEL);

	if (a->req_table == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "failed to allocate memory for the request table");
		return false;
	}

	/* initialize PCI configuration space */
	esas2r_init_pci_cfg_space(a);

	/*
	 * the thunder_stream boards all have a serial flash part that has a
	 * different base address on the AHB bus.
	 */
	if ((a->pcid->subsystem_vendor == ATTO_VENDOR_ID)
	    && (a->pcid->subsystem_device & ATTO_SSDID_TBT))
		a->flags2 |= AF2_THUNDERBOLT;

	if (a->flags2 & AF2_THUNDERBOLT)
		a->flags2 |= AF2_SERIAL_FLASH;

	if (a->pcid->subsystem_device == ATTO_TLSH_1068)
		a->flags2 |= AF2_THUNDERLINK;

	/* Uncached Area */
	high = (u8 *)*uncached_area;

	/* initialize the scatter/gather table pages */
	for (i = 0, sgl = a->sg_list_mds; i < num_sg_lists; i++, sgl++) {
		sgl->size = sgl_page_size;

		list_add_tail(&sgl->next_desc, &a->free_sg_list_head);

		if (!esas2r_initmem_alloc(a, sgl, ESAS2R_SGL_ALIGN)) {
			/* Allow the driver to load if the minimum count met. */
			if (i < NUM_SGL_MIN)
				return false;
			break;
		}
	}

	/* compute the size of the lists */
	a->list_size = num_requests + ESAS2R_LIST_EXTRA;

	/* allocate the inbound list */
	a->inbound_list_md.size = a->list_size *
				  sizeof(struct
					 esas2r_inbound_list_source_entry);

	if (!esas2r_initmem_alloc(a, &a->inbound_list_md, ESAS2R_LIST_ALIGN)) {
		esas2r_hdebug("failed to allocate IB list");
		return false;
	}

	/* allocate the outbound list */
	a->outbound_list_md.size = a->list_size *
				   sizeof(struct atto_vda_ob_rsp);

	if (!esas2r_initmem_alloc(a, &a->outbound_list_md,
				  ESAS2R_LIST_ALIGN)) {
		esas2r_hdebug("failed to allocate OB list");
		return false;
	}

	/* allocate the NVRAM structure */
	a->nvram = (struct esas2r_sas_nvram *)high;
	high += sizeof(struct esas2r_sas_nvram);

	/* allocate the discovery buffer */
	a->disc_buffer = high;
	high += ESAS2R_DISC_BUF_LEN;
	high = PTR_ALIGN(high, 8);

	/* allocate the outbound list copy pointer */
	a->outbound_copy = (u32 volatile *)high;
	high += sizeof(u32);

	if (!(a->flags & AF_NVR_VALID))
		esas2r_nvram_set_defaults(a);

	/* update the caller's uncached memory area pointer */
	*uncached_area = (void *)high;

	/* initialize the allocated memory */
	if (a->flags & AF_FIRST_INIT) {
		memset(a->req_table, 0,
		       (num_requests + num_ae_requests +
			1) * sizeof(struct esas2r_request *));

		esas2r_targ_db_initialize(a);

		/* prime parts of the inbound list */
		element =
			(struct esas2r_inbound_list_source_entry *)a->
			inbound_list_md.virt_addr;

		for (i = 0; i < a->list_size; i++) {
			element->address = 0;
			element->reserved = 0;
			element->length = cpu_to_le32(HWILSE_INTERFACE_F0
						      | (sizeof(union
								atto_vda_req)
							 /
							 sizeof(u32)));
			element++;
		}

		/* init the AE requests */
		for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++,
		     i++) {
			INIT_LIST_HEAD(&rq->req_list);
			if (!alloc_vda_req(a, rq)) {
				esas2r_hdebug(
					"failed to allocate a VDA request!");
				return false;
			}

			esas2r_rq_init_request(rq, a);

			/* override the completion function */
			rq->comp_cb = esas2r_ae_complete;
		}
	}

	return true;
}
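/*
 * The firmware-ready handshake below works entirely through the MU doorbell
 * registers: the driver forces an interrupt and waits for the firmware to
 * echo DRBL_FORCE_INT along with its interface version (DRBL_FW_VER_0
 * selects 128-byte requests with SGE-format S/G lists, DRBL_FW_VER_1
 * selects 1024-byte requests with PRD-format lists), then rings
 * DRBL_MSG_IFC_DOWN to purge any stale asynchronous events.
 */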
/* This code will verify that the chip is operational. */
bool esas2r_check_adapter(struct esas2r_adapter *a)
{
	u32 starttime;
	u32 doorbell;
	u64 ppaddr;
	u32 dw;

	/*
	 * if the chip reset detected flag is set, we can bypass a bunch of
	 * stuff.
	 */
	if (a->flags & AF_CHPRST_DETECTED)
		goto skip_chip_reset;

	/*
	 * BEFORE WE DO ANYTHING, disable the chip interrupts!  the boot driver
	 * may have left them enabled or we may be recovering from a fault.
	 */
	esas2r_write_register_dword(a, MU_INT_MASK_OUT, ESAS2R_INT_DIS_MASK);
	esas2r_flush_register_dword(a, MU_INT_MASK_OUT);

	/*
	 * wait for the firmware to become ready by forcing an interrupt and
	 * waiting for a response.
	 */
	starttime = jiffies_to_msecs(jiffies);

	while (true) {
		esas2r_force_interrupt(a);
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell == 0xFFFFFFFF) {
			/*
			 * Give the firmware up to two seconds to enable
			 * register access after a reset.
			 */
			if ((jiffies_to_msecs(jiffies) - starttime) > 2000)
				return esas2r_set_degraded_mode(a,
								"unable to access registers");
		} else if (doorbell & DRBL_FORCE_INT) {
			u32 ver = (doorbell & DRBL_FW_VER_MSK);

			/*
			 * This driver supports version 0 and version 1 of
			 * the API
			 */
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);

			if (ver == DRBL_FW_VER_0) {
				esas2r_lock_set_flags(&a->flags,
						      AF_LEGACY_SGE_MODE);

				a->max_vdareq_size = 128;
				a->build_sgl = esas2r_build_sg_list_sge;
			} else if (ver == DRBL_FW_VER_1) {
				esas2r_lock_clear_flags(&a->flags,
							AF_LEGACY_SGE_MODE);

				a->max_vdareq_size = 1024;
				a->build_sgl = esas2r_build_sg_list_prd;
			} else {
				return esas2r_set_degraded_mode(a,
								"unknown firmware version");
			}
			break;
		}

		schedule_timeout_interruptible(msecs_to_jiffies(100));

		if ((jiffies_to_msecs(jiffies) - starttime) > 180000) {
			esas2r_hdebug("FW ready TMO");
			esas2r_bugon();

			return esas2r_set_degraded_mode(a,
							"firmware start has timed out");
		}
	}

	/* purge any asynchronous events since we will repost them later */
	esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN);
	starttime = jiffies_to_msecs(jiffies);

	while (true) {
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell & DRBL_MSG_IFC_DOWN) {
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);
			break;
		}

		schedule_timeout_interruptible(msecs_to_jiffies(50));

		if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
			esas2r_hdebug("timeout waiting for interface down");
			break;
		}
	}
skip_chip_reset:
	/*
	 * first things first, before we go changing any of these registers
	 * disable the communication lists.
	 */
	dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
	dw &= ~MU_ILC_ENABLE;
	esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
	dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
	dw &= ~MU_OLC_ENABLE;
	esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);

	/* configure the communication list addresses */
	ppaddr = a->inbound_list_md.phys_addr;
	esas2r_write_register_dword(a, MU_IN_LIST_ADDR_LO,
				    lower_32_bits(ppaddr));
	esas2r_write_register_dword(a, MU_IN_LIST_ADDR_HI,
				    upper_32_bits(ppaddr));
	ppaddr = a->outbound_list_md.phys_addr;
	esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_LO,
				    lower_32_bits(ppaddr));
	esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_HI,
				    upper_32_bits(ppaddr));
	ppaddr = a->uncached_phys +
		 ((u8 *)a->outbound_copy - a->uncached);
	esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_LO,
				    lower_32_bits(ppaddr));
	esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_HI,
				    upper_32_bits(ppaddr));

	/* reset the read and write pointers */
	*a->outbound_copy =
		a->last_write =
			a->last_read = a->list_size - 1;
	esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
	esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE |
				    a->last_write);
	esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE |
				    a->last_write);
	esas2r_write_register_dword(a, MU_IN_LIST_READ, MU_ILR_TOGGLE |
				    a->last_write);
	esas2r_write_register_dword(a, MU_OUT_LIST_WRITE,
				    MU_OLW_TOGGLE | a->last_write);

	/* configure the interface select fields */
	dw = esas2r_read_register_dword(a, MU_IN_LIST_IFC_CONFIG);
	dw &= ~(MU_ILIC_LIST | MU_ILIC_DEST);
	esas2r_write_register_dword(a, MU_IN_LIST_IFC_CONFIG,
				    (dw | MU_ILIC_LIST_F0 | MU_ILIC_DEST_DDR));
	dw = esas2r_read_register_dword(a, MU_OUT_LIST_IFC_CONFIG);
	dw &= ~(MU_OLIC_LIST | MU_OLIC_SOURCE);
	esas2r_write_register_dword(a, MU_OUT_LIST_IFC_CONFIG,
				    (dw | MU_OLIC_LIST_F0 |
				     MU_OLIC_SOURCE_DDR));

	/* finish configuring the communication lists */
	dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
	dw &= ~(MU_ILC_ENTRY_MASK | MU_ILC_NUMBER_MASK);
	dw |= MU_ILC_ENTRY_4_DW | MU_ILC_DYNAMIC_SRC
	      | (a->list_size << MU_ILC_NUMBER_SHIFT);
	esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
	dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
	dw &= ~(MU_OLC_ENTRY_MASK | MU_OLC_NUMBER_MASK);
	dw |= MU_OLC_ENTRY_4_DW | (a->list_size << MU_OLC_NUMBER_SHIFT);
	esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);

	/*
	 * notify the firmware that we're done setting up the communication
	 * list registers.  wait here until the firmware is done configuring
	 * its lists.  it will signal that it is done by enabling the lists.
	 */
	esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_INIT);
	starttime = jiffies_to_msecs(jiffies);

	while (true) {
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell & DRBL_MSG_IFC_INIT) {
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);
			break;
		}

		schedule_timeout_interruptible(msecs_to_jiffies(100));

		if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
			esas2r_hdebug(
				"timeout waiting for communication list init");
			esas2r_bugon();
			return esas2r_set_degraded_mode(a,
							"timeout waiting for communication list init");
		}
	}

	/*
	 * flag whether the firmware supports the power down doorbell.  we
	 * determine this by reading the inbound doorbell enable mask.
	 */
	doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB);
	if (doorbell & DRBL_POWER_DOWN)
		esas2r_lock_set_flags(&a->flags2, AF2_VDA_POWER_DOWN);
	else
		esas2r_lock_clear_flags(&a->flags2, AF2_VDA_POWER_DOWN);

	/*
	 * enable assertion of outbound queue and doorbell interrupts in the
	 * main interrupt cause register.
	 */
	esas2r_write_register_dword(a, MU_OUT_LIST_INT_MASK, MU_OLIS_MASK);
	esas2r_write_register_dword(a, MU_DOORBELL_OUT_ENB, DRBL_ENB_MASK);
	return true;
}
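/*
 * Init-message state machine driven by esas2r_init_msgs() below:
 * ESAS2R_INIT_MSG_START/REINIT sends VDA_CFG_INIT, then
 * ESAS2R_INIT_MSG_INIT captures the firmware version from the response and,
 * on firmware new enough to reject unsupported config requests, issues
 * VDA_CFG_GET_INIT2 as ESAS2R_INIT_MSG_GET_INIT to fetch the extended
 * configuration.
 */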
/* Process the initialization message just completed and format the next one. */
static bool esas2r_format_init_msg(struct esas2r_adapter *a,
				   struct esas2r_request *rq)
{
	u32 msg = a->init_msg;
	struct atto_vda_cfg_init *ci;

	a->init_msg = 0;

	switch (msg) {
	case ESAS2R_INIT_MSG_START:
	case ESAS2R_INIT_MSG_REINIT:
	{
		struct timeval now;
		do_gettimeofday(&now);
		esas2r_hdebug("CFG init");
		esas2r_build_cfg_req(a,
				     rq,
				     VDA_CFG_INIT,
				     0,
				     NULL);
		ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
		ci->sgl_page_size = sgl_page_size;
		ci->epoch_time = now.tv_sec;
		rq->flags |= RF_FAILURE_OK;
		a->init_msg = ESAS2R_INIT_MSG_INIT;
		break;
	}

	case ESAS2R_INIT_MSG_INIT:
		if (rq->req_stat == RS_SUCCESS) {
			u32 major;
			u32 minor;

			a->fw_version = le16_to_cpu(
				rq->func_rsp.cfg_rsp.vda_version);
			a->fw_build = rq->func_rsp.cfg_rsp.fw_build;
			major = LOBYTE(rq->func_rsp.cfg_rsp.fw_release);
			minor = HIBYTE(rq->func_rsp.cfg_rsp.fw_release);
			a->fw_version += (major << 16) + (minor << 24);
		} else {
			esas2r_hdebug("FAILED");
		}

		/*
		 * the 2.71 and earlier releases of R6xx firmware did not error
		 * unsupported config requests correctly.
		 */
		if ((a->flags2 & AF2_THUNDERBOLT)
		    || (be32_to_cpu(a->fw_version) >
			be32_to_cpu(0x47020052))) {
			esas2r_hdebug("CFG get init");
			esas2r_build_cfg_req(a,
					     rq,
					     VDA_CFG_GET_INIT2,
					     sizeof(struct atto_vda_cfg_init),
					     NULL);

			rq->vrq->cfg.sg_list_offset = offsetof(
				struct atto_vda_cfg_req,
				data.sge);
			rq->vrq->cfg.data.prde.ctl_len =
				cpu_to_le32(sizeof(struct atto_vda_cfg_init));
			rq->vrq->cfg.data.prde.address = cpu_to_le64(
				rq->vrq_md->phys_addr +
				sizeof(union atto_vda_req));
			rq->flags |= RF_FAILURE_OK;
			a->init_msg = ESAS2R_INIT_MSG_GET_INIT;
			break;
		}
		/* fall through */

	case ESAS2R_INIT_MSG_GET_INIT:
		if (msg == ESAS2R_INIT_MSG_GET_INIT) {
			ci = (struct atto_vda_cfg_init *)rq->data_buf;
			if (rq->req_stat == RS_SUCCESS) {
				a->num_targets_backend =
					le32_to_cpu(ci->num_targets_backend);
				a->ioctl_tunnel =
					le32_to_cpu(ci->ioctl_tunnel);
			} else {
				esas2r_hdebug("FAILED");
			}
		}
		/* fall through */

	default:
		rq->req_stat = RS_SUCCESS;
		return false;
	}
	return true;
}

/*
 * Perform initialization messages via the request queue.  Messages are
 * performed with interrupts disabled.
 */
bool esas2r_init_msgs(struct esas2r_adapter *a)
{
	bool success = true;
	struct esas2r_request *rq = &a->general_req;

	esas2r_rq_init_request(rq, a);
	rq->comp_cb = esas2r_dummy_complete;

	if (a->init_msg == 0)
		a->init_msg = ESAS2R_INIT_MSG_REINIT;

	while (a->init_msg) {
		if (esas2r_format_init_msg(a, rq)) {
			unsigned long flags;
			while (true) {
				spin_lock_irqsave(&a->queue_lock, flags);
				esas2r_start_vda_request(a, rq);
				spin_unlock_irqrestore(&a->queue_lock, flags);
				esas2r_wait_request(a, rq);
				if (rq->req_stat != RS_PENDING)
					break;
			}
		}

		if (rq->req_stat == RS_SUCCESS
		    || ((rq->flags & RF_FAILURE_OK)
			&& rq->req_stat != RS_TIMEOUT))
			continue;

		esas2r_log(ESAS2R_LOG_CRIT, "init message %x failed (%x, %x)",
			   a->init_msg, rq->req_stat, rq->flags);
		a->init_msg = ESAS2R_INIT_MSG_START;
		success = false;
		break;
	}

	esas2r_rq_destroy_request(rq, a);
	return success;
}
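/*
 * Note on a->fw_version: esas2r_format_init_msg() packs the 16-bit VDA
 * version into the low word and the release major/minor into bytes 2 and 3,
 * so the sprintf() below recovers them with LOBYTE()/HIBYTE() of HIWORD()
 * -- e.g. major 4, minor 2 is reported as "4.02".
 */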
/* Initialize the adapter chip */
bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
{
	bool rslt = false;
	struct esas2r_request *rq;
	u32 i;

	if (a->flags & AF_DEGRADED_MODE)
		goto exit;

	if (!(a->flags & AF_NVR_VALID)) {
		if (!esas2r_nvram_read_direct(a))
			esas2r_log(ESAS2R_LOG_WARN,
				   "invalid/missing NVRAM parameters");
	}

	if (!esas2r_init_msgs(a)) {
		esas2r_set_degraded_mode(a, "init messages failed");
		goto exit;
	}

	/* The firmware is ready. */
	esas2r_lock_clear_flags(&a->flags, AF_DEGRADED_MODE);
	esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);

	/* Post all the async event requests */
	for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++)
		esas2r_start_ae_request(a, rq);

	if (!a->flash_rev[0])
		esas2r_read_flash_rev(a);

	if (!a->image_type[0])
		esas2r_read_image_type(a);

	if (a->fw_version == 0)
		a->fw_rev[0] = 0;
	else
		sprintf(a->fw_rev, "%1d.%02d",
			(int)LOBYTE(HIWORD(a->fw_version)),
			(int)HIBYTE(HIWORD(a->fw_version)));

	esas2r_hdebug("firmware revision: %s", a->fw_rev);

	if ((a->flags & AF_CHPRST_DETECTED)
	    && (a->flags & AF_FIRST_INIT)) {
		esas2r_enable_chip_interrupts(a);
		return true;
	}

	/* initialize discovery */
	esas2r_disc_initialize(a);

	/*
	 * wait for the device wait time to expire here if requested.  this is
	 * usually requested during initial driver load and possibly when
	 * resuming from a low power state.  deferred device waiting will use
	 * interrupts.  chip reset recovery always defers device waiting to
	 * avoid being in a TASKLET too long.
	 */
	if (init_poll) {
		u32 currtime = a->disc_start_time;
		u32 nexttick = 100;
		u32 deltatime;

		/*
		 * Block Tasklets from getting scheduled and indicate this is
		 * polled discovery.
		 */
		esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED);
		esas2r_lock_set_flags(&a->flags, AF_DISC_POLLED);

		/*
		 * Temporarily bring the disable count to zero to enable
		 * deferred processing.  Note that the count is already zero
		 * after the first initialization.
		 */
		if (a->flags & AF_FIRST_INIT)
			atomic_dec(&a->disable_cnt);

		while (a->flags & AF_DISC_PENDING) {
			schedule_timeout_interruptible(msecs_to_jiffies(100));

			/*
			 * Determine the need for a timer tick based on the
			 * delta time between this and the last iteration of
			 * this loop.  We don't use the absolute time because
			 * then we would have to worry about when nexttick
			 * wraps and currtime hasn't yet.
			 */
			deltatime = jiffies_to_msecs(jiffies) - currtime;
			currtime += deltatime;

			/*
			 * Process any waiting discovery as long as the chip is
			 * up.  If a chip reset happens during initial polling,
			 * we have to make sure the timer tick processes the
			 * doorbell indicating the firmware is ready.
			 */
			if (!(a->flags & AF_CHPRST_PENDING))
				esas2r_disc_check_for_work(a);

			/* Simulate a timer tick. */
			if (nexttick <= deltatime) {
				/* Time for a timer tick */
				nexttick += 100;
				esas2r_timer_tick(a);
			}

			if (nexttick > deltatime)
				nexttick -= deltatime;

			/* Do any deferred processing */
			if (esas2r_is_tasklet_pending(a))
				esas2r_do_tasklet_tasks(a);
		}

		if (a->flags & AF_FIRST_INIT)
			atomic_inc(&a->disable_cnt);

		esas2r_lock_clear_flags(&a->flags, AF_DISC_POLLED);
		esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
	}

	esas2r_targ_db_report_changes(a);

	/*
	 * For cases where (a) the initialization messages processing may
	 * handle an interrupt for a port event and a discovery is waiting, but
	 * we are not waiting for devices, or (b) the device wait time has been
	 * exhausted but there is still discovery pending, start any leftover
	 * discovery in interrupt driven mode.
	 */
	esas2r_disc_start_waiting(a);

	/* Enable chip interrupts */
	a->int_mask = ESAS2R_INT_STS_MASK;
	esas2r_enable_chip_interrupts(a);
	esas2r_enable_heartbeat(a);
	rslt = true;

exit:
	/*
	 * Regardless of whether initialization was successful, certain things
	 * need to get done before we exit.
	 */
	if ((a->flags & AF_CHPRST_DETECTED)
	    && (a->flags & AF_FIRST_INIT)) {
		/*
		 * Reinitialization was performed during the first
		 * initialization.  Only clear the chip reset flag so the
		 * original device polling is not cancelled.
		 */
		if (!rslt)
			esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
	} else {
		/* First initialization or a subsequent re-init is complete. */
		if (!rslt) {
			esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
			esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);
		}

		/* Enable deferred processing after the first initialization. */
		if (a->flags & AF_FIRST_INIT) {
			esas2r_lock_clear_flags(&a->flags, AF_FIRST_INIT);

			if (atomic_dec_return(&a->disable_cnt) == 0)
				esas2r_do_deferred_processes(a);
		}
	}

	return rslt;
}

void esas2r_reset_adapter(struct esas2r_adapter *a)
{
	esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
	esas2r_local_reset_adapter(a);
	esas2r_schedule_tasklet(a);
}
void esas2r_reset_chip(struct esas2r_adapter *a)
{
	if (!esas2r_is_adapter_present(a))
		return;

	/*
	 * Before we reset the chip, save off the VDA core dump.  The VDA core
	 * dump is located in the upper 512KB of the onchip SRAM.  Make sure
	 * to not overwrite a previous crash that was saved.
	 */
	if ((a->flags2 & AF2_COREDUMP_AVAIL)
	    && !(a->flags2 & AF2_COREDUMP_SAVED)
	    && a->fw_coredump_buff) {
		esas2r_read_mem_block(a,
				      a->fw_coredump_buff,
				      MW_DATA_ADDR_SRAM + 0x80000,
				      ESAS2R_FWCOREDUMP_SZ);

		esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_SAVED);
	}

	esas2r_lock_clear_flags(&a->flags2, AF2_COREDUMP_AVAIL);

	/* Reset the chip */
	if (a->pcid->revision == MVR_FREY_B2)
		esas2r_write_register_dword(a, MU_CTL_STATUS_IN_B2,
					    MU_CTL_IN_FULL_RST2);
	else
		esas2r_write_register_dword(a, MU_CTL_STATUS_IN,
					    MU_CTL_IN_FULL_RST);

	/* Stall a little while to let the reset condition clear */
	mdelay(10);
}

static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a)
{
	u32 starttime;
	u32 doorbell;

	esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_POWER_DOWN);
	starttime = jiffies_to_msecs(jiffies);

	while (true) {
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell & DRBL_POWER_DOWN) {
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);
			break;
		}

		schedule_timeout_interruptible(msecs_to_jiffies(100));

		if ((jiffies_to_msecs(jiffies) - starttime) > 30000) {
			esas2r_hdebug("Timeout waiting for power down");
			break;
		}
	}
}

/*
 * Perform power management processing including managing device states,
 * adapter states, interrupts, and I/O.
 */
void esas2r_power_down(struct esas2r_adapter *a)
{
	esas2r_lock_set_flags(&a->flags, AF_POWER_MGT);
	esas2r_lock_set_flags(&a->flags, AF_POWER_DOWN);

	if (!(a->flags & AF_DEGRADED_MODE)) {
		u32 starttime;
		u32 doorbell;

		/*
		 * We are currently running OK and will be reinitializing
		 * later, so disable chip interrupts to coordinate with
		 * esas2r_init_adapter.  We don't have to do this in degraded
		 * mode since we never enabled interrupts in the first place.
		 */
		esas2r_disable_chip_interrupts(a);
		esas2r_disable_heartbeat(a);

		/* wait for any VDA activity to clear before continuing */
		esas2r_write_register_dword(a, MU_DOORBELL_IN,
					    DRBL_MSG_IFC_DOWN);
		starttime = jiffies_to_msecs(jiffies);

		while (true) {
			doorbell =
				esas2r_read_register_dword(a, MU_DOORBELL_OUT);
			if (doorbell & DRBL_MSG_IFC_DOWN) {
				esas2r_write_register_dword(a, MU_DOORBELL_OUT,
							    doorbell);
				break;
			}

			schedule_timeout_interruptible(msecs_to_jiffies(100));

			if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
				esas2r_hdebug(
					"timeout waiting for interface down");
				break;
			}
		}

		/*
		 * For versions of firmware that support it, tell them the
		 * driver is powering down.
		 */
		if (a->flags2 & AF2_VDA_POWER_DOWN)
			esas2r_power_down_notify_firmware(a);
	}

	/* Suspend I/O processing. */
	esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
	esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING);
	esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING);

	esas2r_process_adapter_reset(a);

	/* Remove devices now that I/O is cleaned up. */
	a->prev_dev_cnt = esas2r_targ_db_get_tgt_cnt(a);
	esas2r_targ_db_remove_all(a, false);
}
/*
 * Perform power-up processing, restoring the adapter, device states,
 * interrupts, and I/O after a power down.
 */
bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll)
{
	bool ret;

	esas2r_lock_clear_flags(&a->flags, AF_POWER_DOWN);
	esas2r_init_pci_cfg_space(a);
	esas2r_lock_set_flags(&a->flags, AF_FIRST_INIT);
	atomic_inc(&a->disable_cnt);

	/* reinitialize the adapter */
	ret = esas2r_check_adapter(a);
	if (!esas2r_init_adapter_hw(a, init_poll))
		ret = false;

	/* send the reset asynchronous event */
	esas2r_send_reset_ae(a, true);

	/* clear this flag after initialization. */
	esas2r_lock_clear_flags(&a->flags, AF_POWER_MGT);
	return ret;
}

bool esas2r_is_adapter_present(struct esas2r_adapter *a)
{
	if (a->flags & AF_NOT_PRESENT)
		return false;

	if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) {
		esas2r_lock_set_flags(&a->flags, AF_NOT_PRESENT);

		return false;
	}
	return true;
}

const char *esas2r_get_model_name(struct esas2r_adapter *a)
{
	switch (a->pcid->subsystem_device) {
	case ATTO_ESAS_R680:
		return "ATTO ExpressSAS R680";

	case ATTO_ESAS_R608:
		return "ATTO ExpressSAS R608";

	case ATTO_ESAS_R60F:
		return "ATTO ExpressSAS R60F";

	case ATTO_ESAS_R6F0:
		return "ATTO ExpressSAS R6F0";

	case ATTO_ESAS_R644:
		return "ATTO ExpressSAS R644";

	case ATTO_ESAS_R648:
		return "ATTO ExpressSAS R648";

	case ATTO_TSSC_3808:
		return "ATTO ThunderStream SC 3808D";

	case ATTO_TSSC_3808E:
		return "ATTO ThunderStream SC 3808E";

	case ATTO_TLSH_1068:
		return "ATTO ThunderLink SH 1068";
	}

	return "ATTO SAS Controller";
}

const char *esas2r_get_model_name_short(struct esas2r_adapter *a)
{
	switch (a->pcid->subsystem_device) {
	case ATTO_ESAS_R680:
		return "R680";

	case ATTO_ESAS_R608:
		return "R608";

	case ATTO_ESAS_R60F:
		return "R60F";

	case ATTO_ESAS_R6F0:
		return "R6F0";

	case ATTO_ESAS_R644:
		return "R644";

	case ATTO_ESAS_R648:
		return "R648";

	case ATTO_TSSC_3808:
		return "SC 3808D";

	case ATTO_TSSC_3808E:
		return "SC 3808E";

	case ATTO_TLSH_1068:
		return "SH 1068";
	}

	return "unknown";
}