/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  src.c
 *
 * Abstract: Hardware Device Interface for PMC SRC based controllers
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <scsi/scsi_host.h>

#include "aacraid.h"

static int aac_src_get_sync_status(struct aac_dev *dev);

static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
{
	struct aac_msix_ctx *ctx;
	struct aac_dev *dev;
	unsigned long bellbits, bellbits_shifted;
	int vector_no;
	int isFastResponse, mode;
	u32 index, handle;

	ctx = (struct aac_msix_ctx *)dev_id;
	dev = ctx->dev;
	vector_no = ctx->vector_no;

	if (dev->msi_enabled) {
		mode = AAC_INT_MODE_MSI;
		if (vector_no == 0) {
			bellbits = src_readl(dev, MUnit.ODR_MSI);
			if (bellbits & 0x40000)
				mode |= AAC_INT_MODE_AIF;
			if (bellbits & 0x1000)
				mode |= AAC_INT_MODE_SYNC;
		}
	} else {
		mode = AAC_INT_MODE_INTX;
		bellbits = src_readl(dev, MUnit.ODR_R);
		if (bellbits & PmDoorBellResponseSent) {
			bellbits = PmDoorBellResponseSent;
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);
		} else {
			bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);

			if (bellbits_shifted & DoorBellAifPending)
				mode |= AAC_INT_MODE_AIF;
			else if (bellbits_shifted & OUTBOUNDDOORBELL_0)
				mode |= AAC_INT_MODE_SYNC;
		}
	}

	if (mode & AAC_INT_MODE_SYNC) {
		unsigned long sflags;
		struct list_head *entry;
		int send_it = 0;
		extern int aac_sync_mode;

		if (!aac_sync_mode && !dev->msi_enabled) {
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);
		}

		if (dev->sync_fib) {
			if (dev->sync_fib->callback)
				dev->sync_fib->callback(dev->sync_fib->callback_data,
					dev->sync_fib);
			spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
			if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
				dev->management_fib_count--;
				up(&dev->sync_fib->event_wait);
			}
			spin_unlock_irqrestore(&dev->sync_fib->event_lock,
						sflags);
			spin_lock_irqsave(&dev->sync_lock, sflags);
			if (!list_empty(&dev->sync_fib_list)) {
				entry = dev->sync_fib_list.next;
				dev->sync_fib = list_entry(entry,
							   struct fib,
							   fiblink);
				list_del(entry);
				send_it = 1;
			} else {
				dev->sync_fib = NULL;
			}
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
			if (send_it) {
				aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
					(u32)dev->sync_fib->hw_fib_pa,
					0, 0, 0, 0, 0,
					NULL, NULL, NULL, NULL, NULL);
			}
		}
		if (!dev->msi_enabled)
			mode = 0;

	}

	if (mode & AAC_INT_MODE_AIF) {
		/* handle AIF */
		if (dev->aif_thread && dev->fsa_dev)
			aac_intr_normal(dev, 0, 2, 0, NULL);
		if (dev->msi_enabled)
			aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT);
		mode = 0;
	}

	if (mode) {
		index = dev->host_rrq_idx[vector_no];

		for (;;) {
			isFastResponse = 0;
			/* remove toggle bit (31) */
			handle = (dev->host_rrq[index] & 0x7fffffff);
			/* check fast response bit (30) */
			if (handle & 0x40000000)
				isFastResponse = 1;
			handle &= 0x0000ffff;
			if (handle == 0)
				break;
			if (dev->msi_enabled && dev->max_msix > 1)
				atomic_dec(&dev->rrq_outstanding[vector_no]);
			dev->host_rrq[index++] = 0;
			aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
			if (index == (vector_no + 1) * dev->vector_cap)
				index = vector_no * dev->vector_cap;
			dev->host_rrq_idx[vector_no] = index;
		}
		mode = 0;
	}

	return IRQ_HANDLED;
}

/**
 *	aac_src_disable_interrupt - Disable interrupts
 *	@dev: Adapter
 */

static void aac_src_disable_interrupt(struct aac_dev *dev)
{
	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
}

/**
 *	aac_src_enable_interrupt_message - Enable interrupts
 *	@dev: Adapter
 */

static void aac_src_enable_interrupt_message(struct aac_dev *dev)
{
	aac_src_access_devreg(dev, AAC_ENABLE_INTERRUPT);
}

/**
 *	src_sync_cmd - send a command and wait
 *	@dev: Adapter
 *	@command: Command to execute
 *	@p1: first parameter
 *	@p2: second parameter
 *	@p3: third parameter
 *	@p4: fourth parameter
 *	@p5: fifth parameter
 *	@p6: sixth parameter
 *	@status: adapter status returned in Mailbox 0
 *	@r1: first return value
 *	@r2: second return value
 *	@r3: third return value
 *	@r4: fourth return value
 *
 *	This routine will send a synchronous command to the adapter and wait
 *	for its completion.
 */

static int src_sync_cmd(struct aac_dev *dev, u32 command,
	u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
	u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4)
{
	unsigned long start;
	unsigned long delay;
	int ok;

	/*
	 *	Write the command into Mailbox 0
	 */
	writel(command, &dev->IndexRegs->Mailbox[0]);
	/*
	 *	Write the parameters into Mailboxes 1 - 4
	 *	(p5 and p6 are accepted but not written here)
	 */
	writel(p1, &dev->IndexRegs->Mailbox[1]);
	writel(p2, &dev->IndexRegs->Mailbox[2]);
	writel(p3, &dev->IndexRegs->Mailbox[3]);
	writel(p4, &dev->IndexRegs->Mailbox[4]);

	/*
	 *	Clear the synch command doorbell to start on a clean slate.
	 */
	if (!dev->msi_enabled)
		src_writel(dev,
			   MUnit.ODR_C,
			   OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);

	/*
	 *	Disable doorbell interrupts
	 */
	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);

	/*
	 *	Force the completion of the mask register write before issuing
	 *	the interrupt.
	 */
	src_readl(dev, MUnit.OIMR);

	/*
	 *	Signal that there is a new synch command
	 */
	src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT);

	if (!dev->sync_mode || command != SEND_SYNCHRONOUS_FIB) {
		ok = 0;
		start = jiffies;

		if (command == IOP_RESET_ALWAYS) {
			/* Wait up to 10 sec */
			delay = 10*HZ;
		} else {
			/* Wait up to 5 minutes */
			delay = 300*HZ;
		}
		while (time_before(jiffies, start+delay)) {
			udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
			/*
			 *	Mon960 will set doorbell0 bit when it has completed the command.
			 */
			if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
				/*
				 *	Clear the doorbell.
				 */
				if (dev->msi_enabled)
					aac_src_access_devreg(dev,
						AAC_CLEAR_SYNC_BIT);
				else
					src_writel(dev,
						MUnit.ODR_C,
						OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
				ok = 1;
				break;
			}
			/*
			 *	Yield the processor in case we are slow
			 */
			msleep(1);
		}
		if (unlikely(ok != 1)) {
			/*
			 *	Restore interrupt mask even though we timed out
			 */
			aac_adapter_enable_int(dev);
			return -ETIMEDOUT;
		}
		/*
		 *	Pull the synch status from Mailbox 0.
		 */
		if (status)
			*status = readl(&dev->IndexRegs->Mailbox[0]);
		if (r1)
			*r1 = readl(&dev->IndexRegs->Mailbox[1]);
		if (r2)
			*r2 = readl(&dev->IndexRegs->Mailbox[2]);
		if (r3)
			*r3 = readl(&dev->IndexRegs->Mailbox[3]);
		if (r4)
			*r4 = readl(&dev->IndexRegs->Mailbox[4]);
		if (command == GET_COMM_PREFERRED_SETTINGS)
			dev->max_msix =
				readl(&dev->IndexRegs->Mailbox[5]) & 0xFFFF;
		/*
		 *	Clear the synch command doorbell.
		 */
		if (!dev->msi_enabled)
			src_writel(dev,
				MUnit.ODR_C,
				OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
	}

	/*
	 *	Restore interrupt mask
	 */
	aac_adapter_enable_int(dev);
	return 0;
}
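
/*
 * Usage note: src_sync_cmd() is static, so callers outside this file reach
 * it through the adapter ops table (aac_adapter_sync_cmd) once
 * dev->a_ops.adapter_sync_cmd has been pointed at it in aac_src_init() /
 * aac_srcv_init().  The reset path below, for example, issues:
 *
 *	aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0,
 *			&var, &reset_mask, NULL, NULL, NULL);
 */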

/**
 *	aac_src_interrupt_adapter - interrupt adapter
 *	@dev: Adapter
 *
 *	Send an interrupt to the i960 and breakpoint it.
 */

static void aac_src_interrupt_adapter(struct aac_dev *dev)
{
	src_sync_cmd(dev, BREAKPOINT_REQUEST,
		0, 0, 0, 0, 0, 0,
		NULL, NULL, NULL, NULL, NULL);
}

/**
 *	aac_src_notify_adapter - send an event to the adapter
 *	@dev: Adapter
 *	@event: Event to send
 *
 *	Notify the i960 that something it probably cares about has
 *	happened.
 */

static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
{
	switch (event) {

	case AdapNormCmdQue:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_1 << SRC_ODR_SHIFT);
		break;
	case HostNormRespNotFull:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_4 << SRC_ODR_SHIFT);
		break;
	case AdapNormRespQue:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_2 << SRC_ODR_SHIFT);
		break;
	case HostNormCmdNotFull:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_3 << SRC_ODR_SHIFT);
		break;
	case FastIo:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_6 << SRC_ODR_SHIFT);
		break;
	case AdapPrintfDone:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_5 << SRC_ODR_SHIFT);
		break;
	default:
		BUG();
		break;
	}
}

/**
 *	aac_src_start_adapter - activate adapter
 *	@dev: Adapter
 *
 *	Start up processing on an i960 based AAC adapter
 */

static void aac_src_start_adapter(struct aac_dev *dev)
{
	struct aac_init *init;
	int i;

	/* reset host_rrq_idx first */
	for (i = 0; i < dev->max_msix; i++) {
		dev->host_rrq_idx[i] = i * dev->vector_cap;
		atomic_set(&dev->rrq_outstanding[i], 0);
	}
	dev->fibs_pushed_no = 0;

	init = dev->init;
	init->HostElapsedSeconds = cpu_to_le32(get_seconds());

	/* We can only use a 32 bit address here */
	src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
		0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}

/**
 *	aac_src_check_health
 *	@dev: device to check if healthy
 *
 *	Will attempt to determine if the specified adapter is alive and
 *	capable of handling requests, returning 0 if alive.
 */
static int aac_src_check_health(struct aac_dev *dev)
{
	u32 status = src_readl(dev, MUnit.OMR);

	/*
	 *	Check to see if the board failed any self tests.
	 */
	if (unlikely(status & SELF_TEST_FAILED))
		return -1;

	/*
	 *	Check to see if the board panic'd.
	 */
	if (unlikely(status & KERNEL_PANIC))
		return (status >> 16) & 0xFF;
	/*
	 *	Wait for the adapter to be up and running.
	 */
	if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
		return -3;
	/*
	 *	Everything is OK
	 */
	return 0;
}

/**
 *	aac_src_deliver_message
 *	@fib: fib to issue
 *
 *	Will send a fib, returning 0 if successful.
 */
static int aac_src_deliver_message(struct fib *fib)
{
	struct aac_dev *dev = fib->dev;
	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
	u32 fibsize;
	dma_addr_t address;
	struct aac_fib_xporthdr *pFibX;
#if !defined(writeq)
	unsigned long flags;
#endif

	u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);
	u16 vector_no;

	atomic_inc(&q->numpending);

	if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest &&
	    dev->max_msix > 1) {
		vector_no = fib->vector_no;
		fib->hw_fib_va->header.Handle += (vector_no << 16);
	} else {
		vector_no = 0;
	}

	atomic_inc(&dev->rrq_outstanding[vector_no]);

	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
		/* Calculate the fibsize field: FIB size in 128-byte units, minus one */
		fibsize = (hdr_size + 127) / 128 - 1;
		if (fibsize > (ALIGN32 - 1))
			return -EMSGSIZE;
		/* New FIB header, 32-bit */
		address = fib->hw_fib_pa;
		fib->hw_fib_va->header.StructType = FIB_MAGIC2;
		fib->hw_fib_va->header.SenderFibAddress = (u32)address;
		fib->hw_fib_va->header.u.TimeStamp = 0;
		BUG_ON(upper_32_bits(address) != 0L);
		address |= fibsize;
	} else {
		/* Calculate the fibsize field: xport header plus FIB, in 128-byte units, minus one */
		fibsize = (sizeof(struct aac_fib_xporthdr) + hdr_size + 127) / 128 - 1;
		if (fibsize > (ALIGN32 - 1))
			return -EMSGSIZE;

		/* Fill XPORT header */
		pFibX = (void *)fib->hw_fib_va - sizeof(struct aac_fib_xporthdr);
		pFibX->Handle = cpu_to_le32(fib->hw_fib_va->header.Handle);
		pFibX->HostAddress = cpu_to_le64(fib->hw_fib_pa);
		pFibX->Size = cpu_to_le32(hdr_size);

		/*
		 * The xport header has been 32-byte aligned for us so that fibsize
		 * can be masked out of this address by hardware. -- BenC
		 */
		address = fib->hw_fib_pa - sizeof(struct aac_fib_xporthdr);
		if (address & (ALIGN32 - 1))
			return -EINVAL;
		address |= fibsize;
	}
#if defined(writeq)
	src_writeq(dev, MUnit.IQ_L, (u64)address);
#else
	spin_lock_irqsave(&fib->dev->iq_lock, flags);
	src_writel(dev, MUnit.IQ_H, upper_32_bits(address) & 0xffffffff);
	src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
	spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
#endif
	return 0;
}

/**
 *	aac_src_ioremap
 *	@dev: Adapter
 *	@size: mapping resize request
 *
 */
static int aac_src_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		iounmap(dev->regs.src.bar0);
		dev->base = dev->regs.src.bar0 = NULL;
		return 0;
	}
	dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2),
		AAC_MIN_SRC_BAR1_SIZE);
	dev->base = NULL;
	if (dev->regs.src.bar1 == NULL)
		return -1;
	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
	if (dev->base == NULL) {
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		return -1;
	}
	dev->IndexRegs = &((struct src_registers __iomem *)
		dev->base)->u.tupelo.IndexRegs;
	return 0;
}

/**
 *	aac_srcv_ioremap
 *	@dev: Adapter
 *	@size: mapping resize request
 *
 */
static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.src.bar0);
		dev->base = dev->regs.src.bar0 = NULL;
		return 0;
	}
	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
	if (dev->base == NULL)
		return -1;
	dev->IndexRegs = &((struct src_registers __iomem *)
		dev->base)->u.denali.IndexRegs;
	return 0;
}
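
/**
 *	aac_src_restart_adapter - reset the controller
 *	@dev: Adapter
 *	@bled: BlinkLED/health code from aac_src_check_health(); a value >= 0
 *	       requests the IOP reset sequence, a negative value skips it and
 *	       only re-checks the firmware state
 *
 *	Issues an IOP_RESET_ALWAYS synchronous command, writes the reset mask
 *	(or 0x100 when AAC_OPTION_DOORBELL_RESET is not advertised) to the IDR
 *	doorbell, sleeps while the firmware restarts and then widens
 *	startup_timeout for the caller's boot wait.  Returns 0 on success,
 *	-EINVAL if the reset command fails with no doorbell mask available, or
 *	-ENODEV if the firmware still reports KERNEL_PANIC afterwards.
 */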
static int aac_src_restart_adapter(struct aac_dev *dev, int bled)
{
	u32 var, reset_mask;

	if (bled >= 0) {
		if (bled)
			printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
				dev->name, dev->id, bled);
		dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
		bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
			0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL);
		if ((bled || (var != 0x00000001)) &&
		    !dev->doorbell_mask)
			return -EINVAL;
		else if (dev->doorbell_mask) {
			reset_mask = dev->doorbell_mask;
			bled = 0;
			var = 0x00000001;
		}

		if ((dev->pdev->device == PMC_DEVICE_S7 ||
		    dev->pdev->device == PMC_DEVICE_S8 ||
		    dev->pdev->device == PMC_DEVICE_S9) && dev->msi_enabled) {
			aac_src_access_devreg(dev, AAC_ENABLE_INTX);
			dev->msi_enabled = 0;
			msleep(5000); /* Delay 5 seconds */
		}

		if (!bled && (dev->supplement_adapter_info.SupportedOptions2 &
		    AAC_OPTION_DOORBELL_RESET)) {
			src_writel(dev, MUnit.IDR, reset_mask);
			ssleep(45);
		} else {
			src_writel(dev, MUnit.IDR, 0x100);
			ssleep(45);
		}
	}

	if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
		return -ENODEV;

	if (startup_timeout < 300)
		startup_timeout = 300;

	return 0;
}

/**
 *	aac_src_select_comm - Select communications method
 *	@dev: Adapter
 *	@comm: communications method
 */
static int aac_src_select_comm(struct aac_dev *dev, int comm)
{
	switch (comm) {
	case AAC_COMM_MESSAGE:
		dev->a_ops.adapter_intr = aac_src_intr_message;
		dev->a_ops.adapter_deliver = aac_src_deliver_message;
		break;
	default:
		return 1;
	}
	return 0;
}

/**
 *	aac_src_init - initialize a Cardinal Frey Bar card
 *	@dev: device to configure
 *
 */

int aac_src_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char *name = dev->name;

	dev->a_ops.adapter_ioremap = aac_src_ioremap;
	dev->a_ops.adapter_comm = aac_src_select_comm;

	dev->base_size = AAC_MIN_SRC_BAR0_SIZE;
	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	if ((aac_reset_devices || reset_devices) &&
		!aac_src_restart_adapter(dev, 0))
		++restart;
	/*
	 *	Check to see if the board panic'd while booting.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & KERNEL_PANIC) {
		if (aac_src_restart_adapter(dev, aac_src_check_health(dev)))
			goto error_iounmap;
		++restart;
	}
	/*
	 *	Check to see if the board failed any self tests.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n",
			dev->name, instance);
		goto error_iounmap;
	}
	/*
	 *	Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n",
			dev->name, instance);
		goto error_iounmap;
	}
	start = jiffies;
	/*
	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	while (!((status = src_readl(dev, MUnit.OMR)) &
		KERNEL_UP_AND_RUNNING)) {
		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_src_restart_adapter(dev,
			    aac_src_check_health(dev))))
				start = jiffies;
			++restart;
		}
		msleep(1);
	}
	if (restart && aac_commit)
		aac_commit = 1;
	/*
	 *	Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_notify = aac_src_notify_adapter;
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_check_health = aac_src_check_health;
	dev->a_ops.adapter_restart = aac_src_restart_adapter;
	dev->a_ops.adapter_start = aac_src_start_adapter;

	/*
	 *	First clear out all interrupts.  Then enable the ones that we
	 *	can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
	aac_adapter_disable_int(dev);
	src_writel(dev, MUnit.ODR_C, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
		goto error_iounmap;

	dev->msi = !pci_enable_msi(dev->pdev);

	dev->aac_msix[0].vector_no = 0;
	dev->aac_msix[0].dev = dev;

	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
			IRQF_SHARED, "aacraid", &(dev->aac_msix[0])) < 0) {

		if (dev->msi)
			pci_disable_msi(dev->pdev);

		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
			name, instance);
		goto error_iounmap;
	}
	dev->dbg_base = pci_resource_start(dev->pdev, 2);
	dev->dbg_base_mapped = dev->regs.src.bar1;
	dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;

	aac_adapter_enable_int(dev);

	if (!dev->sync_mode) {
		/*
		 * Tell the adapter that all is configured, and it can
		 * start accepting requests
		 */
		aac_src_start_adapter(dev);
	}
	return 0;

error_iounmap:

	return -1;
}

/**
 *	aac_srcv_init - initialize an SRCv card
 *	@dev: device to configure
 *
 */

int aac_srcv_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char *name = dev->name;

	dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
	dev->a_ops.adapter_comm = aac_src_select_comm;

	dev->base_size = AAC_MIN_SRCV_BAR0_SIZE;
	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	if ((aac_reset_devices || reset_devices) &&
		!aac_src_restart_adapter(dev, 0))
		++restart;
	/*
	 *	Check to see if flash update is running.
	 *	Wait for the adapter to be up and running. Wait up to 5 minutes
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & FLASH_UPD_PENDING) {
		start = jiffies;
		do {
			status = src_readl(dev, MUnit.OMR);
			if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) {
				printk(KERN_ERR "%s%d: adapter flash update failed.\n",
					dev->name, instance);
				goto error_iounmap;
			}
		} while (!(status & FLASH_UPD_SUCCESS) &&
			 !(status & FLASH_UPD_FAILED));
		/* Delay 10 seconds.
		 * Because right now FW is doing a soft reset,
		 * do not read scratch pad register at this time
		 */
		ssleep(10);
	}
	/*
	 *	Check to see if the board panic'd while booting.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & KERNEL_PANIC) {
		if (aac_src_restart_adapter(dev, aac_src_check_health(dev)))
			goto error_iounmap;
		++restart;
	}
	/*
	 *	Check to see if the board failed any self tests.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
		goto error_iounmap;
	}
	/*
	 *	Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
		goto error_iounmap;
	}
	start = jiffies;
	/*
	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	while (!((status = src_readl(dev, MUnit.OMR)) &
		KERNEL_UP_AND_RUNNING) ||
		status == 0xffffffff) {
		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_src_restart_adapter(dev, aac_src_check_health(dev))))
				start = jiffies;
			++restart;
		}
		msleep(1);
	}
	if (restart && aac_commit)
		aac_commit = 1;
	/*
	 *	Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_notify = aac_src_notify_adapter;
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_check_health = aac_src_check_health;
	dev->a_ops.adapter_restart = aac_src_restart_adapter;
	dev->a_ops.adapter_start = aac_src_start_adapter;

	/*
	 *	First clear out all interrupts.  Then enable the ones that we
	 *	can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
	aac_adapter_disable_int(dev);
	src_writel(dev, MUnit.ODR_C, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE2)
		goto error_iounmap;
	if (dev->msi_enabled)
		aac_src_access_devreg(dev, AAC_ENABLE_MSIX);

	if (aac_acquire_irq(dev))
		goto error_iounmap;

	dev->dbg_base = dev->base_start;
	dev->dbg_base_mapped = dev->base;
	dev->dbg_size = dev->base_size;
	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;

	aac_adapter_enable_int(dev);

	if (!dev->sync_mode) {
		/*
		 * Tell the adapter that all is configured, and it can
		 * start accepting requests
		 */
		aac_src_start_adapter(dev);
	}
	return 0;

error_iounmap:

	return -1;
}
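
/**
 *	aac_src_access_devreg - program the SRC interrupt control registers
 *	@dev: Adapter
 *	@mode: one of the AAC_* interrupt control requests handled below
 *
 *	Single place for flipping the SRC interrupt routing bits: the OIMR
 *	mask, the IDR mode/acknowledge bits (MSI-X, INTx, AIF and sync clear)
 *	and the IOAR unmask, as used by the enable/disable and interrupt
 *	acknowledge paths above.
 */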
void aac_src_access_devreg(struct aac_dev *dev, int mode)
{
	u_int32_t val;

	switch (mode) {
	case AAC_ENABLE_INTERRUPT:
		src_writel(dev,
			   MUnit.OIMR,
			   dev->OIMR = (dev->msi_enabled ?
					AAC_INT_ENABLE_TYPE1_MSIX :
					AAC_INT_ENABLE_TYPE1_INTX));
		break;

	case AAC_DISABLE_INTERRUPT:
		src_writel(dev,
			   MUnit.OIMR,
			   dev->OIMR = AAC_INT_DISABLE_ALL);
		break;

	case AAC_ENABLE_MSIX:
		/* set bit 6 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x40;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		/* unmask int. */
		val = PMC_ALL_INTERRUPT_BITS;
		src_writel(dev, MUnit.IOAR, val);
		val = src_readl(dev, MUnit.OIMR);
		src_writel(dev,
			   MUnit.OIMR,
			   val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
		break;

	case AAC_DISABLE_MSIX:
		/* reset bit 6 */
		val = src_readl(dev, MUnit.IDR);
		val &= ~0x40;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		break;

	case AAC_CLEAR_AIF_BIT:
		/* set bit 5 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x20;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		break;

	case AAC_CLEAR_SYNC_BIT:
		/* set bit 4 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x10;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		break;

	case AAC_ENABLE_INTX:
		/* set bit 7 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x80;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		/* unmask int. */
		val = PMC_ALL_INTERRUPT_BITS;
		src_writel(dev, MUnit.IOAR, val);
		src_readl(dev, MUnit.IOAR);
		val = src_readl(dev, MUnit.OIMR);
		src_writel(dev, MUnit.OIMR,
			   val & (~(PMC_GLOBAL_INT_BIT2)));
		break;

	default:
		break;
	}
}

static int aac_src_get_sync_status(struct aac_dev *dev)
{
	int val;

	if (dev->msi_enabled)
		val = src_readl(dev, MUnit.ODR_MSI) & 0x1000 ? 1 : 0;
	else
		val = src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT;

	return val;
}