/*
 * Adaptec AAC series RAID controller driver
 * (c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  src.c
 *
 * Abstract: Hardware Device Interface for PMC SRC based controllers
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <scsi/scsi_host.h>

#include "aacraid.h"

static int aac_src_get_sync_status(struct aac_dev *dev);

static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
{
	struct aac_msix_ctx *ctx;
	struct aac_dev *dev;
	unsigned long bellbits, bellbits_shifted;
	int vector_no;
	int isFastResponse, mode;
	u32 index, handle;

	ctx = (struct aac_msix_ctx *)dev_id;
	dev = ctx->dev;
	vector_no = ctx->vector_no;

	if (dev->msi_enabled) {
		mode = AAC_INT_MODE_MSI;
		if (vector_no == 0) {
			bellbits = src_readl(dev, MUnit.ODR_MSI);
			if (bellbits & 0x40000)
				mode |= AAC_INT_MODE_AIF;
			if (bellbits & 0x1000)
				mode |= AAC_INT_MODE_SYNC;
		}
	} else {
		mode = AAC_INT_MODE_INTX;
		bellbits = src_readl(dev, MUnit.ODR_R);
		if (bellbits & PmDoorBellResponseSent) {
			bellbits = PmDoorBellResponseSent;
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);
		} else {
			bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);

			if (bellbits_shifted & DoorBellAifPending)
				mode |= AAC_INT_MODE_AIF;
			else if (bellbits_shifted & OUTBOUNDDOORBELL_0)
				mode |= AAC_INT_MODE_SYNC;
		}
	}

	if (mode & AAC_INT_MODE_SYNC) {
		unsigned long sflags;
		struct list_head *entry;
		int send_it = 0;
		extern int aac_sync_mode;

		if (!aac_sync_mode && !dev->msi_enabled) {
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);
		}

		if (dev->sync_fib) {
			if (dev->sync_fib->callback)
				dev->sync_fib->callback(dev->sync_fib->callback_data,
					dev->sync_fib);
			spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
			if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
				dev->management_fib_count--;
				up(&dev->sync_fib->event_wait);
			}
			spin_unlock_irqrestore(&dev->sync_fib->event_lock,
						sflags);
			spin_lock_irqsave(&dev->sync_lock, sflags);
			if (!list_empty(&dev->sync_fib_list)) {
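				/*
				 * Dequeue the next pending sync FIB; it is
				 * sent below, once sync_lock is released.
				 */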
				entry = dev->sync_fib_list.next;
				dev->sync_fib = list_entry(entry,
							   struct fib,
							   fiblink);
				list_del(entry);
				send_it = 1;
			} else {
				dev->sync_fib = NULL;
			}
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
			if (send_it) {
				aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
					(u32)dev->sync_fib->hw_fib_pa,
					0, 0, 0, 0, 0,
					NULL, NULL, NULL, NULL, NULL);
			}
		}
		if (!dev->msi_enabled)
			mode = 0;

	}

	if (mode & AAC_INT_MODE_AIF) {
		/* handle AIF */
		aac_intr_normal(dev, 0, 2, 0, NULL);
		if (dev->msi_enabled)
			aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT);
		mode = 0;
	}

	if (mode) {
		index = dev->host_rrq_idx[vector_no];

		for (;;) {
			isFastResponse = 0;
			/* remove toggle bit (31) */
			handle = (dev->host_rrq[index] & 0x7fffffff);
			/* check fast response bit (30) */
			if (handle & 0x40000000)
				isFastResponse = 1;
			handle &= 0x0000ffff;
			if (handle == 0)
				break;
			if (dev->msi_enabled && dev->max_msix > 1)
				atomic_dec(&dev->rrq_outstanding[vector_no]);
			aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
			dev->host_rrq[index++] = 0;
			if (index == (vector_no + 1) * dev->vector_cap)
				index = vector_no * dev->vector_cap;
			dev->host_rrq_idx[vector_no] = index;
		}
		mode = 0;
	}

	return IRQ_HANDLED;
}

/**
 * aac_src_disable_interrupt - Disable interrupts
 * @dev: Adapter
 */

static void aac_src_disable_interrupt(struct aac_dev *dev)
{
	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
}

/**
 * aac_src_enable_interrupt_message - Enable interrupts
 * @dev: Adapter
 */

static void aac_src_enable_interrupt_message(struct aac_dev *dev)
{
	aac_src_access_devreg(dev, AAC_ENABLE_INTERRUPT);
}

/**
 * src_sync_cmd - send a command and wait
 * @dev: Adapter
 * @command: Command to execute
 * @p1: first parameter
 * @ret: adapter status
 *
 * This routine will send a synchronous command to the adapter and wait
 * for its completion.
 */

static int src_sync_cmd(struct aac_dev *dev, u32 command,
	u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
	u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4)
{
	unsigned long start;
	unsigned long delay;
	int ok;

	/*
	 * Write the command into Mailbox 0
	 */
	writel(command, &dev->IndexRegs->Mailbox[0]);
	/*
	 * Write the parameters into Mailboxes 1 - 4
	 */
	writel(p1, &dev->IndexRegs->Mailbox[1]);
	writel(p2, &dev->IndexRegs->Mailbox[2]);
	writel(p3, &dev->IndexRegs->Mailbox[3]);
	writel(p4, &dev->IndexRegs->Mailbox[4]);

	/*
	 * Clear the synch command doorbell to start on a clean slate.
	 */
	if (!dev->msi_enabled)
		src_writel(dev,
			   MUnit.ODR_C,
			   OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);

	/*
	 * Disable doorbell interrupts
	 */
	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);

	/*
	 * Force the completion of the mask register write before issuing
	 * the interrupt.
	 */
	src_readl(dev, MUnit.OIMR);

	/*
	 * Signal that there is a new synch command
	 */
	src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT);

	if (!dev->sync_mode || command != SEND_SYNCHRONOUS_FIB) {
		ok = 0;
		start = jiffies;

		if (command == IOP_RESET_ALWAYS) {
			/* Wait up to 10 sec */
			delay = 10*HZ;
		} else {
			/* Wait up to 5 minutes */
			delay = 300*HZ;
		}
		while (time_before(jiffies, start+delay)) {
			udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
			/*
			 * Mon960 will set doorbell0 bit when it has completed the command.
			 */
			if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
				/*
				 * Clear the doorbell.
				 */
				if (dev->msi_enabled)
					aac_src_access_devreg(dev,
						AAC_CLEAR_SYNC_BIT);
				else
					src_writel(dev,
						MUnit.ODR_C,
						OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
				ok = 1;
				break;
			}
			/*
			 * Yield the processor in case we are slow
			 */
			msleep(1);
		}
		if (unlikely(ok != 1)) {
			/*
			 * Restore interrupt mask even though we timed out
			 */
			aac_adapter_enable_int(dev);
			return -ETIMEDOUT;
		}
		/*
		 * Pull the synch status from Mailbox 0.
		 */
		if (status)
			*status = readl(&dev->IndexRegs->Mailbox[0]);
		if (r1)
			*r1 = readl(&dev->IndexRegs->Mailbox[1]);
		if (r2)
			*r2 = readl(&dev->IndexRegs->Mailbox[2]);
		if (r3)
			*r3 = readl(&dev->IndexRegs->Mailbox[3]);
		if (r4)
			*r4 = readl(&dev->IndexRegs->Mailbox[4]);
		if (command == GET_COMM_PREFERRED_SETTINGS)
			dev->max_msix =
				readl(&dev->IndexRegs->Mailbox[5]) & 0xFFFF;
		/*
		 * Clear the synch command doorbell.
		 */
		if (!dev->msi_enabled)
			src_writel(dev,
				MUnit.ODR_C,
				OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
	}

	/*
	 * Restore interrupt mask
	 */
	aac_adapter_enable_int(dev);
	return 0;
}

/**
 * aac_src_interrupt_adapter - interrupt adapter
 * @dev: Adapter
 *
 * Send an interrupt to the i960 and breakpoint it.
 */

static void aac_src_interrupt_adapter(struct aac_dev *dev)
{
	src_sync_cmd(dev, BREAKPOINT_REQUEST,
		0, 0, 0, 0, 0, 0,
		NULL, NULL, NULL, NULL, NULL);
}

/**
 * aac_src_notify_adapter - send an event to the adapter
 * @dev: Adapter
 * @event: Event to send
 *
 * Notify the i960 that something it probably cares about has
 * happened.
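 * Each queue event is signalled with its own doorbell bit
 * (INBOUNDDOORBELL_1 ... INBOUNDDOORBELL_6).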
 */

static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
{
	switch (event) {

	case AdapNormCmdQue:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_1 << SRC_ODR_SHIFT);
		break;
	case HostNormRespNotFull:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_4 << SRC_ODR_SHIFT);
		break;
	case AdapNormRespQue:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_2 << SRC_ODR_SHIFT);
		break;
	case HostNormCmdNotFull:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_3 << SRC_ODR_SHIFT);
		break;
	case FastIo:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_6 << SRC_ODR_SHIFT);
		break;
	case AdapPrintfDone:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_5 << SRC_ODR_SHIFT);
		break;
	default:
		BUG();
		break;
	}
}

/**
 * aac_src_start_adapter - activate adapter
 * @dev: Adapter
 *
 * Start up processing on an i960 based AAC adapter
 */

static void aac_src_start_adapter(struct aac_dev *dev)
{
	struct aac_init *init;
	int i;

	/* reset host_rrq_idx first */
	for (i = 0; i < dev->max_msix; i++) {
		dev->host_rrq_idx[i] = i * dev->vector_cap;
		atomic_set(&dev->rrq_outstanding[i], 0);
	}
	dev->fibs_pushed_no = 0;

	init = dev->init;
	init->HostElapsedSeconds = cpu_to_le32(get_seconds());

	/* We can only use a 32 bit address here */
	src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
	  0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}

/**
 * aac_src_check_health
 * @dev: device to check if healthy
 *
 * Will attempt to determine if the specified adapter is alive and
 * capable of handling requests, returning 0 if alive.
 */
static int aac_src_check_health(struct aac_dev *dev)
{
	u32 status = src_readl(dev, MUnit.OMR);

	/*
	 * Check to see if the board failed any self tests.
	 */
	if (unlikely(status & SELF_TEST_FAILED))
		return -1;

	/*
	 * Check to see if the board panic'd.
	 */
	if (unlikely(status & KERNEL_PANIC))
		return (status >> 16) & 0xFF;
	/*
	 * Wait for the adapter to be up and running.
	 */
	if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
		return -3;
	/*
	 * Everything is OK
	 */
	return 0;
}

/**
 * aac_src_deliver_message
 * @fib: fib to issue
 *
 * Will send a fib, returning 0 if successful.
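 *
 * On MSI-X capable controllers, non-AIF FIBs are spread across the
 * response vectors (round-robin over vectors 1 .. max_msix - 1, falling
 * back to vector 0 if all are at capacity); the chosen vector is encoded
 * in the upper 16 bits of the FIB handle.  The address written to the
 * inbound queue carries the encoded fibsize in its low-order bits.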
 */
static int aac_src_deliver_message(struct fib *fib)
{
	struct aac_dev *dev = fib->dev;
	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
	u32 fibsize;
	dma_addr_t address;
	struct aac_fib_xporthdr *pFibX;
	u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);

	atomic_inc(&q->numpending);

	if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest &&
	    dev->max_msix > 1) {
		u_int16_t vector_no, first_choice = 0xffff;

		vector_no = dev->fibs_pushed_no % dev->max_msix;
		do {
			vector_no += 1;
			if (vector_no == dev->max_msix)
				vector_no = 1;
			if (atomic_read(&dev->rrq_outstanding[vector_no]) <
			    dev->vector_cap)
				break;
			if (0xffff == first_choice)
				first_choice = vector_no;
			else if (vector_no == first_choice)
				break;
		} while (1);
		if (vector_no == first_choice)
			vector_no = 0;
		atomic_inc(&dev->rrq_outstanding[vector_no]);
		if (dev->fibs_pushed_no == 0xffffffff)
			dev->fibs_pushed_no = 0;
		else
			dev->fibs_pushed_no++;
		fib->hw_fib_va->header.Handle += (vector_no << 16);
	}

	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
		/* Calculate the fibsize bits (size in 128-byte units, minus one) */
		fibsize = (hdr_size + 127) / 128 - 1;
		if (fibsize > (ALIGN32 - 1))
			return -EMSGSIZE;
		/* New FIB header, 32-bit */
		address = fib->hw_fib_pa;
		fib->hw_fib_va->header.StructType = FIB_MAGIC2;
		fib->hw_fib_va->header.SenderFibAddress = (u32)address;
		fib->hw_fib_va->header.u.TimeStamp = 0;
		BUG_ON(upper_32_bits(address) != 0L);
		address |= fibsize;
	} else {
		/* Calculate the fibsize bits, including the transport header */
		fibsize = (sizeof(struct aac_fib_xporthdr) + hdr_size + 127) / 128 - 1;
		if (fibsize > (ALIGN32 - 1))
			return -EMSGSIZE;

		/* Fill XPORT header */
		pFibX = (void *)fib->hw_fib_va - sizeof(struct aac_fib_xporthdr);
		pFibX->Handle = cpu_to_le32(fib->hw_fib_va->header.Handle);
		pFibX->HostAddress = cpu_to_le64(fib->hw_fib_pa);
		pFibX->Size = cpu_to_le32(hdr_size);

		/*
		 * The xport header has been 32-byte aligned for us so that fibsize
		 * can be masked out of this address by hardware.
		 * -- BenC
		 */
		address = fib->hw_fib_pa - sizeof(struct aac_fib_xporthdr);
		if (address & (ALIGN32 - 1))
			return -EINVAL;
		address |= fibsize;
	}

	src_writel(dev, MUnit.IQ_H, upper_32_bits(address) & 0xffffffff);
	src_writel(dev, MUnit.IQ_L, address & 0xffffffff);

	return 0;
}

/**
 * aac_src_ioremap
 * @dev: device to map
 * @size: mapping resize request
 *
 */
static int aac_src_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		iounmap(dev->regs.src.bar0);
		dev->base = dev->regs.src.bar0 = NULL;
		return 0;
	}
	dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2),
		AAC_MIN_SRC_BAR1_SIZE);
	dev->base = NULL;
	if (dev->regs.src.bar1 == NULL)
		return -1;
	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
	if (dev->base == NULL) {
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		return -1;
	}
	dev->IndexRegs = &((struct src_registers __iomem *)
		dev->base)->u.tupelo.IndexRegs;
	return 0;
}

/**
 * aac_srcv_ioremap
 * @dev: device to map
 * @size: mapping resize request
 *
 */
static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.src.bar0);
		dev->base = dev->regs.src.bar0 = NULL;
		return 0;
	}
	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
	if (dev->base == NULL)
		return -1;
	dev->IndexRegs = &((struct src_registers __iomem *)
		dev->base)->u.denali.IndexRegs;
	return 0;
}

static int aac_src_restart_adapter(struct aac_dev *dev, int bled)
{
	u32 var, reset_mask;

	if (bled >= 0) {
		if (bled)
			printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
				dev->name, dev->id, bled);
		dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
		bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
			0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL);
		if ((bled || (var != 0x00000001)) &&
		    !dev->doorbell_mask)
			return -EINVAL;
		else if (dev->doorbell_mask) {
			reset_mask = dev->doorbell_mask;
			bled = 0;
			var = 0x00000001;
		}

		if ((dev->pdev->device == PMC_DEVICE_S7 ||
		    dev->pdev->device == PMC_DEVICE_S8 ||
		    dev->pdev->device == PMC_DEVICE_S9) && dev->msi_enabled) {
			aac_src_access_devreg(dev, AAC_ENABLE_INTX);
			dev->msi_enabled = 0;
			msleep(5000); /* Delay 5 seconds */
		}

		if (!bled && (dev->supplement_adapter_info.SupportedOptions2 &
		    AAC_OPTION_DOORBELL_RESET)) {
			src_writel(dev, MUnit.IDR, reset_mask);
			ssleep(45);
		} else {
			src_writel(dev, MUnit.IDR, 0x100);
			ssleep(45);
		}
	}

	if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
		return -ENODEV;

	if (startup_timeout < 300)
		startup_timeout = 300;

	return 0;
}

/**
 * aac_src_select_comm - Select communications method
 * @dev: Adapter
 * @comm: communications method
 */
int aac_src_select_comm(struct aac_dev *dev, int comm)
{
	switch (comm) {
	case AAC_COMM_MESSAGE:
		dev->a_ops.adapter_intr = aac_src_intr_message;
		dev->a_ops.adapter_deliver = aac_src_deliver_message;
		break;
	default:
		return 1;
	}
	return 0;
}

/**
 * aac_src_init - initialize a Cardinal Frey Bar card
 * @dev: device to configure
 *
 */

int aac_src_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char *name = dev->name;

	dev->a_ops.adapter_ioremap = aac_src_ioremap;
	dev->a_ops.adapter_comm = aac_src_select_comm;

	dev->base_size = AAC_MIN_SRC_BAR0_SIZE;
	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	if ((aac_reset_devices || reset_devices) &&
		!aac_src_restart_adapter(dev, 0))
		++restart;
	/*
	 * Check to see if the board panic'd while booting.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & KERNEL_PANIC) {
		if (aac_src_restart_adapter(dev, aac_src_check_health(dev)))
			goto error_iounmap;
		++restart;
	}
	/*
	 * Check to see if the board failed any self tests.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n",
			dev->name, instance);
		goto error_iounmap;
	}
	/*
	 * Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n",
			dev->name, instance);
		goto error_iounmap;
	}
	start = jiffies;
	/*
	 * Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	while (!((status = src_readl(dev, MUnit.OMR)) &
		KERNEL_UP_AND_RUNNING)) {
		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_src_restart_adapter(dev,
			    aac_src_check_health(dev))))
				start = jiffies;
			++restart;
		}
		msleep(1);
	}
	if (restart && aac_commit)
		aac_commit = 1;
	/*
	 * Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_notify = aac_src_notify_adapter;
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_check_health = aac_src_check_health;
	dev->a_ops.adapter_restart = aac_src_restart_adapter;

	/*
	 * First clear out all interrupts.  Then enable the ones that we
	 * can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
	aac_adapter_disable_int(dev);
	src_writel(dev, MUnit.ODR_C, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
		goto error_iounmap;

	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);

	dev->aac_msix[0].vector_no = 0;
	dev->aac_msix[0].dev = dev;

	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
			IRQF_SHARED, "aacraid", &(dev->aac_msix[0])) < 0) {

		if (dev->msi)
			pci_disable_msi(dev->pdev);

		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
			name, instance);
		goto error_iounmap;
	}
	dev->dbg_base = pci_resource_start(dev->pdev, 2);
	dev->dbg_base_mapped = dev->regs.src.bar1;
	dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;

	aac_adapter_enable_int(dev);

	if (!dev->sync_mode) {
		/*
		 * Tell the adapter that all is configured, and it can
		 * start accepting requests
		 */
		aac_src_start_adapter(dev);
	}
	return 0;

error_iounmap:

	return -1;
}

/**
 * aac_srcv_init - initialize an SRCv card
 * @dev: device to configure
 *
 */

int aac_srcv_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	int i, j;
	const char *name = dev->name;
	int cpu;

	dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
	dev->a_ops.adapter_comm = aac_src_select_comm;

	dev->base_size = AAC_MIN_SRCV_BAR0_SIZE;
	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	if ((aac_reset_devices || reset_devices) &&
		!aac_src_restart_adapter(dev, 0))
		++restart;
	/*
	 * Check to see if flash update is running.
	 * Wait for the adapter to be up and running. Wait up to 5 minutes
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & FLASH_UPD_PENDING) {
		start = jiffies;
		do {
			status = src_readl(dev, MUnit.OMR);
			if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) {
				printk(KERN_ERR "%s%d: adapter flash update failed.\n",
					dev->name, instance);
				goto error_iounmap;
			}
		} while (!(status & FLASH_UPD_SUCCESS) &&
			 !(status & FLASH_UPD_FAILED));
		/* Delay 10 seconds.
		 * Because right now FW is doing a soft reset,
		 * do not read scratch pad register at this time
		 */
		ssleep(10);
	}
	/*
	 * Check to see if the board panic'd while booting.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & KERNEL_PANIC) {
		if (aac_src_restart_adapter(dev, aac_src_check_health(dev)))
			goto error_iounmap;
		++restart;
	}
	/*
	 * Check to see if the board failed any self tests.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
		goto error_iounmap;
	}
	/*
	 * Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
		goto error_iounmap;
	}
	start = jiffies;
	/*
	 * Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	while (!((status = src_readl(dev, MUnit.OMR)) &
		KERNEL_UP_AND_RUNNING) ||
		status == 0xffffffff) {
		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_src_restart_adapter(dev, aac_src_check_health(dev))))
				start = jiffies;
			++restart;
		}
		msleep(1);
	}
	if (restart && aac_commit)
		aac_commit = 1;
	/*
	 * Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_notify = aac_src_notify_adapter;
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_check_health = aac_src_check_health;
	dev->a_ops.adapter_restart = aac_src_restart_adapter;

	/*
	 * First clear out all interrupts.  Then enable the ones that we
	 * can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
	aac_adapter_disable_int(dev);
	src_writel(dev, MUnit.ODR_C, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE2)
		goto error_iounmap;
	if (dev->msi_enabled)
		aac_src_access_devreg(dev, AAC_ENABLE_MSIX);
	if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
		cpu = cpumask_first(cpu_online_mask);
		for (i = 0; i < dev->max_msix; i++) {
			dev->aac_msix[i].vector_no = i;
			dev->aac_msix[i].dev = dev;

			if (request_irq(dev->msixentry[i].vector,
					dev->a_ops.adapter_intr,
					0,
					"aacraid",
					&(dev->aac_msix[i]))) {
				printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
					name, instance, i);
				for (j = 0 ; j < i ; j++)
					free_irq(dev->msixentry[j].vector,
						 &(dev->aac_msix[j]));
				pci_disable_msix(dev->pdev);
				goto error_iounmap;
			}
			if (irq_set_affinity_hint(
				dev->msixentry[i].vector,
				get_cpu_mask(cpu))) {
				printk(KERN_ERR "%s%d: Failed to set IRQ affinity for cpu %d\n",
					name, instance, cpu);
			}
			cpu = cpumask_next(cpu, cpu_online_mask);
		}
	} else {
		dev->aac_msix[0].vector_no = 0;
		dev->aac_msix[0].dev = dev;

		if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
				IRQF_SHARED,
				"aacraid",
				&(dev->aac_msix[0])) < 0) {
			if (dev->msi)
				pci_disable_msi(dev->pdev);
			printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
				name, instance);
			goto error_iounmap;
		}
	}
	dev->dbg_base = dev->base_start;
	dev->dbg_base_mapped = dev->base;
	dev->dbg_size = dev->base_size;
	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;

	aac_adapter_enable_int(dev);

	if (!dev->sync_mode) {
		/*
		 * Tell the adapter that all is configured, and it can
		 * start accepting requests
		 */
		aac_src_start_adapter(dev);
	}
	return 0;

error_iounmap:

	return -1;
}

void aac_src_access_devreg(struct aac_dev *dev, int mode)
{
	u_int32_t val;

	switch (mode) {
	case AAC_ENABLE_INTERRUPT:
		src_writel(dev,
			   MUnit.OIMR,
			   dev->OIMR = (dev->msi_enabled ?
					AAC_INT_ENABLE_TYPE1_MSIX :
					AAC_INT_ENABLE_TYPE1_INTX));
		break;

	case AAC_DISABLE_INTERRUPT:
		src_writel(dev,
			   MUnit.OIMR,
			   dev->OIMR = AAC_INT_DISABLE_ALL);
		break;

	case AAC_ENABLE_MSIX:
		/* set bit 6 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x40;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		/* unmask int. */
		val = PMC_ALL_INTERRUPT_BITS;
		src_writel(dev, MUnit.IOAR, val);
		val = src_readl(dev, MUnit.OIMR);
		src_writel(dev,
			   MUnit.OIMR,
			   val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
		break;

	case AAC_DISABLE_MSIX:
		/* reset bit 6 */
		val = src_readl(dev, MUnit.IDR);
		val &= ~0x40;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		break;

	case AAC_CLEAR_AIF_BIT:
		/* set bit 5 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x20;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		break;

	case AAC_CLEAR_SYNC_BIT:
		/* set bit 4 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x10;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		break;

	case AAC_ENABLE_INTX:
		/* set bit 7 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x80;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		/* unmask int. */
		val = PMC_ALL_INTERRUPT_BITS;
		src_writel(dev, MUnit.IOAR, val);
		src_readl(dev, MUnit.IOAR);
		val = src_readl(dev, MUnit.OIMR);
		src_writel(dev, MUnit.OIMR,
			   val & (~(PMC_GLOBAL_INT_BIT2)));
		break;

	default:
		break;
	}
}

static int aac_src_get_sync_status(struct aac_dev *dev)
{
	int val;

	if (dev->msi_enabled)
		val = src_readl(dev, MUnit.ODR_MSI) & 0x1000 ? 1 : 0;
	else
		val = src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT;

	return val;
}