/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  rx.c
 *
 * Abstract: Hardware miniport for Drawbridge specific hardware functions.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <asm/semaphore.h>

#include <scsi/scsi_host.h>

#include "aacraid.h"

static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id)
{
	struct aac_dev *dev = dev_id;
	unsigned long bellbits;
	u8 intstat = rx_readb(dev, MUnit.OISR);

	/*
	 *	Read mask and invert because drawbridge is reversed.
	 *	This allows us to only service interrupts that have
	 *	been enabled.
	 *	Check to see if this is our interrupt.  If it isn't, just return.
	 */
	if (intstat & ~(dev->OIMR)) {
		bellbits = rx_readl(dev, OutboundDoorbellReg);
		if (bellbits & DoorBellPrintfReady) {
			aac_printf(dev, readl(&dev->IndexRegs->Mailbox[5]));
			rx_writel(dev, MUnit.ODR, DoorBellPrintfReady);
			rx_writel(dev, InboundDoorbellReg, DoorBellPrintfDone);
		} else if (bellbits & DoorBellAdapterNormCmdReady) {
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
			aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
		} else if (bellbits & DoorBellAdapterNormRespReady) {
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
			aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
		} else if (bellbits & DoorBellAdapterNormCmdNotFull) {
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
		} else if (bellbits & DoorBellAdapterNormRespNotFull) {
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
		}
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

static irqreturn_t aac_rx_intr_message(int irq, void *dev_id)
{
	struct aac_dev *dev = dev_id;
	u32 Index = rx_readl(dev, MUnit.OutboundQueue);

	if (Index == 0xFFFFFFFFL)
		Index = rx_readl(dev, MUnit.OutboundQueue);
	if (Index != 0xFFFFFFFFL) {
		do {
			if (aac_intr_normal(dev, Index)) {
				rx_writel(dev, MUnit.OutboundQueue, Index);
				rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
			}
			Index = rx_readl(dev, MUnit.OutboundQueue);
		} while (Index != 0xFFFFFFFFL);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
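
/*
 * The two handlers above implement the two communication schemes this
 * driver supports: the "producer" scheme is doorbell driven and drains
 * the shared-memory host queues via aac_command_normal() /
 * aac_response_normal(), while the "message" scheme pulls completed FIB
 * indices straight from the MUnit.OutboundQueue register until it reads
 * the 0xFFFFFFFF "queue empty" marker. Which handler is actually
 * installed is decided by aac_rx_select_comm() later in this file; this
 * characterisation is inferred from the code itself, not from separate
 * hardware documentation.
 */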

/**
 *	aac_rx_disable_interrupt - Disable interrupts
 *	@dev: Adapter
 */

static void aac_rx_disable_interrupt(struct aac_dev *dev)
{
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
}

/**
 *	aac_rx_enable_interrupt_producer - Enable interrupts
 *	@dev: Adapter
 */

static void aac_rx_enable_interrupt_producer(struct aac_dev *dev)
{
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
}

/**
 *	aac_rx_enable_interrupt_message - Enable interrupts
 *	@dev: Adapter
 */

static void aac_rx_enable_interrupt_message(struct aac_dev *dev)
{
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
}
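
/*
 * A note on the OIMR values used above: OIMR is the outbound interrupt
 * mask register and, as the handler comment says, its sense is inverted,
 * so a set bit masks that source. 0xff therefore masks every outbound
 * interrupt, 0xfb (~0x04) leaves only bit 2 unmasked and 0xf7 (~0x08)
 * leaves only bit 3 unmasked. Which hardware source each bit selects
 * (the doorbell interrupt used by the producer/consumer queues versus
 * the outbound message queue) is an inference from how these enable
 * routines are paired with the interrupt handlers, not a statement from
 * a register datasheet.
 */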

/**
 *	rx_sync_cmd - send a command and wait
 *	@dev: Adapter
 *	@command: Command to execute
 *	@p1: first parameter
 *	@p2: second parameter
 *	@p3: third parameter
 *	@p4: fourth parameter
 *	@p5: fifth parameter
 *	@p6: sixth parameter
 *	@status: adapter status returned from Mailbox 0
 *	@r1: return value from Mailbox 1
 *	@r2: return value from Mailbox 2
 *	@r3: return value from Mailbox 3
 *	@r4: return value from Mailbox 4
 *
 *	This routine will send a synchronous command to the adapter and wait
 *	for its completion.
 */

static int rx_sync_cmd(struct aac_dev *dev, u32 command,
	u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
	u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4)
{
	unsigned long start;
	int ok;

	/*
	 *	Write the command into Mailbox 0
	 */
	writel(command, &dev->IndexRegs->Mailbox[0]);
	/*
	 *	Write the parameters into Mailboxes 1 - 4
	 *	(p5 and p6 are accepted but not written by this routine)
	 */
	writel(p1, &dev->IndexRegs->Mailbox[1]);
	writel(p2, &dev->IndexRegs->Mailbox[2]);
	writel(p3, &dev->IndexRegs->Mailbox[3]);
	writel(p4, &dev->IndexRegs->Mailbox[4]);
	/*
	 *	Clear the synch command doorbell to start on a clean slate.
	 */
	rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
	/*
	 *	Disable doorbell interrupts
	 */
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
	/*
	 *	Force the completion of the mask register write before issuing
	 *	the interrupt.
	 */
	rx_readb(dev, MUnit.OIMR);
	/*
	 *	Signal that there is a new synch command
	 */
	rx_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);

	ok = 0;
	start = jiffies;

	/*
	 *	Wait up to 30 seconds
	 */
	while (time_before(jiffies, start + 30 * HZ)) {
		udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
		/*
		 *	Mon960 will set doorbell0 bit when it has completed the command.
		 */
		if (rx_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
			/*
			 *	Clear the doorbell.
			 */
			rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
			ok = 1;
			break;
		}
		/*
		 *	Yield the processor in case we are slow
		 */
		msleep(1);
	}
	if (ok != 1) {
		/*
		 *	Restore interrupt mask even though we timed out
		 */
		aac_adapter_enable_int(dev);
		return -ETIMEDOUT;
	}
	/*
	 *	Pull the synch status from Mailbox 0.
	 */
	if (status)
		*status = readl(&dev->IndexRegs->Mailbox[0]);
	if (r1)
		*r1 = readl(&dev->IndexRegs->Mailbox[1]);
	if (r2)
		*r2 = readl(&dev->IndexRegs->Mailbox[2]);
	if (r3)
		*r3 = readl(&dev->IndexRegs->Mailbox[3]);
	if (r4)
		*r4 = readl(&dev->IndexRegs->Mailbox[4]);
	/*
	 *	Clear the synch command doorbell.
	 */
	rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
	/*
	 *	Restore interrupt mask
	 */
	aac_adapter_enable_int(dev);
	return 0;
}

/**
 *	aac_rx_interrupt_adapter - interrupt adapter
 *	@dev: Adapter
 *
 *	Send an interrupt to the i960 and breakpoint it.
 */

static void aac_rx_interrupt_adapter(struct aac_dev *dev)
{
	rx_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0,
		NULL, NULL, NULL, NULL, NULL);
}

/**
 *	aac_rx_notify_adapter - send an event to the adapter
 *	@dev: Adapter
 *	@event: Event to send
 *
 *	Notify the i960 that something it probably cares about has
 *	happened.
 */

static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
{
	switch (event) {
	case AdapNormCmdQue:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_1);
		break;
	case HostNormRespNotFull:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_4);
		break;
	case AdapNormRespQue:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_2);
		break;
	case HostNormCmdNotFull:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_3);
		break;
	case HostShutdown:
		break;
	case FastIo:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_6);
		break;
	case AdapPrintfDone:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_5);
		break;
	default:
		BUG();
		break;
	}
}

/**
 *	aac_rx_start_adapter - activate adapter
 *	@dev: Adapter
 *
 *	Start up processing on an i960 based AAC adapter
 */

void aac_rx_start_adapter(struct aac_dev *dev)
{
	struct aac_init *init;

	init = dev->init;
	init->HostElapsedSeconds = cpu_to_le32(get_seconds());
	/* We can only use a 32 bit address here */
	rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
		0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}
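
/*
 * INIT_STRUCT_BASE_ADDRESS is passed as a single 32 bit mailbox
 * parameter, which is why dev->init_pa is truncated with the
 * (u32)(ulong) cast above. The implicit assumption, made by whoever
 * allocates the init structure rather than verified here, is that the
 * structure sits in DMA-addressable memory below 4GB.
 */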

/**
 *	aac_rx_check_health - check adapter health
 *	@dev: device to check if healthy
 *
 *	Will attempt to determine if the specified adapter is alive and
 *	capable of handling requests, returning 0 if alive.
 */
static int aac_rx_check_health(struct aac_dev *dev)
{
	u32 status = rx_readl(dev, MUnit.OMRx[0]);

	/*
	 *	Check to see if the board failed any self tests.
	 */
	if (status & SELF_TEST_FAILED)
		return -1;
	/*
	 *	Check to see if the board panic'd.
	 */
	if (status & KERNEL_PANIC) {
		char *buffer;
		struct POSTSTATUS {
			__le32 Post_Command;
			__le32 Post_Address;
		} *post;
		dma_addr_t paddr, baddr;
		int ret;

		if ((status & 0xFF000000L) == 0xBC000000L)
			return (status >> 16) & 0xFF;
		buffer = pci_alloc_consistent(dev->pdev, 512, &baddr);
		ret = -2;
		if (buffer == NULL)
			return ret;
		post = pci_alloc_consistent(dev->pdev,
			sizeof(struct POSTSTATUS), &paddr);
		if (post == NULL) {
			pci_free_consistent(dev->pdev, 512, buffer, baddr);
			return ret;
		}
		memset(buffer, 0, 512);
		post->Post_Command = cpu_to_le32(COMMAND_POST_RESULTS);
		post->Post_Address = cpu_to_le32(baddr);
		rx_writel(dev, MUnit.IMRx[0], paddr);
		rx_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, 0, 0, 0, 0, 0,
			NULL, NULL, NULL, NULL, NULL);
		pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
			post, paddr);
		if ((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X'))) {
			ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10);
			ret <<= 4;
			ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10);
		}
		pci_free_consistent(dev->pdev, 512, buffer, baddr);
		return ret;
	}
	/*
	 *	Wait for the adapter to be up and running.
	 */
	if (!(status & KERNEL_UP_AND_RUNNING))
		return -3;
	/*
	 *	Everything is OK
	 */
	return 0;
}

/**
 *	aac_rx_deliver_producer - deliver a fib via the producer/consumer queue
 *	@fib: fib to issue
 *
 *	Will send a fib, returning 0 if successful.
 */
static int aac_rx_deliver_producer(struct fib *fib)
{
	struct aac_dev *dev = fib->dev;
	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
	unsigned long qflags;
	u32 Index;
	unsigned long nointr = 0;

	spin_lock_irqsave(q->lock, qflags);
	aac_queue_get(dev, &Index, AdapNormCmdQueue, fib->hw_fib, 1, fib, &nointr);

	q->numpending++;
	*(q->headers.producer) = cpu_to_le32(Index + 1);
	spin_unlock_irqrestore(q->lock, qflags);
	if (!(nointr & aac_config.irq_mod))
		aac_adapter_notify(dev, AdapNormCmdQueue);

	return 0;
}
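
/*
 * The message-mode delivery path below works roughly as follows (this
 * description is derived from the code itself, not from separate
 * firmware documentation): the host reads a free slot index from
 * MUnit.InboundQueue (0xFFFFFFFF meaning "nothing available yet"),
 * writes three 32 bit values at dev->base + Index (the low and high
 * halves of the FIB bus address, followed by the FIB header size), and
 * finally writes the index back to MUnit.InboundQueue to post the
 * request to the adapter.
 */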

/**
 *	aac_rx_deliver_message - deliver a fib via the message queue
 *	@fib: fib to issue
 *
 *	Will send a fib, returning 0 if successful.
 */
static int aac_rx_deliver_message(struct fib *fib)
{
	struct aac_dev *dev = fib->dev;
	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
	unsigned long qflags;
	u32 Index;
	u64 addr;
	volatile void __iomem *device;
	unsigned long count = 10000000L; /* 50 seconds */

	spin_lock_irqsave(q->lock, qflags);
	q->numpending++;
	spin_unlock_irqrestore(q->lock, qflags);
	for (;;) {
		Index = rx_readl(dev, MUnit.InboundQueue);
		if (Index == 0xFFFFFFFFL)
			Index = rx_readl(dev, MUnit.InboundQueue);
		if (Index != 0xFFFFFFFFL)
			break;
		if (--count == 0) {
			spin_lock_irqsave(q->lock, qflags);
			q->numpending--;
			spin_unlock_irqrestore(q->lock, qflags);
			return -ETIMEDOUT;
		}
		udelay(5);
	}
	device = dev->base + Index;
	addr = fib->hw_fib_pa;
	writel((u32)(addr & 0xffffffff), device);
	device += sizeof(u32);
	writel((u32)(addr >> 32), device);
	device += sizeof(u32);
	writel(le16_to_cpu(fib->hw_fib->header.Size), device);
	rx_writel(dev, MUnit.InboundQueue, Index);
	return 0;
}

/**
 *	aac_rx_ioremap - remap adapter registers
 *	@dev: device to map
 *	@size: mapping resize request
 */
static int aac_rx_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.rx);
		return 0;
	}
	dev->base = dev->regs.rx = ioremap(dev->scsi_host_ptr->base, size);
	if (dev->base == NULL)
		return -1;
	dev->IndexRegs = &dev->regs.rx->IndexRegs;
	return 0;
}

static int aac_rx_restart_adapter(struct aac_dev *dev)
{
	u32 var;

	printk(KERN_ERR "%s%d: adapter kernel panic'd.\n",
		dev->name, dev->id);

	if (aac_rx_check_health(dev) <= 0)
		return 1;
	if (rx_sync_cmd(dev, IOP_RESET, 0, 0, 0, 0, 0, 0,
			&var, NULL, NULL, NULL, NULL))
		return 1;
	if (var != 0x00000001)
		return 1;
	if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC)
		return 1;
	return 0;
}

/**
 *	aac_rx_select_comm - Select communications method
 *	@dev: Adapter
 *	@comm: communications method
 */

int aac_rx_select_comm(struct aac_dev *dev, int comm)
{
	switch (comm) {
	case AAC_COMM_PRODUCER:
		dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_producer;
		dev->a_ops.adapter_intr = aac_rx_intr_producer;
		dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
		break;
	case AAC_COMM_MESSAGE:
		dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_message;
		dev->a_ops.adapter_intr = aac_rx_intr_message;
		dev->a_ops.adapter_deliver = aac_rx_deliver_message;
		break;
	default:
		return 1;
	}
	return 0;
}

/**
 *	_aac_rx_init - initialize an i960 based AAC card
 *	@dev: device to configure
 *
 *	Allocate and set up resources for the i960 based AAC variants. The
 *	device_interface in the commregion will be allocated and linked
 *	to the comm region.
 */

int _aac_rx_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int instance;
	const char *name;

	instance = dev->id;
	name = dev->name;

	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/*
	 *	Check to see if the board panic'd while booting.
	 */
	status = rx_readl(dev, MUnit.OMRx[0]);
	if (status & KERNEL_PANIC)
		if (aac_rx_restart_adapter(dev))
			goto error_iounmap;
	/*
	 *	Check to see if the board failed any self tests.
	 */
	status = rx_readl(dev, MUnit.OMRx[0]);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
		goto error_iounmap;
	}
	/*
	 *	Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
		goto error_iounmap;
	}
	start = jiffies;
	/*
	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	while (!((status = rx_readl(dev, MUnit.OMRx[0])) & KERNEL_UP_AND_RUNNING)) {
		if (time_after(jiffies, start + startup_timeout * HZ)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
				dev->name, instance, status);
			goto error_iounmap;
		}
		msleep(1);
	}
	/*
	 *	Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
	dev->a_ops.adapter_notify = aac_rx_notify_adapter;
	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
	dev->a_ops.adapter_check_health = aac_rx_check_health;

	/*
	 *	First clear out all interrupts.  Then enable the ones that we
	 *	can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_PRODUCER);
	aac_adapter_disable_int(dev);
	rx_writel(dev, MUnit.ODR, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	aac_adapter_comm(dev, dev->comm_interface);
	if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr,
			IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
			name, instance);
		goto error_iounmap;
	}
	aac_adapter_enable_int(dev);
	/*
	 *	Tell the adapter that all is configured, and it can
	 *	start accepting requests
	 */
	aac_rx_start_adapter(dev);

	return 0;

error_iounmap:

	return -1;
}

int aac_rx_init(struct aac_dev *dev)
{
	/*
	 *	Fill in the function dispatch table.
	 */
	dev->a_ops.adapter_ioremap = aac_rx_ioremap;
	dev->a_ops.adapter_comm = aac_rx_select_comm;

	return _aac_rx_init(dev);
}
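
/*
 * aac_rx_init() only installs the two hooks that _aac_rx_init() itself
 * depends on (adapter_ioremap and adapter_comm) before delegating.
 * _aac_rx_init() is deliberately non-static, presumably so that closely
 * related card variants (for example the Rocket support in rkt.c) can
 * reuse the same bring-up sequence with their own ioremap and
 * comm-selection routines.
 */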