/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  commsup.c
 *
 * Abstract: Contains all routines required for FSA host/adapter
 *    communication.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <asm/semaphore.h>

#include "aacraid.h"

/**
 *	fib_map_alloc		-	allocate the fib objects
 *	@dev: Adapter to allocate for
 *
 *	Allocate and map the shared PCI space for the FIB blocks used to
 *	talk to the Adaptec firmware.
 */

static int fib_map_alloc(struct aac_dev *dev)
{
	dprintk((KERN_INFO
	  "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
	  dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
	if ((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size
	  * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
	  &dev->hw_fib_pa)) == NULL)
		return -ENOMEM;
	return 0;
}
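
/*
 *	Illustrative sketch only, not called by the driver: the size of the
 *	single coherent DMA block that fib_map_alloc() requests above. One
 *	hardware fib of max_fib_size bytes is reserved for every outstanding
 *	SCSI command (can_queue) plus AAC_NUM_MGT_FIB management fibs.
 */
static inline size_t aac_fib_pool_size_example(struct aac_dev *dev)
{
	return (size_t)dev->max_fib_size *
		(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
}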

/**
 *	aac_fib_map_free		-	free the fib objects
 *	@dev: Adapter to free
 *
 *	Free the PCI mappings and the memory allocated for FIB blocks
 *	on this adapter.
 */

void aac_fib_map_free(struct aac_dev *dev)
{
	pci_free_consistent(dev->pdev, dev->max_fib_size *
	  (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
	  dev->hw_fib_va, dev->hw_fib_pa);
}

/**
 *	aac_fib_setup	-	setup the fibs
 *	@dev: Adapter to set up
 *
 *	Allocate the PCI space for the fibs, map it and then initialise the
 *	fib area, the unmapped fib data and also the free list
 */

int aac_fib_setup(struct aac_dev * dev)
{
	struct fib *fibptr;
	struct hw_fib *hw_fib_va;
	dma_addr_t hw_fib_pa;
	int i;

	while (((i = fib_map_alloc(dev)) == -ENOMEM)
	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
		dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
		dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
	}
	if (i < 0)
		return -ENOMEM;

	hw_fib_va = dev->hw_fib_va;
	hw_fib_pa = dev->hw_fib_pa;
	memset(hw_fib_va, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
	/*
	 *	Initialise the fibs
	 */
	for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++)
	{
		fibptr->dev = dev;
		fibptr->hw_fib = hw_fib_va;
		fibptr->data = (void *) fibptr->hw_fib->data;
		fibptr->next = fibptr+1;	/* Forward chain the fibs */
		init_MUTEX_LOCKED(&fibptr->event_wait);
		spin_lock_init(&fibptr->event_lock);
		hw_fib_va->header.XferState = cpu_to_le32(0xffffffff);
		hw_fib_va->header.SenderSize = cpu_to_le16(dev->max_fib_size);
		fibptr->hw_fib_pa = hw_fib_pa;
		hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + dev->max_fib_size);
		hw_fib_pa = hw_fib_pa + dev->max_fib_size;
	}
	/*
	 *	Add the fib chain to the free list
	 */
	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
	/*
	 *	Enable this to debug out of queue space
	 */
	dev->free_fib = &dev->fibs[0];
	return 0;
}
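
/*
 *	Worked example of the retry loop above (figures are illustrative,
 *	assuming AAC_NUM_MGT_FIB is 8 as defined in aacraid.h): if can_queue
 *	starts at 512 and the coherent allocation fails, the loop sets
 *	MaxIoCommands = (512 + 8) >> 1 = 260 and can_queue = 260 - 8 = 252,
 *	then tries again, roughly halving the request each pass until the
 *	allocation succeeds or can_queue drops to 64 - AAC_NUM_MGT_FIB.
 */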

/**
 *	aac_fib_alloc	-	allocate a fib
 *	@dev: Adapter to allocate the fib for
 *
 *	Allocate a fib from the adapter fib pool. If the pool is empty we
 *	return NULL.
 */

struct fib *aac_fib_alloc(struct aac_dev *dev)
{
	struct fib * fibptr;
	unsigned long flags;
	spin_lock_irqsave(&dev->fib_lock, flags);
	fibptr = dev->free_fib;
	if (!fibptr) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return fibptr;
	}
	dev->free_fib = fibptr->next;
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	/*
	 *	Set the proper node type code and node byte size
	 */
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->size = sizeof(struct fib);
	/*
	 *	Null out fields that depend on being zero at the start of
	 *	each I/O
	 */
	fibptr->hw_fib->header.XferState = 0;
	fibptr->callback = NULL;
	fibptr->callback_data = NULL;

	return fibptr;
}

/**
 *	aac_fib_free	-	free a fib
 *	@fibptr: fib to free up
 *
 *	Frees up a fib and places it on the appropriate queue
 *	(either free or timed out)
 */

void aac_fib_free(struct fib *fibptr)
{
	unsigned long flags;

	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
	if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
		aac_config.fib_timeouts++;
		fibptr->next = fibptr->dev->timeout_fib;
		fibptr->dev->timeout_fib = fibptr;
	} else {
		if (fibptr->hw_fib->header.XferState != 0) {
			printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
				 (void*)fibptr,
				 le32_to_cpu(fibptr->hw_fib->header.XferState));
		}
		fibptr->next = fibptr->dev->free_fib;
		fibptr->dev->free_fib = fibptr;
	}
	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}

/**
 *	aac_fib_init	-	initialise a fib
 *	@fibptr: The fib to initialize
 *
 *	Set up the generic fib fields ready for use
 */

void aac_fib_init(struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib;

	hw_fib->header.StructType = FIB_MAGIC;
	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
	hw_fib->header.SenderFibAddress = 0; /* Filled in later if needed */
	hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}

/**
 *	fib_dealloc		-	deallocate a fib
 *	@fibptr: fib to deallocate
 *
 *	Will deallocate and return to the free pool the FIB pointed to by the
 *	caller.
 */

static void fib_dealloc(struct fib * fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib;
	if (hw_fib->header.StructType != FIB_MAGIC)
		BUG();
	hw_fib->header.XferState = 0;
}
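
/*
 *	Minimal sketch, not part of the driver, of the fib life cycle that
 *	the routines above implement, assuming the declarations in aacraid.h.
 *	The command (ContainerCommand), priority (FsaNormal) and payload size
 *	are placeholders; real callers build a request in fib_data() first.
 */
static inline int aac_fib_lifecycle_example(struct aac_dev *dev)
{
	struct fib *fibptr = aac_fib_alloc(dev);
	int status;

	if (!fibptr)
		return -ENOMEM;
	aac_fib_init(fibptr);
	/* ... fill fib_data(fibptr) with the request here ... */
	status = aac_fib_send(ContainerCommand, fibptr,
			      sizeof(struct aac_query_mount),
			      FsaNormal, 1, 1, NULL, NULL);	/* synchronous */
	if (status >= 0)
		aac_fib_complete(fibptr);	/* hand the hw fib back */
	aac_fib_free(fibptr);			/* return to the free pool */
	return status;
}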

/*
 *	Communication primitives define and support the queuing method we use
 *	to support host to adapter communication. All queue accesses happen
 *	through these routines and they are the only routines which have
 *	knowledge of how these queues are implemented.
 */

/**
 *	aac_get_entry		-	get a queue entry
 *	@dev: Adapter
 *	@qid: Queue Number
 *	@entry: Entry return
 *	@index: Index return
 *	@nonotify: notification control
 *
 *	Returns a queue entry if the queue has free entries. If the queue
 *	is full (no free entries) then no entry is returned and the function
 *	returns 0, otherwise 1 is returned.
 */

static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
	struct aac_queue * q;
	unsigned long idx;

	/*
	 *	All of the queues wrap when they reach the end, so we check
	 *	to see if they have reached the end and if they have we just
	 *	set the index back to zero. This is a wrap. You could or off
	 *	the high bits in all updates but this is a bit faster I think.
	 */

	q = &dev->queues->queue[qid];

	idx = *index = le32_to_cpu(*(q->headers.producer));
	/* Interrupt Moderation, only interrupt for first two entries */
	if (idx != le32_to_cpu(*(q->headers.consumer))) {
		if (--idx == 0) {
			if (qid == AdapNormCmdQueue)
				idx = ADAP_NORM_CMD_ENTRIES;
			else
				idx = ADAP_NORM_RESP_ENTRIES;
		}
		if (idx != le32_to_cpu(*(q->headers.consumer)))
			*nonotify = 1;
	}

	if (qid == AdapNormCmdQueue) {
		if (*index >= ADAP_NORM_CMD_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	} else {
		if (*index >= ADAP_NORM_RESP_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	}

	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
				qid, q->numpending);
		return 0;
	} else {
		*entry = q->base + *index;
		return 1;
	}
}

/**
 *	aac_queue_get		-	get the next free QE
 *	@dev: Adapter
 *	@index: Returned index
 *	@qid: Queue number
 *	@hw_fib: Fib to associate with the queue entry
 *	@wait: Wait if queue full
 *	@fibptr: Driver fib object to go with fib
 *	@nonotify: Don't notify the adapter
 *
 *	Gets the next free QE off the requested priority adapter command
 *	queue and associates the Fib with the QE. The QE represented by
 *	index is ready to insert on the queue when this routine returns
 *	success.
 */

static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
	struct aac_entry * entry = NULL;
	int map = 0;

	if (qid == AdapNormCmdQueue) {
		/* if no entries wait for some if caller wants to */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify))
		{
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 *	Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	} else {
		while (!aac_get_entry(dev, qid, &entry, index, nonotify))
		{
			/* if no entries wait for some if caller wants to */
		}
		/*
		 *	Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		entry->addr = hw_fib->header.SenderFibAddress;
		/* Restore adapters pointer to the FIB */
		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
		map = 0;
	}
	/*
	 *	If MapFib is true then we need to map the Fib and put pointers
	 *	in the queue entry.
	 */
	if (map)
		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
	return 0;
}
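
/*
 *	Illustrative helper, not used by the driver: the ring arithmetic that
 *	aac_get_entry() applies above. A slot is free unless advancing the
 *	(wrapped) producer index would make it collide with the consumer
 *	index, so one entry of each queue is always left unused.
 */
static inline int aac_queue_has_room_example(u32 producer, u32 consumer,
					     u32 entries)
{
	u32 next = (producer >= entries) ? 0 : producer;	/* wrap */

	return (next + 1) != consumer;
}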

/*
 *	Define the highest level of host to adapter communication routines.
 *	These routines will support host to adapter FS communication. These
 *	routines have no knowledge of the communication method used. This
 *	level sends and receives FIBs. This level has no knowledge of how
 *	these FIBs get passed back and forth.
 */

/**
 *	aac_fib_send	-	send a fib to the adapter
 *	@command: Command to send
 *	@fibptr: The fib
 *	@size: Size of fib data area
 *	@priority: Priority of Fib
 *	@wait: Async/sync select
 *	@reply: True if a reply is wanted
 *	@callback: Called with reply
 *	@callback_data: Passed to callback
 *
 *	Sends the requested FIB to the adapter and optionally will wait for a
 *	response FIB. If the caller does not wish to wait for a response then
 *	an event to wait on must be supplied. This event will be set when a
 *	response FIB is received from the adapter.
 */

int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
		int priority, int wait, int reply, fib_callback callback,
		void *callback_data)
{
	struct aac_dev * dev = fibptr->dev;
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_queue * q;
	unsigned long flags = 0;
	unsigned long qflags;

	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
		return -EBUSY;
	/*
	 *	There are 5 cases with the wait and response requested flags.
	 *	The only invalid cases are if the caller requests to wait and
	 *	does not request a response and if the caller does not want a
	 *	response and the Fib is not allocated from pool. If a response
	 *	is not requested the Fib will just be deallocated by the DPC
	 *	routine when the response comes back from the adapter. No
	 *	further processing will be done besides deleting the Fib. We
	 *	will have a debug mode where the adapter can notify the host
	 *	it had a problem and the host can log that fact.
	 */
	if (wait && !reply) {
		return -EINVAL;
	} else if (!wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
	} else if (!wait && !reply) {
		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
	} else if (wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
	}
	/*
	 *	Map the fib into 32bits by using the fib number
	 */

	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
	/*
	 *	Set FIB state to indicate where it came from and if we want a
	 *	response from the adapter. Also load the command from the
	 *	caller.
	 *
	 *	Map the hw fib pointer as a 32bit value
	 */
	hw_fib->header.Command = cpu_to_le16(command);
	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
	fibptr->hw_fib->header.Flags = 0;	/* 0 the flags field - internal only*/
	/*
	 *	Set the size of the Fib we want to send to the adapter
	 */
	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
		return -EMSGSIZE;
	}
	/*
	 *	Get a queue entry, connect the FIB to it, and notify
	 *	the adapter that a command is ready.
	 */
	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);

	/*
	 *	Fill in the Callback and CallbackContext if we are not
	 *	going to wait.
	 */
	if (!wait) {
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
	}

	fibptr->done = 0;
	fibptr->flags = 0;

	FIB_COUNTER_INCREMENT(aac_config.FibsSent);

	dprintk((KERN_DEBUG "Fib contents:.\n"));
	dprintk((KERN_DEBUG "  Command = %d.\n", le32_to_cpu(hw_fib->header.Command)));
	dprintk((KERN_DEBUG "  SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
	dprintk((KERN_DEBUG "  XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState)));
	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib));
	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
	dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));

	q = &dev->queues->queue[AdapNormCmdQueue];

	if (wait)
		spin_lock_irqsave(&fibptr->event_lock, flags);
	spin_lock_irqsave(q->lock, qflags);
	if (dev->new_comm_interface) {
		unsigned long count = 10000000L; /* 50 seconds */
		list_add_tail(&fibptr->queue, &q->pendingq);
		q->numpending++;
		spin_unlock_irqrestore(q->lock, qflags);
		while (aac_adapter_send(fibptr) != 0) {
			if (--count == 0) {
				if (wait)
					spin_unlock_irqrestore(&fibptr->event_lock, flags);
				spin_lock_irqsave(q->lock, qflags);
				q->numpending--;
				list_del(&fibptr->queue);
				spin_unlock_irqrestore(q->lock, qflags);
				return -ETIMEDOUT;
			}
			udelay(5);
		}
	} else {
		u32 index;
		unsigned long nointr = 0;
		aac_queue_get(dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);

		list_add_tail(&fibptr->queue, &q->pendingq);
		q->numpending++;
		*(q->headers.producer) = cpu_to_le32(index + 1);
		spin_unlock_irqrestore(q->lock, qflags);
		dprintk((KERN_DEBUG "aac_fib_send: inserting a queue entry at index %d.\n",index));
		if (!(nointr & aac_config.irq_mod))
			aac_adapter_notify(dev, AdapNormCmdQueue);
	}

	/*
	 *	If the caller wanted us to wait for response wait now.
	 */

	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		/* Only set for first known interruptible command */
		if (wait < 0) {
			/*
			 *	*VERY* Dangerous to time out a command, the
			 *	assumption is made that we have no hope of
			 *	functioning because an interrupt routing or other
			 *	hardware failure has occurred.
			 */
			unsigned long count = 36000000L; /* 3 minutes */
			while (down_trylock(&fibptr->event_wait)) {
				if (--count == 0) {
					spin_lock_irqsave(q->lock, qflags);
					q->numpending--;
					list_del(&fibptr->queue);
					spin_unlock_irqrestore(q->lock, qflags);
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
						  "Usually a result of a PCI interrupt routing problem;\n"
						  "update motherboard BIOS or consider utilizing one of\n"
						  "the SAFE mode kernel options (acpi, apic etc)\n");
					}
					return -ETIMEDOUT;
				}
				udelay(5);
			}
		} else
			down(&fibptr->event_wait);
		if (fibptr->done == 0)
			BUG();

		if ((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			return -ETIMEDOUT;
		} else {
			return 0;
		}
	}
	/*
	 *	If the user does not want a response then return success,
	 *	otherwise return pending
	 */
	if (reply)
		return -EINPROGRESS;
	else
		return 0;
}

/**
 *	aac_consumer_get	-	get the top of the queue
 *	@dev: Adapter
 *	@q: Queue
 *	@entry: Return entry
 *
 *	Returns a pointer to the entry on the top of the requested queue
 *	that we are a consumer of; the address of the queue entry is placed
 *	in @entry. It does not change the state of the queue.
 */

int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
{
	u32 index;
	int status;
	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
		status = 0;
	} else {
		/*
		 *	The consumer index must be wrapped if we have reached
		 *	the end of the queue, else we just use the entry
		 *	pointed to by the header index
		 */
		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
			index = 0;
		else
			index = le32_to_cpu(*q->headers.consumer);
		*entry = q->base + index;
		status = 1;
	}
	return(status);
}

/**
 *	aac_consumer_free	-	free consumer entry
 *	@dev: Adapter
 *	@q: Queue
 *	@qid: Queue ident
 *
 *	Frees up the current top of the queue we are a consumer of. If the
 *	queue was full notify the producer that the queue is no longer full.
 */

void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
{
	int wasfull = 0;
	u32 notify;

	if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
		wasfull = 1;

	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
		*q->headers.consumer = cpu_to_le32(1);
	else
		*q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);

	if (wasfull) {
		switch (qid) {

		case HostNormCmdQueue:
			notify = HostNormCmdNotFull;
			break;
		case HostNormRespQueue:
			notify = HostNormRespNotFull;
			break;
		default:
			BUG();
			return;
		}
		aac_adapter_notify(dev, notify);
	}
}
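
/*
 *	Illustrative sketch only: how a host-side queue is typically drained
 *	with the two consumer primitives above. The real consumers live in
 *	the interrupt/DPC path (dpcsup.c in this driver); the processing
 *	step here is a placeholder.
 */
static inline void aac_drain_queue_example(struct aac_dev *dev,
					   struct aac_queue *q, u32 qid)
{
	struct aac_entry *entry;

	while (aac_consumer_get(dev, q, &entry)) {
		/* ... process entry->addr / entry->size here ... */
		aac_consumer_free(dev, q, qid);	/* advance, notify if was full */
	}
}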

/**
 *	aac_fib_adapter_complete	-	complete adapter issued fib
 *	@fibptr: fib to complete
 *	@size: size of fib
 *
 *	Will do all necessary work to complete a FIB that was sent from
 *	the adapter.
 */

int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_dev * dev = fibptr->dev;
	struct aac_queue * q;
	unsigned long nointr = 0;
	unsigned long qflags;

	if (hw_fib->header.XferState == 0) {
		if (dev->new_comm_interface)
			kfree(hw_fib);
		return 0;
	}
	/*
	 *	If we plan to do anything check the structure type first.
	 */
	if (hw_fib->header.StructType != FIB_MAGIC) {
		if (dev->new_comm_interface)
			kfree(hw_fib);
		return -EINVAL;
	}
	/*
	 *	This block handles the case where the adapter had sent us a
	 *	command and we have finished processing the command. We
	 *	call completeFib when we are done processing the command
	 *	and want to send a response back to the adapter. This will
	 *	send the completed cdb to the adapter.
	 */
	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
		if (dev->new_comm_interface) {
			kfree(hw_fib);
		} else {
			u32 index;
			hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
			if (size) {
				size += sizeof(struct aac_fibhdr);
				if (size > le16_to_cpu(hw_fib->header.SenderSize))
					return -EMSGSIZE;
				hw_fib->header.Size = cpu_to_le16(size);
			}
			q = &dev->queues->queue[AdapNormRespQueue];
			spin_lock_irqsave(q->lock, qflags);
			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
			*(q->headers.producer) = cpu_to_le32(index + 1);
			spin_unlock_irqrestore(q->lock, qflags);
			if (!(nointr & (int)aac_config.irq_mod))
				aac_adapter_notify(dev, AdapNormRespQueue);
		}
	}
	else
	{
		printk(KERN_WARNING "aac_fib_adapter_complete: Unknown xferstate detected.\n");
		BUG();
	}
	return 0;
}

/**
 *	aac_fib_complete	-	fib completion handler
 *	@fibptr: FIB to complete
 *
 *	Will do all necessary work to complete a FIB.
 */

int aac_fib_complete(struct fib *fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;

	/*
	 *	Check for a fib which has already been completed
	 */

	if (hw_fib->header.XferState == 0)
		return 0;
	/*
	 *	If we plan to do anything check the structure type first.
	 */

	if (hw_fib->header.StructType != FIB_MAGIC)
		return -EINVAL;
	/*
	 *	This block completes a cdb which originated on the host and we
	 *	just need to deallocate the cdb or reinit it. At this point the
	 *	command is complete that we had sent to the adapter and this
	 *	cdb could be reused.
	 */
	if ((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
		(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
	{
		fib_dealloc(fibptr);
	}
	else if (hw_fib->header.XferState & cpu_to_le32(SentFromHost))
	{
		/*
		 *	This handles the case when the host has aborted the I/O
		 *	to the adapter because the adapter is not responding
		 */
		fib_dealloc(fibptr);
	} else if (hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
		fib_dealloc(fibptr);
	} else {
		BUG();
	}
	return 0;
}

/**
 *	aac_printf	-	handle printf from firmware
 *	@dev: Adapter
 *	@val: Message info
 *
 *	Print a message passed to us by the controller firmware on the
 *	Adaptec board
 */

void aac_printf(struct aac_dev *dev, u32 val)
{
	char *cp = dev->printfbuf;
	if (dev->printf_enabled)
	{
		int length = val & 0xffff;
		int level = (val >> 16) & 0xffff;

		/*
		 *	The size of the printfbuf is set in port.c
		 *	There is no variable or define for it
		 */
		if (length > 255)
			length = 255;
		if (cp[length] != 0)
			cp[length] = 0;
		if (level == LOG_AAC_HIGH_ERROR)
			printk(KERN_WARNING "%s:%s", dev->name, cp);
		else
			printk(KERN_INFO "%s:%s", dev->name, cp);
	}
	memset(cp, 0, 256);
}


/**
 *	aac_handle_aif		-	Handle a message from the firmware
 *	@dev: Which adapter this fib is from
 *	@fibptr: Pointer to the fib from the adapter
 *
 *	This routine handles a driver notify fib from the adapter and
 *	dispatches it to the appropriate routine for handling.
 */

#define AIF_SNIFF_TIMEOUT	(30*HZ)
static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
	int busy;
	u32 container;
	struct scsi_device *device;
	enum {
		NOTHING,
		DELETE,
		ADD,
		CHANGE
	} device_config_needed;

	/* Sniff for container changes */

	if (!dev)
		return;
	container = (u32)-1;

	/*
	 *	We have set this up to try and minimize the number of
	 *	re-configures that take place. As a result of this when
	 *	certain AIF's come in we will set a flag waiting for another
	 *	type of AIF before setting the re-config flag.
	 */
	switch (le32_to_cpu(aifcmd->command)) {
	case AifCmdDriverNotify:
		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
		/*
		 *	Morph or Expand complete
		 */
		case AifDenMorphComplete:
		case AifDenVolumeExtendComplete:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;

			/*
			 *	Find the scsi_device associated with the SCSI
			 *	address. Make sure we have the right array, and
			 *	if so set the flag to initiate a new re-config
			 *	once we see an AifEnConfigChange AIF come
			 *	through.
			 */

			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
				device = scsi_device_lookup(dev->scsi_host_ptr,
					CONTAINER_TO_CHANNEL(container),
					CONTAINER_TO_ID(container),
					CONTAINER_TO_LUN(container));
				if (device) {
					dev->fsa_dev[container].config_needed = CHANGE;
					dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
					dev->fsa_dev[container].config_waiting_stamp = jiffies;
					scsi_device_put(device);
				}
			}
		}

		/*
		 *	If we are waiting on something and this happens to be
		 *	that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdEventNotify:
		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
		/*
		 *	Add an Array.
		 */
		case AifEnAddContainer:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = ADD;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 *	Delete an Array.
		 */
		case AifEnDeleteContainer:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = DELETE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 *	Container change detected. If we currently are not
		 *	waiting on something else, set up to wait on a
		 *	Config Change.
		 */
		case AifEnContainerChange:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			if (dev->fsa_dev[container].config_waiting_on &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				break;
			dev->fsa_dev[container].config_needed = CHANGE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		case AifEnConfigChange:
			break;

		}

		/*
		 *	If we are waiting on something and this happens to be
		 *	that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdJobProgress:
		/*
		 *	These are job progress AIF's. When a Clear is being
		 *	done on a container it is initially created then hidden
		 *	from the OS. When the clear completes we don't get a
		 *	config change so we monitor the job status complete on
		 *	a clear then wait for a container change.
		 */

		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
		 && ((((u32 *)aifcmd->data)[6] == ((u32 *)aifcmd->data)[5])
		  || (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess)))) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = ADD;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
		 && (((u32 *)aifcmd->data)[6] == 0)
		 && (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning))) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = DELETE;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		break;
	}

	device_config_needed = NOTHING;
	for (container = 0; container < dev->maximum_num_containers;
	    ++container) {
		if ((dev->fsa_dev[container].config_waiting_on == 0) &&
			(dev->fsa_dev[container].config_needed != NOTHING) &&
		 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
			device_config_needed =
				dev->fsa_dev[container].config_needed;
			dev->fsa_dev[container].config_needed = NOTHING;
			break;
		}
	}
	if (device_config_needed == NOTHING)
		return;

	/*
	 *	If we decided that a re-configuration needs to be done,
	 *	schedule it here on the way out the door, please close the door
	 *	behind you.
	 */

	busy = 0;


	/*
	 *	Find the scsi_device associated with the SCSI address,
	 *	and mark it as changed, invalidating the cache. This deals
	 *	with changes to existing device IDs.
	 */

	if (!dev || !dev->scsi_host_ptr)
		return;
	/*
	 * force reload of disk info via aac_probe_container
	 */
	if ((device_config_needed == CHANGE)
	 && (dev->fsa_dev[container].valid == 1))
		dev->fsa_dev[container].valid = 2;
	if ((device_config_needed == CHANGE) ||
			(device_config_needed == ADD))
		aac_probe_container(dev, container);
	device = scsi_device_lookup(dev->scsi_host_ptr,
		CONTAINER_TO_CHANNEL(container),
		CONTAINER_TO_ID(container),
		CONTAINER_TO_LUN(container));
	if (device) {
		switch (device_config_needed) {
		case DELETE:
			scsi_remove_device(device);
			break;
		case CHANGE:
			if (!dev->fsa_dev[container].valid) {
				scsi_remove_device(device);
				break;
			}
			scsi_rescan_device(&device->sdev_gendev);

		default:
			break;
		}
		scsi_device_put(device);
	}
	if (device_config_needed == ADD) {
		scsi_add_device(dev->scsi_host_ptr,
			CONTAINER_TO_CHANNEL(container),
			CONTAINER_TO_ID(container),
			CONTAINER_TO_LUN(container));
	}

}
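
/*
 *	Illustrative sketch only: aac_command_thread() below runs as a
 *	kernel thread, one per adapter, and is started from the adapter
 *	initialization path roughly like this. The surrounding error
 *	handling and the thread name string are assumptions, not part of
 *	this file; the aac_command_thread prototype comes from aacraid.h.
 */
static inline int aac_start_aif_thread_example(struct aac_dev *dev)
{
	dev->thread = kthread_run(aac_command_thread, dev, "aacraid");
	if (IS_ERR(dev->thread))
		return PTR_ERR(dev->thread);	/* no AIF delivery without it */
	return 0;
}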

/**
 *	aac_command_thread	-	command processing thread
 *	@data: Adapter to monitor
 *
 *	Waits on the commandready event in its queue. When the event gets set
 *	it will pull FIBs off its queue. It will continue to pull FIBs off
 *	until the queue is empty. When the queue is empty it will wait for
 *	more FIBs.
 */

int aac_command_thread(void *data)
{
	struct aac_dev *dev = data;
	struct hw_fib *hw_fib, *hw_newfib;
	struct fib *fib, *newfib;
	struct aac_fib_context *fibctx;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 *	We can only have one thread per adapter for AIF's.
	 */
	if (dev->aif_thread)
		return -EINVAL;

	/*
	 *	Let the DPC know it has a place to send the AIF's to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	dprintk ((KERN_INFO "aac_command_thread start\n"));
	while (1)
	{
		spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
			struct list_head *entry;
			struct aac_aifcmd * aifcmd;

			set_current_state(TASK_RUNNING);

			entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
			list_del(entry);

			spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
			fib = list_entry(entry, struct fib, fiblink);
			/*
			 *	We will process the FIB here or pass it to a
			 *	worker thread that is TBD. We really can't
			 *	do anything at this point since we don't have
			 *	anything defined for this thread to do.
			 */
			hw_fib = fib->hw_fib;
			memset(fib, 0, sizeof(struct fib));
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof(struct fib);
			fib->hw_fib = hw_fib;
			fib->data = hw_fib->data;
			fib->dev = dev;
			/*
			 *	We only handle AifRequest fibs from the adapter.
			 */
			aifcmd = (struct aac_aifcmd *) hw_fib->data;
			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
				/* Handle Driver Notify Events */
				aac_handle_aif(dev, fib);
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			} else {
				struct list_head *entry;
				/* The u32 here is important and intended. We are using
				   32bit wrapping time to fit the adapter field */

				u32 time_now, time_last;
				unsigned long flagv;
				unsigned num;
				struct hw_fib ** hw_fib_pool, ** hw_fib_p;
				struct fib ** fib_pool, ** fib_p;

				/* Sniff events */
				if ((aifcmd->command ==
				     cpu_to_le32(AifCmdEventNotify)) ||
				    (aifcmd->command ==
				     cpu_to_le32(AifCmdJobProgress))) {
					aac_handle_aif(dev, fib);
				}

				time_now = jiffies/HZ;

				/*
				 * Warning: no sleep allowed while
				 * holding spinlock. We take the estimate
				 * and pre-allocate a set of fibs outside the
				 * lock.
				 */
				num = le32_to_cpu(dev->init->AdapterFibsSize)
				    / sizeof(struct hw_fib); /* some extra */
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				while (entry != &dev->fib_list) {
					entry = entry->next;
					++num;
				}
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				hw_fib_pool = NULL;
				fib_pool = NULL;
				if (num
				 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
				 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
					hw_fib_p = hw_fib_pool;
					fib_p = fib_pool;
					while (hw_fib_p < &hw_fib_pool[num]) {
						if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
							--hw_fib_p;
							break;
						}
						if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
							kfree(*(--hw_fib_p));
							break;
						}
					}
					if ((num = hw_fib_p - hw_fib_pool) == 0) {
						kfree(fib_pool);
						fib_pool = NULL;
						kfree(hw_fib_pool);
						hw_fib_pool = NULL;
					}
				} else {
					kfree(hw_fib_pool);
					hw_fib_pool = NULL;
				}
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				/*
				 * For each Context that is on the
				 * fibctxList, make a copy of the
				 * fib, and then set the event to wake up the
				 * thread that is waiting for it.
				 */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (entry != &dev->fib_list) {
					/*
					 * Extract the fibctx
					 */
					fibctx = list_entry(entry, struct aac_fib_context, next);
					/*
					 * Check if the queue is getting
					 * backlogged
					 */
					if (fibctx->count > 20)
					{
						/*
						 * It's *not* jiffies folks,
						 * but jiffies / HZ so do not
						 * panic ...
						 */
						time_last = fibctx->jiffies;
						/*
						 * Has it been > 2 minutes
						 * since the last read off
						 * the queue?
						 */
						if ((time_now - time_last) > 120) {
							entry = entry->next;
							aac_close_fib_context(dev, fibctx);
							continue;
						}
					}
					/*
					 * Warning: no sleep allowed while
					 * holding spinlock
					 */
					if (hw_fib_p < &hw_fib_pool[num]) {
						hw_newfib = *hw_fib_p;
						*(hw_fib_p++) = NULL;
						newfib = *fib_p;
						*(fib_p++) = NULL;
						/*
						 * Make the copy of the FIB
						 */
						memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
						memcpy(newfib, fib, sizeof(struct fib));
						newfib->hw_fib = hw_newfib;
						/*
						 * Put the FIB onto the
						 * fibctx's fibs
						 */
						list_add_tail(&newfib->fiblink, &fibctx->fib_list);
						fibctx->count++;
						/*
						 * Set the event to wake up the
						 * thread that is waiting.
						 */
						up(&fibctx->wait_sem);
					} else {
						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
					}
					entry = entry->next;
				}
				/*
				 *	Set the status of this FIB
				 */
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				aac_fib_adapter_complete(fib, sizeof(u32));
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				/* Free up the remaining resources */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (hw_fib_p < &hw_fib_pool[num]) {
					kfree(*hw_fib_p);
					kfree(*fib_p);
					++fib_p;
					++hw_fib_p;
				}
				kfree(hw_fib_pool);
				kfree(fib_pool);
			}
			kfree(fib);
			spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		}
		/*
		 *	There are no more AIF's
		 */
		spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
		schedule();

		if (kthread_should_stop())
			break;
		set_current_state(TASK_INTERRUPTIBLE);
	}
	if (dev->queues)
		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	dev->aif_thread = 0;
	return 0;
}