/*
 * Adaptec AAC series RAID controller driver
 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *   commsup.c
 *
 * Abstract: Contains all routines that are required for FSA host/adapter
 *    communication.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <asm/semaphore.h>

#include "aacraid.h"

/**
 *	fib_map_alloc		-	allocate the fib objects
 *	@dev: Adapter to allocate for
 *
 *	Allocate and map the shared PCI space for the FIB blocks used to
 *	talk to the Adaptec firmware.
 */

static int fib_map_alloc(struct aac_dev *dev)
{
	dprintk((KERN_INFO
	  "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
	  dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
	if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size
	  * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
	  &dev->hw_fib_pa))==NULL)
		return -ENOMEM;
	return 0;
}
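
/*
 * Illustrative sizing sketch (not driver code): fib_map_alloc() reserves one
 * contiguous DMA region holding every hardware FIB, so the requested length
 * is simply max_fib_size multiplied by the number of I/O slots plus the
 * management FIBs. The numbers below are hypothetical example values only,
 * not driver defaults.
 *
 *	size_t bytes = dev->max_fib_size *
 *		(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
 *	// e.g. 512 * (128 + 8) = 69632 bytes in one pci_alloc_consistent() call
 */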

/**
 *	aac_fib_map_free	-	free the fib objects
 *	@dev: Adapter to free
 *
 *	Free the PCI mappings and the memory allocated for FIB blocks
 *	on this adapter.
 */

void aac_fib_map_free(struct aac_dev *dev)
{
	pci_free_consistent(dev->pdev,
	  dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
	  dev->hw_fib_va, dev->hw_fib_pa);
}

/**
 *	aac_fib_setup	-	setup the fibs
 *	@dev: Adapter to set up
 *
 *	Allocate the PCI space for the fibs, map it and then initialise the
 *	fib area, the unmapped fib data and also the free list
 */

int aac_fib_setup(struct aac_dev * dev)
{
	struct fib *fibptr;
	struct hw_fib *hw_fib_va;
	dma_addr_t hw_fib_pa;
	int i;

	while (((i = fib_map_alloc(dev)) == -ENOMEM)
	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
		dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
		dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
	}
	if (i<0)
		return -ENOMEM;

	hw_fib_va = dev->hw_fib_va;
	hw_fib_pa = dev->hw_fib_pa;
	memset(hw_fib_va, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
	/*
	 * Initialise the fibs
	 */
	for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++)
	{
		fibptr->dev = dev;
		fibptr->hw_fib = hw_fib_va;
		fibptr->data = (void *) fibptr->hw_fib->data;
		fibptr->next = fibptr+1;	/* Forward chain the fibs */
		init_MUTEX_LOCKED(&fibptr->event_wait);
		spin_lock_init(&fibptr->event_lock);
		hw_fib_va->header.XferState = cpu_to_le32(0xffffffff);
		hw_fib_va->header.SenderSize = cpu_to_le16(dev->max_fib_size);
		fibptr->hw_fib_pa = hw_fib_pa;
		hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + dev->max_fib_size);
		hw_fib_pa = hw_fib_pa + dev->max_fib_size;
	}
	/*
	 * Add the fib chain to the free list
	 */
	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
	/*
	 * Enable this to debug out of queue space
	 */
	dev->free_fib = &dev->fibs[0];
	return 0;
}

/**
 *	aac_fib_alloc	-	allocate a fib
 *	@dev: Adapter to allocate the fib for
 *
 *	Allocate a fib from the adapter fib pool. If the pool is empty we
 *	return NULL.
 */

struct fib *aac_fib_alloc(struct aac_dev *dev)
{
	struct fib * fibptr;
	unsigned long flags;
	spin_lock_irqsave(&dev->fib_lock, flags);
	fibptr = dev->free_fib;
	if(!fibptr){
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return fibptr;
	}
	dev->free_fib = fibptr->next;
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	/*
	 * Set the proper node type code and node byte size
	 */
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->size = sizeof(struct fib);
	/*
	 * Null out fields that depend on being zero at the start of
	 * each I/O
	 */
	fibptr->hw_fib->header.XferState = 0;
	fibptr->callback = NULL;
	fibptr->callback_data = NULL;

	return fibptr;
}

/**
 *	aac_fib_free	-	free a fib
 *	@fibptr: fib to free up
 *
 *	Frees up a fib and places it on the appropriate queue
 *	(either free or timed out)
 */

void aac_fib_free(struct fib *fibptr)
{
	unsigned long flags;

	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
	if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
		aac_config.fib_timeouts++;
		fibptr->next = fibptr->dev->timeout_fib;
		fibptr->dev->timeout_fib = fibptr;
	} else {
		if (fibptr->hw_fib->header.XferState != 0) {
			printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
				 (void*)fibptr,
				 le32_to_cpu(fibptr->hw_fib->header.XferState));
		}
		fibptr->next = fibptr->dev->free_fib;
		fibptr->dev->free_fib = fibptr;
	}
	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}

/**
 *	aac_fib_init	-	initialise a fib
 *	@fibptr: The fib to initialize
 *
 *	Set up the generic fib fields ready for use
 */

void aac_fib_init(struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib;

	hw_fib->header.StructType = FIB_MAGIC;
	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
	hw_fib->header.SenderFibAddress = 0; /* Filled in later if needed */
	hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}
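
/*
 * Minimal lifecycle sketch (illustrative only, not driver code): callers in
 * the rest of the driver are expected to pair the helpers in this file
 * roughly as shown below. Error handling is deliberately abbreviated.
 *
 *	struct fib *fibptr = aac_fib_alloc(dev);
 *	if (!fibptr)
 *		return -ENOMEM;
 *	aac_fib_init(fibptr);
 *	// ... fill fib_data(fibptr) with a request ...
 *	// ... aac_fib_send(...), waiting or supplying a callback ...
 *	aac_fib_complete(fibptr);
 *	aac_fib_free(fibptr);
 */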

/**
 *	fib_deallocate		-	deallocate a fib
 *	@fibptr: fib to deallocate
 *
 *	Will deallocate and return to the free pool the FIB pointed to by the
 *	caller.
 */

static void fib_dealloc(struct fib * fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib;
	BUG_ON(hw_fib->header.StructType != FIB_MAGIC);
	hw_fib->header.XferState = 0;
}

/*
 * Communication primitives define and support the queuing method we use to
 * support host to adapter communication. All queue accesses happen through
 * these routines, which are the only routines with knowledge of how these
 * queues are implemented.
 */

/**
 *	aac_get_entry		-	get a queue entry
 *	@dev: Adapter
 *	@qid: Queue Number
 *	@entry: Entry return
 *	@index: Index return
 *	@nonotify: notification control
 *
 *	Returns a queue entry from the requested queue if one is free. If the
 *	queue is full (no free entries) no entry is returned and the function
 *	returns 0, otherwise 1 is returned.
 */

static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
	struct aac_queue * q;
	unsigned long idx;

	/*
	 * All of the queues wrap when they reach the end, so we check
	 * to see if they have reached the end and if they have we just
	 * set the index back to zero. This is a wrap. You could or off
	 * the high bits in all updates but this is a bit faster I think.
	 */

	q = &dev->queues->queue[qid];

	idx = *index = le32_to_cpu(*(q->headers.producer));
	/* Interrupt Moderation, only interrupt for first two entries */
	if (idx != le32_to_cpu(*(q->headers.consumer))) {
		if (--idx == 0) {
			if (qid == AdapNormCmdQueue)
				idx = ADAP_NORM_CMD_ENTRIES;
			else
				idx = ADAP_NORM_RESP_ENTRIES;
		}
		if (idx != le32_to_cpu(*(q->headers.consumer)))
			*nonotify = 1;
	}

	if (qid == AdapNormCmdQueue) {
		if (*index >= ADAP_NORM_CMD_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	} else {
		if (*index >= ADAP_NORM_RESP_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	}

	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
				qid, q->numpending);
		return 0;
	} else {
		*entry = q->base + *index;
		return 1;
	}
}

/**
 *	aac_queue_get		-	get the next free QE
 *	@dev: Adapter
 *	@index: Returned index
 *	@priority: Priority of fib
 *	@fib: Fib to associate with the queue entry
 *	@wait: Wait if queue full
 *	@fibptr: Driver fib object to go with fib
 *	@nonotify: Don't notify the adapter
 *
 *	Gets the next free QE off the requested priority adapter command
 *	queue and associates the Fib with the QE. The QE represented by
 *	index is ready to insert on the queue when this routine returns
 *	success.
 */

static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
	struct aac_entry * entry = NULL;
	int map = 0;

	if (qid == AdapNormCmdQueue) {
		/* if no entries wait for some if caller wants to */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify))
		{
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 * Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	} else {
		while(!aac_get_entry(dev, qid, &entry, index, nonotify))
		{
			/* if no entries wait for some if caller wants to */
		}
		/*
		 * Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		entry->addr = hw_fib->header.SenderFibAddress;
		/* Restore adapters pointer to the FIB */
		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;  /* Let the adapter know where to find its data */
		map = 0;
	}
	/*
	 * If MapFib is true then we need to map the Fib and put pointers
	 * in the queue entry.
	 */
	if (map)
		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
	return 0;
}

/*
 * Define the highest level of host to adapter communication routines.
 * These routines will support host to adapter FS communication. These
 * routines have no knowledge of the communication method used. This level
 * sends and receives FIBs.
 * This level has no knowledge of how these FIBs get passed back and forth.
 */

/**
 *	aac_fib_send	-	send a fib to the adapter
 *	@command: Command to send
 *	@fibptr: The fib
 *	@size: Size of fib data area
 *	@priority: Priority of Fib
 *	@wait: Async/sync select
 *	@reply: True if a reply is wanted
 *	@callback: Called with reply
 *	@callback_data: Passed to callback
 *
 *	Sends the requested FIB to the adapter and optionally will wait for a
 *	response FIB. If the caller does not wish to wait for a response then
 *	an event to wait on must be supplied. This event will be set when a
 *	response FIB is received from the adapter.
 */

int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
		int priority, int wait, int reply, fib_callback callback,
		void *callback_data)
{
	struct aac_dev * dev = fibptr->dev;
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_queue * q;
	unsigned long flags = 0;
	unsigned long qflags;

	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
		return -EBUSY;
	/*
	 * There are 5 cases with the wait and response requested flags.
	 * The only invalid cases are if the caller requests to wait and
	 * does not request a response and if the caller does not want a
	 * response and the Fib is not allocated from pool. If a response
	 * is not requested the Fib will just be deallocated by the DPC
	 * routine when the response comes back from the adapter. No
	 * further processing will be done besides deleting the Fib. We
	 * will have a debug mode where the adapter can notify the host
	 * it had a problem and the host can log that fact.
	 */
	if (wait && !reply) {
		return -EINVAL;
	} else if (!wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
	} else if (!wait && !reply) {
		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
	} else if (wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
	}
	/*
	 * Map the fib into 32bits by using the fib number
	 */

	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
	/*
	 * Set FIB state to indicate where it came from and if we want a
	 * response from the adapter. Also load the command from the
	 * caller.
	 *
	 * Map the hw fib pointer as a 32bit value
	 */
	hw_fib->header.Command = cpu_to_le16(command);
	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
	fibptr->hw_fib->header.Flags = 0;	/* 0 the flags field - internal only*/
	/*
	 * Set the size of the Fib we want to send to the adapter
	 */
	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
		return -EMSGSIZE;
	}
	/*
	 * Get a queue entry, connect the FIB to it and notify
	 * the adapter that a command is ready.
	 */
	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);

	/*
	 * Fill in the Callback and CallbackContext if we are not
	 * going to wait.
	 */
	if (!wait) {
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
	}

	fibptr->done = 0;
	fibptr->flags = 0;

	FIB_COUNTER_INCREMENT(aac_config.FibsSent);

	dprintk((KERN_DEBUG "Fib contents:.\n"));
	dprintk((KERN_DEBUG "  Command =    %d.\n", le32_to_cpu(hw_fib->header.Command)));
	dprintk((KERN_DEBUG "  SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
	dprintk((KERN_DEBUG "  XferState  = %x.\n", le32_to_cpu(hw_fib->header.XferState)));
	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib));
	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
	dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));

	q = &dev->queues->queue[AdapNormCmdQueue];

	if(wait)
		spin_lock_irqsave(&fibptr->event_lock, flags);
	spin_lock_irqsave(q->lock, qflags);
	if (dev->new_comm_interface) {
		unsigned long count = 10000000L; /* 50 seconds */
		q->numpending++;
		spin_unlock_irqrestore(q->lock, qflags);
		while (aac_adapter_send(fibptr) != 0) {
			if (--count == 0) {
				if (wait)
					spin_unlock_irqrestore(&fibptr->event_lock, flags);
				spin_lock_irqsave(q->lock, qflags);
				q->numpending--;
				spin_unlock_irqrestore(q->lock, qflags);
				return -ETIMEDOUT;
			}
			udelay(5);
		}
	} else {
		u32 index;
		unsigned long nointr = 0;
		aac_queue_get( dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);

		q->numpending++;
		*(q->headers.producer) = cpu_to_le32(index + 1);
		spin_unlock_irqrestore(q->lock, qflags);
		dprintk((KERN_DEBUG "aac_fib_send: inserting a queue entry at index %d.\n",index));
		if (!(nointr & aac_config.irq_mod))
			aac_adapter_notify(dev, AdapNormCmdQueue);
	}

	/*
	 * If the caller wanted us to wait for response wait now.
	 */

	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		/* Only set for first known interruptible command */
		if (wait < 0) {
			/*
			 * *VERY* Dangerous to time out a command, the
			 * assumption is made that we have no hope of
			 * functioning because an interrupt routing or other
			 * hardware failure has occurred.
			 */
			unsigned long count = 36000000L; /* 3 minutes */
			while (down_trylock(&fibptr->event_wait)) {
				if (--count == 0) {
					spin_lock_irqsave(q->lock, qflags);
					q->numpending--;
					spin_unlock_irqrestore(q->lock, qflags);
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
						  "Usually a result of a PCI interrupt routing problem;\n"
						  "update motherboard BIOS or consider utilizing one of\n"
						  "the SAFE mode kernel options (acpi, apic etc)\n");
					}
					return -ETIMEDOUT;
				}
				udelay(5);
			}
		} else
			down(&fibptr->event_wait);
		BUG_ON(fibptr->done == 0);

		if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){
			return -ETIMEDOUT;
		} else {
			return 0;
		}
	}
	/*
	 * If the user does not want a response then return success,
	 * otherwise return pending
	 */
	if (reply)
		return -EINPROGRESS;
	else
		return 0;
}
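
/*
 * Illustrative caller-side sketch (not part of this file): a typical
 * synchronous request pairs the helpers in this file roughly as below.
 * ContainerCommand and struct aac_query_mount are used only as an example
 * request; see the callers elsewhere in the driver for real command setup.
 *
 *	struct fib *fibptr = aac_fib_alloc(dev);
 *	struct aac_query_mount *dinfo;
 *	int status;
 *
 *	if (!fibptr)
 *		return -ENOMEM;
 *	aac_fib_init(fibptr);
 *	dinfo = (struct aac_query_mount *)fib_data(fibptr);
 *	// ... fill in *dinfo ...
 *	status = aac_fib_send(ContainerCommand, fibptr, sizeof(*dinfo),
 *			      FsaNormal, 1, 1, NULL, NULL);
 *	if (status >= 0) {
 *		// ... read the response back out of fib_data(fibptr) ...
 *		aac_fib_complete(fibptr);
 *	}
 *	aac_fib_free(fibptr);
 */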

/**
 *	aac_consumer_get	-	get the top of the queue
 *	@dev: Adapter
 *	@q: Queue
 *	@entry: Return entry
 *
 *	Will return a pointer to the entry on the top of the queue requested that
 *	we are a consumer of, and return the address of the queue entry. It does
 *	not change the state of the queue.
 */

int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
{
	u32 index;
	int status;
	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
		status = 0;
	} else {
		/*
		 * The consumer index must be wrapped if we have reached
		 * the end of the queue, else we just use the entry
		 * pointed to by the header index
		 */
		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
			index = 0;
		else
			index = le32_to_cpu(*q->headers.consumer);
		*entry = q->base + index;
		status = 1;
	}
	return(status);
}

/**
 *	aac_consumer_free	-	free consumer entry
 *	@dev: Adapter
 *	@q: Queue
 *	@qid: Queue ident
 *
 *	Frees up the current top of the queue we are a consumer of. If the
 *	queue was full notify the producer that the queue is no longer full.
 */

void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
{
	int wasfull = 0;
	u32 notify;

	if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
		wasfull = 1;

	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
		*q->headers.consumer = cpu_to_le32(1);
	else
		*q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);

	if (wasfull) {
		switch (qid) {

		case HostNormCmdQueue:
			notify = HostNormCmdNotFull;
			break;
		case HostNormRespQueue:
			notify = HostNormRespNotFull;
			break;
		default:
			BUG();
			return;
		}
		aac_adapter_notify(dev, notify);
	}
}
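
/*
 * Illustrative consumer-side sketch (not part of this file): interrupt DPC
 * code that drains a host queue is expected to pair the two helpers above
 * roughly as follows. The queue id used here is just an example.
 *
 *	struct aac_entry *entry;
 *
 *	while (aac_consumer_get(dev, q, &entry)) {
 *		// ... look up and complete the FIB named by entry->addr ...
 *		aac_consumer_free(dev, q, HostNormRespQueue);
 *	}
 */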

/**
 *	aac_fib_adapter_complete	-	complete adapter issued fib
 *	@fibptr: fib to complete
 *	@size: size of fib
 *
 *	Will do all necessary work to complete a FIB that was sent from
 *	the adapter.
 */

int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_dev * dev = fibptr->dev;
	struct aac_queue * q;
	unsigned long nointr = 0;
	unsigned long qflags;

	if (hw_fib->header.XferState == 0) {
		if (dev->new_comm_interface)
			kfree (hw_fib);
		return 0;
	}
	/*
	 * If we plan to do anything check the structure type first.
	 */
	if ( hw_fib->header.StructType != FIB_MAGIC ) {
		if (dev->new_comm_interface)
			kfree (hw_fib);
		return -EINVAL;
	}
	/*
	 * This block handles the case where the adapter had sent us a
	 * command and we have finished processing the command. We
	 * call completeFib when we are done processing the command
	 * and want to send a response back to the adapter. This will
	 * send the completed cdb to the adapter.
	 */
	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
		if (dev->new_comm_interface) {
			kfree (hw_fib);
		} else {
			u32 index;
			hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
			if (size) {
				size += sizeof(struct aac_fibhdr);
				if (size > le16_to_cpu(hw_fib->header.SenderSize))
					return -EMSGSIZE;
				hw_fib->header.Size = cpu_to_le16(size);
			}
			q = &dev->queues->queue[AdapNormRespQueue];
			spin_lock_irqsave(q->lock, qflags);
			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
			*(q->headers.producer) = cpu_to_le32(index + 1);
			spin_unlock_irqrestore(q->lock, qflags);
			if (!(nointr & (int)aac_config.irq_mod))
				aac_adapter_notify(dev, AdapNormRespQueue);
		}
	}
	else
	{
		printk(KERN_WARNING "aac_fib_adapter_complete: Unknown xferstate detected.\n");
		BUG();
	}
	return 0;
}

/**
 *	aac_fib_complete	-	fib completion handler
 *	@fib: FIB to complete
 *
 *	Will do all necessary work to complete a FIB.
 */

int aac_fib_complete(struct fib *fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;

	/*
	 * Check for a fib which has already been completed
	 */

	if (hw_fib->header.XferState == 0)
		return 0;
	/*
	 * If we plan to do anything check the structure type first.
	 */

	if (hw_fib->header.StructType != FIB_MAGIC)
		return -EINVAL;
	/*
	 * This block completes a cdb which originated on the host and we
	 * just need to deallocate the cdb or reinit it. At this point the
	 * command is complete that we had sent to the adapter and this
	 * cdb could be reused.
	 */
	if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
		(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
	{
		fib_dealloc(fibptr);
	}
	else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost))
	{
		/*
		 * This handles the case when the host has aborted the I/O
		 * to the adapter because the adapter is not responding
		 */
		fib_dealloc(fibptr);
	} else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
		fib_dealloc(fibptr);
	} else {
		BUG();
	}
	return 0;
}

/**
 *	aac_printf	-	handle printf from firmware
 *	@dev: Adapter
 *	@val: Message info
 *
 *	Print a message passed to us by the controller firmware on the
 *	Adaptec board
 */

void aac_printf(struct aac_dev *dev, u32 val)
{
	char *cp = dev->printfbuf;
	if (dev->printf_enabled)
	{
		int length = val & 0xffff;
		int level = (val >> 16) & 0xffff;

		/*
		 * The size of the printfbuf is set in port.c
		 * There is no variable or define for it
		 */
		if (length > 255)
			length = 255;
		if (cp[length] != 0)
			cp[length] = 0;
		if (level == LOG_AAC_HIGH_ERROR)
			printk(KERN_WARNING "%s:%s", dev->name, cp);
		else
			printk(KERN_INFO "%s:%s", dev->name, cp);
	}
	memset(cp, 0, 256);
}
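
/*
 * Illustrative note (not driver code): the firmware packs the severity and
 * the string length into the single 32 bit argument that aac_printf()
 * decodes above, so a 24 byte message at LOG_AAC_HIGH_ERROR would arrive as:
 *
 *	u32 val = (LOG_AAC_HIGH_ERROR << 16) | 24;
 */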

/**
 *	aac_handle_aif		-	Handle a message from the firmware
 *	@dev: Which adapter this fib is from
 *	@fibptr: Pointer to fibptr from adapter
 *
 *	This routine handles a driver notify fib from the adapter and
 *	dispatches it to the appropriate routine for handling.
 */

#define AIF_SNIFF_TIMEOUT	(30*HZ)
static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
	int busy;
	u32 container;
	struct scsi_device *device;
	enum {
		NOTHING,
		DELETE,
		ADD,
		CHANGE
	} device_config_needed;

	/* Sniff for container changes */

	if (!dev)
		return;
	container = (u32)-1;

	/*
	 * We have set this up to try and minimize the number of
	 * re-configures that take place. As a result of this when
	 * certain AIF's come in we will set a flag waiting for another
	 * type of AIF before setting the re-config flag.
	 */
	switch (le32_to_cpu(aifcmd->command)) {
	case AifCmdDriverNotify:
		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
		/*
		 * Morph or Expand complete
		 */
		case AifDenMorphComplete:
		case AifDenVolumeExtendComplete:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;

			/*
			 * Find the scsi_device associated with the SCSI
			 * address. Make sure we have the right array, and if
			 * so set the flag to initiate a new re-config once we
			 * see an AifEnConfigChange AIF come through.
			 */

			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
				device = scsi_device_lookup(dev->scsi_host_ptr,
					CONTAINER_TO_CHANNEL(container),
					CONTAINER_TO_ID(container),
					CONTAINER_TO_LUN(container));
				if (device) {
					dev->fsa_dev[container].config_needed = CHANGE;
					dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
					dev->fsa_dev[container].config_waiting_stamp = jiffies;
					scsi_device_put(device);
				}
			}
		}

		/*
		 * If we are waiting on something and this happens to be
		 * that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdEventNotify:
		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
		/*
		 * Add an Array.
		 */
		case AifEnAddContainer:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = ADD;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 * Delete an Array.
		 */
		case AifEnDeleteContainer:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = DELETE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 * Container change detected.
		 * If we currently are not waiting on something else,
		 * setup to wait on a Config Change.
		 */
		case AifEnContainerChange:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			if (dev->fsa_dev[container].config_waiting_on &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				break;
			dev->fsa_dev[container].config_needed = CHANGE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		case AifEnConfigChange:
			break;

		}

		/*
		 * If we are waiting on something and this happens to be
		 * that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdJobProgress:
		/*
		 * These are job progress AIF's. When a Clear is being
		 * done on a container it is initially created then hidden from
		 * the OS. When the clear completes we don't get a config
		 * change so we monitor the job status complete on a clear then
		 * wait for a container change.
		 */

		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
		 && ((((u32 *)aifcmd->data)[6] == ((u32 *)aifcmd->data)[5])
		  || (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess)))) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = ADD;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
		 && (((u32 *)aifcmd->data)[6] == 0)
		 && (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning))) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = DELETE;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		break;
	}

	device_config_needed = NOTHING;
	for (container = 0; container < dev->maximum_num_containers;
	    ++container) {
		if ((dev->fsa_dev[container].config_waiting_on == 0) &&
			(dev->fsa_dev[container].config_needed != NOTHING) &&
			time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
			device_config_needed =
				dev->fsa_dev[container].config_needed;
			dev->fsa_dev[container].config_needed = NOTHING;
			break;
		}
	}
	if (device_config_needed == NOTHING)
		return;

	/*
	 * If we decided that a re-configuration needs to be done,
	 * schedule it here on the way out the door, please close the door
	 * behind you.
	 */

	busy = 0;


	/*
	 * Find the scsi_device associated with the SCSI address,
	 * and mark it as changed, invalidating the cache. This deals
	 * with changes to existing device IDs.
	 */

	if (!dev || !dev->scsi_host_ptr)
		return;
	/*
	 * force reload of disk info via aac_probe_container
	 */
	if ((device_config_needed == CHANGE)
	 && (dev->fsa_dev[container].valid == 1))
		dev->fsa_dev[container].valid = 2;
	if ((device_config_needed == CHANGE) ||
			(device_config_needed == ADD))
		aac_probe_container(dev, container);
	device = scsi_device_lookup(dev->scsi_host_ptr,
		CONTAINER_TO_CHANNEL(container),
		CONTAINER_TO_ID(container),
		CONTAINER_TO_LUN(container));
	if (device) {
		switch (device_config_needed) {
		case DELETE:
			scsi_remove_device(device);
			break;
		case CHANGE:
			if (!dev->fsa_dev[container].valid) {
				scsi_remove_device(device);
				break;
			}
			scsi_rescan_device(&device->sdev_gendev);

		default:
			break;
		}
		scsi_device_put(device);
	}
	if (device_config_needed == ADD) {
		scsi_add_device(dev->scsi_host_ptr,
			CONTAINER_TO_CHANNEL(container),
			CONTAINER_TO_ID(container),
			CONTAINER_TO_LUN(container));
	}

}
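
/*
 * Illustrative sketch (not part of this file): aac_command_thread() below is
 * written for the kthread API (note the kthread_should_stop() check at the
 * bottom of its loop), so the probe/remove paths are expected to drive it
 * roughly as shown. The field name "thread" is only an assumption here.
 *
 *	dev->thread = kthread_run(aac_command_thread, dev, "%s", "aacraid");
 *	if (IS_ERR(dev->thread))
 *		return PTR_ERR(dev->thread);
 *	...
 *	kthread_stop(dev->thread);	// on teardown; wakes the thread so
 *					// kthread_should_stop() returns true
 */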

/**
 *	aac_command_thread	-	command processing thread
 *	@dev: Adapter to monitor
 *
 *	Waits on the commandready event in its queue. When the event gets set
 *	it will pull FIBs off its queue. It will continue to pull FIBs off
 *	until the queue is empty. When the queue is empty it will wait for
 *	more FIBs.
 */

int aac_command_thread(void *data)
{
	struct aac_dev *dev = data;
	struct hw_fib *hw_fib, *hw_newfib;
	struct fib *fib, *newfib;
	struct aac_fib_context *fibctx;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * We can only have one thread per adapter for AIF's.
	 */
	if (dev->aif_thread)
		return -EINVAL;

	/*
	 * Let the DPC know it has a place to send the AIF's to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	dprintk ((KERN_INFO "aac_command_thread start\n"));
	while(1)
	{
		spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
			struct list_head *entry;
			struct aac_aifcmd * aifcmd;

			set_current_state(TASK_RUNNING);

			entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
			list_del(entry);

			spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
			fib = list_entry(entry, struct fib, fiblink);
			/*
			 * We will process the FIB here or pass it to a
			 * worker thread that is TBD. We Really can't
			 * do anything at this point since we don't have
			 * anything defined for this thread to do.
			 */
			hw_fib = fib->hw_fib;
			memset(fib, 0, sizeof(struct fib));
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof( struct fib );
			fib->hw_fib = hw_fib;
			fib->data = hw_fib->data;
			fib->dev = dev;
			/*
			 * We only handle AifRequest fibs from the adapter.
			 */
			aifcmd = (struct aac_aifcmd *) hw_fib->data;
			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
				/* Handle Driver Notify Events */
				aac_handle_aif(dev, fib);
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			} else {
				struct list_head *entry;
				/* The u32 here is important and intended. We are using
				   32bit wrapping time to fit the adapter field */

				u32 time_now, time_last;
				unsigned long flagv;
				unsigned num;
				struct hw_fib ** hw_fib_pool, ** hw_fib_p;
				struct fib ** fib_pool, ** fib_p;

				/* Sniff events */
				if ((aifcmd->command ==
				     cpu_to_le32(AifCmdEventNotify)) ||
				    (aifcmd->command ==
				     cpu_to_le32(AifCmdJobProgress))) {
					aac_handle_aif(dev, fib);
				}

				time_now = jiffies/HZ;

				/*
				 * Warning: no sleep allowed while
				 * holding spinlock. We take the estimate
				 * and pre-allocate a set of fibs outside the
				 * lock.
				 */
				num = le32_to_cpu(dev->init->AdapterFibsSize)
				    / sizeof(struct hw_fib); /* some extra */
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				while (entry != &dev->fib_list) {
					entry = entry->next;
					++num;
				}
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				hw_fib_pool = NULL;
				fib_pool = NULL;
				if (num
				 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
				 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
					hw_fib_p = hw_fib_pool;
					fib_p = fib_pool;
					while (hw_fib_p < &hw_fib_pool[num]) {
						if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
							--hw_fib_p;
							break;
						}
						if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
							kfree(*(--hw_fib_p));
							break;
						}
					}
					if ((num = hw_fib_p - hw_fib_pool) == 0) {
						kfree(fib_pool);
						fib_pool = NULL;
						kfree(hw_fib_pool);
						hw_fib_pool = NULL;
					}
				} else {
					kfree(hw_fib_pool);
					hw_fib_pool = NULL;
				}
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				/*
				 * For each Context that is on the
				 * fibctxList, make a copy of the
				 * fib, and then set the event to wake up the
				 * thread that is waiting for it.
				 */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (entry != &dev->fib_list) {
					/*
					 * Extract the fibctx
					 */
					fibctx = list_entry(entry, struct aac_fib_context, next);
					/*
					 * Check if the queue is getting
					 * backlogged
					 */
					if (fibctx->count > 20)
					{
						/*
						 * It's *not* jiffies folks,
						 * but jiffies / HZ so do not
						 * panic ...
						 */
						time_last = fibctx->jiffies;
						/*
						 * Has it been > 2 minutes
						 * since the last read off
						 * the queue?
						 */
						if ((time_now - time_last) > aif_timeout) {
							entry = entry->next;
							aac_close_fib_context(dev, fibctx);
							continue;
						}
					}
					/*
					 * Warning: no sleep allowed while
					 * holding spinlock
					 */
					if (hw_fib_p < &hw_fib_pool[num]) {
						hw_newfib = *hw_fib_p;
						*(hw_fib_p++) = NULL;
						newfib = *fib_p;
						*(fib_p++) = NULL;
						/*
						 * Make the copy of the FIB
						 */
						memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
						memcpy(newfib, fib, sizeof(struct fib));
						newfib->hw_fib = hw_newfib;
						/*
						 * Put the FIB onto the
						 * fibctx's fibs
						 */
						list_add_tail(&newfib->fiblink, &fibctx->fib_list);
						fibctx->count++;
						/*
						 * Set the event to wake up the
						 * thread that is waiting.
						 */
						up(&fibctx->wait_sem);
					} else {
						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
					}
					entry = entry->next;
				}
				/*
				 * Set the status of this FIB
				 */
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				aac_fib_adapter_complete(fib, sizeof(u32));
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				/* Free up the remaining resources */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (hw_fib_p < &hw_fib_pool[num]) {
					kfree(*hw_fib_p);
					kfree(*fib_p);
					++fib_p;
					++hw_fib_p;
				}
				kfree(hw_fib_pool);
				kfree(fib_pool);
			}
			kfree(fib);
			spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		}
		/*
		 * There are no more AIF's
		 */
		spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
		schedule();

		if (kthread_should_stop())
			break;
		set_current_state(TASK_INTERRUPTIBLE);
	}
	if (dev->queues)
		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	dev->aif_thread = 0;
	return 0;
}