/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  commsup.c
 *
 * Abstract: Contains all routines required for FSA host/adapter
 *    communication.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <asm/semaphore.h>

#include "aacraid.h"

/**
 *	fib_map_alloc		-	allocate the fib objects
 *	@dev: Adapter to allocate for
 *
 *	Allocate and map the shared PCI space for the FIB blocks used to
 *	talk to the Adaptec firmware.
 */

static int fib_map_alloc(struct aac_dev *dev)
{
	dprintk((KERN_INFO
	  "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
	  dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
	if ((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size
	  * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
	  &dev->hw_fib_pa)) == NULL)
		return -ENOMEM;
	return 0;
}
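/*
 * A sizing note (illustrative figures, not from the original source):
 * the region above is a single coherent DMA allocation carved into
 * equal max_fib_size slices, one per outstanding I/O command
 * (can_queue) plus the AAC_NUM_MGT_FIB management fibs.  Assuming a
 * hypothetical max_fib_size of 512 bytes and can_queue of 512, the
 * block would span 512 * (512 + AAC_NUM_MGT_FIB) bytes.
 */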
/**
 *	aac_fib_map_free		-	free the fib objects
 *	@dev: Adapter to free
 *
 *	Free the PCI mappings and the memory allocated for FIB blocks
 *	on this adapter.
 */

void aac_fib_map_free(struct aac_dev *dev)
{
	pci_free_consistent(dev->pdev,
	  dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
	  dev->hw_fib_va, dev->hw_fib_pa);
}

/**
 *	aac_fib_setup	-	setup the fibs
 *	@dev: Adapter to set up
 *
 *	Allocate the PCI space for the fibs, map it and then initialise the
 *	fib area, the unmapped fib data and also the free list
 */

int aac_fib_setup(struct aac_dev * dev)
{
	struct fib *fibptr;
	struct hw_fib *hw_fib_va;
	dma_addr_t hw_fib_pa;
	int i;

	while (((i = fib_map_alloc(dev)) == -ENOMEM)
	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
		dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
		dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
	}
	if (i < 0)
		return -ENOMEM;

	hw_fib_va = dev->hw_fib_va;
	hw_fib_pa = dev->hw_fib_pa;
	memset(hw_fib_va, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
	/*
	 *	Initialise the fibs
	 */
	for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++) {
		fibptr->dev = dev;
		fibptr->hw_fib = hw_fib_va;
		fibptr->data = (void *) fibptr->hw_fib->data;
		fibptr->next = fibptr + 1;	/* Forward chain the fibs */
		init_MUTEX_LOCKED(&fibptr->event_wait);
		spin_lock_init(&fibptr->event_lock);
		hw_fib_va->header.XferState = cpu_to_le32(0xffffffff);
		hw_fib_va->header.SenderSize = cpu_to_le16(dev->max_fib_size);
		fibptr->hw_fib_pa = hw_fib_pa;
		hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + dev->max_fib_size);
		hw_fib_pa = hw_fib_pa + dev->max_fib_size;
	}
	/*
	 *	Add the fib chain to the free list
	 */
	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
	/*
	 *	Enable this to debug out of queue space
	 */
	dev->free_fib = &dev->fibs[0];
	return 0;
}
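/*
 * Resulting layout, sketched from the loop above: dev->fibs[i] owns the
 * hardware fib at dev->hw_fib_va + i * max_fib_size (physical address
 * dev->hw_fib_pa + i * max_fib_size), and the free list is simply the
 * forward chain fibs[0] -> fibs[1] -> ... -> fibs[N-1] -> NULL with
 * N == can_queue + AAC_NUM_MGT_FIB.
 */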
/**
 *	aac_fib_alloc	-	allocate a fib
 *	@dev: Adapter to allocate the fib for
 *
 *	Allocate a fib from the adapter fib pool. If the pool is empty we
 *	return NULL.
 */

struct fib *aac_fib_alloc(struct aac_dev *dev)
{
	struct fib * fibptr;
	unsigned long flags;

	spin_lock_irqsave(&dev->fib_lock, flags);
	fibptr = dev->free_fib;
	if (!fibptr) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return fibptr;
	}
	dev->free_fib = fibptr->next;
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	/*
	 *	Set the proper node type code and node byte size
	 */
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->size = sizeof(struct fib);
	/*
	 *	Null out fields that depend on being zero at the start of
	 *	each I/O
	 */
	fibptr->hw_fib->header.XferState = 0;
	fibptr->callback = NULL;
	fibptr->callback_data = NULL;

	return fibptr;
}

/**
 *	aac_fib_free	-	free a fib
 *	@fibptr: fib to free up
 *
 *	Frees up a fib and places it on the appropriate queue
 *	(either free or timed out)
 */

void aac_fib_free(struct fib *fibptr)
{
	unsigned long flags;

	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
	if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
		aac_config.fib_timeouts++;
		fibptr->next = fibptr->dev->timeout_fib;
		fibptr->dev->timeout_fib = fibptr;
	} else {
		if (fibptr->hw_fib->header.XferState != 0) {
			printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
				 (void *)fibptr,
				 le32_to_cpu(fibptr->hw_fib->header.XferState));
		}
		fibptr->next = fibptr->dev->free_fib;
		fibptr->dev->free_fib = fibptr;
	}
	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}

/**
 *	aac_fib_init	-	initialise a fib
 *	@fibptr: The fib to initialize
 *
 *	Set up the generic fib fields ready for use
 */

void aac_fib_init(struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib;

	hw_fib->header.StructType = FIB_MAGIC;
	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
	hw_fib->header.SenderFibAddress = 0;	/* Filled in later if needed */
	hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}

/**
 *	fib_dealloc		-	deallocate a fib
 *	@fibptr: fib to deallocate
 *
 *	Will deallocate and return to the free pool the FIB pointed to by the
 *	caller.
 */

static void fib_dealloc(struct fib * fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib;
	if (hw_fib->header.StructType != FIB_MAGIC)
		BUG();
	hw_fib->header.XferState = 0;
}

/*
 *	Communication primitives define and support the queuing method we use
 *	for host to adapter communication. All queue accesses happen through
 *	these routines and they are the only routines which have knowledge of
 *	how these queues are implemented.
 */
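/*
 * A quick reference for the ring discipline used below, inferred from
 * aac_get_entry(), aac_consumer_get() and aac_consumer_free(): each
 * queue is a circular array with shared producer and consumer indices.
 * producer == consumer means the queue is empty, while producer + 1 ==
 * consumer (modulo the queue size) means it is full, so one slot is
 * always sacrificed to tell the two states apart.
 */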
/**
 *	aac_get_entry		-	get a queue entry
 *	@dev: Adapter
 *	@qid: Queue Number
 *	@entry: Entry return
 *	@index: Index return
 *	@nonotify: notification control
 *
 *	Returns a queue entry if the queue has free entries. If the queue
 *	is full (no free entries) then no entry is returned and the function
 *	returns 0; otherwise 1 is returned.
 */

static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
	struct aac_queue * q;
	unsigned long idx;

	/*
	 *	All of the queues wrap when they reach the end, so we check
	 *	to see if they have reached the end and if they have we just
	 *	set the index back to zero. This is a wrap. You could or off
	 *	the high bits in all updates but this is a bit faster I think.
	 */

	q = &dev->queues->queue[qid];

	idx = *index = le32_to_cpu(*(q->headers.producer));
	/* Interrupt Moderation, only interrupt for first two entries */
	if (idx != le32_to_cpu(*(q->headers.consumer))) {
		if (--idx == 0) {
			if (qid == AdapNormCmdQueue)
				idx = ADAP_NORM_CMD_ENTRIES;
			else
				idx = ADAP_NORM_RESP_ENTRIES;
		}
		if (idx != le32_to_cpu(*(q->headers.consumer)))
			*nonotify = 1;
	}

	if (qid == AdapNormCmdQueue) {
		if (*index >= ADAP_NORM_CMD_ENTRIES)
			*index = 0;	/* Wrap to front of the Producer Queue. */
	} else {
		if (*index >= ADAP_NORM_RESP_ENTRIES)
			*index = 0;	/* Wrap to front of the Producer Queue. */
	}

	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {	/* Queue is full */
		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
				qid, q->numpending);
		return 0;
	} else {
		*entry = q->base + *index;
		return 1;
	}
}

/**
 *	aac_queue_get		-	get the next free QE
 *	@dev: Adapter
 *	@index: Returned index
 *	@qid: Queue number
 *	@hw_fib: Fib to associate with the queue entry
 *	@wait: Wait if queue full
 *	@fibptr: Driver fib object to go with fib
 *	@nonotify: Don't notify the adapter
 *
 *	Gets the next free QE off the requested priority adapter command
 *	queue and associates the Fib with the QE. The QE represented by
 *	index is ready to insert on the queue when this routine returns
 *	success.
 */

static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
	struct aac_entry * entry = NULL;
	int map = 0;

	if (qid == AdapNormCmdQueue) {
		/* if no entries wait for some if caller wants to */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 *	Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	} else {
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			/* if no entries wait for some if caller wants to */
		}
		/*
		 *	Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		entry->addr = hw_fib->header.SenderFibAddress;
		/* Restore the adapter's pointer to the FIB */
		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
		map = 0;
	}
	/*
	 *	If MapFib is true then we need to map the Fib and put pointers
	 *	in the queue entry.
	 */
	if (map)
		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
	return 0;
}
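/*
 * Worth noting before the send path below: the host hands the adapter a
 * 32-bit handle for each fib rather than a pointer.  aac_fib_send()
 * stores the fib's index in dev->fibs shifted left by two in
 * SenderFibAddress, and the bare index in SenderData, presumably so a
 * response can be mapped back to the driver fib without any address
 * translation.
 */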
/*
 *	Define the highest level of host to adapter communication routines.
 *	These routines will support host to adapter FS communication. These
 *	routines have no knowledge of the communication method used. This
 *	level sends and receives FIBs. This level has no knowledge of how
 *	these FIBs get passed back and forth.
 */

/**
 *	aac_fib_send	-	send a fib to the adapter
 *	@command: Command to send
 *	@fibptr: The fib
 *	@size: Size of fib data area
 *	@priority: Priority of Fib
 *	@wait: Async/sync select
 *	@reply: True if a reply is wanted
 *	@callback: Called with reply
 *	@callback_data: Passed to callback
 *
 *	Sends the requested FIB to the adapter and optionally will wait for a
 *	response FIB. If the caller does not wish to wait for a response, a
 *	callback must be supplied; it will be invoked when the response FIB
 *	is received from the adapter.
 */

int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
		int priority, int wait, int reply, fib_callback callback,
		void *callback_data)
{
	struct aac_dev * dev = fibptr->dev;
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_queue * q;
	unsigned long flags = 0;
	unsigned long qflags;

	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
		return -EBUSY;
	/*
	 *	There are 5 cases with the wait and response requested flags.
	 *	The only invalid cases are if the caller requests to wait and
	 *	does not request a response and if the caller does not want a
	 *	response and the Fib is not allocated from pool. If a response
	 *	is not requested the Fib will just be deallocated by the DPC
	 *	routine when the response comes back from the adapter. No
	 *	further processing will be done besides deleting the Fib. We
	 *	will have a debug mode where the adapter can notify the host
	 *	it had a problem and the host can log that fact.
	 */
	if (wait && !reply) {
		return -EINVAL;
	} else if (!wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
	} else if (!wait && !reply) {
		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
	} else if (wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
	}
	/*
	 *	Map the fib into 32bits by using the fib number
	 */

	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
	/*
	 *	Set FIB state to indicate where it came from and if we want a
	 *	response from the adapter. Also load the command from the
	 *	caller.
	 *
	 *	Map the hw fib pointer as a 32bit value
	 */
	hw_fib->header.Command = cpu_to_le16(command);
	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
	fibptr->hw_fib->header.Flags = 0;	/* 0 the flags field - internal only */
	/*
	 *	Set the size of the Fib we want to send to the adapter
	 */
	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
		return -EMSGSIZE;
	}
	/*
	 *	Get a queue entry, connect the FIB to it and notify
	 *	the adapter a command is ready.
	 */
	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
	/*
	 *	Fill in the Callback and CallbackContext if we are not
	 *	going to wait.
	 */
	if (!wait) {
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
	}

	fibptr->done = 0;
	fibptr->flags = 0;

	FIB_COUNTER_INCREMENT(aac_config.FibsSent);

	dprintk((KERN_DEBUG "Fib contents:.\n"));
	dprintk((KERN_DEBUG "  Command = %d.\n", le32_to_cpu(hw_fib->header.Command)));
	dprintk((KERN_DEBUG "  SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
	dprintk((KERN_DEBUG "  XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState)));
	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n", fibptr->hw_fib));
	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n", (ulong)fibptr->hw_fib_pa));
	dprintk((KERN_DEBUG "  fib being sent=%p\n", fibptr));

	q = &dev->queues->queue[AdapNormCmdQueue];

	if (wait)
		spin_lock_irqsave(&fibptr->event_lock, flags);
	spin_lock_irqsave(q->lock, qflags);
	if (dev->new_comm_interface) {
		unsigned long count = 10000000L; /* 50 seconds */
		list_add_tail(&fibptr->queue, &q->pendingq);
		q->numpending++;
		spin_unlock_irqrestore(q->lock, qflags);
		while (aac_adapter_send(fibptr) != 0) {
			if (--count == 0) {
				if (wait)
					spin_unlock_irqrestore(&fibptr->event_lock, flags);
				spin_lock_irqsave(q->lock, qflags);
				q->numpending--;
				list_del(&fibptr->queue);
				spin_unlock_irqrestore(q->lock, qflags);
				return -ETIMEDOUT;
			}
			udelay(5);
		}
	} else {
		u32 index;
		unsigned long nointr = 0;
		aac_queue_get(dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);

		list_add_tail(&fibptr->queue, &q->pendingq);
		q->numpending++;
		*(q->headers.producer) = cpu_to_le32(index + 1);
		spin_unlock_irqrestore(q->lock, qflags);
		dprintk((KERN_DEBUG "aac_fib_send: inserting a queue entry at index %d.\n", index));
		if (!(nointr & aac_config.irq_mod))
			aac_adapter_notify(dev, AdapNormCmdQueue);
	}

	/*
	 *	If the caller wanted us to wait for response wait now.
	 */

	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		/* Only set for first known interruptible command */
		if (wait < 0) {
			/*
			 *	*VERY* Dangerous to time out a command, the
			 *	assumption is made that we have no hope of
			 *	functioning because an interrupt routing or other
			 *	hardware failure has occurred.
			 */
			unsigned long count = 36000000L; /* 3 minutes */
			while (down_trylock(&fibptr->event_wait)) {
				if (--count == 0) {
					spin_lock_irqsave(q->lock, qflags);
					q->numpending--;
					list_del(&fibptr->queue);
					spin_unlock_irqrestore(q->lock, qflags);
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
						  "Usually a result of a PCI interrupt routing problem;\n"
						  "update mother board BIOS or consider utilizing one of\n"
						  "the SAFE mode kernel options (acpi, apic etc)\n");
					}
					return -ETIMEDOUT;
				}
				udelay(5);
			}
		} else
			down(&fibptr->event_wait);
		if (fibptr->done == 0)
			BUG();

		if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
			return -ETIMEDOUT;
		} else {
			return 0;
		}
	}
	/*
	 *	If the user does not want a response then return success,
	 *	otherwise return pending
	 */
	if (reply)
		return -EINPROGRESS;
	else
		return 0;
}

/**
 *	aac_consumer_get	-	get the top of the queue
 *	@dev: Adapter
 *	@q: Queue
 *	@entry: Return entry
 *
 *	Will return a pointer to the entry on the top of the queue requested that
 *	we are a consumer of, and return the address of the queue entry. It does
 *	not change the state of the queue.
 */

int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
{
	u32 index;
	int status;
	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
		status = 0;
	} else {
		/*
		 *	The consumer index must be wrapped if we have reached
		 *	the end of the queue, else we just use the entry
		 *	pointed to by the header index
		 */
		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
			index = 0;
		else
			index = le32_to_cpu(*q->headers.consumer);
		*entry = q->base + index;
		status = 1;
	}
	return (status);
}

/**
 *	aac_consumer_free	-	free consumer entry
 *	@dev: Adapter
 *	@q: Queue
 *	@qid: Queue ident
 *
 *	Frees up the current top of the queue we are a consumer of. If the
 *	queue was full notify the producer that the queue is no longer full.
 */

void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
{
	int wasfull = 0;
	u32 notify;

	if ((le32_to_cpu(*q->headers.producer) + 1) == le32_to_cpu(*q->headers.consumer))
		wasfull = 1;

	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
		*q->headers.consumer = cpu_to_le32(1);
	else
		*q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer) + 1);

	if (wasfull) {
		switch (qid) {

		case HostNormCmdQueue:
			notify = HostNormCmdNotFull;
			break;
		case HostNormRespQueue:
			notify = HostNormRespNotFull;
			break;
		default:
			BUG();
			return;
		}
		aac_adapter_notify(dev, notify);
	}
}
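/*
 * A note on the wrap in aac_consumer_free() above: once the stored
 * consumer index has run past the end of the queue, aac_consumer_get()
 * treats it as entry 0, so the increment that retires that entry
 * restarts the index at 1 rather than 0.
 */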
/**
 *	aac_fib_adapter_complete	-	complete adapter issued fib
 *	@fibptr: fib to complete
 *	@size: size of fib
 *
 *	Will do all necessary work to complete a FIB that was sent from
 *	the adapter.
 */

int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_dev * dev = fibptr->dev;
	struct aac_queue * q;
	unsigned long nointr = 0;
	unsigned long qflags;

	if (hw_fib->header.XferState == 0) {
		if (dev->new_comm_interface)
			kfree(hw_fib);
		return 0;
	}
	/*
	 *	If we plan to do anything check the structure type first.
	 */
	if (hw_fib->header.StructType != FIB_MAGIC) {
		if (dev->new_comm_interface)
			kfree(hw_fib);
		return -EINVAL;
	}
	/*
	 *	This block handles the case where the adapter had sent us a
	 *	command and we have finished processing the command. We
	 *	call completeFib when we are done processing the command
	 *	and want to send a response back to the adapter. This will
	 *	send the completed cdb to the adapter.
	 */
	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
		if (dev->new_comm_interface) {
			kfree(hw_fib);
		} else {
			u32 index;
			hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
			if (size) {
				size += sizeof(struct aac_fibhdr);
				if (size > le16_to_cpu(hw_fib->header.SenderSize))
					return -EMSGSIZE;
				hw_fib->header.Size = cpu_to_le16(size);
			}
			q = &dev->queues->queue[AdapNormRespQueue];
			spin_lock_irqsave(q->lock, qflags);
			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
			*(q->headers.producer) = cpu_to_le32(index + 1);
			spin_unlock_irqrestore(q->lock, qflags);
			if (!(nointr & (int)aac_config.irq_mod))
				aac_adapter_notify(dev, AdapNormRespQueue);
		}
	} else {
		printk(KERN_WARNING "aac_fib_adapter_complete: Unknown xferstate detected.\n");
		BUG();
	}
	return 0;
}
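/*
 * The two completion paths split by origin: aac_fib_adapter_complete()
 * above answers fibs the adapter sent to the host, while
 * aac_fib_complete() below retires fibs the host originated; in both
 * cases the disposition is driven entirely by the XferState bits.
 */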
/**
 *	aac_fib_complete	-	fib completion handler
 *	@fibptr: FIB to complete
 *
 *	Will do all necessary work to complete a FIB.
 */

int aac_fib_complete(struct fib *fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;

	/*
	 *	Check for a fib which has already been completed
	 */

	if (hw_fib->header.XferState == 0)
		return 0;
	/*
	 *	If we plan to do anything check the structure type first.
	 */

	if (hw_fib->header.StructType != FIB_MAGIC)
		return -EINVAL;
	/*
	 *	This block completes a cdb which originated on the host and we
	 *	just need to deallocate the cdb or reinit it. At this point the
	 *	command is complete that we had sent to the adapter and this
	 *	cdb could be reused.
	 */
	if ((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
		(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) {
		fib_dealloc(fibptr);
	} else if (hw_fib->header.XferState & cpu_to_le32(SentFromHost)) {
		/*
		 *	This handles the case when the host has aborted the I/O
		 *	to the adapter because the adapter is not responding
		 */
		fib_dealloc(fibptr);
	} else if (hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
		fib_dealloc(fibptr);
	} else {
		BUG();
	}
	return 0;
}

/**
 *	aac_printf	-	handle printf from firmware
 *	@dev: Adapter
 *	@val: Message info
 *
 *	Print a message passed to us by the controller firmware on the
 *	Adaptec board
 */

void aac_printf(struct aac_dev *dev, u32 val)
{
	char *cp = dev->printfbuf;
	if (dev->printf_enabled) {
		int length = val & 0xffff;
		int level = (val >> 16) & 0xffff;

		/*
		 *	The size of the printfbuf is set in port.c
		 *	There is no variable or define for it
		 */
		if (length > 255)
			length = 255;
		if (cp[length] != 0)
			cp[length] = 0;
		if (level == LOG_AAC_HIGH_ERROR)
			printk(KERN_WARNING "aacraid:%s", cp);
		else
			printk(KERN_INFO "aacraid:%s", cp);
	}
	memset(cp, 0, 256);
}


/**
 *	aac_handle_aif		-	Handle a message from the firmware
 *	@dev: Which adapter this fib is from
 *	@fibptr: Pointer to fibptr from adapter
 *
 *	This routine handles a driver notify fib from the adapter and
 *	dispatches it to the appropriate routine for handling.
 */

static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
	int busy;
	u32 container;
	struct scsi_device *device;
	enum {
		NOTHING,
		DELETE,
		ADD,
		CHANGE
	} device_config_needed;

	/* Sniff for container changes */

	if (!dev)
		return;
	container = (u32)-1;

	/*
	 *	We have set this up to try and minimize the number of
	 *	re-configures that take place. As a result of this when
	 *	certain AIF's come in we will set a flag waiting for another
	 *	type of AIF before setting the re-config flag.
	 */
	switch (le32_to_cpu(aifcmd->command)) {
	case AifCmdDriverNotify:
		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
		/*
		 *	Morph or Expand complete
		 */
		case AifDenMorphComplete:
		case AifDenVolumeExtendComplete:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;

			/*
			 *	Find the scsi_device associated with the SCSI
			 *	address. Make sure we have the right array, and if
			 *	so set the flag to initiate a new re-config once we
			 *	see an AifEnConfigChange AIF come through.
			 */

			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
				device = scsi_device_lookup(dev->scsi_host_ptr,
					CONTAINER_TO_CHANNEL(container),
					CONTAINER_TO_ID(container),
					CONTAINER_TO_LUN(container));
				if (device) {
					dev->fsa_dev[container].config_needed = CHANGE;
					dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
					scsi_device_put(device);
				}
			}
		}
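		/*
		 * A note on the two-phase scheme used throughout this
		 * function: the handlers above record the pending action in
		 * config_needed and park the container on config_waiting_on
		 * until the expected follow-up AIF (typically
		 * AifEnConfigChange) arrives; only then does the code at the
		 * bottom of this function act on config_needed.
		 */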
		/*
		 *	If we are waiting on something and this happens to be
		 *	that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if (dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if (dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdEventNotify:
		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
		/*
		 *	Add an Array.
		 */
		case AifEnAddContainer:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = ADD;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			break;

		/*
		 *	Delete an Array.
		 */
		case AifEnDeleteContainer:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = DELETE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			break;

		/*
		 *	Container change detected. If we currently are not
		 *	waiting on something else, setup to wait on a Config Change.
		 */
		case AifEnContainerChange:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			if (dev->fsa_dev[container].config_waiting_on)
				break;
			dev->fsa_dev[container].config_needed = CHANGE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			break;

		case AifEnConfigChange:
			break;

		}

		/*
		 *	If we are waiting on something and this happens to be
		 *	that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if (dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if (dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdJobProgress:
		/*
		 *	These are job progress AIF's. When a Clear is being
		 *	done on a container it is initially created then hidden from
		 *	the OS. When the clear completes we don't get a config
		 *	change so we monitor the job status complete on a clear then
		 *	wait for a container change.
		 */

		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
		 && ((((u32 *)aifcmd->data)[6] == ((u32 *)aifcmd->data)[5])
		  || (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess)))) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 *	Stomp on all config sequencing for all
				 *	containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = ADD;
			}
		}
		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
		 && (((u32 *)aifcmd->data)[6] == 0)
		 && (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning))) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 *	Stomp on all config sequencing for all
				 *	containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = DELETE;
			}
		}
		break;
	}

	device_config_needed = NOTHING;
	for (container = 0; container < dev->maximum_num_containers;
	    ++container) {
		if ((dev->fsa_dev[container].config_waiting_on == 0)
		 && (dev->fsa_dev[container].config_needed != NOTHING)) {
			device_config_needed =
				dev->fsa_dev[container].config_needed;
			dev->fsa_dev[container].config_needed = NOTHING;
			break;
		}
	}
	if (device_config_needed == NOTHING)
		return;

	/*
	 *	If we decided that a re-configuration needs to be done,
	 *	schedule it here on the way out the door, please close the door
	 *	behind you.
	 */

	busy = 0;


	/*
	 *	Find the scsi_device associated with the SCSI address,
	 *	and mark it as changed, invalidating the cache. This deals
	 *	with changes to existing device IDs.
	 */

	if (!dev || !dev->scsi_host_ptr)
		return;
	/*
	 *	force reload of disk info via aac_probe_container
	 */
	if ((device_config_needed == CHANGE)
	 && (dev->fsa_dev[container].valid == 1))
		dev->fsa_dev[container].valid = 2;
	if ((device_config_needed == CHANGE) ||
	    (device_config_needed == ADD))
		aac_probe_container(dev, container);
	device = scsi_device_lookup(dev->scsi_host_ptr,
		CONTAINER_TO_CHANNEL(container),
		CONTAINER_TO_ID(container),
		CONTAINER_TO_LUN(container));
	if (device) {
		switch (device_config_needed) {
		case DELETE:
			scsi_remove_device(device);
			break;
		case CHANGE:
			if (!dev->fsa_dev[container].valid) {
				scsi_remove_device(device);
				break;
			}
			scsi_rescan_device(&device->sdev_gendev);

		default:
			break;
		}
		scsi_device_put(device);
	}
	if (device_config_needed == ADD) {
		scsi_add_device(dev->scsi_host_ptr,
			CONTAINER_TO_CHANNEL(container),
			CONTAINER_TO_ID(container),
			CONTAINER_TO_LUN(container));
	}

}

/**
 *	aac_command_thread	-	command processing thread
 *	@data: Adapter to monitor
 *
 *	Waits on the commandready event in its queue. When the event gets set
 *	it will pull FIBs off its queue. It will continue to pull FIBs off
 *	until the queue is empty. When the queue is empty it will wait for
 *	more FIBs.
 */

int aac_command_thread(void *data)
{
	struct aac_dev *dev = data;
	struct hw_fib *hw_fib, *hw_newfib;
	struct fib *fib, *newfib;
	struct aac_fib_context *fibctx;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 *	We can only have one thread per adapter for AIF's.
	 */
	if (dev->aif_thread)
		return -EINVAL;

	/*
	 *	Let the DPC know it has a place to send the AIF's to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	dprintk((KERN_INFO "aac_command_thread start\n"));
	while (1) {
		spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
			struct list_head *entry;
			struct aac_aifcmd * aifcmd;

			set_current_state(TASK_RUNNING);

			entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
			list_del(entry);

			spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
			fib = list_entry(entry, struct fib, fiblink);
			/*
			 *	We will process the FIB here or pass it to a
			 *	worker thread that is TBD. We really can't
			 *	do anything at this point since we don't have
			 *	anything defined for this thread to do.
			 */
			hw_fib = fib->hw_fib;
			memset(fib, 0, sizeof(struct fib));
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof(struct fib);
			fib->hw_fib = hw_fib;
			fib->data = hw_fib->data;
			fib->dev = dev;
			/*
			 *	We only handle AifRequest fibs from the adapter.
			 */
			aifcmd = (struct aac_aifcmd *) hw_fib->data;
			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
				/* Handle Driver Notify Events */
				aac_handle_aif(dev, fib);
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			} else {
				struct list_head *entry;
				/* The u32 here is important and intended. We are using
				   32bit wrapping time to fit the adapter field */

				u32 time_now, time_last;
				unsigned long flagv;
				unsigned num;
				struct hw_fib **hw_fib_pool, **hw_fib_p;
				struct fib **fib_pool, **fib_p;

				/* Sniff events */
				if ((aifcmd->command ==
				     cpu_to_le32(AifCmdEventNotify)) ||
				    (aifcmd->command ==
				     cpu_to_le32(AifCmdJobProgress))) {
					aac_handle_aif(dev, fib);
				}

				time_now = jiffies/HZ;

				/*
				 *	Warning: no sleep allowed while
				 *	holding spinlock. We take the estimate
				 *	and pre-allocate a set of fibs outside the
				 *	lock.
				 */
				num = le32_to_cpu(dev->init->AdapterFibsSize)
				    / sizeof(struct hw_fib); /* some extra */
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				while (entry != &dev->fib_list) {
					entry = entry->next;
					++num;
				}
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				hw_fib_pool = NULL;
				fib_pool = NULL;
				if (num
				 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
				 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
					hw_fib_p = hw_fib_pool;
					fib_p = fib_pool;
					while (hw_fib_p < &hw_fib_pool[num]) {
						if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
							--hw_fib_p;
							break;
						}
						if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
							kfree(*(--hw_fib_p));
							break;
						}
					}
					if ((num = hw_fib_p - hw_fib_pool) == 0) {
						kfree(fib_pool);
						fib_pool = NULL;
						kfree(hw_fib_pool);
						hw_fib_pool = NULL;
					}
				} else {
					kfree(hw_fib_pool);
					hw_fib_pool = NULL;
				}
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				/*
				 *	For each Context that is on the
				 *	fibctxList, make a copy of the
				 *	fib, and then set the event to wake up the
				 *	thread that is waiting for it.
				 */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (entry != &dev->fib_list) {
					/*
					 *	Extract the fibctx
					 */
					fibctx = list_entry(entry, struct aac_fib_context, next);
					/*
					 *	Check if the queue is getting
					 *	backlogged
					 */
					if (fibctx->count > 20) {
						/*
						 *	It's *not* jiffies folks,
						 *	but jiffies / HZ so do not
						 *	panic ...
						 */
						time_last = fibctx->jiffies;
						/*
						 *	Has it been > 2 minutes
						 *	since the last read off
						 *	the queue?
						 */
						if ((time_now - time_last) > 120) {
							entry = entry->next;
							aac_close_fib_context(dev, fibctx);
							continue;
						}
					}
					/*
					 *	Warning: no sleep allowed while
					 *	holding spinlock
					 */
					if (hw_fib_p < &hw_fib_pool[num]) {
						hw_newfib = *hw_fib_p;
						*(hw_fib_p++) = NULL;
						newfib = *fib_p;
						*(fib_p++) = NULL;
						/*
						 *	Make the copy of the FIB
						 */
						memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
						memcpy(newfib, fib, sizeof(struct fib));
						newfib->hw_fib = hw_newfib;
						/*
						 *	Put the FIB onto the
						 *	fibctx's fibs
						 */
						list_add_tail(&newfib->fiblink, &fibctx->fib_list);
						fibctx->count++;
						/*
						 *	Set the event to wake up the
						 *	thread that is waiting.
						 */
						up(&fibctx->wait_sem);
					} else {
						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
					}
					entry = entry->next;
				}
				/*
				 *	Set the status of this FIB
				 */
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				aac_fib_adapter_complete(fib, sizeof(u32));
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				/* Free up the remaining resources */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (hw_fib_p < &hw_fib_pool[num]) {
					kfree(*hw_fib_p);
					kfree(*fib_p);
					++fib_p;
					++hw_fib_p;
				}
				kfree(hw_fib_pool);
				kfree(fib_pool);
			}
			kfree(fib);
			spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		}
		/*
		 *	There are no more AIF's
		 */
		spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
		schedule();

		if (kthread_should_stop())
			break;
		set_current_state(TASK_INTERRUPTIBLE);
	}
	if (dev->queues)
		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	dev->aif_thread = 0;
	return 0;
}