/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  commsup.c
 *
 * Abstract: Contains all routines that are required for FSA host/adapter
 *    communication.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <scsi/scsi_host.h>
#include <asm/semaphore.h>

#include "aacraid.h"

/**
 *	fib_map_alloc		-	allocate the fib objects
 *	@dev: Adapter to allocate for
 *
 *	Allocate and map the shared PCI space for the FIB blocks used to
 *	talk to the Adaptec firmware.
 */

static int fib_map_alloc(struct aac_dev *dev)
{
	dprintk((KERN_INFO
	  "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
	  dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
	dev->hw_fib_va = pci_alloc_consistent(dev->pdev,
	  dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
	  &dev->hw_fib_pa);
	if (dev->hw_fib_va == NULL)
		return -ENOMEM;
	return 0;
}
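
/*
 * The pool is sized as one hardware FIB slot per outstanding SCSI
 * command (scsi_host_ptr->can_queue) plus AAC_NUM_MGT_FIB slots
 * reserved for management traffic, each dev->max_fib_size bytes.
 */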

/**
 *	fib_map_free		-	free the fib objects
 *	@dev: Adapter to free
 *
 *	Free the PCI mappings and the memory allocated for FIB blocks
 *	on this adapter.
 */

void fib_map_free(struct aac_dev *dev)
{
	pci_free_consistent(dev->pdev,
	  dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
	  dev->hw_fib_va, dev->hw_fib_pa);
}

/**
 *	fib_setup	-	setup the fibs
 *	@dev: Adapter to set up
 *
 *	Allocate the PCI space for the fibs, map it and then initialise the
 *	fib area, the unmapped fib data and also the free list
 */

int fib_setup(struct aac_dev * dev)
{
	struct fib *fibptr;
	struct hw_fib *hw_fib_va;
	dma_addr_t hw_fib_pa;
	int i;

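	/*
	 * If the contiguous allocation fails, halve the total fib count
	 * (I/O fibs plus the AAC_NUM_MGT_FIB management fibs, never going
	 * below 64 in all) and retry, keeping dev->init->MaxIoCommands
	 * and the SCSI host's can_queue in step with the reduced depth.
	 */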
	while (((i = fib_map_alloc(dev)) == -ENOMEM)
	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
		dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
		dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
	}
	if (i < 0)
		return -ENOMEM;

	hw_fib_va = dev->hw_fib_va;
	hw_fib_pa = dev->hw_fib_pa;
	memset(hw_fib_va, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
	/*
	 *	Initialise the fibs
	 */
	for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++)
	{
		fibptr->dev = dev;
		fibptr->hw_fib = hw_fib_va;
		fibptr->data = (void *) fibptr->hw_fib->data;
		fibptr->next = fibptr+1;	/* Forward chain the fibs */
		init_MUTEX_LOCKED(&fibptr->event_wait);
		spin_lock_init(&fibptr->event_lock);
		hw_fib_va->header.XferState = cpu_to_le32(0xffffffff);
		hw_fib_va->header.SenderSize = cpu_to_le16(dev->max_fib_size);
		fibptr->hw_fib_pa = hw_fib_pa;
		hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + dev->max_fib_size);
		hw_fib_pa = hw_fib_pa + dev->max_fib_size;
	}
	/*
	 *	Terminate the fib chain and make it the free list
	 */
	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
	dev->free_fib = &dev->fibs[0];
	return 0;
}
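
/*
 * After fib_setup() the driver fib dev->fibs[i] corresponds one-to-one
 * to the hardware FIB at dev->hw_fib_va + i * dev->max_fib_size, and
 * fibptr->hw_fib_pa holds the matching bus address handed to the
 * adapter.
 */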

/**
 *	fib_alloc	-	allocate a fib
 *	@dev: Adapter to allocate the fib for
 *
 *	Allocate a fib from the adapter fib pool. If the pool is empty we
 *	return NULL.
 */

struct fib * fib_alloc(struct aac_dev *dev)
{
	struct fib * fibptr;
	unsigned long flags;
	spin_lock_irqsave(&dev->fib_lock, flags);
	fibptr = dev->free_fib;
	if (!fibptr) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return fibptr;
	}
	dev->free_fib = fibptr->next;
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	/*
	 *	Set the proper node type code and node byte size
	 */
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->size = sizeof(struct fib);
	/*
	 *	Null out fields that depend on being zero at the start of
	 *	each I/O
	 */
	fibptr->hw_fib->header.XferState = 0;
	fibptr->callback = NULL;
	fibptr->callback_data = NULL;

	return fibptr;
}

/**
 *	fib_free	-	free a fib
 *	@fibptr: fib to free up
 *
 *	Frees up a fib and places it on the appropriate queue
 *	(either free or timed out)
 */

void fib_free(struct fib * fibptr)
{
	unsigned long flags;

	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
	if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
		aac_config.fib_timeouts++;
		fibptr->next = fibptr->dev->timeout_fib;
		fibptr->dev->timeout_fib = fibptr;
	} else {
		if (fibptr->hw_fib->header.XferState != 0) {
			printk(KERN_WARNING "fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
				 (void*)fibptr,
				 le32_to_cpu(fibptr->hw_fib->header.XferState));
		}
		fibptr->next = fibptr->dev->free_fib;
		fibptr->dev->free_fib = fibptr;
	}
	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}

/**
 *	fib_init	-	initialise a fib
 *	@fibptr: The fib to initialise
 *
 *	Set up the generic fib fields ready for use
 */

void fib_init(struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib;

	hw_fib->header.StructType = FIB_MAGIC;
	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
	hw_fib->header.SenderFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
	hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}
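
/*
 * Typical fib life cycle, shown as an illustrative sketch only (the
 * command value and "datasize" are hypothetical placeholders, not
 * taken from this file):
 *
 *	struct fib *fibptr = fib_alloc(dev);
 *	if (fibptr) {
 *		fib_init(fibptr);
 *		... build the request in fibptr->data ...
 *		if (fib_send(command, fibptr, datasize, FsaNormal,
 *			     1, 1, NULL, NULL) == 0) {
 *			... synchronous: the response is in fibptr->data ...
 *		}
 *		fib_complete(fibptr);
 *		fib_free(fibptr);
 *	}
 */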

/**
 *	fib_dealloc		-	deallocate a fib
 *	@fibptr: fib to deallocate
 *
 *	Will deallocate and return to the free pool the FIB pointed to by the
 *	caller.
 */

static void fib_dealloc(struct fib * fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib;
	BUG_ON(hw_fib->header.StructType != FIB_MAGIC);
	hw_fib->header.XferState = 0;
}

/*
 *	Communication primitives define and support the queuing method we use
 *	to support host to adapter communication. All queue accesses happen
 *	through these routines and they are the only routines which have
 *	knowledge of how these queues are implemented.
 */
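
/*
 * Each queue is a ring of aac_entry slots shared with the adapter. The
 * producer and consumer indices live in shared memory (q->headers) in
 * little-endian form; the producing side advances the producer index,
 * the other side advances the consumer index, and both wrap at the end
 * of the ring.
 */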

/**
 *	aac_get_entry		-	get a queue entry
 *	@dev: Adapter
 *	@qid: Queue Number
 *	@entry: Entry return
 *	@index: Index return
 *	@nonotify: notification control
 *
 *	Returns a queue entry on the requested queue if it has free entries.
 *	If the queue is full (no free entries) then no entry is returned and
 *	the function returns 0, otherwise 1 is returned.
 */

static int aac_get_entry(struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
	struct aac_queue * q;

	/*
	 *	All of the queues wrap when they reach the end, so we check
	 *	to see if they have reached the end and if they have we just
	 *	set the index back to zero. This is a wrap. You could OR off
	 *	the high bits in all updates but this is a bit faster I think.
	 */

	q = &dev->queues->queue[qid];

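	/*
	 * Note the state of the ring before wrapping: if the producer
	 * index sits exactly two ahead of the consumer, flag *nonotify
	 * so the caller may skip the adapter doorbell (used together
	 * with aac_config.irq_mod in aac_insert_entry()).
	 */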
	*index = le32_to_cpu(*(q->headers.producer));
	if ((*index - 2) == le32_to_cpu(*(q->headers.consumer)))
		*nonotify = 1;

	if (qid == AdapHighCmdQueue) {
		if (*index >= ADAP_HIGH_CMD_ENTRIES)
			*index = 0;
	} else if (qid == AdapNormCmdQueue) {
		if (*index >= ADAP_NORM_CMD_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	} else if (qid == AdapHighRespQueue) {
		if (*index >= ADAP_HIGH_RESP_ENTRIES)
			*index = 0;
	} else if (qid == AdapNormRespQueue) {
		if (*index >= ADAP_NORM_RESP_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	} else {
		printk(KERN_ERR "aacraid: invalid qid\n");
		BUG();
	}

	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
				qid, q->numpending);
		return 0;
	} else {
		*entry = q->base + *index;
		return 1;
	}
}

/**
 *	aac_queue_get		-	get the next free QE
 *	@dev: Adapter
 *	@index: Returned index
 *	@qid: Queue number
 *	@hw_fib: Fib to associate with the queue entry
 *	@wait: Wait if queue full
 *	@fibptr: Driver fib object to go with fib
 *	@nonotify: Don't notify the adapter
 *
 *	Gets the next free QE off the requested priority adapter command
 *	queue and associates the Fib with the QE. The QE represented by
 *	index is ready to insert on the queue when this routine returns
 *	success.
 */

static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
	struct aac_entry * entry = NULL;
	int map = 0;
	struct aac_queue * q = &dev->queues->queue[qid];

	spin_lock_irqsave(q->lock, q->SavedIrql);

	if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue) {
		/* if no entries wait for some if caller wants to */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 *	Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	} else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue) {
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			/* if no entries wait for some if caller wants to */
		}
		/*
		 *	Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		entry->addr = hw_fib->header.SenderFibAddress;
		/* Restore the adapter's pointer to the FIB */
		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
		map = 0;
	}
	/*
	 *	If MapFib is true then we need to map the Fib and put pointers
	 *	in the queue entry.
	 */
	if (map)
		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
	return 0;
}
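
/*
 * Note: on return from aac_queue_get() the queue lock taken above is
 * still held (the irq state is parked in q->SavedIrql); it is dropped
 * by the matching aac_insert_entry() call.
 */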

/**
 *	aac_insert_entry	-	insert a queue entry
 *	@dev: Adapter
 *	@index: Index of entry to insert
 *	@qid: Queue number
 *	@nonotify: Suppress adapter notification
 *
 *	Publishes the queue entry at @index by advancing the producer index
 *	and, unless @nonotify is set, notifying the adapter that a new entry
 *	is on the queue. Also releases the queue lock acquired by
 *	aac_queue_get().
 */

static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify)
{
	struct aac_queue * q = &dev->queues->queue[qid];

	if (q == NULL)
		BUG();
	*(q->headers.producer) = cpu_to_le32(index + 1);
	spin_unlock_irqrestore(q->lock, q->SavedIrql);

	if (qid == AdapHighCmdQueue ||
	    qid == AdapNormCmdQueue ||
	    qid == AdapHighRespQueue ||
	    qid == AdapNormRespQueue)
	{
		if (!nonotify)
			aac_adapter_notify(dev, qid);
	}
	else
		printk(KERN_ERR "aacraid: surprise insert!\n");
	return 0;
}

/*
 *	Define the highest level of host to adapter communication routines.
 *	These routines will support host to adapter FS communication. These
 *	routines have no knowledge of the communication method used. This level
 *	sends and receives FIBs. This level has no knowledge of how these FIBs
 *	get passed back and forth.
 */

/**
 *	fib_send	-	send a fib to the adapter
 *	@command: Command to send
 *	@fibptr: The fib
 *	@size: Size of fib data area
 *	@priority: Priority of Fib
 *	@wait: Async/sync select
 *	@reply: True if a reply is wanted
 *	@callback: Called with reply
 *	@callback_data: Passed to callback
 *
 *	Sends the requested FIB to the adapter and optionally will wait for a
 *	response FIB. If the caller does not wish to wait for a response then
 *	a callback to be invoked on completion must be supplied; it is called
 *	when the response FIB is received from the adapter.
 */

int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data)
{
	u32 index;
	u32 qid;
	struct aac_dev * dev = fibptr->dev;
	unsigned long nointr = 0;
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_queue * q;
	unsigned long flags = 0;
	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
		return -EBUSY;
	/*
	 *	There are four cases with the wait and response requested flags.
	 *	The only invalid cases are if the caller requests to wait and
	 *	does not request a response and if the caller does not want a
	 *	response and the Fib is not allocated from pool. If a response
	 *	is not requested the Fib will just be deallocated by the DPC
	 *	routine when the response comes back from the adapter. No
	 *	further processing will be done besides deleting the Fib. We
	 *	will have a debug mode where the adapter can notify the host
	 *	it had a problem and the host can log that fact.
	 */
	if (wait && !reply) {
		return -EINVAL;
	} else if (!wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
	} else if (!wait && !reply) {
		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
	} else if (wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
	}
	/*
	 *	Map the fib into 32bits by using the fib number
	 */

	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr-dev->fibs)) << 1);
	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
	/*
	 *	Set FIB state to indicate where it came from and if we want a
	 *	response from the adapter. Also load the command from the
	 *	caller.
	 *
	 *	Map the hw fib pointer as a 32bit value
	 */
	hw_fib->header.Command = cpu_to_le16(command);
	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
	fibptr->hw_fib->header.Flags = 0;	/* 0 the flags field - internal only*/
	/*
	 *	Set the size of the Fib we want to send to the adapter
	 */
	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
		return -EMSGSIZE;
	}
	/*
	 *	Get a queue entry, connect the FIB to it and notify
	 *	the adapter a command is ready.
	 */
	if (priority == FsaHigh) {
		hw_fib->header.XferState |= cpu_to_le32(HighPriority);
		qid = AdapHighCmdQueue;
	} else {
		hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
		qid = AdapNormCmdQueue;
	}
	q = &dev->queues->queue[qid];

	if (wait)
		spin_lock_irqsave(&fibptr->event_lock, flags);
	if (aac_queue_get(dev, &index, qid, hw_fib, 1, fibptr, &nointr) < 0) {
		if (wait)
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
		return -EWOULDBLOCK;
	}
	dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
	dprintk((KERN_DEBUG "Fib contents:.\n"));
	dprintk((KERN_DEBUG "  Command =               %d.\n", le16_to_cpu(hw_fib->header.Command)));
	dprintk((KERN_DEBUG "  XferState  =            %x.\n", le32_to_cpu(hw_fib->header.XferState)));
	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib));
	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
	dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));
	/*
	 *	Fill in the Callback and CallbackContext if we are not
	 *	going to wait.
	 */
	if (!wait) {
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
	}
	FIB_COUNTER_INCREMENT(aac_config.FibsSent);
	list_add_tail(&fibptr->queue, &q->pendingq);
	q->numpending++;

	fibptr->done = 0;
	fibptr->flags = 0;

	if (aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0) {
		if (wait)
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
		return -EWOULDBLOCK;
	}
	/*
	 *	If the caller wanted us to wait for response wait now.
	 */

	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		down(&fibptr->event_wait);
		if (fibptr->done == 0)
			BUG();

		if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)
			return -ETIMEDOUT;
		else
			return 0;
	}
	/*
	 *	If the user does not want a response then return success,
	 *	otherwise return pending
	 */
	if (reply)
		return -EINPROGRESS;
	else
		return 0;
}
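
/*
 * Illustrative asynchronous send (a sketch only; "my_callback",
 * "my_context", "command" and "datasize" are hypothetical
 * placeholders):
 *
 *	status = fib_send(command, fibptr, datasize, FsaNormal,
 *			  0, 1, my_callback, my_context);
 *
 * A return of -EINPROGRESS means the FIB was queued; my_callback is
 * invoked with my_context from the response DPC when the adapter
 * replies. Synchronous callers instead pass wait=1, reply=1 and sleep
 * inside fib_send() until the response arrives.
 */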

/**
 *	aac_consumer_get	-	get the top of the queue
 *	@dev: Adapter
 *	@q: Queue
 *	@entry: Return entry
 *
 *	Returns the address of the entry on the top of the requested queue
 *	that we are a consumer of, without changing the state of the queue.
 *	Returns 0 if the queue is empty, 1 otherwise.
 */

int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
{
	u32 index;
	int status;
	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
		status = 0;
	} else {
		/*
		 *	The consumer index must be wrapped if we have reached
		 *	the end of the queue, else we just use the entry
		 *	pointed to by the header index
		 */
		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
			index = 0;
		else
			index = le32_to_cpu(*q->headers.consumer);
		*entry = q->base + index;
		status = 1;
	}
	return status;
}

/**
 *	aac_consumer_free	-	free consumer entry
 *	@dev: Adapter
 *	@q: Queue
 *	@qid: Queue ident
 *
 *	Frees up the current top of the queue we are a consumer of. If the
 *	queue was full notify the producer that the queue is no longer full.
 */

void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
{
	int wasfull = 0;
	u32 notify;

	if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
		wasfull = 1;
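
	/*
	 * Advance the consumer index. The index runs 1-based: once it
	 * has reached q->entries the next value is 1, matching the
	 * "producer + 1 == consumer" full test above and the wrap test
	 * in aac_consumer_get().
	 */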
	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
		*q->headers.consumer = cpu_to_le32(1);
	else
		*q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);

	if (wasfull) {
		switch (qid) {

		case HostNormCmdQueue:
			notify = HostNormCmdNotFull;
			break;
		case HostHighCmdQueue:
			notify = HostHighCmdNotFull;
			break;
		case HostNormRespQueue:
			notify = HostNormRespNotFull;
			break;
		case HostHighRespQueue:
			notify = HostHighRespNotFull;
			break;
		default:
			BUG();
			return;
		}
		aac_adapter_notify(dev, notify);
	}
}

/**
 *	fib_adapter_complete	-	complete adapter issued fib
 *	@fibptr: fib to complete
 *	@size: size of fib
 *
 *	Will do all necessary work to complete a FIB that was sent from
 *	the adapter.
 */

int fib_adapter_complete(struct fib * fibptr, unsigned short size)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_dev * dev = fibptr->dev;
	unsigned long nointr = 0;
	if (hw_fib->header.XferState == 0)
		return 0;
	/*
	 *	If we plan to do anything check the structure type first.
	 */
	if (hw_fib->header.StructType != FIB_MAGIC) {
		return -EINVAL;
	}
	/*
	 *	This block handles the case where the adapter had sent us a
	 *	command and we have finished processing the command. We
	 *	call completeFib when we are done processing the command
	 *	and want to send a response back to the adapter. This will
	 *	send the completed cdb to the adapter.
	 */
	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
		hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
		if (hw_fib->header.XferState & cpu_to_le32(HighPriority)) {
			u32 index;
			if (size) {
				size += sizeof(struct aac_fibhdr);
				if (size > le16_to_cpu(hw_fib->header.SenderSize))
					return -EMSGSIZE;
				hw_fib->header.Size = cpu_to_le16(size);
			}
			if (aac_queue_get(dev, &index, AdapHighRespQueue, hw_fib, 1, NULL, &nointr) < 0)
				return -EWOULDBLOCK;
			aac_insert_entry(dev, index, AdapHighRespQueue, (nointr & (int)aac_config.irq_mod));
		} else if (hw_fib->header.XferState &
				cpu_to_le32(NormalPriority)) {
			u32 index;

			if (size) {
				size += sizeof(struct aac_fibhdr);
				if (size > le16_to_cpu(hw_fib->header.SenderSize))
					return -EMSGSIZE;
				hw_fib->header.Size = cpu_to_le16(size);
			}
			if (aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr) < 0)
				return -EWOULDBLOCK;
			aac_insert_entry(dev, index, AdapNormRespQueue, (nointr & (int)aac_config.irq_mod));
		}
	} else {
		printk(KERN_WARNING "fib_adapter_complete: Unknown xferstate detected.\n");
		BUG();
	}
	return 0;
}

/**
 *	fib_complete	-	fib completion handler
 *	@fibptr: FIB to complete
 *
 *	Will do all necessary work to complete a FIB.
 */

int fib_complete(struct fib * fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;

	/*
	 *	Check for a fib which has already been completed
	 */

	if (hw_fib->header.XferState == 0)
		return 0;
	/*
	 *	If we plan to do anything check the structure type first.
	 */

	if (hw_fib->header.StructType != FIB_MAGIC)
		return -EINVAL;
	/*
	 *	This block completes a cdb which originated on the host and we
	 *	just need to deallocate the cdb or reinit it. At this point the
	 *	command we had sent to the adapter is complete and the cdb
	 *	could be reused.
	 */
	if ((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
		(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
	{
		fib_dealloc(fibptr);
	}
	else if (hw_fib->header.XferState & cpu_to_le32(SentFromHost))
	{
		/*
		 *	This handles the case when the host has aborted the I/O
		 *	to the adapter because the adapter is not responding
		 */
		fib_dealloc(fibptr);
	} else if (hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
		fib_dealloc(fibptr);
	} else {
		BUG();
	}
	return 0;
}
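
/*
 * fib_complete() is the completion path for FIBs that the host sent to
 * the adapter; FIBs that the adapter sent to the host are finished with
 * fib_adapter_complete() instead, which queues a response back on the
 * adapter's response queue.
 */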

/**
 *	aac_printf	-	handle printf from firmware
 *	@dev: Adapter
 *	@val: Message info
 *
 *	Print a message passed to us by the controller firmware on the
 *	Adaptec board
 */

void aac_printf(struct aac_dev *dev, u32 val)
{
	char *cp = dev->printfbuf;
	if (dev->printf_enabled)
	{
		int length = val & 0xffff;
		int level = (val >> 16) & 0xffff;

		/*
		 *	The size of the printfbuf is set in port.c
		 *	There is no variable or define for it
		 */
		if (length > 255)
			length = 255;
		if (cp[length] != 0)
			cp[length] = 0;
		if (level == LOG_AAC_HIGH_ERROR)
			printk(KERN_WARNING "aacraid:%s", cp);
		else
			printk(KERN_INFO "aacraid:%s", cp);
	}
	memset(cp, 0, 256);
}

/**
 *	aac_command_thread	-	command processing thread
 *	@dev: Adapter to monitor
 *
 *	Waits on the commandready event in its queue. When the event gets set
 *	it will pull FIBs off its queue. It will continue to pull FIBs off
 *	until the queue is empty. When the queue is empty it will wait for
 *	more FIBs.
 */

int aac_command_thread(struct aac_dev * dev)
{
	struct hw_fib *hw_fib, *hw_newfib;
	struct fib *fib, *newfib;
	struct aac_queue_block *queues = dev->queues;
	struct aac_fib_context *fibctx;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 *	We can only have one thread per adapter for AIFs.
	 */
	if (dev->aif_thread)
		return -EINVAL;
	/*
	 *	Set up the name that will appear in 'ps',
	 *	stored in task_struct.comm[16].
	 */
	daemonize("aacraid");
	allow_signal(SIGKILL);
	/*
	 *	Let the DPC know it has a place to send the AIFs to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (1) {
		spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
		while (!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
			struct list_head *entry;
			struct aac_aifcmd * aifcmd;

			set_current_state(TASK_RUNNING);

			entry = queues->queue[HostNormCmdQueue].cmdq.next;
			list_del(entry);

			spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
			fib = list_entry(entry, struct fib, fiblink);
			/*
			 *	We will process the FIB here or pass it to a
			 *	worker thread that is TBD. We really can't
			 *	do anything at this point since we don't have
			 *	anything defined for this thread to do.
			 */
			hw_fib = fib->hw_fib;
			memset(fib, 0, sizeof(struct fib));
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof(struct fib);
			fib->hw_fib = hw_fib;
			fib->data = hw_fib->data;
			fib->dev = dev;
			/*
			 *	We only handle AifRequest fibs from the adapter.
			 */
			aifcmd = (struct aac_aifcmd *) hw_fib->data;
			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
				/* Handle Driver Notify Events */
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				fib_adapter_complete(fib, (u16)sizeof(u32));
			} else {
				struct list_head *entry;
				/* The u32 here is important and intended. We are using
				   32bit wrapping time to fit the adapter field */

				u32 time_now, time_last;
				unsigned long flagv;

				time_now = jiffies/HZ;

				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				/*
				 * For each context on the fib_list, make a
				 * copy of the fib and then set the event to
				 * wake up the thread that is waiting for it.
				 */
				while (entry != &dev->fib_list) {
					/*
					 * Extract the fibctx
					 */
					fibctx = list_entry(entry, struct aac_fib_context, next);
					/*
					 * Check if the queue is getting
					 * backlogged
					 */
					if (fibctx->count > 20) {
						/*
						 * It's *not* jiffies folks,
						 * but jiffies / HZ so do not
						 * panic ...
						 */
						time_last = fibctx->jiffies;
						/*
						 * Has it been > 2 minutes
						 * since the last read off
						 * the queue?
						 */
						if ((time_now - time_last) > 120) {
							entry = entry->next;
							aac_close_fib_context(dev, fibctx);
							continue;
						}
					}
					/*
					 * Warning: no sleep allowed while
					 * holding spinlock
					 */
					hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
					newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
					if (newfib && hw_newfib) {
						/*
						 * Make the copy of the FIB
						 */
						memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
						memcpy(newfib, fib, sizeof(struct fib));
						newfib->hw_fib = hw_newfib;
						/*
						 * Put the FIB onto the
						 * fibctx's fibs
						 */
						list_add_tail(&newfib->fiblink, &fibctx->fib_list);
						fibctx->count++;
						/*
						 * Set the event to wake up the
						 * thread that is waiting.
						 */
						up(&fibctx->wait_sem);
					} else {
						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
						kfree(newfib);
						kfree(hw_newfib);
					}
					entry = entry->next;
				}
				/*
				 *	Set the status of this FIB
				 */
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				fib_adapter_complete(fib, sizeof(u32));
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
			}
			spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
			kfree(fib);
		}
		/*
		 *	There are no more AIFs
		 */
		spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
		schedule();

		if (signal_pending(current))
			break;
		set_current_state(TASK_INTERRUPTIBLE);
	}
	remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
	dev->aif_thread = 0;
	complete_and_exit(&dev->aif_completion, 0);
}
954