/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  commsup.c
 *
 * Abstract: Contains all routines required for FSA host/adapter
 *    communication.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <scsi/scsi_host.h>
#include <asm/semaphore.h>

#include "aacraid.h"

/**
 *	fib_map_alloc		-	allocate the fib objects
 *	@dev: Adapter to allocate for
 *
 *	Allocate and map the shared PCI space for the FIB blocks used to
 *	talk to the Adaptec firmware.
 */

static int fib_map_alloc(struct aac_dev *dev)
{
	dprintk((KERN_INFO
	  "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
	  dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
	if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size
	  * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
	  &dev->hw_fib_pa))==NULL)
		return -ENOMEM;
	return 0;
}

/**
 *	fib_map_free		-	free the fib objects
 *	@dev: Adapter to free
 *
 *	Free the PCI mappings and the memory allocated for FIB blocks
 *	on this adapter.
 */

void fib_map_free(struct aac_dev *dev)
{
	pci_free_consistent(dev->pdev, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), dev->hw_fib_va, dev->hw_fib_pa);
}

/**
 *	fib_setup	-	setup the fibs
 *	@dev: Adapter to set up
 *
 *	Allocate the PCI space for the fibs, map it and then initialise the
 *	fib area, the unmapped fib data and also the free list
 */

int fib_setup(struct aac_dev * dev)
{
	struct fib *fibptr;
	struct hw_fib *hw_fib_va;
	dma_addr_t hw_fib_pa;
	int i;

	while (((i = fib_map_alloc(dev)) == -ENOMEM)
	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
		dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
		dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
	}
	if (i<0)
		return -ENOMEM;

	hw_fib_va = dev->hw_fib_va;
	hw_fib_pa = dev->hw_fib_pa;
	memset(hw_fib_va, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
	/*
	 *	Initialise the fibs
	 */
	for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++)
	{
		fibptr->dev = dev;
		fibptr->hw_fib = hw_fib_va;
		fibptr->data = (void *) fibptr->hw_fib->data;
		fibptr->next = fibptr+1;	/* Forward chain the fibs */
		init_MUTEX_LOCKED(&fibptr->event_wait);
		spin_lock_init(&fibptr->event_lock);
		hw_fib_va->header.XferState = cpu_to_le32(0xffffffff);
		hw_fib_va->header.SenderSize = cpu_to_le16(dev->max_fib_size);
		fibptr->hw_fib_pa = hw_fib_pa;
		hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + dev->max_fib_size);
		hw_fib_pa = hw_fib_pa + dev->max_fib_size;
	}
	/*
	 *	Terminate the fib chain and add it to the free list
	 */
	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
	dev->free_fib = &dev->fibs[0];
	return 0;
}

/**
 *	fib_alloc	-	allocate a fib
 *	@dev: Adapter to allocate the fib for
 *
 *	Allocate a fib from the adapter fib pool. If the pool is empty we
 *	return NULL.
 */

struct fib * fib_alloc(struct aac_dev *dev)
{
	struct fib * fibptr;
	unsigned long flags;
	spin_lock_irqsave(&dev->fib_lock, flags);
	fibptr = dev->free_fib;
	if(!fibptr){
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return fibptr;
	}
	dev->free_fib = fibptr->next;
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	/*
	 *	Set the proper node type code and node byte size
	 */
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->size = sizeof(struct fib);
	/*
	 *	Null out fields that depend on being zero at the start of
	 *	each I/O
	 */
	fibptr->hw_fib->header.XferState = 0;
	fibptr->callback = NULL;
	fibptr->callback_data = NULL;

	return fibptr;
}
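
#if 0
/*
 * Illustrative sketch (not compiled): take a fib from the pool and hand
 * it straight back; 'dev' stands for any adapter pointer. fib_alloc()
 * clears XferState, so fib_free() will not warn here. A fib that has
 * been through fib_init()/fib_send() must be retired via fib_complete()
 * before fib_free() is called.
 */
	struct fib *fibptr = fib_alloc(dev);
	if (fibptr)
		fib_free(fibptr);
#endif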

/**
 *	fib_free	-	free a fib
 *	@fibptr: fib to free up
 *
 *	Frees up a fib and places it on the appropriate queue
 *	(either free or timed out)
 */

void fib_free(struct fib * fibptr)
{
	unsigned long flags;

	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
	if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
		aac_config.fib_timeouts++;
		fibptr->next = fibptr->dev->timeout_fib;
		fibptr->dev->timeout_fib = fibptr;
	} else {
		if (fibptr->hw_fib->header.XferState != 0) {
			printk(KERN_WARNING "fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
				 (void*)fibptr,
				 le32_to_cpu(fibptr->hw_fib->header.XferState));
		}
		fibptr->next = fibptr->dev->free_fib;
		fibptr->dev->free_fib = fibptr;
	}
	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}

/**
 *	fib_init	-	initialise a fib
 *	@fibptr: The fib to initialise
 *
 *	Set up the generic fib fields ready for use
 */

void fib_init(struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib;

	hw_fib->header.StructType = FIB_MAGIC;
	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
	hw_fib->header.SenderFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
	hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}

/**
 *	fib_dealloc		-	deallocate a fib
 *	@fibptr: fib to deallocate
 *
 *	Will deallocate and return to the free pool the FIB pointed to by the
 *	caller.
 */

static void fib_dealloc(struct fib * fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib;
	if(hw_fib->header.StructType != FIB_MAGIC)
		BUG();
	hw_fib->header.XferState = 0;
}

/*
 *	Communication primitives define and support the queuing method we use
 *	for host to adapter communication. All queue accesses happen through
 *	these routines, and they are the only routines which have knowledge of
 *	how these queues are implemented.
 */
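
#if 0
/*
 * Illustrative sketch (not compiled): the producer/consumer ring
 * convention the routines below rely on. A queue is empty when the
 * producer and consumer indexes are equal, and treated as full when
 * advancing the producer would make them equal (see aac_get_entry()
 * and aac_consumer_get()); wrapping back to index 0 is handled
 * separately at the queue-size boundary. The helper names are
 * hypothetical.
 */
static int aac_example_queue_empty(struct aac_queue *q)
{
	return le32_to_cpu(*q->headers.producer) ==
	       le32_to_cpu(*q->headers.consumer);
}

static int aac_example_queue_full(struct aac_queue *q)
{
	return (le32_to_cpu(*q->headers.producer) + 1) ==
	       le32_to_cpu(*q->headers.consumer);
}
#endif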

/**
 *	aac_get_entry		-	get a queue entry
 *	@dev: Adapter
 *	@qid: Queue Number
 *	@entry: Entry return
 *	@index: Index return
 *	@nonotify: notification control
 *
 *	Returns a queue entry of the requested priority if the queue has
 *	free entries. If the queue is full (no free entries) no entry is
 *	returned and the function returns 0, otherwise 1 is returned.
 */

static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
	struct aac_queue * q;
	unsigned long idx;

	/*
	 *	All of the queues wrap when they reach the end, so we check
	 *	to see if they have reached the end and if they have we just
	 *	set the index back to zero. This is a wrap. You could or off
	 *	the high bits in all updates but this is a bit faster I think.
	 */

	q = &dev->queues->queue[qid];

	idx = *index = le32_to_cpu(*(q->headers.producer));
	/* Interrupt Moderation, only interrupt for first two entries */
	if (idx != le32_to_cpu(*(q->headers.consumer))) {
		if (--idx == 0) {
			if (qid == AdapHighCmdQueue)
				idx = ADAP_HIGH_CMD_ENTRIES;
			else if (qid == AdapNormCmdQueue)
				idx = ADAP_NORM_CMD_ENTRIES;
			else if (qid == AdapHighRespQueue)
				idx = ADAP_HIGH_RESP_ENTRIES;
			else if (qid == AdapNormRespQueue)
				idx = ADAP_NORM_RESP_ENTRIES;
		}
		if (idx != le32_to_cpu(*(q->headers.consumer)))
			*nonotify = 1;
	}

	if (qid == AdapHighCmdQueue) {
		if (*index >= ADAP_HIGH_CMD_ENTRIES)
			*index = 0;
	} else if (qid == AdapNormCmdQueue) {
		if (*index >= ADAP_NORM_CMD_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	} else if (qid == AdapHighRespQueue) {
		if (*index >= ADAP_HIGH_RESP_ENTRIES)
			*index = 0;
	} else if (qid == AdapNormRespQueue) {
		if (*index >= ADAP_NORM_RESP_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	} else {
		printk(KERN_ERR "aacraid: invalid qid\n");
		BUG();
	}

	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
				qid, q->numpending);
		return 0;
	} else {
		*entry = q->base + *index;
		return 1;
	}
}

/**
 *	aac_queue_get		-	get the next free QE
 *	@dev: Adapter
 *	@index: Returned index
 *	@qid: Queue number
 *	@hw_fib: Fib to associate with the queue entry
 *	@wait: Wait if queue full
 *	@fibptr: Driver fib object to go with fib
 *	@nonotify: Don't notify the adapter
 *
 *	Gets the next free QE off the requested priority adapter command
 *	queue and associates the Fib with the QE. The QE represented by
 *	index is ready to insert on the queue when this routine returns
 *	success.
 */

static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
	struct aac_entry * entry = NULL;
	int map = 0;
	struct aac_queue * q = &dev->queues->queue[qid];

	spin_lock_irqsave(q->lock, q->SavedIrql);

	if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue)
	{
		/* if no entries wait for some if caller wants to */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify))
		{
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 *	Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	}
	else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue)
	{
		while(!aac_get_entry(dev, qid, &entry, index, nonotify))
		{
			/* if no entries wait for some if caller wants to */
		}
		/*
		 *	Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		entry->addr = hw_fib->header.SenderFibAddress;
		/* Restore the adapter's pointer to the FIB */
		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
		map = 0;
	}
	/*
	 *	If MapFib is true then we need to map the Fib and put pointers
	 *	in the queue entry.
	 */
	if (map)
		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
	return 0;
}

/**
 *	aac_insert_entry	-	insert a queue entry
 *	@dev: Adapter
 *	@index: Index of entry to insert
 *	@qid: Queue number
 *	@nonotify: Suppress adapter notification
 *
 *	Publishes the queue entry at @index by advancing the producer index
 *	of the requested queue, then notifies the adapter that a new entry
 *	is ready unless @nonotify is set.
 */

static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify)
{
	struct aac_queue * q = &dev->queues->queue[qid];

	if(q == NULL)
		BUG();
	*(q->headers.producer) = cpu_to_le32(index + 1);
	spin_unlock_irqrestore(q->lock, q->SavedIrql);

	if (qid == AdapHighCmdQueue ||
	    qid == AdapNormCmdQueue ||
	    qid == AdapHighRespQueue ||
	    qid == AdapNormRespQueue)
	{
		if (!nonotify)
			aac_adapter_notify(dev, qid);
	}
	else
		printk(KERN_WARNING "Surprise insert!\n");
	return 0;
}

/*
 *	Define the highest level of host to adapter communication routines.
 *	These routines will support host to adapter FS communication. These
 *	routines have no knowledge of the communication method used. This level
 *	sends and receives FIBs. This level has no knowledge of how these FIBs
 *	get passed back and forth.
 */
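
#if 0
/*
 * Illustrative sketch (not compiled): the synchronous FIB lifecycle
 * built from the routines in this file. The command value and request
 * payload are left to the caller; real users fill fibptr->data with a
 * specific request structure before sending. The function name is
 * hypothetical.
 */
static int aac_example_sync_fib(struct aac_dev *dev, u16 command,
				unsigned long size)
{
	struct fib *fibptr;
	int status;

	fibptr = fib_alloc(dev);
	if (!fibptr)
		return -ENOMEM;
	fib_init(fibptr);
	/* ... build the 'size'-byte request in fibptr->data here ... */
	status = fib_send(command, fibptr, size, FsaNormal,
			  1, 1, NULL, NULL);	/* wait, reply expected */
	fib_complete(fibptr);
	fib_free(fibptr);
	return status;
}
#endif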

/**
 *	fib_send	-	send a fib to the adapter
 *	@command: Command to send
 *	@fibptr: The fib
 *	@size: Size of fib data area
 *	@priority: Priority of Fib
 *	@wait: Async/sync select
 *	@reply: True if a reply is wanted
 *	@callback: Called with reply
 *	@callback_data: Passed to callback
 *
 *	Sends the requested FIB to the adapter and optionally will wait for a
 *	response FIB. If the caller does not wish to wait for a response, a
 *	callback to be invoked when the response FIB is received from the
 *	adapter must be supplied instead.
 */

int fib_send(u16 command, struct fib * fibptr, unsigned long size,  int priority, int wait, int reply, fib_callback callback, void * callback_data)
{
	u32 index;
	u32 qid;
	struct aac_dev * dev = fibptr->dev;
	unsigned long nointr = 0;
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_queue * q;
	unsigned long flags = 0;
	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
		return -EBUSY;
	/*
	 *	There are 5 cases with the wait and response requested flags.
	 *	The only invalid cases are if the caller requests to wait and
	 *	does not request a response and if the caller does not want a
	 *	response and the Fib is not allocated from pool. If a response
	 *	is not requested the Fib will just be deallocated by the DPC
	 *	routine when the response comes back from the adapter. No
	 *	further processing will be done besides deleting the Fib. We
	 *	will have a debug mode where the adapter can notify the host
	 *	it had a problem and the host can log that fact.
	 */
	if (wait && !reply) {
		return -EINVAL;
	} else if (!wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
	} else if (!wait && !reply) {
		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
	} else if (wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
	}
	/*
	 *	Map the fib into 32bits by using the fib number
	 */

	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr-dev->fibs)) << 1);
	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
	/*
	 *	Set FIB state to indicate where it came from and if we want a
	 *	response from the adapter. Also load the command from the
	 *	caller.
	 *
	 *	Map the hw fib pointer as a 32bit value
	 */
	hw_fib->header.Command = cpu_to_le16(command);
	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
	fibptr->hw_fib->header.Flags = 0;	/* 0 the flags field - internal only*/
	/*
	 *	Set the size of the Fib we want to send to the adapter
	 */
	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
		return -EMSGSIZE;
	}
	/*
	 *	Get a queue entry, connect the FIB to it, and notify the
	 *	adapter that a command is ready.
	 */
	if (priority == FsaHigh) {
		hw_fib->header.XferState |= cpu_to_le32(HighPriority);
		qid = AdapHighCmdQueue;
	} else {
		hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
		qid = AdapNormCmdQueue;
	}
	q = &dev->queues->queue[qid];

	if(wait)
		spin_lock_irqsave(&fibptr->event_lock, flags);
	if(aac_queue_get( dev, &index, qid, hw_fib, 1, fibptr, &nointr)<0)
		return -EWOULDBLOCK;
	dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
	dprintk((KERN_DEBUG "Fib contents:.\n"));
	dprintk((KERN_DEBUG "  Command =               %d.\n", hw_fib->header.Command));
	dprintk((KERN_DEBUG "  XferState  =            %x.\n", hw_fib->header.XferState));
	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib));
	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
	dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));
	/*
	 *	Fill in the Callback and CallbackContext if we are not
	 *	going to wait.
	 */
	if (!wait) {
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
	}
	FIB_COUNTER_INCREMENT(aac_config.FibsSent);
	list_add_tail(&fibptr->queue, &q->pendingq);
	q->numpending++;

	fibptr->done = 0;
	fibptr->flags = 0;

	if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0)
		return -EWOULDBLOCK;
	/*
	 *	If the caller wanted us to wait for response wait now.
	 */

	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		down(&fibptr->event_wait);
		if(fibptr->done == 0)
			BUG();

		if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){
			return -ETIMEDOUT;
		} else {
			return 0;
		}
	}
	/*
	 *	If the caller does not want a response then return success,
	 *	otherwise return pending
	 */
	if (reply)
		return -EINPROGRESS;
	else
		return 0;
}
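
#if 0
/*
 * Illustrative sketch (not compiled): an asynchronous send. With
 * wait == 0 and reply == 1 a callback is supplied, fib_send() returns
 * -EINPROGRESS on success, and the callback runs when the response
 * arrives, typically retiring the fib with fib_complete()/fib_free().
 * Both function names here are hypothetical.
 */
static void aac_example_done(void *context, struct fib *fibptr)
{
	fib_complete(fibptr);
	fib_free(fibptr);
}

static int aac_example_async_fib(struct aac_dev *dev, u16 command,
				 unsigned long size)
{
	struct fib *fibptr = fib_alloc(dev);

	if (!fibptr)
		return -ENOMEM;
	fib_init(fibptr);
	/* ... build the request in fibptr->data here ... */
	return fib_send(command, fibptr, size, FsaNormal,
			0, 1, aac_example_done, NULL);
}
#endif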

/**
 *	aac_consumer_get	-	get the top of the queue
 *	@dev: Adapter
 *	@q: Queue
 *	@entry: Return entry
 *
 *	Returns nonzero if there is an entry on the top of the requested
 *	queue that we are a consumer of, placing the address of that queue
 *	entry in @entry. It does not change the state of the queue.
 */

int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
{
	u32 index;
	int status;
	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
		status = 0;
	} else {
		/*
		 *	The consumer index must be wrapped if we have reached
		 *	the end of the queue, else we just use the entry
		 *	pointed to by the header index
		 */
		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
			index = 0;
		else
			index = le32_to_cpu(*q->headers.consumer);
		*entry = q->base + index;
		status = 1;
	}
	return(status);
}

/**
 *	aac_consumer_free	-	free consumer entry
 *	@dev: Adapter
 *	@q: Queue
 *	@qid: Queue ident
 *
 *	Frees up the current top of the queue we are a consumer of. If the
 *	queue was full notify the producer that the queue is no longer full.
 */

void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
{
	int wasfull = 0;
	u32 notify;

	if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
		wasfull = 1;

	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
		*q->headers.consumer = cpu_to_le32(1);
	else
		*q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);

	if (wasfull) {
		switch (qid) {

		case HostNormCmdQueue:
			notify = HostNormCmdNotFull;
			break;
		case HostHighCmdQueue:
			notify = HostHighCmdNotFull;
			break;
		case HostNormRespQueue:
			notify = HostNormRespNotFull;
			break;
		case HostHighRespQueue:
			notify = HostHighRespNotFull;
			break;
		default:
			BUG();
			return;
		}
		aac_adapter_notify(dev, notify);
	}
}
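
#if 0
/*
 * Illustrative sketch (not compiled): draining a queue that the host
 * consumes. Each entry obtained with aac_consumer_get() is retired with
 * aac_consumer_free(), which also wakes the producer if the queue had
 * been full. The function name is hypothetical; the real consumers live
 * in the interrupt/DPC paths.
 */
static void aac_example_drain(struct aac_dev *dev, struct aac_queue *q,
			      u32 qid)
{
	struct aac_entry *entry;

	while (aac_consumer_get(dev, q, &entry)) {
		/* ... process entry->addr / entry->size here ... */
		aac_consumer_free(dev, q, qid);
	}
}
#endif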

/**
 *	fib_adapter_complete	-	complete adapter issued fib
 *	@fibptr: fib to complete
 *	@size: size of fib
 *
 *	Will do all necessary work to complete a FIB that was sent from
 *	the adapter.
 */

int fib_adapter_complete(struct fib * fibptr, unsigned short size)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_dev * dev = fibptr->dev;
	unsigned long nointr = 0;
	if (hw_fib->header.XferState == 0)
		return 0;
	/*
	 *	If we plan to do anything check the structure type first.
	 */
	if (hw_fib->header.StructType != FIB_MAGIC)
		return -EINVAL;
	/*
	 *	This block handles the case where the adapter had sent us a
	 *	command and we have finished processing the command. We
	 *	call completeFib when we are done processing the command
	 *	and want to send a response back to the adapter. This will
	 *	send the completed cdb to the adapter.
	 */
	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
		hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
		if (hw_fib->header.XferState & cpu_to_le32(HighPriority)) {
			u32 index;

			if (size) {
				size += sizeof(struct aac_fibhdr);
				if (size > le16_to_cpu(hw_fib->header.SenderSize))
					return -EMSGSIZE;
				hw_fib->header.Size = cpu_to_le16(size);
			}
			if (aac_queue_get(dev, &index, AdapHighRespQueue, hw_fib, 1, NULL, &nointr) < 0)
				return -EWOULDBLOCK;
			aac_insert_entry(dev, index, AdapHighRespQueue, (nointr & (int)aac_config.irq_mod));
		} else if (hw_fib->header.XferState &
				cpu_to_le32(NormalPriority)) {
			u32 index;

			if (size) {
				size += sizeof(struct aac_fibhdr);
				if (size > le16_to_cpu(hw_fib->header.SenderSize))
					return -EMSGSIZE;
				hw_fib->header.Size = cpu_to_le16(size);
			}
			if (aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr) < 0)
				return -EWOULDBLOCK;
			aac_insert_entry(dev, index, AdapNormRespQueue, (nointr & (int)aac_config.irq_mod));
		}
	} else {
		printk(KERN_WARNING "fib_adapter_complete: Unknown xferstate detected.\n");
		BUG();
	}
	return 0;
}

/**
 *	fib_complete	-	fib completion handler
 *	@fibptr: FIB to complete
 *
 *	Will do all necessary work to complete a FIB.
 */

int fib_complete(struct fib * fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;

	/*
	 *	Check for a fib which has already been completed
	 */

	if (hw_fib->header.XferState == 0)
		return 0;
	/*
	 *	If we plan to do anything check the structure type first.
	 */

	if (hw_fib->header.StructType != FIB_MAGIC)
		return -EINVAL;
	/*
	 *	This block completes a cdb which originated on the host and we
	 *	just need to deallocate the cdb or reinit it. At this point the
	 *	command is complete that we had sent to the adapter and this
	 *	cdb could be reused.
	 */
	if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
		(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
	{
		fib_dealloc(fibptr);
	}
	else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost))
	{
		/*
		 *	This handles the case when the host has aborted the I/O
		 *	to the adapter because the adapter is not responding
		 */
		fib_dealloc(fibptr);
	} else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
		fib_dealloc(fibptr);
	} else {
		BUG();
	}
	return 0;
}

/**
 *	aac_printf	-	handle printf from firmware
 *	@dev: Adapter
 *	@val: Message info
 *
 *	Print a message passed to us by the controller firmware on the
 *	Adaptec board
 */

void aac_printf(struct aac_dev *dev, u32 val)
{
	char *cp = dev->printfbuf;
	if (dev->printf_enabled)
	{
		int length = val & 0xffff;
		int level = (val >> 16) & 0xffff;

		/*
		 *	The size of the printfbuf is set in port.c
		 *	There is no variable or define for it
		 */
		if (length > 255)
			length = 255;
		if (cp[length] != 0)
			cp[length] = 0;
		if (level == LOG_AAC_HIGH_ERROR)
			printk(KERN_WARNING "aacraid:%s", cp);
		else
			printk(KERN_INFO "aacraid:%s", cp);
	}
	memset(cp, 0,  256);
}
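
#if 0
/*
 * Illustrative sketch (not compiled): how 'val' is packed. The low
 * 16 bits carry the message length and the high 16 bits the log level,
 * so the firmware signalling a high-priority 32-byte message in
 * dev->printfbuf would correspond to:
 */
	u32 val = (LOG_AAC_HIGH_ERROR << 16) | 32;
	aac_printf(dev, val);
#endif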

/**
 *	aac_command_thread	-	command processing thread
 *	@dev: Adapter to monitor
 *
 *	Waits on the commandready event in its queue. When the event gets set
 *	it will pull FIBs off its queue. It will continue to pull FIBs off
 *	until the queue is empty. When the queue is empty it will wait for
 *	more FIBs.
 */

int aac_command_thread(struct aac_dev * dev)
{
	struct hw_fib *hw_fib, *hw_newfib;
	struct fib *fib, *newfib;
	struct aac_queue_block *queues = dev->queues;
	struct aac_fib_context *fibctx;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 *	We can only have one thread per adapter for AIFs.
	 */
	if (dev->aif_thread)
		return -EINVAL;
	/*
	 *	Set up the name that will appear in 'ps'
	 *	stored in  task_struct.comm[16].
	 */
	daemonize("aacraid");
	allow_signal(SIGKILL);
	/*
	 *	Let the DPC know it has a place to send the AIFs to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while(1)
	{
		spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
		while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
			struct list_head *entry;
			struct aac_aifcmd * aifcmd;

			set_current_state(TASK_RUNNING);

			entry = queues->queue[HostNormCmdQueue].cmdq.next;
			list_del(entry);

			spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
			fib = list_entry(entry, struct fib, fiblink);
			/*
			 *	We will process the FIB here or pass it to a
			 *	worker thread that is TBD. We Really can't
			 *	do anything at this point since we don't have
			 *	anything defined for this thread to do.
			 */
			hw_fib = fib->hw_fib;
			memset(fib, 0, sizeof(struct fib));
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof( struct fib );
			fib->hw_fib = hw_fib;
			fib->data = hw_fib->data;
			fib->dev = dev;
			/*
			 *	We only handle AifRequest fibs from the adapter.
			 */
			aifcmd = (struct aac_aifcmd *) hw_fib->data;
			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
				/* Handle Driver Notify Events */
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				fib_adapter_complete(fib, (u16)sizeof(u32));
			} else {
				struct list_head *entry;
				/* The u32 here is important and intended. We are using
				   32bit wrapping time to fit the adapter field */

				u32 time_now, time_last;
				unsigned long flagv;

				time_now = jiffies/HZ;

				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				/*
				 * For each Context that is on the
				 * fibctxList, make a copy of the
				 * fib, and then set the event to wake up the
				 * thread that is waiting for it.
				 */
				while (entry != &dev->fib_list) {
					/*
					 * Extract the fibctx
					 */
					fibctx = list_entry(entry, struct aac_fib_context, next);
					/*
					 * Check if the queue is getting
					 * backlogged
					 */
					if (fibctx->count > 20)
					{
						/*
						 * It's *not* jiffies folks,
						 * but jiffies / HZ so do not
						 * panic ...
						 */
						time_last = fibctx->jiffies;
						/*
						 * Has it been > 2 minutes
						 * since the last read off
						 * the queue?
						 */
						if ((time_now - time_last) > 120) {
							entry = entry->next;
							aac_close_fib_context(dev, fibctx);
							continue;
						}
					}
					/*
					 * Warning: no sleep allowed while
					 * holding spinlock
					 */
					hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
					newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
					if (newfib && hw_newfib) {
						/*
						 * Make the copy of the FIB
						 */
						memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
						memcpy(newfib, fib, sizeof(struct fib));
						newfib->hw_fib = hw_newfib;
						/*
						 * Put the FIB onto the
						 * fibctx's fibs
						 */
						list_add_tail(&newfib->fiblink, &fibctx->fib_list);
						fibctx->count++;
						/*
						 * Set the event to wake up the
						 * thread that is waiting.
						 */
						up(&fibctx->wait_sem);
					} else {
						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
						kfree(newfib);
						kfree(hw_newfib);
					}
					entry = entry->next;
				}
				/*
				 *	Set the status of this FIB
				 */
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				fib_adapter_complete(fib, sizeof(u32));
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
			}
			spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
			kfree(fib);
		}
		/*
		 *	There are no more AIFs
		 */
		spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
		schedule();

		if(signal_pending(current))
			break;
		set_current_state(TASK_INTERRUPTIBLE);
	}
	remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
	dev->aif_thread = 0;
	complete_and_exit(&dev->aif_completion, 0);
}
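
#if 0
/*
 * Illustrative sketch (not compiled): how the driver's init path might
 * start and stop this thread. The exact call sites live elsewhere in
 * the driver (linit.c/comminit.c); 'thread_pid' is assumed to be the
 * field used there, and the SIGKILL/completion pairing mirrors the
 * allow_signal()/complete_and_exit() calls above.
 */
	init_completion(&dev->aif_completion);
	dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread,
					dev, 0);
	/* ... later, on shutdown: */
	kill_proc(dev->thread_pid, SIGKILL, 0);
	wait_for_completion(&dev->aif_completion);
#endif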
968