xref: /freebsd/sys/dev/sym/sym_hipd.c (revision 807a5caa14df5ff04b331e24b45893f6a2f6bc1b)
1 /*
2  *  Device driver optimized for the Symbios/LSI 53C896/53C895A/53C1010
3  *  PCI-SCSI controllers.
4  *
5  *  Copyright (C) 1999-2000  Gerard Roudier <groudier@club-internet.fr>
6  *
7  *  This driver also supports the following Symbios/LSI PCI-SCSI chips:
8  *	53C810A, 53C825A, 53C860, 53C875, 53C876, 53C885, 53C895.
9  *
10  *  but does not support earlier chips as the following ones:
11  *	53C810, 53C815, 53C825.
12  *
13  *  This driver for FreeBSD-CAM is derived from the Linux sym53c8xx driver.
14  *  Copyright (C) 1998-1999  Gerard Roudier
15  *
16  *  The sym53c8xx driver is derived from the ncr53c8xx driver that had been
17  *  a port of the FreeBSD ncr driver to Linux-1.2.13.
18  *
19  *  The original ncr driver has been written for 386bsd and FreeBSD by
20  *          Wolfgang Stanglmeier        <wolf@cologne.de>
21  *          Stefan Esser                <se@mi.Uni-Koeln.de>
22  *  Copyright (C) 1994  Wolfgang Stanglmeier
23  *
24  *  The initialisation code, and part of the code that addresses
25  *  FreeBSD-CAM services is based on the aic7xxx driver for FreeBSD-CAM
26  *  written by Justin T. Gibbs.
27  *
28  *  Other major contributions:
29  *
30  *  NVRAM detection and reading.
31  *  Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
32  *
33  *-----------------------------------------------------------------------------
34  *
35  * Redistribution and use in source and binary forms, with or without
36  * modification, are permitted provided that the following conditions
37  * are met:
38  * 1. Redistributions of source code must retain the above copyright
39  *    notice, this list of conditions and the following disclaimer.
40  * 2. Redistributions in binary form must reproduce the above copyright
41  *    notice, this list of conditions and the following disclaimer in the
42  *    documentation and/or other materials provided with the distribution.
43  * 3. The name of the author may not be used to endorse or promote products
44  *    derived from this software without specific prior written permission.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
47  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
50  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
51  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
52  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56  * SUCH DAMAGE.
57  */
58 
59 /* $FreeBSD$ */
60 
61 #define SYM_DRIVER_NAME	"sym-1.4.1-20000326"
62 
63 #include <pci.h>
64 #include <stddef.h>	/* For offsetof */
65 
66 #include <sys/param.h>
67 /*
68  *  Only use the BUS stuff for PCI under FreeBSD 4 and later versions.
69  *  Note that the old BUS stuff also works for FreeBSD 4 and spares
70  *  about 1.5KB for the driver object file.
71  */
72 #if 	__FreeBSD_version >= 400000
73 #define	FreeBSD_4_Bus
74 #endif
75 
76 #if 	__FreeBSD_version >= 400000
77 #define	FreeBSD_Bus_Dma_Abstraction
78 #endif
79 
80 #include <sys/systm.h>
81 #include <sys/malloc.h>
82 #include <sys/kernel.h>
83 #ifdef FreeBSD_4_Bus
84 #include <sys/module.h>
85 #include <sys/bus.h>
86 #endif
87 
88 #include <sys/buf.h>
89 #include <sys/proc.h>
90 
91 #include <pci/pcireg.h>
92 #include <pci/pcivar.h>
93 
94 #include <machine/bus_memio.h>
95 #include <machine/bus_pio.h>
96 #include <machine/bus.h>
97 #ifdef FreeBSD_4_Bus
98 #include <machine/resource.h>
99 #include <sys/rman.h>
100 #endif
101 #include <machine/clock.h>
102 
103 #include <cam/cam.h>
104 #include <cam/cam_ccb.h>
105 #include <cam/cam_sim.h>
106 #include <cam/cam_xpt_sim.h>
107 #include <cam/cam_debug.h>
108 
109 #include <cam/scsi/scsi_all.h>
110 #include <cam/scsi/scsi_message.h>
111 
112 #include <vm/vm.h>
113 #include <vm/vm_param.h>
114 #include <vm/pmap.h>
115 
116 #if 0
117 #include <sys/kernel.h>
118 #include <sys/sysctl.h>
119 #include <vm/vm_extern.h>
120 #endif
121 
122 /* Short and quite clear integer types */
123 typedef int8_t    s8;
124 typedef int16_t   s16;
125 typedef	int32_t   s32;
126 typedef u_int8_t  u8;
127 typedef u_int16_t u16;
128 typedef	u_int32_t u32;
129 
130 /* Driver configuration and definitions */
131 #if 1
132 #include "opt_sym.h"
133 #include <dev/sym/sym_conf.h>
134 #include <dev/sym/sym_defs.h>
135 #else
136 #include "ncr.h"	/* To know if the ncr has been configured */
137 #include <pci/sym_conf.h>
138 #include <pci/sym_defs.h>
139 #endif
140 
141 /*
142  *  On x86 architecture, write buffers management does not
143  *  reorder writes to memory. So, preventing compiler from
144  *  optimizing the code is enough to guarantee some ordering
145  *  when the CPU is writing data accessed by the PCI chip.
146  *  On Alpha architecture, explicit barriers are to be used.
147  *  By the way, the *BSD semantic associates the barrier
148  *  with some window on the BUS and the corresponding verbs
149  *  are for now unused. What a strangeness. The driver must
150  *  ensure that accesses from the CPU to the start and done
151  *  queues are not reordered by either the compiler or the
152  *  CPU and uses 'volatile' for this purpose.
153  */
154 
155 #ifdef	__alpha__
156 #define MEMORY_BARRIER()	alpha_mb()
157 #else /*__i386__*/
158 #define MEMORY_BARRIER()	do { ; } while(0)
159 #endif
160 
161 /*
162  *  A la VMS/CAM-3 queue management.
163  */
164 
165 typedef struct sym_quehead {
166 	struct sym_quehead *flink;	/* Forward  pointer */
167 	struct sym_quehead *blink;	/* Backward pointer */
168 } SYM_QUEHEAD;
169 
170 #define sym_que_init(ptr) do { \
171 	(ptr)->flink = (ptr); (ptr)->blink = (ptr); \
172 } while (0)
173 
174 static __inline struct sym_quehead *sym_que_first(struct sym_quehead *head)
175 {
176 	return (head->flink == head) ? 0 : head->flink;
177 }
178 
179 static __inline struct sym_quehead *sym_que_last(struct sym_quehead *head)
180 {
181 	return (head->blink == head) ? 0 : head->blink;
182 }
183 
184 static __inline void __sym_que_add(struct sym_quehead * new,
185 	struct sym_quehead * blink,
186 	struct sym_quehead * flink)
187 {
188 	flink->blink	= new;
189 	new->flink	= flink;
190 	new->blink	= blink;
191 	blink->flink	= new;
192 }
193 
194 static __inline void __sym_que_del(struct sym_quehead * blink,
195 	struct sym_quehead * flink)
196 {
197 	flink->blink = blink;
198 	blink->flink = flink;
199 }
200 
201 static __inline int sym_que_empty(struct sym_quehead *head)
202 {
203 	return head->flink == head;
204 }
205 
206 static __inline void sym_que_splice(struct sym_quehead *list,
207 	struct sym_quehead *head)
208 {
209 	struct sym_quehead *first = list->flink;
210 
211 	if (first != list) {
212 		struct sym_quehead *last = list->blink;
213 		struct sym_quehead *at   = head->flink;
214 
215 		first->blink = head;
216 		head->flink  = first;
217 
218 		last->flink = at;
219 		at->blink   = last;
220 	}
221 }
222 
223 #define sym_que_entry(ptr, type, member) \
224 	((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
225 
226 
227 #define sym_insque(new, pos)		__sym_que_add(new, pos, (pos)->flink)
228 
229 #define sym_remque(el)			__sym_que_del((el)->blink, (el)->flink)
230 
231 #define sym_insque_head(new, head)	__sym_que_add(new, head, (head)->flink)
232 
233 static __inline struct sym_quehead *sym_remque_head(struct sym_quehead *head)
234 {
235 	struct sym_quehead *elem = head->flink;
236 
237 	if (elem != head)
238 		__sym_que_del(head, elem->flink);
239 	else
240 		elem = 0;
241 	return elem;
242 }
243 
244 #define sym_insque_tail(new, head)	__sym_que_add(new, (head)->blink, head)
245 
246 static __inline struct sym_quehead *sym_remque_tail(struct sym_quehead *head)
247 {
248 	struct sym_quehead *elem = head->blink;
249 
250 	if (elem != head)
251 		__sym_que_del(elem->blink, head);
252 	else
253 		elem = 0;
254 	return elem;
255 }
256 
257 /*
258  *  This one may be usefull.
259  */
260 #define FOR_EACH_QUEUED_ELEMENT(head, qp) \
261 	for (qp = (head)->flink; qp != (head); qp = qp->flink)
262 /*
263  *  FreeBSD does not offer our kind of queue in the CAM CCB.
264  *  So, we have to cast.
265  */
266 #define sym_qptr(p)	((struct sym_quehead *) (p))
267 
268 /*
269  *  Simple bitmap operations.
270  */
271 #define sym_set_bit(p, n)	(((u32 *)(p))[(n)>>5] |=  (1<<((n)&0x1f)))
272 #define sym_clr_bit(p, n)	(((u32 *)(p))[(n)>>5] &= ~(1<<((n)&0x1f)))
273 #define sym_is_bit(p, n)	(((u32 *)(p))[(n)>>5] &   (1<<((n)&0x1f)))
274 
275 /*
276  *  Number of tasks per device we want to handle.
277  */
278 #if	SYM_CONF_MAX_TAG_ORDER > 8
279 #error	"more than 256 tags per logical unit not allowed."
280 #endif
281 #define	SYM_CONF_MAX_TASK	(1<<SYM_CONF_MAX_TAG_ORDER)
282 
283 /*
284  *  Donnot use more tasks that we can handle.
285  */
286 #ifndef	SYM_CONF_MAX_TAG
287 #define	SYM_CONF_MAX_TAG	SYM_CONF_MAX_TASK
288 #endif
289 #if	SYM_CONF_MAX_TAG > SYM_CONF_MAX_TASK
290 #undef	SYM_CONF_MAX_TAG
291 #define	SYM_CONF_MAX_TAG	SYM_CONF_MAX_TASK
292 #endif
293 
294 /*
295  *    This one means 'NO TAG for this job'
296  */
297 #define NO_TAG	(256)
298 
299 /*
300  *  Number of SCSI targets.
301  */
302 #if	SYM_CONF_MAX_TARGET > 16
303 #error	"more than 16 targets not allowed."
304 #endif
305 
306 /*
307  *  Number of logical units per target.
308  */
309 #if	SYM_CONF_MAX_LUN > 64
310 #error	"more than 64 logical units per target not allowed."
311 #endif
312 
313 /*
314  *    Asynchronous pre-scaler (ns). Shall be 40 for
315  *    the SCSI timings to be compliant.
316  */
317 #define	SYM_CONF_MIN_ASYNC (40)
318 
319 /*
320  *  Number of entries in the START and DONE queues.
321  *
322  *  We limit to 1 PAGE in order to succeed allocation of
323  *  these queues. Each entry is 8 bytes long (2 DWORDS).
324  */
325 #ifdef	SYM_CONF_MAX_START
326 #define	SYM_CONF_MAX_QUEUE (SYM_CONF_MAX_START+2)
327 #else
328 #define	SYM_CONF_MAX_QUEUE (7*SYM_CONF_MAX_TASK+2)
329 #define	SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
330 #endif
331 
332 #if	SYM_CONF_MAX_QUEUE > PAGE_SIZE/8
333 #undef	SYM_CONF_MAX_QUEUE
334 #define	SYM_CONF_MAX_QUEUE   PAGE_SIZE/8
335 #undef	SYM_CONF_MAX_START
336 #define	SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
337 #endif
338 
339 /*
340  *  For this one, we want a short name :-)
341  */
342 #define MAX_QUEUE	SYM_CONF_MAX_QUEUE
343 
344 /*
345  *  These ones should have been already defined.
346  */
347 #ifndef offsetof
348 #define offsetof(t, m)	((size_t) (&((t *)0)->m))
349 #endif
350 #ifndef MIN
351 #define MIN(a, b) (((a) < (b)) ? (a) : (b))
352 #endif
353 
354 /*
355  *  Active debugging tags and verbosity.
356  */
357 #define DEBUG_ALLOC	(0x0001)
358 #define DEBUG_PHASE	(0x0002)
359 #define DEBUG_POLL	(0x0004)
360 #define DEBUG_QUEUE	(0x0008)
361 #define DEBUG_RESULT	(0x0010)
362 #define DEBUG_SCATTER	(0x0020)
363 #define DEBUG_SCRIPT	(0x0040)
364 #define DEBUG_TINY	(0x0080)
365 #define DEBUG_TIMING	(0x0100)
366 #define DEBUG_NEGO	(0x0200)
367 #define DEBUG_TAGS	(0x0400)
368 #define DEBUG_POINTER	(0x0800)
369 
370 #if 0
371 static int sym_debug = 0;
372 	#define DEBUG_FLAGS sym_debug
373 #else
374 /*	#define DEBUG_FLAGS (0x0631) */
375 	#define DEBUG_FLAGS (0x0000)
376 
377 #endif
378 #define sym_verbose	(np->verbose)
379 
380 /*
381  *  Copy from main memory to PCI memory space.
382  */
383 #ifdef	__alpha__
384 #define memcpy_to_pci(d, s, n)	memcpy_toio((u32)(d), (void *)(s), (n))
385 #else /*__i386__*/
386 #define memcpy_to_pci(d, s, n)	bcopy((s), (void *)(d), (n))
387 #endif
388 
389 /*
390  *  Insert a delay in micro-seconds and milli-seconds.
391  */
392 static void UDELAY(long us) { DELAY(us); }
393 static void MDELAY(long ms) { while (ms--) UDELAY(1000); }
394 
395 /*
396  *  Simple power of two buddy-like allocator.
397  *
398  *  This simple code is not intended to be fast, but to
399  *  provide power of 2 aligned memory allocations.
400  *  Since the SCRIPTS processor only supplies 8 bit arithmetic,
401  *  this allocator allows simple and fast address calculations
402  *  from the SCRIPTS code. In addition, cache line alignment
403  *  is guaranteed for power of 2 cache line size.
404  *
405  *  This allocator has been developped for the Linux sym53c8xx
406  *  driver, since this O/S does not provide naturally aligned
407  *  allocations.
408  *  It has the vertue to allow the driver to use private pages
409  *  of memory that will be useful if we ever need to deal with
410  *  IO MMU for PCI.
411  */
412 
413 #define MEMO_SHIFT	4	/* 16 bytes minimum memory chunk */
414 #define MEMO_PAGE_ORDER	0	/* 1 PAGE  maximum */
415 #if 0
416 #define MEMO_FREE_UNUSED	/* Free unused pages immediately */
417 #endif
418 #define MEMO_WARN	1
419 #define MEMO_CLUSTER_SHIFT	(PAGE_SHIFT+MEMO_PAGE_ORDER)
420 #define MEMO_CLUSTER_SIZE	(1UL << MEMO_CLUSTER_SHIFT)
421 #define MEMO_CLUSTER_MASK	(MEMO_CLUSTER_SIZE-1)
422 
423 #define get_pages()		malloc(MEMO_CLUSTER_SIZE, M_DEVBUF, M_NOWAIT)
424 #define free_pages(p)		free((p), M_DEVBUF)
425 
426 typedef u_long m_addr_t;	/* Enough bits to bit-hack addresses */
427 
428 typedef struct m_link {		/* Link between free memory chunks */
429 	struct m_link *next;
430 } m_link_s;
431 
432 #ifdef	FreeBSD_Bus_Dma_Abstraction
433 typedef struct m_vtob {		/* Virtual to Bus address translation */
434 	struct m_vtob	*next;
435 	bus_dmamap_t	dmamap;	/* Map for this chunk */
436 	m_addr_t	vaddr;	/* Virtual address */
437 	m_addr_t	baddr;	/* Bus physical address */
438 } m_vtob_s;
439 /* Hash this stuff a bit to speed up translations */
440 #define VTOB_HASH_SHIFT		5
441 #define VTOB_HASH_SIZE		(1UL << VTOB_HASH_SHIFT)
442 #define VTOB_HASH_MASK		(VTOB_HASH_SIZE-1)
443 #define VTOB_HASH_CODE(m)	\
444 	((((m_addr_t) (m)) >> MEMO_CLUSTER_SHIFT) & VTOB_HASH_MASK)
445 #endif
446 
447 typedef struct m_pool {		/* Memory pool of a given kind */
448 #ifdef	FreeBSD_Bus_Dma_Abstraction
449 	bus_dma_tag_t	 dev_dmat;	/* Identifies the pool */
450 	bus_dma_tag_t	 dmat;		/* Tag for our fixed allocations */
451 	m_addr_t (*getp)(struct m_pool *);
452 #ifdef	MEMO_FREE_UNUSED
453 	void (*freep)(struct m_pool *, m_addr_t);
454 #endif
455 #define M_GETP()		mp->getp(mp)
456 #define M_FREEP(p)		mp->freep(mp, p)
457 	int nump;
458 	m_vtob_s *(vtob[VTOB_HASH_SIZE]);
459 	struct m_pool *next;
460 #else
461 #define M_GETP()		get_pages()
462 #define M_FREEP(p)		free_pages(p)
463 #endif	/* FreeBSD_Bus_Dma_Abstraction */
464 	struct m_link h[MEMO_CLUSTER_SHIFT - MEMO_SHIFT + 1];
465 } m_pool_s;
466 
467 static void *___sym_malloc(m_pool_s *mp, int size)
468 {
469 	int i = 0;
470 	int s = (1 << MEMO_SHIFT);
471 	int j;
472 	m_addr_t a;
473 	m_link_s *h = mp->h;
474 
475 	if (size > MEMO_CLUSTER_SIZE)
476 		return 0;
477 
478 	while (size > s) {
479 		s <<= 1;
480 		++i;
481 	}
482 
483 	j = i;
484 	while (!h[j].next) {
485 		if (s == MEMO_CLUSTER_SIZE) {
486 			h[j].next = (m_link_s *) M_GETP();
487 			if (h[j].next)
488 				h[j].next->next = 0;
489 			break;
490 		}
491 		++j;
492 		s <<= 1;
493 	}
494 	a = (m_addr_t) h[j].next;
495 	if (a) {
496 		h[j].next = h[j].next->next;
497 		while (j > i) {
498 			j -= 1;
499 			s >>= 1;
500 			h[j].next = (m_link_s *) (a+s);
501 			h[j].next->next = 0;
502 		}
503 	}
504 #ifdef DEBUG
505 	printf("___sym_malloc(%d) = %p\n", size, (void *) a);
506 #endif
507 	return (void *) a;
508 }
509 
510 static void ___sym_mfree(m_pool_s *mp, void *ptr, int size)
511 {
512 	int i = 0;
513 	int s = (1 << MEMO_SHIFT);
514 	m_link_s *q;
515 	m_addr_t a, b;
516 	m_link_s *h = mp->h;
517 
518 #ifdef DEBUG
519 	printf("___sym_mfree(%p, %d)\n", ptr, size);
520 #endif
521 
522 	if (size > MEMO_CLUSTER_SIZE)
523 		return;
524 
525 	while (size > s) {
526 		s <<= 1;
527 		++i;
528 	}
529 
530 	a = (m_addr_t) ptr;
531 
532 	while (1) {
533 #ifdef MEMO_FREE_UNUSED
534 		if (s == MEMO_CLUSTER_SIZE) {
535 			M_FREEP(a);
536 			break;
537 		}
538 #endif
539 		b = a ^ s;
540 		q = &h[i];
541 		while (q->next && q->next != (m_link_s *) b) {
542 			q = q->next;
543 		}
544 		if (!q->next) {
545 			((m_link_s *) a)->next = h[i].next;
546 			h[i].next = (m_link_s *) a;
547 			break;
548 		}
549 		q->next = q->next->next;
550 		a = a & b;
551 		s <<= 1;
552 		++i;
553 	}
554 }
555 
556 static void *__sym_calloc2(m_pool_s *mp, int size, char *name, int uflags)
557 {
558 	void *p;
559 
560 	p = ___sym_malloc(mp, size);
561 
562 	if (DEBUG_FLAGS & DEBUG_ALLOC)
563 		printf ("new %-10s[%4d] @%p.\n", name, size, p);
564 
565 	if (p)
566 		bzero(p, size);
567 	else if (uflags & MEMO_WARN)
568 		printf ("__sym_calloc2: failed to allocate %s[%d]\n", name, size);
569 
570 	return p;
571 }
572 
573 #define __sym_calloc(mp, s, n)	__sym_calloc2(mp, s, n, MEMO_WARN)
574 
575 static void __sym_mfree(m_pool_s *mp, void *ptr, int size, char *name)
576 {
577 	if (DEBUG_FLAGS & DEBUG_ALLOC)
578 		printf ("freeing %-10s[%4d] @%p.\n", name, size, ptr);
579 
580 	___sym_mfree(mp, ptr, size);
581 
582 }
583 
584 /*
585  * Default memory pool we donnot need to involve in DMA.
586  */
587 #ifndef	FreeBSD_Bus_Dma_Abstraction
588 /*
589  * Without the `bus dma abstraction', all the memory is assumed
590  * DMAable and a single pool is all what we need.
591  */
592 static m_pool_s mp0;
593 
594 #else
595 /*
596  * With the `bus dma abstraction', we use a separate pool for
597  * memory we donnot need to involve in DMA.
598  */
599 static m_addr_t ___mp0_getp(m_pool_s *mp)
600 {
601 	m_addr_t m = (m_addr_t) get_pages();
602 	if (m)
603 		++mp->nump;
604 	return m;
605 }
606 
607 #ifdef	MEMO_FREE_UNUSED
608 static void ___mp0_freep(m_pool_s *mp, m_addr_t m)
609 {
610 	free_pages(m);
611 	--mp->nump;
612 }
613 #endif
614 
615 #ifdef	MEMO_FREE_UNUSED
616 static m_pool_s mp0 = {0, 0, ___mp0_getp, ___mp0_freep};
617 #else
618 static m_pool_s mp0 = {0, 0, ___mp0_getp};
619 #endif
620 
621 #endif	/* FreeBSD_Bus_Dma_Abstraction */
622 
623 /*
624  * Actual memory allocation routine for non-DMAed memory.
625  */
626 static void *sym_calloc(int size, char *name)
627 {
628 	void *m;
629 	/* Lock */
630 	m = __sym_calloc(&mp0, size, name);
631 	/* Unlock */
632 	return m;
633 }
634 
635 /*
636  * Actual memory allocation routine for non-DMAed memory.
637  */
638 static void sym_mfree(void *ptr, int size, char *name)
639 {
640 	/* Lock */
641 	__sym_mfree(&mp0, ptr, size, name);
642 	/* Unlock */
643 }
644 
645 /*
646  * DMAable pools.
647  */
648 #ifndef	FreeBSD_Bus_Dma_Abstraction
649 /*
650  * Without `bus dma abstraction', all the memory is DMAable, and
651  * only a single pool is needed (vtophys() is our friend).
652  */
653 #define __sym_calloc_dma(b, s, n)	sym_calloc(s, n)
654 #define __sym_mfree_dma(b, p, s, n)	sym_mfree(p, s, n)
655 #ifdef	__alpha__
656 #define	__vtobus(b, p)	alpha_XXX_dmamap((vm_offset_t)(p))
657 #else /*__i386__*/
658 #define __vtobus(b, p)	vtophys(p)
659 #endif
660 
661 #else
662 /*
663  * With `bus dma abstraction', we use a separate pool per parent
664  * BUS handle. A reverse table (hashed) is maintained for virtual
665  * to BUS address translation.
666  */
667 static void getbaddrcb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
668 {
669 	bus_addr_t *baddr;
670 	baddr = (bus_addr_t *)arg;
671 	*baddr = segs->ds_addr;
672 }
673 
674 static m_addr_t ___dma_getp(m_pool_s *mp)
675 {
676 	m_vtob_s *vbp;
677 	void *vaddr = 0;
678 	bus_addr_t baddr = 0;
679 
680 	vbp = __sym_calloc(&mp0, sizeof(*vbp), "VTOB");
681 	if (!vbp)
682 		goto out_err;
683 
684 	if (bus_dmamem_alloc(mp->dmat, &vaddr,
685 			      BUS_DMA_NOWAIT, &vbp->dmamap))
686 		goto out_err;
687 	bus_dmamap_load(mp->dmat, vbp->dmamap, vaddr,
688 			MEMO_CLUSTER_SIZE, getbaddrcb, &baddr, 0);
689 	if (baddr) {
690 		int hc = VTOB_HASH_CODE(vaddr);
691 		vbp->vaddr = (m_addr_t) vaddr;
692 		vbp->baddr = (m_addr_t) baddr;
693 		vbp->next = mp->vtob[hc];
694 		mp->vtob[hc] = vbp;
695 		++mp->nump;
696 		return (m_addr_t) vaddr;
697 	}
698 out_err:
699 	if (baddr)
700 		bus_dmamap_unload(mp->dmat, vbp->dmamap);
701 	if (vaddr)
702 		bus_dmamem_free(mp->dmat, vaddr, vbp->dmamap);
703 	if (vbp->dmamap)
704 		bus_dmamap_destroy(mp->dmat, vbp->dmamap);
705 	if (vbp)
706 		__sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB");
707 	return 0;
708 }
709 
710 #ifdef	MEMO_FREE_UNUSED
711 static void ___dma_freep(m_pool_s *mp, m_addr_t m)
712 {
713 	m_vtob_s **vbpp, *vbp;
714 	int hc = VTOB_HASH_CODE(m);
715 
716 	vbpp = &mp->vtob[hc];
717 	while (*vbpp && (*vbpp)->vaddr != m)
718 		vbpp = &(*vbpp)->next;
719 	if (*vbpp) {
720 		vbp = *vbpp;
721 		*vbpp = (*vbpp)->next;
722 		bus_dmamap_unload(mp->dmat, vbp->dmamap);
723 		bus_dmamem_free(mp->dmat, (void *) vbp->vaddr, vbp->dmamap);
724 		bus_dmamap_destroy(mp->dmat, vbp->dmamap);
725 		__sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB");
726 		--mp->nump;
727 	}
728 }
729 #endif
730 
731 static __inline__ m_pool_s *___get_dma_pool(bus_dma_tag_t dev_dmat)
732 {
733 	m_pool_s *mp;
734 	for (mp = mp0.next; mp && mp->dev_dmat != dev_dmat; mp = mp->next);
735 	return mp;
736 }
737 
738 static m_pool_s *___cre_dma_pool(bus_dma_tag_t dev_dmat)
739 {
740 	m_pool_s *mp = 0;
741 
742 	mp = __sym_calloc(&mp0, sizeof(*mp), "MPOOL");
743 	if (mp) {
744 		mp->dev_dmat = dev_dmat;
745 		if (!bus_dma_tag_create(dev_dmat, 1, MEMO_CLUSTER_SIZE,
746 			       BUS_SPACE_MAXADDR_32BIT,
747 			       BUS_SPACE_MAXADDR_32BIT,
748 			       NULL, NULL, MEMO_CLUSTER_SIZE, 1,
749 			       MEMO_CLUSTER_SIZE, 0, &mp->dmat)) {
750 			mp->getp = ___dma_getp;
751 #ifdef	MEMO_FREE_UNUSED
752 			mp->freep = ___dma_freep;
753 #endif
754 			mp->next = mp0.next;
755 			mp0.next = mp;
756 			return mp;
757 		}
758 	}
759 	if (mp)
760 		__sym_mfree(&mp0, mp, sizeof(*mp), "MPOOL");
761 	return 0;
762 }
763 
764 #ifdef	MEMO_FREE_UNUSED
765 static void ___del_dma_pool(m_pool_s *p)
766 {
767 	struct m_pool **pp = &mp0.next;
768 
769 	while (*pp && *pp != p)
770 		pp = &(*pp)->next;
771 	if (*pp) {
772 		*pp = (*pp)->next;
773 		bus_dma_tag_destroy(p->dmat);
774 		__sym_mfree(&mp0, p, sizeof(*p), "MPOOL");
775 	}
776 }
777 #endif
778 
779 static void *__sym_calloc_dma(bus_dma_tag_t dev_dmat, int size, char *name)
780 {
781 	struct m_pool *mp;
782 	void *m = 0;
783 
784 	/* Lock */
785 	mp = ___get_dma_pool(dev_dmat);
786 	if (!mp)
787 		mp = ___cre_dma_pool(dev_dmat);
788 	if (mp)
789 		m = __sym_calloc(mp, size, name);
790 #ifdef	MEMO_FREE_UNUSED
791 	if (mp && !mp->nump)
792 		___del_dma_pool(mp);
793 #endif
794 	/* Unlock */
795 
796 	return m;
797 }
798 
799 static void
800 __sym_mfree_dma(bus_dma_tag_t dev_dmat, void *m, int size, char *name)
801 {
802 	struct m_pool *mp;
803 
804 	/* Lock */
805 	mp = ___get_dma_pool(dev_dmat);
806 	if (mp)
807 		__sym_mfree(mp, m, size, name);
808 #ifdef	MEMO_FREE_UNUSED
809 	if (mp && !mp->nump)
810 		___del_dma_pool(mp);
811 #endif
812 	/* Unlock */
813 }
814 
815 static m_addr_t __vtobus(bus_dma_tag_t dev_dmat, void *m)
816 {
817 	m_pool_s *mp;
818 	int hc = VTOB_HASH_CODE(m);
819 	m_vtob_s *vp = 0;
820 	m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK;
821 
822 	/* Lock */
823 	mp = ___get_dma_pool(dev_dmat);
824 	if (mp) {
825 		vp = mp->vtob[hc];
826 		while (vp && (m_addr_t) vp->vaddr != a)
827 			vp = vp->next;
828 	}
829 	/* Unlock */
830 	if (!vp)
831 		panic("sym: VTOBUS FAILED!\n");
832 	return vp ? vp->baddr + (((m_addr_t) m) - a) : 0;
833 }
834 
835 #endif	/* FreeBSD_Bus_Dma_Abstraction */
836 
837 /*
838  * Verbs for DMAable memory handling.
839  * The _uvptv_ macro avoids a nasty warning about pointer to volatile
840  * being discarded.
841  */
842 #define _uvptv_(p) ((void *)((vm_offset_t)(p)))
843 #define _sym_calloc_dma(np, s, n)	__sym_calloc_dma(np->bus_dmat, s, n)
844 #define _sym_mfree_dma(np, p, s, n)	\
845 				__sym_mfree_dma(np->bus_dmat, _uvptv_(p), s, n)
846 #define sym_calloc_dma(s, n)		_sym_calloc_dma(np, s, n)
847 #define sym_mfree_dma(p, s, n)		_sym_mfree_dma(np, p, s, n)
848 #define _vtobus(np, p)			__vtobus(np->bus_dmat, _uvptv_(p))
849 #define vtobus(p)			_vtobus(np, p)
850 
851 
852 /*
853  *  Print a buffer in hexadecimal format.
854  */
855 static void sym_printb_hex (u_char *p, int n)
856 {
857 	while (n-- > 0)
858 		printf (" %x", *p++);
859 }
860 
861 /*
862  *  Same with a label at beginning and .\n at end.
863  */
864 static void sym_printl_hex (char *label, u_char *p, int n)
865 {
866 	printf ("%s", label);
867 	sym_printb_hex (p, n);
868 	printf (".\n");
869 }
870 
871 /*
872  *  Return a string for SCSI BUS mode.
873  */
874 static char *sym_scsi_bus_mode(int mode)
875 {
876 	switch(mode) {
877 	case SMODE_HVD:	return "HVD";
878 	case SMODE_SE:	return "SE";
879 	case SMODE_LVD: return "LVD";
880 	}
881 	return "??";
882 }
883 
884 /*
885  *  Some poor sync table that refers to Tekram NVRAM layout.
886  */
887 #ifdef SYM_CONF_NVRAM_SUPPORT
888 static u_char Tekram_sync[16] =
889 	{25,31,37,43, 50,62,75,125, 12,15,18,21, 6,7,9,10};
890 #endif
891 
892 /*
893  *  Union of supported NVRAM formats.
894  */
895 struct sym_nvram {
896 	int type;
897 #define	SYM_SYMBIOS_NVRAM	(1)
898 #define	SYM_TEKRAM_NVRAM	(2)
899 #ifdef	SYM_CONF_NVRAM_SUPPORT
900 	union {
901 		Symbios_nvram Symbios;
902 		Tekram_nvram Tekram;
903 	} data;
904 #endif
905 };
906 
907 /*
908  *  This one is hopefully useless, but actually useful. :-)
909  */
910 #ifndef assert
911 #define	assert(expression) { \
912 	if (!(expression)) { \
913 		(void)panic( \
914 			"assertion \"%s\" failed: file \"%s\", line %d\n", \
915 			#expression, \
916 			__FILE__, __LINE__); \
917 	} \
918 }
919 #endif
920 
921 /*
922  *  Some provision for a possible big endian support.
923  *  By the way some Symbios chips also may support some kind
924  *  of big endian byte ordering.
925  *  For now, this stuff does not deserve any comments. :)
926  */
927 
928 #define sym_offb(o)	(o)
929 #define sym_offw(o)	(o)
930 
931 #define cpu_to_scr(dw)	(dw)
932 #define scr_to_cpu(dw)	(dw)
933 
934 /*
935  *  Access to the controller chip.
936  *
937  *  If SYM_CONF_IOMAPPED is defined, the driver will use
938  *  normal IOs instead of the MEMORY MAPPED IO method
939  *  recommended by PCI specifications.
940  */
941 
942 /*
943  *  Define some understable verbs so we will not suffer of
944  *  having to deal with the stupid PC tokens for IO.
945  */
946 #define io_read8(p)	 scr_to_cpu(inb((p)))
947 #define	io_read16(p)	 scr_to_cpu(inw((p)))
948 #define io_read32(p)	 scr_to_cpu(inl((p)))
949 #define	io_write8(p, v)	 outb((p), cpu_to_scr(v))
950 #define io_write16(p, v) outw((p), cpu_to_scr(v))
951 #define io_write32(p, v) outl((p), cpu_to_scr(v))
952 
953 #ifdef	__alpha__
954 
955 #define mmio_read8(a)	     readb(a)
956 #define mmio_read16(a)	     readw(a)
957 #define mmio_read32(a)	     readl(a)
958 #define mmio_write8(a, b)    writeb(a, b)
959 #define mmio_write16(a, b)   writew(a, b)
960 #define mmio_write32(a, b)   writel(a, b)
961 
962 #else /*__i386__*/
963 
964 #define mmio_read8(a)	     scr_to_cpu((*(volatile unsigned char *) (a)))
965 #define mmio_read16(a)	     scr_to_cpu((*(volatile unsigned short *) (a)))
966 #define mmio_read32(a)	     scr_to_cpu((*(volatile unsigned int *) (a)))
967 #define mmio_write8(a, b)   (*(volatile unsigned char *) (a)) = cpu_to_scr(b)
968 #define mmio_write16(a, b)  (*(volatile unsigned short *) (a)) = cpu_to_scr(b)
969 #define mmio_write32(a, b)  (*(volatile unsigned int *) (a)) = cpu_to_scr(b)
970 
971 #endif
972 
973 /*
974  *  Normal IO
975  */
976 #if defined(SYM_CONF_IOMAPPED)
977 
978 #define	INB_OFF(o)	io_read8(np->io_port + sym_offb(o))
979 #define	OUTB_OFF(o, v)	io_write8(np->io_port + sym_offb(o), (v))
980 
981 #define	INW_OFF(o)	io_read16(np->io_port + sym_offw(o))
982 #define	OUTW_OFF(o, v)	io_write16(np->io_port + sym_offw(o), (v))
983 
984 #define	INL_OFF(o)	io_read32(np->io_port + (o))
985 #define	OUTL_OFF(o, v)	io_write32(np->io_port + (o), (v))
986 
987 #else	/* Memory mapped IO */
988 
989 #define	INB_OFF(o)	mmio_read8(np->mmio_va + sym_offb(o))
990 #define	OUTB_OFF(o, v)	mmio_write8(np->mmio_va + sym_offb(o), (v))
991 
992 #define	INW_OFF(o)	mmio_read16(np->mmio_va + sym_offw(o))
993 #define	OUTW_OFF(o, v)	mmio_write16(np->mmio_va + sym_offw(o), (v))
994 
995 #define	INL_OFF(o)	mmio_read32(np->mmio_va + (o))
996 #define	OUTL_OFF(o, v)	mmio_write32(np->mmio_va + (o), (v))
997 
998 #endif
999 
1000 /*
1001  *  Common to both normal IO and MMIO.
1002  */
1003 #define INB(r)		INB_OFF(offsetof(struct sym_reg,r))
1004 #define INW(r)		INW_OFF(offsetof(struct sym_reg,r))
1005 #define INL(r)		INL_OFF(offsetof(struct sym_reg,r))
1006 
1007 #define OUTB(r, v)	OUTB_OFF(offsetof(struct sym_reg,r), (v))
1008 #define OUTW(r, v)	OUTW_OFF(offsetof(struct sym_reg,r), (v))
1009 #define OUTL(r, v)	OUTL_OFF(offsetof(struct sym_reg,r), (v))
1010 
1011 #define OUTONB(r, m)	OUTB(r, INB(r) | (m))
1012 #define OUTOFFB(r, m)	OUTB(r, INB(r) & ~(m))
1013 #define OUTONW(r, m)	OUTW(r, INW(r) | (m))
1014 #define OUTOFFW(r, m)	OUTW(r, INW(r) & ~(m))
1015 #define OUTONL(r, m)	OUTL(r, INL(r) | (m))
1016 #define OUTOFFL(r, m)	OUTL(r, INL(r) & ~(m))
1017 
1018 /*
1019  *  Command control block states.
1020  */
1021 #define HS_IDLE		(0)
1022 #define HS_BUSY		(1)
1023 #define HS_NEGOTIATE	(2)	/* sync/wide data transfer*/
1024 #define HS_DISCONNECT	(3)	/* Disconnected by target */
1025 #define HS_WAIT		(4)	/* waiting for resource	  */
1026 
1027 #define HS_DONEMASK	(0x80)
1028 #define HS_COMPLETE	(4|HS_DONEMASK)
1029 #define HS_SEL_TIMEOUT	(5|HS_DONEMASK)	/* Selection timeout      */
1030 #define HS_UNEXPECTED	(6|HS_DONEMASK)	/* Unexpected disconnect  */
1031 #define HS_COMP_ERR	(7|HS_DONEMASK)	/* Completed with error	  */
1032 
1033 /*
1034  *  Software Interrupt Codes
1035  */
1036 #define	SIR_BAD_SCSI_STATUS	(1)
1037 #define	SIR_SEL_ATN_NO_MSG_OUT	(2)
1038 #define	SIR_MSG_RECEIVED	(3)
1039 #define	SIR_MSG_WEIRD		(4)
1040 #define	SIR_NEGO_FAILED		(5)
1041 #define	SIR_NEGO_PROTO		(6)
1042 #define	SIR_SCRIPT_STOPPED	(7)
1043 #define	SIR_REJECT_TO_SEND	(8)
1044 #define	SIR_SWIDE_OVERRUN	(9)
1045 #define	SIR_SODL_UNDERRUN	(10)
1046 #define	SIR_RESEL_NO_MSG_IN	(11)
1047 #define	SIR_RESEL_NO_IDENTIFY	(12)
1048 #define	SIR_RESEL_BAD_LUN	(13)
1049 #define	SIR_TARGET_SELECTED	(14)
1050 #define	SIR_RESEL_BAD_I_T_L	(15)
1051 #define	SIR_RESEL_BAD_I_T_L_Q	(16)
1052 #define	SIR_ABORT_SENT		(17)
1053 #define	SIR_RESEL_ABORTED	(18)
1054 #define	SIR_MSG_OUT_DONE	(19)
1055 #define	SIR_COMPLETE_ERROR	(20)
1056 #define	SIR_MAX			(20)
1057 
1058 /*
1059  *  Extended error bit codes.
1060  *  xerr_status field of struct sym_ccb.
1061  */
1062 #define	XE_EXTRA_DATA	(1)	/* unexpected data phase	 */
1063 #define	XE_BAD_PHASE	(1<<1)	/* illegal phase (4/5)		 */
1064 #define	XE_PARITY_ERR	(1<<2)	/* unrecovered SCSI parity error */
1065 #define	XE_SODL_UNRUN	(1<<3)	/* ODD transfer in DATA OUT phase */
1066 #define	XE_SWIDE_OVRUN	(1<<4)	/* ODD transfer in DATA IN phase */
1067 
1068 /*
1069  *  Negotiation status.
1070  *  nego_status field of struct sym_ccb.
1071  */
1072 #define NS_SYNC		(1)
1073 #define NS_WIDE		(2)
1074 #define NS_PPR		(3)
1075 
1076 /*
1077  *  A CCB hashed table is used to retrieve CCB address
1078  *  from DSA value.
1079  */
1080 #define CCB_HASH_SHIFT		8
1081 #define CCB_HASH_SIZE		(1UL << CCB_HASH_SHIFT)
1082 #define CCB_HASH_MASK		(CCB_HASH_SIZE-1)
1083 #define CCB_HASH_CODE(dsa)	(((dsa) >> 9) & CCB_HASH_MASK)
1084 
1085 /*
1086  *  Device flags.
1087  */
1088 #define SYM_DISC_ENABLED	(1)
1089 #define SYM_TAGS_ENABLED	(1<<1)
1090 #define SYM_SCAN_BOOT_DISABLED	(1<<2)
1091 #define SYM_SCAN_LUNS_DISABLED	(1<<3)
1092 
1093 /*
1094  *  Host adapter miscellaneous flags.
1095  */
1096 #define SYM_AVOID_BUS_RESET	(1)
1097 #define SYM_SCAN_TARGETS_HILO	(1<<1)
1098 
1099 /*
1100  *  Device quirks.
1101  *  Some devices, for example the CHEETAH 2 LVD, disconnects without
1102  *  saving the DATA POINTER then reconnect and terminates the IO.
1103  *  On reselection, the automatic RESTORE DATA POINTER makes the
1104  *  CURRENT DATA POINTER not point at the end of the IO.
1105  *  This behaviour just breaks our calculation of the residual.
1106  *  For now, we just force an AUTO SAVE on disconnection and will
1107  *  fix that in a further driver version.
1108  */
1109 #define SYM_QUIRK_AUTOSAVE 1
1110 
1111 /*
1112  *  Misc.
1113  */
1114 #define SYM_SNOOP_TIMEOUT (10000000)
1115 #define SYM_PCI_IO	PCIR_MAPS
1116 #define SYM_PCI_MMIO	(PCIR_MAPS + 4)
1117 #define SYM_PCI_RAM	(PCIR_MAPS + 8)
1118 #define SYM_PCI_RAM64	(PCIR_MAPS + 12)
1119 
1120 /*
1121  *  Back-pointer from the CAM CCB to our data structures.
1122  */
1123 #define sym_hcb_ptr	spriv_ptr0
1124 /* #define sym_ccb_ptr	spriv_ptr1 */
1125 
1126 /*
1127  *  We mostly have to deal with pointers.
1128  *  Thus these typedef's.
1129  */
1130 typedef struct sym_tcb *tcb_p;
1131 typedef struct sym_lcb *lcb_p;
1132 typedef struct sym_ccb *ccb_p;
1133 typedef struct sym_hcb *hcb_p;
1134 typedef struct sym_scr  *script_p;
1135 typedef struct sym_scrh *scripth_p;
1136 
1137 /*
1138  *  Gather negotiable parameters value
1139  */
1140 struct sym_trans {
1141 	u8 period;
1142 	u8 offset;
1143 	u8 width;
1144 	u8 options;	/* PPR options */
1145 };
1146 
1147 struct sym_tinfo {
1148 	struct sym_trans current;
1149 	struct sym_trans goal;
1150 	struct sym_trans user;
1151 };
1152 
1153 #define BUS_8_BIT	MSG_EXT_WDTR_BUS_8_BIT
1154 #define BUS_16_BIT	MSG_EXT_WDTR_BUS_16_BIT
1155 
1156 /*
1157  *  Target Control Block
1158  */
1159 struct sym_tcb {
1160 	/*
1161 	 *  LUN table used by the SCRIPTS processor.
1162 	 *  An array of bus addresses is used on reselection.
1163 	 *  LUN #0 is a special case, since multi-lun devices are rare,
1164 	 *  and we we want to speed-up the general case and not waste
1165 	 *  resources.
1166 	 */
1167 	u32	*luntbl;	/* LCBs bus address table	*/
1168 	u32	luntbl_sa;	/* bus address of this table	*/
1169 	u32	lun0_sa;	/* bus address of LCB #0	*/
1170 
1171 	/*
1172 	 *  LUN table used by the C code.
1173 	 */
1174 	lcb_p	lun0p;		/* LCB of LUN #0 (usual case)	*/
1175 #if SYM_CONF_MAX_LUN > 1
1176 	lcb_p	*lunmp;		/* Other LCBs [1..MAX_LUN]	*/
1177 #endif
1178 
1179 	/*
1180 	 *  Bitmap that tells about LUNs that succeeded at least
1181 	 *  1 IO and therefore assumed to be a real device.
1182 	 *  Avoid useless allocation of the LCB structure.
1183 	 */
1184 	u32	lun_map[(SYM_CONF_MAX_LUN+31)/32];
1185 
1186 	/*
1187 	 *  Bitmap that tells about LUNs that haven't yet an LCB
1188 	 *  allocated (not discovered or LCB allocation failed).
1189 	 */
1190 	u32	busy0_map[(SYM_CONF_MAX_LUN+31)/32];
1191 
1192 	/*
1193 	 *  Actual SYNC/WIDE IO registers value for this target.
1194 	 *  'sval', 'wval' and 'uval' are read from SCRIPTS and
1195 	 *  so have alignment constraints.
1196 	 */
1197 /*0*/	u_char	uval;		/* -> SCNTL4 register		*/
1198 /*1*/	u_char	sval;		/* -> SXFER  io register	*/
1199 /*2*/	u_char	filler1;
1200 /*3*/	u_char	wval;		/* -> SCNTL3 io register	*/
1201 
1202 	/*
1203 	 *  Transfer capabilities (SIP)
1204 	 */
1205 	struct sym_tinfo tinfo;
1206 
1207 	/*
1208 	 * Keep track of the CCB used for the negotiation in order
1209 	 * to ensure that only 1 negotiation is queued at a time.
1210 	 */
1211 	ccb_p   nego_cp;	/* CCB used for the nego		*/
1212 
1213 	/*
1214 	 *  Set when we want to reset the device.
1215 	 */
1216 	u_char	to_reset;
1217 
1218 	/*
1219 	 *  Other user settable limits and options.
1220 	 *  These limits are read from the NVRAM if present.
1221 	 */
1222 	u_char	usrflags;
1223 	u_short	usrtags;
1224 };
1225 
1226 /*
1227  *  Logical Unit Control Block
1228  */
1229 struct sym_lcb {
1230 	/*
1231 	 *  SCRIPTS address jumped by SCRIPTS on reselection.
1232 	 *  For not probed logical units, this address points to
1233 	 *  SCRIPTS that deal with bad LU handling (must be at
1234 	 *  offset zero for that reason).
1235 	 */
1236 /*0*/	u32	resel_sa;
1237 
1238 	/*
1239 	 *  Task (bus address of a CCB) read from SCRIPTS that points
1240 	 *  to the unique ITL nexus allowed to be disconnected.
1241 	 */
1242 	u32	itl_task_sa;
1243 
1244 	/*
1245 	 *  Task table read from SCRIPTS that contains pointers to
1246 	 *  ITLQ nexuses (bus addresses read from SCRIPTS).
1247 	 */
1248 	u32	*itlq_tbl;	/* Kernel virtual address	*/
1249 	u32	itlq_tbl_sa;	/* Bus address used by SCRIPTS	*/
1250 
1251 	/*
1252 	 *  Busy CCBs management.
1253 	 */
1254 	u_short	busy_itlq;	/* Number of busy tagged CCBs	*/
1255 	u_short	busy_itl;	/* Number of busy untagged CCBs	*/
1256 
1257 	/*
1258 	 *  Circular tag allocation buffer.
1259 	 */
1260 	u_short	ia_tag;		/* Tag allocation index		*/
1261 	u_short	if_tag;		/* Tag release index		*/
1262 	u_char	*cb_tags;	/* Circular tags buffer		*/
1263 
1264 	/*
1265 	 *  Set when we want to clear all tasks.
1266 	 */
1267 	u_char to_clear;
1268 
1269 	/*
1270 	 *  Capabilities.
1271 	 */
1272 	u_char	user_flags;
1273 	u_char	current_flags;
1274 };
1275 
1276 /*
1277  *  Action from SCRIPTS on a task.
1278  *  Is part of the CCB, but is also used separately to plug
1279  *  error handling action to perform from SCRIPTS.
1280  */
1281 struct sym_actscr {
1282 	u32	start;		/* Jumped by SCRIPTS after selection	*/
1283 	u32	restart;	/* Jumped by SCRIPTS on relection	*/
1284 };
1285 
1286 /*
1287  *  Phase mismatch context.
1288  *
1289  *  It is part of the CCB and is used as parameters for the
1290  *  DATA pointer. We need two contexts to handle correctly the
1291  *  SAVED DATA POINTER.
1292  */
1293 struct sym_pmc {
1294 	struct	sym_tblmove sg;	/* Updated interrupted SG block	*/
1295 	u32	ret;		/* SCRIPT return address	*/
1296 };
1297 
1298 /*
1299  *  LUN control block lookup.
1300  *  We use a direct pointer for LUN #0, and a table of
1301  *  pointers which is only allocated for devices that support
1302  *  LUN(s) > 0.
1303  */
1304 #if SYM_CONF_MAX_LUN <= 1
1305 #define sym_lp(np, tp, lun) (!lun) ? (tp)->lun0p : 0
1306 #else
1307 #define sym_lp(np, tp, lun) \
1308 	(!lun) ? (tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[(lun)] : 0
1309 #endif
1310 
1311 /*
1312  *  Status are used by the host and the script processor.
1313  *
1314  *  The last four bytes (status[4]) are copied to the
1315  *  scratchb register (declared as scr0..scr3) just after the
1316  *  select/reselect, and copied back just after disconnecting.
1317  *  Inside the script the XX_REG are used.
1318  *
1319  *  The first four bytes (scr_st[4]) are used inside the
1320  *  script by "LOAD/STORE" commands.
1321  *  Because source and destination must have the same alignment
1322  *  in a DWORD, the fields HAVE to be at the choosen offsets.
1323  *  	xerr_st		0	(0x34)	scratcha
1324  *  	nego_st		2
1325  */
1326 
1327 /*
1328  *  Last four bytes (script)
1329  */
1330 #define  QU_REG	scr0
1331 #define  HS_REG	scr1
1332 #define  HS_PRT	nc_scr1
1333 #define  SS_REG	scr2
1334 #define  SS_PRT	nc_scr2
1335 #define  HF_REG	scr3
1336 #define  HF_PRT	nc_scr3
1337 
1338 /*
1339  *  Last four bytes (host)
1340  */
1341 #define  actualquirks  phys.status[0]
1342 #define  host_status   phys.status[1]
1343 #define  ssss_status   phys.status[2]
1344 #define  host_flags    phys.status[3]
1345 
1346 /*
1347  *  Host flags
1348  */
1349 #define HF_IN_PM0	1u
1350 #define HF_IN_PM1	(1u<<1)
1351 #define HF_ACT_PM	(1u<<2)
1352 #define HF_DP_SAVED	(1u<<3)
1353 #define HF_SENSE	(1u<<4)
1354 #define HF_EXT_ERR	(1u<<5)
1355 #define HF_DATA_IN	(1u<<6)
1356 #ifdef SYM_CONF_IARB_SUPPORT
1357 #define HF_HINT_IARB	(1u<<7)
1358 #endif
1359 
1360 /*
1361  *  First four bytes (script)
1362  */
1363 #define  xerr_st       scr_st[0]
1364 #define  nego_st       scr_st[2]
1365 
1366 /*
1367  *  First four bytes (host)
1368  */
1369 #define  xerr_status   phys.xerr_st
1370 #define  nego_status   phys.nego_st
1371 
1372 /*
1373  *  Data Structure Block
1374  *
1375  *  During execution of a ccb by the script processor, the
1376  *  DSA (data structure address) register points to this
1377  *  substructure of the ccb.
1378  */
1379 struct dsb {
1380 	/*
1381 	 *  Start and restart SCRIPTS addresses (must be at 0).
1382 	 */
1383 /*0*/	struct sym_actscr go;
1384 
1385 	/*
1386 	 *  SCRIPTS jump address that deal with data pointers.
1387 	 *  'savep' points to the position in the script responsible
1388 	 *  for the	actual transfer of data.
1389 	 *  It's written on reception of a SAVE_DATA_POINTER message.
1390 	 */
1391 	u32	savep;		/* Jump address to saved data pointer	*/
1392 	u32	lastp;		/* SCRIPTS address at end of data	*/
1393 	u32	goalp;		/* Not used for now			*/
1394 
1395 	/*
1396 	 *  Status fields.
1397 	 */
1398 	u8	scr_st[4];	/* script status		*/
1399 	u8	status[4];	/* host status			*/
1400 
1401 	/*
1402 	 *  Table data for Script
1403 	 */
1404 	struct sym_tblsel  select;
1405 	struct sym_tblmove smsg;
1406 	struct sym_tblmove smsg_ext;
1407 	struct sym_tblmove cmd;
1408 	struct sym_tblmove sense;
1409 	struct sym_tblmove wresid;
1410 	struct sym_tblmove data [SYM_CONF_MAX_SG];
1411 
1412 	/*
1413 	 *  Phase mismatch contexts.
1414 	 *  We need two to handle correctly the SAVED DATA POINTER.
1415 	 */
1416 	struct sym_pmc pm0;
1417 	struct sym_pmc pm1;
1418 
1419 	/*
1420 	 *  Extra bytes count transferred in case of data overrun.
1421 	 */
1422 	u32	extra_bytes;
1423 };
1424 
1425 /*
1426  *  Our Command Control Block
1427  */
1428 struct sym_ccb {
1429 	/*
1430 	 *  This is the data structure which is pointed by the DSA
1431 	 *  register when it is executed by the script processor.
1432 	 *  It must be the first entry.
1433 	 */
1434 	struct dsb phys;
1435 
1436 	/*
1437 	 *  Pointer to CAM ccb and related stuff.
1438 	 */
1439 	union ccb *cam_ccb;	/* CAM scsiio ccb		*/
1440 	u8	cdb_buf[16];	/* Copy of CDB			*/
1441 	u8	*sns_bbuf;	/* Bounce buffer for sense data	*/
1442 #define SYM_SNS_BBUF_LEN	sizeof(struct scsi_sense_data)
1443 	int	data_len;	/* Total data length		*/
1444 	int	segments;	/* Number of SG segments	*/
1445 
1446 	/*
1447 	 *  Message areas.
1448 	 *  We prepare a message to be sent after selection.
1449 	 *  We may use a second one if the command is rescheduled
1450 	 *  due to CHECK_CONDITION or COMMAND TERMINATED.
1451 	 *  Contents are IDENTIFY and SIMPLE_TAG.
1452 	 *  While negotiating sync or wide transfer,
1453 	 *  a SDTR or WDTR message is appended.
1454 	 */
1455 	u_char	scsi_smsg [12];
1456 	u_char	scsi_smsg2[12];
1457 
1458 	/*
1459 	 *  Auto request sense related fields.
1460 	 */
1461 	u_char	sensecmd[6];	/* Request Sense command	*/
1462 	u_char	sv_scsi_status;	/* Saved SCSI status 		*/
1463 	u_char	sv_xerr_status;	/* Saved extended status	*/
1464 	int	sv_resid;	/* Saved residual		*/
1465 
1466 	/*
1467 	 *  Map for the DMA of user data.
1468 	 */
1469 #ifdef	FreeBSD_Bus_Dma_Abstraction
1470 	void		*arg;	/* Argument for some callback	*/
1471 	bus_dmamap_t	dmamap;	/* DMA map for user data	*/
1472 	u_char		dmamapped;
1473 #define SYM_DMA_NONE	0
1474 #define SYM_DMA_READ	1
1475 #define SYM_DMA_WRITE	2
1476 #endif
1477 	/*
1478 	 *  Other fields.
1479 	 */
1480 	u_long	ccb_ba;		/* BUS address of this CCB	*/
1481 	u_short	tag;		/* Tag for this transfer	*/
1482 				/*  NO_TAG means no tag		*/
1483 	u_char	target;
1484 	u_char	lun;
1485 	ccb_p	link_ccbh;	/* Host adapter CCB hash chain	*/
1486 	SYM_QUEHEAD
1487 		link_ccbq;	/* Link to free/busy CCB queue	*/
1488 	u32	startp;		/* Initial data pointer		*/
1489 	int	ext_sg;		/* Extreme data pointer, used	*/
1490 	int	ext_ofs;	/*  to calculate the residual.	*/
1491 	u_char	to_abort;	/* Want this IO to be aborted	*/
1492 };
1493 
1494 #define CCB_BA(cp,lbl)	(cp->ccb_ba + offsetof(struct sym_ccb, lbl))
1495 
1496 /*
1497  *  Host Control Block
1498  */
1499 struct sym_hcb {
1500 	/*
1501 	 *  Idle task and invalid task actions and
1502 	 *  their bus addresses.
1503 	 */
1504 	struct sym_actscr idletask, notask, bad_itl, bad_itlq;
1505 	vm_offset_t idletask_ba, notask_ba, bad_itl_ba, bad_itlq_ba;
1506 
1507 	/*
1508 	 *  Dummy lun table to protect us against target
1509 	 *  returning bad lun number on reselection.
1510 	 */
1511 	u32	*badluntbl;	/* Table physical address	*/
1512 	u32	badlun_sa;	/* SCRIPT handler BUS address	*/
1513 
1514 	/*
1515 	 *  Bus address of this host control block.
1516 	 */
1517 	u32	hcb_ba;
1518 
1519 	/*
1520 	 *  Bit 32-63 of the on-chip RAM bus address in LE format.
1521 	 *  The START_RAM64 script loads the MMRS and MMWS from this
1522 	 *  field.
1523 	 */
1524 	u32	scr_ram_seg;
1525 
1526 	/*
1527 	 *  Chip and controller indentification.
1528 	 */
1529 #ifdef FreeBSD_4_Bus
1530 	device_t device;
1531 #else
1532 	pcici_t	pci_tag;
1533 #endif
1534 	int	unit;
1535 	char	inst_name[8];
1536 
1537 	/*
1538 	 *  Initial value of some IO register bits.
1539 	 *  These values are assumed to have been set by BIOS, and may
1540 	 *  be used to probe adapter implementation differences.
1541 	 */
1542 	u_char	sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest3, sv_ctest4,
1543 		sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4, sv_scntl4,
1544 		sv_stest1;
1545 
1546 	/*
1547 	 *  Actual initial value of IO register bits used by the
1548 	 *  driver. They are loaded at initialisation according to
1549 	 *  features that are to be enabled/disabled.
1550 	 */
1551 	u_char	rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest3, rv_ctest4,
1552 		rv_ctest5, rv_stest2, rv_ccntl0, rv_ccntl1, rv_scntl4;
1553 
1554 	/*
1555 	 *  Target data used by the CPU.
1556 	 */
1557 	struct sym_tcb	target[SYM_CONF_MAX_TARGET];
1558 
1559 	/*
1560 	 *  Target control block bus address array used by the SCRIPT
1561 	 *  on reselection.
1562 	 */
1563 	u32		*targtbl;
1564 
1565 	/*
1566 	 *  CAM SIM information for this instance.
1567 	 */
1568 	struct		cam_sim  *sim;
1569 	struct		cam_path *path;
1570 
1571 	/*
1572 	 *  Allocated hardware resources.
1573 	 */
1574 #ifdef FreeBSD_4_Bus
1575 	struct resource	*irq_res;
1576 	struct resource	*io_res;
1577 	struct resource	*mmio_res;
1578 	struct resource	*ram_res;
1579 	int		ram_id;
1580 	void *intr;
1581 #endif
1582 
1583 	/*
1584 	 *  Bus stuff.
1585 	 *
1586 	 *  My understanding of PCI is that all agents must share the
1587 	 *  same addressing range and model.
1588 	 *  But some hardware architecture guys provide complex and
1589 	 *  brain-deaded stuff that makes shit.
1590 	 *  This driver only support PCI compliant implementations and
1591 	 *  deals with part of the BUS stuff complexity only to fit O/S
1592 	 *  requirements.
1593 	 */
1594 #ifdef FreeBSD_4_Bus
1595 	bus_space_handle_t	io_bsh;
1596 	bus_space_tag_t		io_tag;
1597 	bus_space_handle_t	mmio_bsh;
1598 	bus_space_tag_t		mmio_tag;
1599 	bus_space_handle_t	ram_bsh;
1600 	bus_space_tag_t		ram_tag;
1601 #endif
1602 
1603 	/*
1604 	 *  DMA stuff.
1605 	 */
1606 #ifdef	FreeBSD_Bus_Dma_Abstraction
1607 	bus_dma_tag_t	bus_dmat;	/* DMA tag from parent BUS	*/
1608 	bus_dma_tag_t	data_dmat;	/* DMA tag for user data	*/
1609 #endif
1610 	/*
1611 	 *  Virtual and physical bus addresses of the chip.
1612 	 */
1613 	vm_offset_t	mmio_va;	/* MMIO kernel virtual address	*/
1614 	vm_offset_t	mmio_pa;	/* MMIO CPU physical address	*/
1615 	vm_offset_t	mmio_ba;	/* MMIO BUS address		*/
1616 	int		mmio_ws;	/* MMIO Window size		*/
1617 
1618 	vm_offset_t	ram_va;		/* RAM kernel virtual address	*/
1619 	vm_offset_t	ram_pa;		/* RAM CPU physical address	*/
1620 	vm_offset_t	ram_ba;		/* RAM BUS address		*/
1621 	int		ram_ws;		/* RAM window size		*/
1622 	u32		io_port;	/* IO port address		*/
1623 
1624 	/*
1625 	 *  SCRIPTS virtual and physical bus addresses.
1626 	 *  'script'  is loaded in the on-chip RAM if present.
1627 	 *  'scripth' stays in main memory for all chips except the
1628 	 *  53C895A, 53C896 and 53C1010 that provide 8K on-chip RAM.
1629 	 */
1630 	struct sym_scr	*script0;	/* Copies of script and scripth	*/
1631 	struct sym_scrh	*scripth0;	/*  relocated for this host.	*/
1632 	vm_offset_t	script_ba;	/* Actual script and scripth	*/
1633 	vm_offset_t	scripth_ba;	/*  bus addresses.		*/
1634 	vm_offset_t	scripth0_ba;
1635 
1636 	/*
1637 	 *  General controller parameters and configuration.
1638 	 */
1639 	u_short	device_id;	/* PCI device id		*/
1640 	u_char	revision_id;	/* PCI device revision id	*/
1641 	u_int	features;	/* Chip features map		*/
1642 	u_char	myaddr;		/* SCSI id of the adapter	*/
1643 	u_char	maxburst;	/* log base 2 of dwords burst	*/
1644 	u_char	maxwide;	/* Maximum transfer width	*/
1645 	u_char	minsync;	/* Min sync period factor (ST)	*/
1646 	u_char	maxsync;	/* Max sync period factor (ST)	*/
1647 	u_char	minsync_dt;	/* Min sync period factor (DT)	*/
1648 	u_char	maxsync_dt;	/* Max sync period factor (DT)	*/
1649 	u_char	maxoffs;	/* Max scsi offset		*/
1650 	u_char	multiplier;	/* Clock multiplier (1,2,4)	*/
1651 	u_char	clock_divn;	/* Number of clock divisors	*/
1652 	u_long	clock_khz;	/* SCSI clock frequency in KHz	*/
1653 
1654 	/*
1655 	 *  Start queue management.
1656 	 *  It is filled up by the host processor and accessed by the
1657 	 *  SCRIPTS processor in order to start SCSI commands.
1658 	 */
1659 	volatile		/* Prevent code optimizations	*/
1660 	u32	*squeue;	/* Start queue virtual address	*/
1661 	u32	squeue_ba;	/* Start queue BUS address	*/
1662 	u_short	squeueput;	/* Next free slot of the queue	*/
1663 	u_short	actccbs;	/* Number of allocated CCBs	*/
1664 
1665 	/*
1666 	 *  Command completion queue.
1667 	 *  It is the same size as the start queue to avoid overflow.
1668 	 */
1669 	u_short	dqueueget;	/* Next position to scan	*/
1670 	volatile		/* Prevent code optimizations	*/
1671 	u32	*dqueue;	/* Completion (done) queue	*/
1672 
1673 	/*
1674 	 *  Miscellaneous buffers accessed by the scripts-processor.
1675 	 *  They shall be DWORD aligned, because they may be read or
1676 	 *  written with a script command.
1677 	 */
1678 	u_char		msgout[8];	/* Buffer for MESSAGE OUT 	*/
1679 	u_char		msgin [8];	/* Buffer for MESSAGE IN	*/
1680 	u32		lastmsg;	/* Last SCSI message sent	*/
1681 	u_char		scratch;	/* Scratch for SCSI receive	*/
1682 
1683 	/*
1684 	 *  Miscellaneous configuration and status parameters.
1685 	 */
1686 	u_char		usrflags;	/* Miscellaneous user flags	*/
1687 	u_char		scsi_mode;	/* Current SCSI BUS mode	*/
1688 	u_char		verbose;	/* Verbosity for this controller*/
1689 	u32		cache;		/* Used for cache test at init.	*/
1690 
1691 	/*
1692 	 *  CCB lists and queue.
1693 	 */
1694 	ccb_p ccbh[CCB_HASH_SIZE];	/* CCB hashed by DSA value	*/
1695 	SYM_QUEHEAD	free_ccbq;	/* Queue of available CCBs	*/
1696 	SYM_QUEHEAD	busy_ccbq;	/* Queue of busy CCBs		*/
1697 
1698 	/*
1699 	 *  During error handling and/or recovery,
1700 	 *  active CCBs that are to be completed with
1701 	 *  error or requeued are moved from the busy_ccbq
1702 	 *  to the comp_ccbq prior to completion.
1703 	 */
1704 	SYM_QUEHEAD	comp_ccbq;
1705 
1706 	/*
1707 	 *  CAM CCB pending queue.
1708 	 */
1709 	SYM_QUEHEAD	cam_ccbq;
1710 
1711 	/*
1712 	 *  IMMEDIATE ARBITRATION (IARB) control.
1713 	 *
1714 	 *  We keep track in 'last_cp' of the last CCB that has been
1715 	 *  queued to the SCRIPTS processor and clear 'last_cp' when
1716 	 *  this CCB completes. If last_cp is not zero at the moment
1717 	 *  we queue a new CCB, we set a flag in 'last_cp' that is
1718 	 *  used by the SCRIPTS as a hint for setting IARB.
1719 	 *  We donnot set more than 'iarb_max' consecutive hints for
1720 	 *  IARB in order to leave devices a chance to reselect.
1721 	 *  By the way, any non zero value of 'iarb_max' is unfair. :)
1722 	 */
1723 #ifdef SYM_CONF_IARB_SUPPORT
1724 	u_short		iarb_max;	/* Max. # consecutive IARB hints*/
1725 	u_short		iarb_count;	/* Actual # of these hints	*/
1726 	ccb_p		last_cp;
1727 #endif
1728 
1729 	/*
1730 	 *  Command abort handling.
1731 	 *  We need to synchronize tightly with the SCRIPTS
1732 	 *  processor in order to handle things correctly.
1733 	 */
1734 	u_char		abrt_msg[4];	/* Message to send buffer	*/
1735 	struct sym_tblmove abrt_tbl;	/* Table for the MOV of it 	*/
1736 	struct sym_tblsel  abrt_sel;	/* Sync params for selection	*/
1737 	u_char		istat_sem;	/* Tells the chip to stop (SEM)	*/
1738 };
1739 
1740 #define HCB_BA(np, lbl)	    (np->hcb_ba      + offsetof(struct sym_hcb, lbl))
1741 #define SCRIPT_BA(np,lbl)   (np->script_ba   + offsetof(struct sym_scr, lbl))
1742 #define SCRIPTH_BA(np,lbl)  (np->scripth_ba  + offsetof(struct sym_scrh,lbl))
1743 #define SCRIPTH0_BA(np,lbl) (np->scripth0_ba + offsetof(struct sym_scrh,lbl))
1744 
1745 /*
1746  *  Scripts for SYMBIOS-Processor
1747  *
1748  *  Use sym_fill_scripts() to create the variable parts.
1749  *  Use sym_bind_script()  to make a copy and bind to
1750  *  physical bus addresses.
1751  *  We have to know the offsets of all labels before we reach
1752  *  them (for forward jumps). Therefore we declare a struct
1753  *  here. If you make changes inside the script,
1754  *
1755  *  DONT FORGET TO CHANGE THE LENGTHS HERE!
1756  */
1757 
1758 /*
1759  *  Script fragments which are loaded into the on-chip RAM
1760  *  of 825A, 875, 876, 895, 895A, 896 and 1010 chips.
1761  *  Must not exceed 4K bytes.
1762  */
1763 struct sym_scr {
1764 	u32 start		[ 14];
1765 	u32 getjob_begin	[  4];
1766 	u32 getjob_end		[  4];
1767 	u32 select		[  8];
1768 	u32 wf_sel_done		[  2];
1769 	u32 send_ident		[  2];
1770 #ifdef SYM_CONF_IARB_SUPPORT
1771 	u32 select2		[  8];
1772 #else
1773 	u32 select2		[  2];
1774 #endif
1775 	u32 command		[  2];
1776 	u32 dispatch		[ 30];
1777 	u32 sel_no_cmd		[ 10];
1778 	u32 init		[  6];
1779 	u32 clrack		[  4];
1780 	u32 disp_status		[  4];
1781 	u32 datai_done		[ 26];
1782 	u32 datao_done		[ 12];
1783 	u32 datai_phase		[  2];
1784 	u32 datao_phase		[  2];
1785 	u32 msg_in		[  2];
1786 	u32 msg_in2		[ 10];
1787 #ifdef SYM_CONF_IARB_SUPPORT
1788 	u32 status		[ 14];
1789 #else
1790 	u32 status		[ 10];
1791 #endif
1792 	u32 complete		[  8];
1793 	u32 complete2		[ 12];
1794 	u32 complete_error	[  4];
1795 	u32 done		[ 14];
1796 	u32 done_end		[  2];
1797 	u32 save_dp		[  8];
1798 	u32 restore_dp		[  4];
1799 	u32 disconnect		[ 20];
1800 #ifdef SYM_CONF_IARB_SUPPORT
1801 	u32 idle		[  4];
1802 #else
1803 	u32 idle		[  2];
1804 #endif
1805 #ifdef SYM_CONF_IARB_SUPPORT
1806 	u32 ungetjob		[  6];
1807 #else
1808 	u32 ungetjob		[  4];
1809 #endif
1810 	u32 reselect		[  4];
1811 	u32 reselected		[ 20];
1812 	u32 resel_scntl4	[ 28];
1813 #if   SYM_CONF_MAX_TASK*4 > 512
1814 	u32 resel_tag		[ 26];
1815 #elif SYM_CONF_MAX_TASK*4 > 256
1816 	u32 resel_tag		[ 20];
1817 #else
1818 	u32 resel_tag		[ 16];
1819 #endif
1820 	u32 resel_dsa		[  2];
1821 	u32 resel_dsa1		[  6];
1822 	u32 resel_no_tag	[  6];
1823 	u32 data_in		[SYM_CONF_MAX_SG * 2];
1824 	u32 data_in2		[  4];
1825 	u32 data_out		[SYM_CONF_MAX_SG * 2];
1826 	u32 data_out2		[  4];
1827 	u32 pm0_data		[ 12];
1828 	u32 pm0_data_out	[  6];
1829 	u32 pm0_data_end	[  6];
1830 	u32 pm1_data		[ 12];
1831 	u32 pm1_data_out	[  6];
1832 	u32 pm1_data_end	[  6];
1833 };
1834 
1835 /*
1836  *  Script fragments which stay in main memory for all chips
1837  *  except for chips that support 8K on-chip RAM.
1838  */
1839 struct sym_scrh {
1840 	u32 start64		[  2];
1841 	u32 no_data		[  2];
1842 	u32 sel_for_abort	[ 18];
1843 	u32 sel_for_abort_1	[  2];
1844 	u32 select_no_atn	[  8];
1845 	u32 wf_sel_done_no_atn	[  4];
1846 
1847 	u32 msg_in_etc		[ 14];
1848 	u32 msg_received	[  4];
1849 	u32 msg_weird_seen	[  4];
1850 	u32 msg_extended	[ 20];
1851 	u32 msg_bad		[  6];
1852 	u32 msg_weird		[  4];
1853 	u32 msg_weird1		[  8];
1854 
1855 	u32 wdtr_resp		[  6];
1856 	u32 send_wdtr		[  4];
1857 	u32 sdtr_resp		[  6];
1858 	u32 send_sdtr		[  4];
1859 	u32 ppr_resp		[  6];
1860 	u32 send_ppr		[  4];
1861 	u32 nego_bad_phase	[  4];
1862 	u32 msg_out		[  4];
1863 	u32 msg_out_done	[  4];
1864 	u32 data_ovrun		[ 18];
1865 	u32 data_ovrun1		[ 20];
1866 	u32 abort_resel		[ 16];
1867 	u32 resend_ident	[  4];
1868 	u32 ident_break		[  4];
1869 	u32 ident_break_atn	[  4];
1870 	u32 sdata_in		[  6];
1871 	u32 resel_bad_lun	[  4];
1872 	u32 bad_i_t_l		[  4];
1873 	u32 bad_i_t_l_q		[  4];
1874 	u32 bad_status		[  6];
1875 	u32 pm_handle		[ 20];
1876 	u32 pm_handle1		[  4];
1877 	u32 pm_save		[  4];
1878 	u32 pm0_save		[ 14];
1879 	u32 pm1_save		[ 14];
1880 
1881 	/* WSR handling */
1882 	u32 pm_wsr_handle	[ 42];
1883 	u32 wsr_ma_helper	[  4];
1884 
1885 	/* Data area */
1886 	u32 zero		[  1];
1887 	u32 scratch		[  1];
1888 	u32 pm0_data_addr	[  1];
1889 	u32 pm1_data_addr	[  1];
1890 	u32 saved_dsa		[  1];
1891 	u32 saved_drs		[  1];
1892 	u32 done_pos		[  1];
1893 	u32 startpos		[  1];
1894 	u32 targtbl		[  1];
1895 	/* End of data area */
1896 
1897 	u32 snooptest		[  6];
1898 	u32 snoopend		[  2];
1899 };
1900 
1901 /*
1902  *  Function prototypes.
1903  */
1904 static void sym_fill_scripts (script_p scr, scripth_p scrh);
1905 static void sym_bind_script (hcb_p np, u32 *src, u32 *dst, int len);
1906 static void sym_save_initial_setting (hcb_p np);
1907 static int  sym_prepare_setting (hcb_p np, struct sym_nvram *nvram);
1908 static int  sym_prepare_nego (hcb_p np, ccb_p cp, int nego, u_char *msgptr);
1909 static void sym_put_start_queue (hcb_p np, ccb_p cp);
1910 static void sym_chip_reset (hcb_p np);
1911 static void sym_soft_reset (hcb_p np);
1912 static void sym_start_reset (hcb_p np);
1913 static int  sym_reset_scsi_bus (hcb_p np, int enab_int);
1914 static int  sym_wakeup_done (hcb_p np);
1915 static void sym_flush_busy_queue (hcb_p np, int cam_status);
1916 static void sym_flush_comp_queue (hcb_p np, int cam_status);
1917 static void sym_init (hcb_p np, int reason);
1918 static int  sym_getsync(hcb_p np, u_char dt, u_char sfac, u_char *divp,
1919 		        u_char *fakp);
1920 static void sym_setsync (hcb_p np, ccb_p cp, u_char ofs, u_char per,
1921 			 u_char div, u_char fak);
1922 static void sym_setwide (hcb_p np, ccb_p cp, u_char wide);
1923 static void sym_setpprot(hcb_p np, ccb_p cp, u_char dt, u_char ofs,
1924 			 u_char per, u_char wide, u_char div, u_char fak);
1925 static void sym_settrans(hcb_p np, ccb_p cp, u_char dt, u_char ofs,
1926 			 u_char per, u_char wide, u_char div, u_char fak);
1927 static void sym_log_hard_error (hcb_p np, u_short sist, u_char dstat);
1928 static void sym_intr (void *arg);
1929 static void sym_poll (struct cam_sim *sim);
1930 static void sym_recover_scsi_int (hcb_p np, u_char hsts);
1931 static void sym_int_sto (hcb_p np);
1932 static void sym_int_udc (hcb_p np);
1933 static void sym_int_sbmc (hcb_p np);
1934 static void sym_int_par (hcb_p np, u_short sist);
1935 static void sym_int_ma (hcb_p np);
1936 static int  sym_dequeue_from_squeue(hcb_p np, int i, int target, int lun,
1937 				    int task);
1938 static void sym_sir_bad_scsi_status (hcb_p np, int num, ccb_p cp);
1939 static int  sym_clear_tasks (hcb_p np, int status, int targ, int lun, int task);
1940 static void sym_sir_task_recovery (hcb_p np, int num);
1941 static int  sym_evaluate_dp (hcb_p np, ccb_p cp, u32 scr, int *ofs);
1942 static void sym_modify_dp (hcb_p np, tcb_p tp, ccb_p cp, int ofs);
1943 static int  sym_compute_residual (hcb_p np, ccb_p cp);
1944 static int  sym_show_msg (u_char * msg);
1945 static void sym_print_msg (ccb_p cp, char *label, u_char *msg);
1946 static void sym_sync_nego (hcb_p np, tcb_p tp, ccb_p cp);
1947 static void sym_ppr_nego (hcb_p np, tcb_p tp, ccb_p cp);
1948 static void sym_wide_nego (hcb_p np, tcb_p tp, ccb_p cp);
1949 static void sym_nego_default (hcb_p np, tcb_p tp, ccb_p cp);
1950 static void sym_nego_rejected (hcb_p np, tcb_p tp, ccb_p cp);
1951 static void sym_int_sir (hcb_p np);
1952 static void sym_free_ccb (hcb_p np, ccb_p cp);
1953 static ccb_p sym_get_ccb (hcb_p np, u_char tn, u_char ln, u_char tag_order);
1954 static ccb_p sym_alloc_ccb (hcb_p np);
1955 static ccb_p sym_ccb_from_dsa (hcb_p np, u_long dsa);
1956 static lcb_p sym_alloc_lcb (hcb_p np, u_char tn, u_char ln);
1957 static void sym_alloc_lcb_tags (hcb_p np, u_char tn, u_char ln);
1958 static int  sym_snooptest (hcb_p np);
1959 static void sym_selectclock(hcb_p np, u_char scntl3);
1960 static void sym_getclock (hcb_p np, int mult);
1961 static int  sym_getpciclock (hcb_p np);
1962 static void sym_complete_ok (hcb_p np, ccb_p cp);
1963 static void sym_complete_error (hcb_p np, ccb_p cp);
1964 static void sym_timeout (void *arg);
1965 static int  sym_abort_scsiio (hcb_p np, union ccb *ccb, int timed_out);
1966 static void sym_reset_dev (hcb_p np, union ccb *ccb);
1967 static void sym_action (struct cam_sim *sim, union ccb *ccb);
1968 static void sym_action1 (struct cam_sim *sim, union ccb *ccb);
1969 static int  sym_setup_cdb (hcb_p np, struct ccb_scsiio *csio, ccb_p cp);
1970 static void sym_setup_data_and_start (hcb_p np, struct ccb_scsiio *csio,
1971 				      ccb_p cp);
1972 #ifdef	FreeBSD_Bus_Dma_Abstraction
1973 static int sym_fast_scatter_sg_physical(hcb_p np, ccb_p cp,
1974 					bus_dma_segment_t *psegs, int nsegs);
1975 #else
1976 static int  sym_scatter_virtual (hcb_p np, ccb_p cp, vm_offset_t vaddr,
1977 				 vm_size_t len);
1978 static int  sym_scatter_sg_virtual (hcb_p np, ccb_p cp,
1979 				    bus_dma_segment_t *psegs, int nsegs);
1980 static int  sym_scatter_physical (hcb_p np, ccb_p cp, vm_offset_t paddr,
1981 				  vm_size_t len);
1982 #endif
1983 static int sym_scatter_sg_physical (hcb_p np, ccb_p cp,
1984 				    bus_dma_segment_t *psegs, int nsegs);
1985 static void sym_action2 (struct cam_sim *sim, union ccb *ccb);
1986 static void sym_update_trans (hcb_p np, tcb_p tp, struct sym_trans *tip,
1987 			      struct ccb_trans_settings *cts);
1988 static void sym_update_dflags(hcb_p np, u_char *flags,
1989 			      struct ccb_trans_settings *cts);
1990 
1991 #ifdef FreeBSD_4_Bus
1992 static struct sym_pci_chip *sym_find_pci_chip (device_t dev);
1993 static int  sym_pci_probe (device_t dev);
1994 static int  sym_pci_attach (device_t dev);
1995 #else
1996 static struct sym_pci_chip *sym_find_pci_chip (pcici_t tag);
1997 static const char *sym_pci_probe (pcici_t tag, pcidi_t type);
1998 static void sym_pci_attach (pcici_t tag, int unit);
1999 static int sym_pci_attach2 (pcici_t tag, int unit);
2000 #endif
2001 
2002 static void sym_pci_free (hcb_p np);
2003 static int  sym_cam_attach (hcb_p np);
2004 static void sym_cam_free (hcb_p np);
2005 
2006 static void sym_nvram_setup_host (hcb_p np, struct sym_nvram *nvram);
2007 static void sym_nvram_setup_target (hcb_p np, int targ, struct sym_nvram *nvp);
2008 static int sym_read_nvram (hcb_p np, struct sym_nvram *nvp);
2009 
2010 /*
2011  *  Return the name of the controller.
2012  */
2013 static __inline char *sym_name(hcb_p np)
2014 {
2015 	return np->inst_name;
2016 }
2017 
2018 /*
2019  *  Scripts for SYMBIOS-Processor
2020  *
2021  *  Use sym_bind_script for binding to physical addresses.
2022  *
2023  *  NADDR generates a reference to a field of the controller data.
2024  *  PADDR generates a reference to another part of the script.
2025  *  RADDR generates a reference to a script processor register.
2026  *  FADDR generates a reference to a script processor register
2027  *        with offset.
2028  *
2029  */
2030 #define	RELOC_SOFTC	0x40000000
2031 #define	RELOC_LABEL	0x50000000
2032 #define	RELOC_REGISTER	0x60000000
2033 #if 0
2034 #define	RELOC_KVAR	0x70000000
2035 #endif
2036 #define	RELOC_LABELH	0x80000000
2037 #define	RELOC_MASK	0xf0000000
2038 
2039 #define	NADDR(label)	(RELOC_SOFTC  | offsetof(struct sym_hcb, label))
2040 #define PADDR(label)    (RELOC_LABEL  | offsetof(struct sym_scr, label))
2041 #define PADDRH(label)   (RELOC_LABELH | offsetof(struct sym_scrh, label))
2042 #define	RADDR(label)	(RELOC_REGISTER | REG(label))
2043 #define	FADDR(label,ofs)(RELOC_REGISTER | ((REG(label))+(ofs)))
2044 #define	KVAR(which)	(RELOC_KVAR | (which))
2045 
2046 #define SCR_DATA_ZERO	0xf00ff00f
2047 
2048 #ifdef	RELOC_KVAR
2049 #define	SCRIPT_KVAR_JIFFIES	(0)
2050 #define	SCRIPT_KVAR_FIRST	SCRIPT_KVAR_XXXXXXX
2051 #define	SCRIPT_KVAR_LAST	SCRIPT_KVAR_XXXXXXX
2052 /*
2053  * Kernel variables referenced in the scripts.
2054  * THESE MUST ALL BE ALIGNED TO A 4-BYTE BOUNDARY.
2055  */
2056 static void *script_kvars[] =
2057 	{ (void *)&xxxxxxx };
2058 #endif
2059 
2060 static struct sym_scr script0 = {
2061 /*--------------------------< START >-----------------------*/ {
2062 	/*
2063 	 *  This NOP will be patched with LED ON
2064 	 *  SCR_REG_REG (gpreg, SCR_AND, 0xfe)
2065 	 */
2066 	SCR_NO_OP,
2067 		0,
2068 	/*
2069 	 *      Clear SIGP.
2070 	 */
2071 	SCR_FROM_REG (ctest2),
2072 		0,
2073 	/*
2074 	 *  Stop here if the C code wants to perform
2075 	 *  some error recovery procedure manually.
2076 	 *  (Indicate this by setting SEM in ISTAT)
2077 	 */
2078 	SCR_FROM_REG (istat),
2079 		0,
2080 	/*
2081 	 *  Report to the C code the next position in
2082 	 *  the start queue the SCRIPTS will schedule.
2083 	 *  The C code must not change SCRATCHA.
2084 	 */
2085 	SCR_LOAD_ABS (scratcha, 4),
2086 		PADDRH (startpos),
2087 	SCR_INT ^ IFTRUE (MASK (SEM, SEM)),
2088 		SIR_SCRIPT_STOPPED,
2089 	/*
2090 	 *  Start the next job.
2091 	 *
2092 	 *  @DSA	 = start point for this job.
2093 	 *  SCRATCHA = address of this job in the start queue.
2094 	 *
2095 	 *  We will restore startpos with SCRATCHA if we fails the
2096 	 *  arbitration or if it is the idle job.
2097 	 *
2098 	 *  The below GETJOB_BEGIN to GETJOB_END section of SCRIPTS
2099 	 *  is a critical path. If it is partially executed, it then
2100 	 *  may happen that the job address is not yet in the DSA
2101 	 *  and the the next queue position points to the next JOB.
2102 	 */
2103 	SCR_LOAD_ABS (dsa, 4),
2104 		PADDRH (startpos),
2105 	SCR_LOAD_REL (temp, 4),
2106 		4,
2107 }/*-------------------------< GETJOB_BEGIN >------------------*/,{
2108 	SCR_STORE_ABS (temp, 4),
2109 		PADDRH (startpos),
2110 	SCR_LOAD_REL (dsa, 4),
2111 		0,
2112 }/*-------------------------< GETJOB_END >--------------------*/,{
2113 	SCR_LOAD_REL (temp, 4),
2114 		0,
2115 	SCR_RETURN,
2116 		0,
2117 }/*-------------------------< SELECT >----------------------*/,{
2118 	/*
2119 	 *  DSA	contains the address of a scheduled
2120 	 *  	data structure.
2121 	 *
2122 	 *  SCRATCHA contains the address of the start queue
2123 	 *  	entry which points to the next job.
2124 	 *
2125 	 *  Set Initiator mode.
2126 	 *
2127 	 *  (Target mode is left as an exercise for the reader)
2128 	 */
2129 	SCR_CLR (SCR_TRG),
2130 		0,
2131 	/*
2132 	 *      And try to select this target.
2133 	 */
2134 	SCR_SEL_TBL_ATN ^ offsetof (struct dsb, select),
2135 		PADDR (ungetjob),
2136 	/*
2137 	 *  Now there are 4 possibilities:
2138 	 *
2139 	 *  (1) The chip looses arbitration.
2140 	 *  This is ok, because it will try again,
2141 	 *  when the bus becomes idle.
2142 	 *  (But beware of the timeout function!)
2143 	 *
2144 	 *  (2) The chip is reselected.
2145 	 *  Then the script processor takes the jump
2146 	 *  to the RESELECT label.
2147 	 *
2148 	 *  (3) The chip wins arbitration.
2149 	 *  Then it will execute SCRIPTS instruction until
2150 	 *  the next instruction that checks SCSI phase.
2151 	 *  Then will stop and wait for selection to be
2152 	 *  complete or selection time-out to occur.
2153 	 *
2154 	 *  After having won arbitration, the SCRIPTS
2155 	 *  processor is able to execute instructions while
2156 	 *  the SCSI core is performing SCSI selection.
2157 	 */
2158 	/*
2159 	 *      load the savep (saved data pointer) into
2160 	 *      the actual data pointer.
2161 	 */
2162 	SCR_LOAD_REL (temp, 4),
2163 		offsetof (struct sym_ccb, phys.savep),
2164 	/*
2165 	 *      Initialize the status registers
2166 	 */
2167 	SCR_LOAD_REL (scr0, 4),
2168 		offsetof (struct sym_ccb, phys.status),
2169 }/*-------------------------< WF_SEL_DONE >----------------------*/,{
2170 	SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)),
2171 		SIR_SEL_ATN_NO_MSG_OUT,
2172 }/*-------------------------< SEND_IDENT >----------------------*/,{
2173 	/*
2174 	 *  Selection complete.
2175 	 *  Send the IDENTIFY and possibly the TAG message
2176 	 *  and negotiation message if present.
2177 	 */
2178 	SCR_MOVE_TBL ^ SCR_MSG_OUT,
2179 		offsetof (struct dsb, smsg),
2180 }/*-------------------------< SELECT2 >----------------------*/,{
2181 #ifdef SYM_CONF_IARB_SUPPORT
2182 	/*
2183 	 *  Set IMMEDIATE ARBITRATION if we have been given
2184 	 *  a hint to do so. (Some job to do after this one).
2185 	 */
2186 	SCR_FROM_REG (HF_REG),
2187 		0,
2188 	SCR_JUMPR ^ IFFALSE (MASK (HF_HINT_IARB, HF_HINT_IARB)),
2189 		8,
2190 	SCR_REG_REG (scntl1, SCR_OR, IARB),
2191 		0,
2192 #endif
2193 	/*
2194 	 *  Anticipate the COMMAND phase.
2195 	 *  This is the PHASE we expect at this point.
2196 	 */
2197 	SCR_JUMP ^ IFFALSE (WHEN (SCR_COMMAND)),
2198 		PADDR (sel_no_cmd),
2199 }/*-------------------------< COMMAND >--------------------*/,{
2200 	/*
2201 	 *  ... and send the command
2202 	 */
2203 	SCR_MOVE_TBL ^ SCR_COMMAND,
2204 		offsetof (struct dsb, cmd),
2205 }/*-----------------------< DISPATCH >----------------------*/,{
2206 	/*
2207 	 *  MSG_IN is the only phase that shall be
2208 	 *  entered at least once for each (re)selection.
2209 	 *  So we test it first.
2210 	 */
2211 	SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
2212 		PADDR (msg_in),
2213 	SCR_JUMP ^ IFTRUE (IF (SCR_DATA_OUT)),
2214 		PADDR (datao_phase),
2215 	SCR_JUMP ^ IFTRUE (IF (SCR_DATA_IN)),
2216 		PADDR (datai_phase),
2217 	SCR_JUMP ^ IFTRUE (IF (SCR_STATUS)),
2218 		PADDR (status),
2219 	SCR_JUMP ^ IFTRUE (IF (SCR_COMMAND)),
2220 		PADDR (command),
2221 	SCR_JUMP ^ IFTRUE (IF (SCR_MSG_OUT)),
2222 		PADDRH (msg_out),
2223 
2224 	/*
2225 	 *  Set the extended error flag.
2226 	 */
2227 	SCR_REG_REG (HF_REG, SCR_OR, HF_EXT_ERR),
2228 		0,
2229 	/*
2230 	 *  Discard one illegal phase byte, if required.
2231 	 */
2232 	SCR_LOAD_REL (scratcha, 1),
2233 		offsetof (struct sym_ccb, xerr_status),
2234 	SCR_REG_REG (scratcha,  SCR_OR,  XE_BAD_PHASE),
2235 		0,
2236 	SCR_STORE_REL (scratcha, 1),
2237 		offsetof (struct sym_ccb, xerr_status),
2238 	SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_OUT)),
2239 		8,
2240 	SCR_MOVE_ABS (1) ^ SCR_ILG_OUT,
2241 		NADDR (scratch),
2242 	SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_IN)),
2243 		8,
2244 	SCR_MOVE_ABS (1) ^ SCR_ILG_IN,
2245 		NADDR (scratch),
2246 
2247 	SCR_JUMP,
2248 		PADDR (dispatch),
2249 }/*---------------------< SEL_NO_CMD >----------------------*/,{
2250 	/*
2251 	 *  The target does not switch to command
2252 	 *  phase after IDENTIFY has been sent.
2253 	 *
2254 	 *  If it stays in MSG OUT phase send it
2255 	 *  the IDENTIFY again.
2256 	 */
2257 	SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)),
2258 		PADDRH (resend_ident),
2259 	/*
2260 	 *  If target does not switch to MSG IN phase
2261 	 *  and we sent a negotiation, assert the
2262 	 *  failure immediately.
2263 	 */
2264 	SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
2265 		PADDR (dispatch),
2266 	SCR_FROM_REG (HS_REG),
2267 		0,
2268 	SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)),
2269 		SIR_NEGO_FAILED,
2270 	/*
2271 	 *  Jump to dispatcher.
2272 	 */
2273 	SCR_JUMP,
2274 		PADDR (dispatch),
2275 }/*-------------------------< INIT >------------------------*/,{
2276 	/*
2277 	 *  Wait for the SCSI RESET signal to be
2278 	 *  inactive before restarting operations,
2279 	 *  since the chip may hang on SEL_ATN
2280 	 *  if SCSI RESET is active.
2281 	 */
2282 	SCR_FROM_REG (sstat0),
2283 		0,
2284 	SCR_JUMPR ^ IFTRUE (MASK (IRST, IRST)),
2285 		-16,
2286 	SCR_JUMP,
2287 		PADDR (start),
2288 }/*-------------------------< CLRACK >----------------------*/,{
2289 	/*
2290 	 *  Terminate possible pending message phase.
2291 	 */
2292 	SCR_CLR (SCR_ACK),
2293 		0,
2294 	SCR_JUMP,
2295 		PADDR (dispatch),
2296 }/*-------------------------< DISP_STATUS >----------------------*/,{
2297 	/*
2298 	 *  Anticipate STATUS phase.
2299 	 *
2300 	 *  Does spare 3 SCRIPTS instructions when we have
2301 	 *  completed the INPUT of the data.
2302 	 */
2303 	SCR_JUMP ^ IFTRUE (WHEN (SCR_STATUS)),
2304 		PADDR (status),
2305 	SCR_JUMP,
2306 		PADDR (dispatch),
2307 }/*-------------------------< DATAI_DONE >-------------------*/,{
2308 	/*
2309 	 *  If the device still wants to send us data,
2310 	 *  we must count the extra bytes.
2311 	 */
2312 	SCR_JUMP ^ IFTRUE (WHEN (SCR_DATA_IN)),
2313 		PADDRH (data_ovrun),
2314 	/*
2315 	 *  If the SWIDE is not full, jump to dispatcher.
2316 	 *  We anticipate a STATUS phase.
2317 	 */
2318 	SCR_FROM_REG (scntl2),
2319 		0,
2320 	SCR_JUMP ^ IFFALSE (MASK (WSR, WSR)),
2321 		PADDR (disp_status),
2322 	/*
2323 	 *  The SWIDE is full.
2324 	 *  Clear this condition.
2325 	 */
2326 	SCR_REG_REG (scntl2, SCR_OR, WSR),
2327 		0,
2328 	/*
2329 	 *  We are expecting an IGNORE RESIDUE message
2330 	 *  from the device, otherwise we are in data
2331 	 *  overrun condition. Check against MSG_IN phase.
2332 	 */
2333 	SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)),
2334 		SIR_SWIDE_OVERRUN,
2335 	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
2336 		PADDR (disp_status),
2337 	/*
2338 	 *  We are in MSG_IN phase,
2339 	 *  Read the first byte of the message.
2340 	 *  If it is not an IGNORE RESIDUE message,
2341 	 *  signal overrun and jump to message
2342 	 *  processing.
2343 	 */
2344 	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
2345 		NADDR (msgin[0]),
2346 	SCR_INT ^ IFFALSE (DATA (M_IGN_RESIDUE)),
2347 		SIR_SWIDE_OVERRUN,
2348 	SCR_JUMP ^ IFFALSE (DATA (M_IGN_RESIDUE)),
2349 		PADDR (msg_in2),
2350 	/*
2351 	 *  We got the message we expected.
2352 	 *  Read the 2nd byte, and jump to dispatcher.
2353 	 */
2354 	SCR_CLR (SCR_ACK),
2355 		0,
2356 	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
2357 		NADDR (msgin[1]),
2358 	SCR_CLR (SCR_ACK),
2359 		0,
2360 	SCR_JUMP,
2361 		PADDR (disp_status),
2362 }/*-------------------------< DATAO_DONE >-------------------*/,{
2363 	/*
2364 	 *  If the device wants us to send more data,
2365 	 *  we must count the extra bytes.
2366 	 */
2367 	SCR_JUMP ^ IFTRUE (WHEN (SCR_DATA_OUT)),
2368 		PADDRH (data_ovrun),
2369 	/*
2370 	 *  If the SODL is not full jump to dispatcher.
2371 	 *  We anticipate a STATUS phase.
2372 	 */
2373 	SCR_FROM_REG (scntl2),
2374 		0,
2375 	SCR_JUMP ^ IFFALSE (MASK (WSS, WSS)),
2376 		PADDR (disp_status),
2377 	/*
2378 	 *  The SODL is full, clear this condition.
2379 	 */
2380 	SCR_REG_REG (scntl2, SCR_OR, WSS),
2381 		0,
2382 	/*
2383 	 *  And signal a DATA UNDERRUN condition
2384 	 *  to the C code.
2385 	 */
2386 	SCR_INT,
2387 		SIR_SODL_UNDERRUN,
2388 	SCR_JUMP,
2389 		PADDR (dispatch),
2390 }/*-------------------------< DATAI_PHASE >------------------*/,{
2391 	SCR_RETURN,
2392 		0,
2393 }/*-------------------------< DATAO_PHASE >------------------*/,{
2394 	SCR_RETURN,
2395 		0,
2396 }/*-------------------------< MSG_IN >--------------------*/,{
2397 	/*
2398 	 *  Get the first byte of the message.
2399 	 *
2400 	 *  The script processor doesn't negate the
2401 	 *  ACK signal after this transfer.
2402 	 */
2403 	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
2404 		NADDR (msgin[0]),
2405 }/*-------------------------< MSG_IN2 >--------------------*/,{
2406 	/*
2407 	 *  Check first against 1 byte messages
2408 	 *  that we handle from SCRIPTS.
2409 	 */
2410 	SCR_JUMP ^ IFTRUE (DATA (M_COMPLETE)),
2411 		PADDR (complete),
2412 	SCR_JUMP ^ IFTRUE (DATA (M_DISCONNECT)),
2413 		PADDR (disconnect),
2414 	SCR_JUMP ^ IFTRUE (DATA (M_SAVE_DP)),
2415 		PADDR (save_dp),
2416 	SCR_JUMP ^ IFTRUE (DATA (M_RESTORE_DP)),
2417 		PADDR (restore_dp),
2418 	/*
2419 	 *  We handle all other messages from the
2420 	 *  C code, so no need to waste on-chip RAM
2421 	 *  for those ones.
2422 	 */
2423 	SCR_JUMP,
2424 		PADDRH (msg_in_etc),
2425 }/*-------------------------< STATUS >--------------------*/,{
2426 	/*
2427 	 *  get the status
2428 	 */
2429 	SCR_MOVE_ABS (1) ^ SCR_STATUS,
2430 		NADDR (scratch),
2431 #ifdef SYM_CONF_IARB_SUPPORT
2432 	/*
2433 	 *  If STATUS is not GOOD, clear IMMEDIATE ARBITRATION,
2434 	 *  since we may have to tamper the start queue from
2435 	 *  the C code.
2436 	 */
2437 	SCR_JUMPR ^ IFTRUE (DATA (S_GOOD)),
2438 		8,
2439 	SCR_REG_REG (scntl1, SCR_AND, ~IARB),
2440 		0,
2441 #endif
2442 	/*
2443 	 *  save status to scsi_status.
2444 	 *  mark as complete.
2445 	 */
2446 	SCR_TO_REG (SS_REG),
2447 		0,
2448 	SCR_LOAD_REG (HS_REG, HS_COMPLETE),
2449 		0,
2450 	/*
2451 	 *  Anticipate the MESSAGE PHASE for
2452 	 *  the TASK COMPLETE message.
2453 	 */
2454 	SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
2455 		PADDR (msg_in),
2456 	SCR_JUMP,
2457 		PADDR (dispatch),
2458 }/*-------------------------< COMPLETE >-----------------*/,{
2459 	/*
2460 	 *  Complete message.
2461 	 *
2462 	 *  Copy the data pointer to LASTP.
2463 	 */
2464 	SCR_STORE_REL (temp, 4),
2465 		offsetof (struct sym_ccb, phys.lastp),
2466 	/*
2467 	 *  When we terminate the cycle by clearing ACK,
2468 	 *  the target may disconnect immediately.
2469 	 *
2470 	 *  We don't want to be told of an "unexpected disconnect",
2471 	 *  so we disable this feature.
2472 	 */
2473 	SCR_REG_REG (scntl2, SCR_AND, 0x7f),
2474 		0,
2475 	/*
2476 	 *  Terminate cycle ...
2477 	 */
2478 	SCR_CLR (SCR_ACK|SCR_ATN),
2479 		0,
2480 	/*
2481 	 *  ... and wait for the disconnect.
2482 	 */
2483 	SCR_WAIT_DISC,
2484 		0,
2485 }/*-------------------------< COMPLETE2 >-----------------*/,{
2486 	/*
2487 	 *  Save host status.
2488 	 */
2489 	SCR_STORE_REL (scr0, 4),
2490 		offsetof (struct sym_ccb, phys.status),
2491 	/*
2492 	 *  Some bridges may reorder DMA writes to memory.
2493 	 *  We donnot want the CPU to deal with completions
2494 	 *  without all the posted write having been flushed
2495 	 *  to memory. This DUMMY READ should flush posted
2496 	 *  buffers prior to the CPU having to deal with
2497 	 *  completions.
2498 	 */
2499 	SCR_LOAD_REL (scr0, 4),	/* DUMMY READ */
2500 		offsetof (struct sym_ccb, phys.status),
2501 
2502 	/*
2503 	 *  If command resulted in not GOOD status,
2504 	 *  call the C code if needed.
2505 	 */
2506 	SCR_FROM_REG (SS_REG),
2507 		0,
2508 	SCR_CALL ^ IFFALSE (DATA (S_GOOD)),
2509 		PADDRH (bad_status),
2510 	/*
2511 	 *  If we performed an auto-sense, call
2512 	 *  the C code to synchronyze task aborts
2513 	 *  with UNIT ATTENTION conditions.
2514 	 */
2515 	SCR_FROM_REG (HF_REG),
2516 		0,
2517 	SCR_JUMPR ^ IFTRUE (MASK (0 ,(HF_SENSE|HF_EXT_ERR))),
2518 		16,
2519 }/*-------------------------< COMPLETE_ERROR >-----------------*/,{
2520 	SCR_LOAD_ABS (scratcha, 4),
2521 		PADDRH (startpos),
2522 	SCR_INT,
2523 		SIR_COMPLETE_ERROR,
2524 }/*------------------------< DONE >-----------------*/,{
2525 	/*
2526 	 *  Copy the DSA to the DONE QUEUE and
2527 	 *  signal completion to the host.
2528 	 *  If we are interrupted between DONE
2529 	 *  and DONE_END, we must reset, otherwise
2530 	 *  the completed CCB may be lost.
2531 	 */
2532 	SCR_STORE_ABS (dsa, 4),
2533 		PADDRH (saved_dsa),
2534 	SCR_LOAD_ABS (dsa, 4),
2535 		PADDRH (done_pos),
2536 	SCR_LOAD_ABS (scratcha, 4),
2537 		PADDRH (saved_dsa),
2538 	SCR_STORE_REL (scratcha, 4),
2539 		0,
2540 	/*
2541 	 *  The instruction below reads the DONE QUEUE next
2542 	 *  free position from memory.
2543 	 *  In addition it ensures that all PCI posted writes
2544 	 *  are flushed and so the DSA value of the done
2545 	 *  CCB is visible by the CPU before INTFLY is raised.
2546 	 */
2547 	SCR_LOAD_REL (temp, 4),
2548 		4,
2549 	SCR_INT_FLY,
2550 		0,
2551 	SCR_STORE_ABS (temp, 4),
2552 		PADDRH (done_pos),
2553 }/*------------------------< DONE_END >-----------------*/,{
2554 	SCR_JUMP,
2555 		PADDR (start),
2556 }/*-------------------------< SAVE_DP >------------------*/,{
2557 	/*
2558 	 *  Clear ACK immediately.
2559 	 *  No need to delay it.
2560 	 */
2561 	SCR_CLR (SCR_ACK),
2562 		0,
2563 	/*
2564 	 *  Keep track we received a SAVE DP, so
2565 	 *  we will switch to the other PM context
2566 	 *  on the next PM since the DP may point
2567 	 *  to the current PM context.
2568 	 */
2569 	SCR_REG_REG (HF_REG, SCR_OR, HF_DP_SAVED),
2570 		0,
2571 	/*
2572 	 *  SAVE_DP message:
2573 	 *  Copy the data pointer to SAVEP.
2574 	 */
2575 	SCR_STORE_REL (temp, 4),
2576 		offsetof (struct sym_ccb, phys.savep),
2577 	SCR_JUMP,
2578 		PADDR (dispatch),
2579 }/*-------------------------< RESTORE_DP >---------------*/,{
2580 	/*
2581 	 *  RESTORE_DP message:
2582 	 *  Copy SAVEP to actual data pointer.
2583 	 */
2584 	SCR_LOAD_REL  (temp, 4),
2585 		offsetof (struct sym_ccb, phys.savep),
2586 	SCR_JUMP,
2587 		PADDR (clrack),
2588 }/*-------------------------< DISCONNECT >---------------*/,{
2589 	/*
2590 	 *  DISCONNECTing  ...
2591 	 *
2592 	 *  disable the "unexpected disconnect" feature,
2593 	 *  and remove the ACK signal.
2594 	 */
2595 	SCR_REG_REG (scntl2, SCR_AND, 0x7f),
2596 		0,
2597 	SCR_CLR (SCR_ACK|SCR_ATN),
2598 		0,
2599 	/*
2600 	 *  Wait for the disconnect.
2601 	 */
2602 	SCR_WAIT_DISC,
2603 		0,
2604 	/*
2605 	 *  Status is: DISCONNECTED.
2606 	 */
2607 	SCR_LOAD_REG (HS_REG, HS_DISCONNECT),
2608 		0,
2609 	/*
2610 	 *  Save host status.
2611 	 */
2612 	SCR_STORE_REL (scr0, 4),
2613 		offsetof (struct sym_ccb, phys.status),
2614 	/*
2615 	 *  If QUIRK_AUTOSAVE is set,
2616 	 *  do an "save pointer" operation.
2617 	 */
2618 	SCR_FROM_REG (QU_REG),
2619 		0,
2620 	SCR_JUMP ^ IFFALSE (MASK (SYM_QUIRK_AUTOSAVE, SYM_QUIRK_AUTOSAVE)),
2621 		PADDR (start),
2622 	/*
2623 	 *  like SAVE_DP message:
2624 	 *  Remember we saved the data pointer.
2625 	 *  Copy data pointer to SAVEP.
2626 	 */
2627 	SCR_REG_REG (HF_REG, SCR_OR, HF_DP_SAVED),
2628 		0,
2629 	SCR_STORE_REL (temp, 4),
2630 		offsetof (struct sym_ccb, phys.savep),
2631 	SCR_JUMP,
2632 		PADDR (start),
2633 }/*-------------------------< IDLE >------------------------*/,{
2634 	/*
2635 	 *  Nothing to do?
2636 	 *  Wait for reselect.
2637 	 *  This NOP will be patched with LED OFF
2638 	 *  SCR_REG_REG (gpreg, SCR_OR, 0x01)
2639 	 */
2640 	SCR_NO_OP,
2641 		0,
2642 #ifdef SYM_CONF_IARB_SUPPORT
2643 	SCR_JUMPR,
2644 		8,
2645 #endif
2646 }/*-------------------------< UNGETJOB >-----------------*/,{
2647 #ifdef SYM_CONF_IARB_SUPPORT
2648 	/*
2649 	 *  Set IMMEDIATE ARBITRATION, for the next time.
2650 	 *  This will give us better chance to win arbitration
2651 	 *  for the job we just wanted to do.
2652 	 */
2653 	SCR_REG_REG (scntl1, SCR_OR, IARB),
2654 		0,
2655 #endif
2656 	/*
2657 	 *  We are not able to restart the SCRIPTS if we are
2658 	 *  interrupted and these instruction haven't been
2659 	 *  all executed. BTW, this is very unlikely to
2660 	 *  happen, but we check that from the C code.
2661 	 */
2662 	SCR_LOAD_REG (dsa, 0xff),
2663 		0,
2664 	SCR_STORE_ABS (scratcha, 4),
2665 		PADDRH (startpos),
2666 }/*-------------------------< RESELECT >--------------------*/,{
2667 	/*
2668 	 *  Make sure we are in initiator mode.
2669 	 */
2670 	SCR_CLR (SCR_TRG),
2671 		0,
2672 	/*
2673 	 *  Sleep waiting for a reselection.
2674 	 */
2675 	SCR_WAIT_RESEL,
2676 		PADDR(start),
2677 }/*-------------------------< RESELECTED >------------------*/,{
2678 	/*
2679 	 *  This NOP will be patched with LED ON
2680 	 *  SCR_REG_REG (gpreg, SCR_AND, 0xfe)
2681 	 */
2682 	SCR_NO_OP,
2683 		0,
2684 	/*
2685 	 *  load the target id into the sdid
2686 	 */
2687 	SCR_REG_SFBR (ssid, SCR_AND, 0x8F),
2688 		0,
2689 	SCR_TO_REG (sdid),
2690 		0,
2691 	/*
2692 	 *  Load the target control block address
2693 	 */
2694 	SCR_LOAD_ABS (dsa, 4),
2695 		PADDRH (targtbl),
2696 	SCR_SFBR_REG (dsa, SCR_SHL, 0),
2697 		0,
2698 	SCR_REG_REG (dsa, SCR_SHL, 0),
2699 		0,
2700 	SCR_REG_REG (dsa, SCR_AND, 0x3c),
2701 		0,
2702 	SCR_LOAD_REL (dsa, 4),
2703 		0,
2704 	/*
2705 	 *  Load the legacy synchronous transfer registers.
2706 	 */
2707 	SCR_LOAD_REL (scntl3, 1),
2708 		offsetof(struct sym_tcb, wval),
2709 	SCR_LOAD_REL (sxfer, 1),
2710 		offsetof(struct sym_tcb, sval),
2711 }/*-------------------------< RESEL_SCNTL4 >------------------*/,{
2712 	/*
2713 	 *  If C1010, patched with the load of SCNTL4 that
2714 	 *  allows a new synchronous timing scheme.
2715 	 *
2716 	 *	SCR_LOAD_REL (scntl4, 1),
2717 	 * 		offsetof(struct tcb, uval),
2718 	 */
2719 	SCR_NO_OP,
2720 		0,
2721 	/*
2722 	 *  We expect MESSAGE IN phase.
2723 	 *  If not, get help from the C code.
2724 	 */
2725 	SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)),
2726 		SIR_RESEL_NO_MSG_IN,
2727 	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
2728 		NADDR (msgin),
2729 	/*
2730 	 *  If IDENTIFY LUN #0, use a faster path
2731 	 *  to find the LCB structure.
2732 	 */
2733 	SCR_JUMPR ^ IFTRUE (MASK (0x80, 0xbf)),
2734 		56,
2735 	/*
2736 	 *  If message isn't an IDENTIFY,
2737 	 *  tell the C code about.
2738 	 */
2739 	SCR_INT ^ IFFALSE (MASK (0x80, 0x80)),
2740 		SIR_RESEL_NO_IDENTIFY,
2741 	/*
2742 	 *  It is an IDENTIFY message,
2743 	 *  Load the LUN control block address.
2744 	 */
2745 	SCR_LOAD_REL (dsa, 4),
2746 		offsetof(struct sym_tcb, luntbl_sa),
2747 	SCR_SFBR_REG (dsa, SCR_SHL, 0),
2748 		0,
2749 	SCR_REG_REG (dsa, SCR_SHL, 0),
2750 		0,
2751 	SCR_REG_REG (dsa, SCR_AND, 0xfc),
2752 		0,
2753 	SCR_LOAD_REL (dsa, 4),
2754 		0,
2755 	SCR_JUMPR,
2756 		8,
2757 	/*
2758 	 *  LUN 0 special case (but usual one :))
2759 	 */
2760 	SCR_LOAD_REL (dsa, 4),
2761 		offsetof(struct sym_tcb, lun0_sa),
2762 	/*
2763 	 *  Jump indirectly to the reselect action for this LUN.
2764 	 */
2765 	SCR_LOAD_REL (temp, 4),
2766 		offsetof(struct sym_lcb, resel_sa),
2767 	SCR_RETURN,
2768 		0,
2769 	/* In normal situations, we jump to RESEL_TAG or RESEL_NO_TAG */
2770 }/*-------------------------< RESEL_TAG >-------------------*/,{
2771 	/*
2772 	 *  ACK the IDENTIFY or TAG previously received.
2773 	 */
2774 	SCR_CLR (SCR_ACK),
2775 		0,
2776 	/*
2777 	 *  It shall be a tagged command.
2778 	 *  Read SIMPLE+TAG.
2779 	 *  The C code will deal with errors.
2780 	 *  Agressive optimization, is'nt it? :)
2781 	 */
2782 	SCR_MOVE_ABS (2) ^ SCR_MSG_IN,
2783 		NADDR (msgin),
2784 	/*
2785 	 *  Load the pointer to the tagged task
2786 	 *  table for this LUN.
2787 	 */
2788 	SCR_LOAD_REL (dsa, 4),
2789 		offsetof(struct sym_lcb, itlq_tbl_sa),
2790 	/*
2791 	 *  The SIDL still contains the TAG value.
2792 	 *  Agressive optimization, isn't it? :):)
2793 	 */
2794 	SCR_REG_SFBR (sidl, SCR_SHL, 0),
2795 		0,
2796 #if SYM_CONF_MAX_TASK*4 > 512
2797 	SCR_JUMPR ^ IFFALSE (CARRYSET),
2798 		8,
2799 	SCR_REG_REG (dsa1, SCR_OR, 2),
2800 		0,
2801 	SCR_REG_REG (sfbr, SCR_SHL, 0),
2802 		0,
2803 	SCR_JUMPR ^ IFFALSE (CARRYSET),
2804 		8,
2805 	SCR_REG_REG (dsa1, SCR_OR, 1),
2806 		0,
2807 #elif SYM_CONF_MAX_TASK*4 > 256
2808 	SCR_JUMPR ^ IFFALSE (CARRYSET),
2809 		8,
2810 	SCR_REG_REG (dsa1, SCR_OR, 1),
2811 		0,
2812 #endif
2813 	/*
2814 	 *  Retrieve the DSA of this task.
2815 	 *  JUMP indirectly to the restart point of the CCB.
2816 	 */
2817 	SCR_SFBR_REG (dsa, SCR_AND, 0xfc),
2818 		0,
2819 	SCR_LOAD_REL (dsa, 4),
2820 		0,
2821 	SCR_LOAD_REL (temp, 4),
2822 		offsetof(struct sym_ccb, phys.go.restart),
2823 	SCR_RETURN,
2824 		0,
2825 	/* In normal situations we branch to RESEL_DSA */
2826 }/*-------------------------< RESEL_DSA >-------------------*/,{
2827 	/*
2828 	 *  ACK the IDENTIFY or TAG previously received.
2829 	 */
2830 	SCR_CLR (SCR_ACK),
2831 		0,
2832 }/*-------------------------< RESEL_DSA1 >------------------*/,{
2833 	/*
2834 	 *      load the savep (saved pointer) into
2835 	 *      the actual data pointer.
2836 	 */
2837 	SCR_LOAD_REL (temp, 4),
2838 		offsetof (struct sym_ccb, phys.savep),
2839 	/*
2840 	 *      Initialize the status registers
2841 	 */
2842 	SCR_LOAD_REL (scr0, 4),
2843 		offsetof (struct sym_ccb, phys.status),
2844 	/*
2845 	 *  Jump to dispatcher.
2846 	 */
2847 	SCR_JUMP,
2848 		PADDR (dispatch),
2849 }/*-------------------------< RESEL_NO_TAG >-------------------*/,{
2850 	/*
2851 	 *  Load the DSA with the unique ITL task.
2852 	 */
2853 	SCR_LOAD_REL (dsa, 4),
2854 		offsetof(struct sym_lcb, itl_task_sa),
2855 	/*
2856 	 *  JUMP indirectly to the restart point of the CCB.
2857 	 */
2858 	SCR_LOAD_REL (temp, 4),
2859 		offsetof(struct sym_ccb, phys.go.restart),
2860 	SCR_RETURN,
2861 		0,
2862 	/* In normal situations we branch to RESEL_DSA */
2863 }/*-------------------------< DATA_IN >--------------------*/,{
2864 /*
2865  *  Because the size depends on the
2866  *  #define SYM_CONF_MAX_SG parameter,
2867  *  it is filled in at runtime.
2868  *
2869  *  ##===========< i=0; i<SYM_CONF_MAX_SG >=========
2870  *  ||	SCR_CHMOV_TBL ^ SCR_DATA_IN,
2871  *  ||		offsetof (struct dsb, data[ i]),
2872  *  ##==========================================
2873  */
2874 0
2875 }/*-------------------------< DATA_IN2 >-------------------*/,{
2876 	SCR_CALL,
2877 		PADDR (datai_done),
2878 	SCR_JUMP,
2879 		PADDRH (data_ovrun),
2880 }/*-------------------------< DATA_OUT >--------------------*/,{
2881 /*
2882  *  Because the size depends on the
2883  *  #define SYM_CONF_MAX_SG parameter,
2884  *  it is filled in at runtime.
2885  *
2886  *  ##===========< i=0; i<SYM_CONF_MAX_SG >=========
2887  *  ||	SCR_CHMOV_TBL ^ SCR_DATA_OUT,
2888  *  ||		offsetof (struct dsb, data[ i]),
2889  *  ##==========================================
2890  */
2891 0
2892 }/*-------------------------< DATA_OUT2 >-------------------*/,{
2893 	SCR_CALL,
2894 		PADDR (datao_done),
2895 	SCR_JUMP,
2896 		PADDRH (data_ovrun),
2897 
2898 }/*-------------------------< PM0_DATA >--------------------*/,{
2899 	/*
2900 	 *  Read our host flags to SFBR, so we will be able
2901 	 *  to check against the data direction we expect.
2902 	 */
2903 	SCR_FROM_REG (HF_REG),
2904 		0,
2905 	/*
2906 	 *  Check against actual DATA PHASE.
2907 	 */
2908 	SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)),
2909 		PADDR (pm0_data_out),
2910 	/*
2911 	 *  Actual phase is DATA IN.
2912 	 *  Check against expected direction.
2913 	 */
2914 	SCR_JUMP ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN)),
2915 		PADDRH (data_ovrun),
2916 	/*
2917 	 *  Keep track we are moving data from the
2918 	 *  PM0 DATA mini-script.
2919 	 */
2920 	SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0),
2921 		0,
2922 	/*
2923 	 *  Move the data to memory.
2924 	 */
2925 	SCR_CHMOV_TBL ^ SCR_DATA_IN,
2926 		offsetof (struct sym_ccb, phys.pm0.sg),
2927 	SCR_JUMP,
2928 		PADDR (pm0_data_end),
2929 }/*-------------------------< PM0_DATA_OUT >----------------*/,{
2930 	/*
2931 	 *  Actual phase is DATA OUT.
2932 	 *  Check against expected direction.
2933 	 */
2934 	SCR_JUMP ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN)),
2935 		PADDRH (data_ovrun),
2936 	/*
2937 	 *  Keep track we are moving data from the
2938 	 *  PM0 DATA mini-script.
2939 	 */
2940 	SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0),
2941 		0,
2942 	/*
2943 	 *  Move the data from memory.
2944 	 */
2945 	SCR_CHMOV_TBL ^ SCR_DATA_OUT,
2946 		offsetof (struct sym_ccb, phys.pm0.sg),
2947 }/*-------------------------< PM0_DATA_END >----------------*/,{
2948 	/*
2949 	 *  Clear the flag that told we were moving
2950 	 *  data from the PM0 DATA mini-script.
2951 	 */
2952 	SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM0)),
2953 		0,
2954 	/*
2955 	 *  Return to the previous DATA script which
2956 	 *  is guaranteed by design (if no bug) to be
2957 	 *  the main DATA script for this transfer.
2958 	 */
2959 	SCR_LOAD_REL (temp, 4),
2960 		offsetof (struct sym_ccb, phys.pm0.ret),
2961 	SCR_RETURN,
2962 		0,
2963 }/*-------------------------< PM1_DATA >--------------------*/,{
2964 	/*
2965 	 *  Read our host flags to SFBR, so we will be able
2966 	 *  to check against the data direction we expect.
2967 	 */
2968 	SCR_FROM_REG (HF_REG),
2969 		0,
2970 	/*
2971 	 *  Check against actual DATA PHASE.
2972 	 */
2973 	SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)),
2974 		PADDR (pm1_data_out),
2975 	/*
2976 	 *  Actual phase is DATA IN.
2977 	 *  Check against expected direction.
2978 	 */
2979 	SCR_JUMP ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN)),
2980 		PADDRH (data_ovrun),
2981 	/*
2982 	 *  Keep track we are moving data from the
2983 	 *  PM1 DATA mini-script.
2984 	 */
2985 	SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1),
2986 		0,
2987 	/*
2988 	 *  Move the data to memory.
2989 	 */
2990 	SCR_CHMOV_TBL ^ SCR_DATA_IN,
2991 		offsetof (struct sym_ccb, phys.pm1.sg),
2992 	SCR_JUMP,
2993 		PADDR (pm1_data_end),
2994 }/*-------------------------< PM1_DATA_OUT >----------------*/,{
2995 	/*
2996 	 *  Actual phase is DATA OUT.
2997 	 *  Check against expected direction.
2998 	 */
2999 	SCR_JUMP ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN)),
3000 		PADDRH (data_ovrun),
3001 	/*
3002 	 *  Keep track we are moving data from the
3003 	 *  PM1 DATA mini-script.
3004 	 */
3005 	SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1),
3006 		0,
3007 	/*
3008 	 *  Move the data from memory.
3009 	 */
3010 	SCR_CHMOV_TBL ^ SCR_DATA_OUT,
3011 		offsetof (struct sym_ccb, phys.pm1.sg),
3012 }/*-------------------------< PM1_DATA_END >----------------*/,{
3013 	/*
3014 	 *  Clear the flag that told we were moving
3015 	 *  data from the PM1 DATA mini-script.
3016 	 */
3017 	SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM1)),
3018 		0,
3019 	/*
3020 	 *  Return to the previous DATA script which
3021 	 *  is guaranteed by design (if no bug) to be
3022 	 *  the main DATA script for this transfer.
3023 	 */
3024 	SCR_LOAD_REL (temp, 4),
3025 		offsetof (struct sym_ccb, phys.pm1.ret),
3026 	SCR_RETURN,
3027 		0,
3028 }/*---------------------------------------------------------*/
3029 };
3030 
3031 static struct sym_scrh scripth0 = {
3032 /*------------------------< START64 >-----------------------*/{
3033 	/*
3034 	 *  SCRIPT entry point for the 895A, 896 and 1010.
3035 	 *  For now, there is no specific stuff for those
3036 	 *  chips at this point, but this may come.
3037 	 */
3038 	SCR_JUMP,
3039 		PADDR (init),
3040 }/*-------------------------< NO_DATA >-------------------*/,{
3041 	SCR_JUMP,
3042 		PADDRH (data_ovrun),
3043 }/*-----------------------< SEL_FOR_ABORT >------------------*/,{
3044 	/*
3045 	 *  We are jumped here by the C code, if we have
3046 	 *  some target to reset or some disconnected
3047 	 *  job to abort. Since error recovery is a serious
3048 	 *  busyness, we will really reset the SCSI BUS, if
3049 	 *  case of a SCSI interrupt occuring in this path.
3050 	 */
3051 
3052 	/*
3053 	 *  Set initiator mode.
3054 	 */
3055 	SCR_CLR (SCR_TRG),
3056 		0,
3057 	/*
3058 	 *      And try to select this target.
3059 	 */
3060 	SCR_SEL_TBL_ATN ^ offsetof (struct sym_hcb, abrt_sel),
3061 		PADDR (reselect),
3062 	/*
3063 	 *  Wait for the selection to complete or
3064 	 *  the selection to time out.
3065 	 */
3066 	SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_OUT)),
3067 		-8,
3068 	/*
3069 	 *  Call the C code.
3070 	 */
3071 	SCR_INT,
3072 		SIR_TARGET_SELECTED,
3073 	/*
3074 	 *  The C code should let us continue here.
3075 	 *  Send the 'kiss of death' message.
3076 	 *  We expect an immediate disconnect once
3077 	 *  the target has eaten the message.
3078 	 */
3079 	SCR_REG_REG (scntl2, SCR_AND, 0x7f),
3080 		0,
3081 	SCR_MOVE_TBL ^ SCR_MSG_OUT,
3082 		offsetof (struct sym_hcb, abrt_tbl),
3083 	SCR_CLR (SCR_ACK|SCR_ATN),
3084 		0,
3085 	SCR_WAIT_DISC,
3086 		0,
3087 	/*
3088 	 *  Tell the C code that we are done.
3089 	 */
3090 	SCR_INT,
3091 		SIR_ABORT_SENT,
3092 }/*-----------------------< SEL_FOR_ABORT_1 >--------------*/,{
3093 	/*
3094 	 *  Jump at scheduler.
3095 	 */
3096 	SCR_JUMP,
3097 		PADDR (start),
3098 
3099 }/*------------------------< SELECT_NO_ATN >-----------------*/,{
3100 	/*
3101 	 *  Set Initiator mode.
3102 	 *  And try to select this target without ATN.
3103 	 */
3104 	SCR_CLR (SCR_TRG),
3105 		0,
3106 	SCR_SEL_TBL ^ offsetof (struct dsb, select),
3107 		PADDR (ungetjob),
3108 	/*
3109 	 *  load the savep (saved pointer) into
3110 	 *  the actual data pointer.
3111 	 */
3112 	SCR_LOAD_REL (temp, 4),
3113 		offsetof (struct sym_ccb, phys.savep),
3114 	/*
3115 	 *  Initialize the status registers
3116 	 */
3117 	SCR_LOAD_REL (scr0, 4),
3118 		offsetof (struct sym_ccb, phys.status),
3119 }/*------------------------< WF_SEL_DONE_NO_ATN >-----------------*/,{
3120 	/*
3121 	 *  Wait immediately for the next phase or
3122 	 *  the selection to complete or time-out.
3123 	 */
3124 	SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_OUT)),
3125 		0,
3126 	SCR_JUMP,
3127 		PADDR (select2),
3128 }/*-------------------------< MSG_IN_ETC >--------------------*/,{
3129 	/*
3130 	 *  If it is an EXTENDED (variable size message)
3131 	 *  Handle it.
3132 	 */
3133 	SCR_JUMP ^ IFTRUE (DATA (M_EXTENDED)),
3134 		PADDRH (msg_extended),
3135 	/*
3136 	 *  Let the C code handle any other
3137 	 *  1 byte message.
3138 	 */
3139 	SCR_INT ^ IFTRUE (MASK (0x00, 0xf0)),
3140 		SIR_MSG_RECEIVED,
3141 	SCR_INT ^ IFTRUE (MASK (0x10, 0xf0)),
3142 		SIR_MSG_RECEIVED,
3143 	/*
3144 	 *  We donnot handle 2 bytes messages from SCRIPTS.
3145 	 *  So, let the C code deal with these ones too.
3146 	 */
3147 	SCR_INT ^ IFFALSE (MASK (0x20, 0xf0)),
3148 		SIR_MSG_WEIRD,
3149 	SCR_CLR (SCR_ACK),
3150 		0,
3151 	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
3152 		NADDR (msgin[1]),
3153 	SCR_INT,
3154 		SIR_MSG_RECEIVED,
3155 
3156 }/*-------------------------< MSG_RECEIVED >--------------------*/,{
3157 	SCR_LOAD_REL (scratcha, 4),	/* DUMMY READ */
3158 		0,
3159 	SCR_INT,
3160 		SIR_MSG_RECEIVED,
3161 
3162 }/*-------------------------< MSG_WEIRD_SEEN >------------------*/,{
3163 	SCR_LOAD_REL (scratcha, 4),	/* DUMMY READ */
3164 		0,
3165 	SCR_INT,
3166 		SIR_MSG_WEIRD,
3167 
3168 }/*-------------------------< MSG_EXTENDED >--------------------*/,{
3169 	/*
3170 	 *  Clear ACK and get the next byte
3171 	 *  assumed to be the message length.
3172 	 */
3173 	SCR_CLR (SCR_ACK),
3174 		0,
3175 	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
3176 		NADDR (msgin[1]),
3177 	/*
3178 	 *  Try to catch some unlikely situations as 0 length
3179 	 *  or too large the length.
3180 	 */
3181 	SCR_JUMP ^ IFTRUE (DATA (0)),
3182 		PADDRH (msg_weird_seen),
3183 	SCR_TO_REG (scratcha),
3184 		0,
3185 	SCR_REG_REG (sfbr, SCR_ADD, (256-8)),
3186 		0,
3187 	SCR_JUMP ^ IFTRUE (CARRYSET),
3188 		PADDRH (msg_weird_seen),
3189 	/*
3190 	 *  We donnot handle extended messages from SCRIPTS.
3191 	 *  Read the amount of data correponding to the
3192 	 *  message length and call the C code.
3193 	 */
3194 	SCR_STORE_REL (scratcha, 1),
3195 		offsetof (struct dsb, smsg_ext.size),
3196 	SCR_CLR (SCR_ACK),
3197 		0,
3198 	SCR_MOVE_TBL ^ SCR_MSG_IN,
3199 		offsetof (struct dsb, smsg_ext),
3200 	SCR_JUMP,
3201 		PADDRH (msg_received),
3202 
3203 }/*-------------------------< MSG_BAD >------------------*/,{
3204 	/*
3205 	 *  unimplemented message - reject it.
3206 	 */
3207 	SCR_INT,
3208 		SIR_REJECT_TO_SEND,
3209 	SCR_SET (SCR_ATN),
3210 		0,
3211 	SCR_JUMP,
3212 		PADDR (clrack),
3213 }/*-------------------------< MSG_WEIRD >--------------------*/,{
3214 	/*
3215 	 *  weird message received
3216 	 *  ignore all MSG IN phases and reject it.
3217 	 */
3218 	SCR_INT,
3219 		SIR_REJECT_TO_SEND,
3220 	SCR_SET (SCR_ATN),
3221 		0,
3222 }/*-------------------------< MSG_WEIRD1 >--------------------*/,{
3223 	SCR_CLR (SCR_ACK),
3224 		0,
3225 	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
3226 		PADDR (dispatch),
3227 	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
3228 		NADDR (scratch),
3229 	SCR_JUMP,
3230 		PADDRH (msg_weird1),
3231 }/*-------------------------< WDTR_RESP >----------------*/,{
3232 	/*
3233 	 *  let the target fetch our answer.
3234 	 */
3235 	SCR_SET (SCR_ATN),
3236 		0,
3237 	SCR_CLR (SCR_ACK),
3238 		0,
3239 	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
3240 		PADDRH (nego_bad_phase),
3241 }/*-------------------------< SEND_WDTR >----------------*/,{
3242 	/*
3243 	 *  Send the M_X_WIDE_REQ
3244 	 */
3245 	SCR_MOVE_ABS (4) ^ SCR_MSG_OUT,
3246 		NADDR (msgout),
3247 	SCR_JUMP,
3248 		PADDRH (msg_out_done),
3249 }/*-------------------------< SDTR_RESP >-------------*/,{
3250 	/*
3251 	 *  let the target fetch our answer.
3252 	 */
3253 	SCR_SET (SCR_ATN),
3254 		0,
3255 	SCR_CLR (SCR_ACK),
3256 		0,
3257 	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
3258 		PADDRH (nego_bad_phase),
3259 }/*-------------------------< SEND_SDTR >-------------*/,{
3260 	/*
3261 	 *  Send the M_X_SYNC_REQ
3262 	 */
3263 	SCR_MOVE_ABS (5) ^ SCR_MSG_OUT,
3264 		NADDR (msgout),
3265 	SCR_JUMP,
3266 		PADDRH (msg_out_done),
3267 }/*-------------------------< PPR_RESP >-------------*/,{
3268 	/*
3269 	 *  let the target fetch our answer.
3270 	 */
3271 	SCR_SET (SCR_ATN),
3272 		0,
3273 	SCR_CLR (SCR_ACK),
3274 		0,
3275 	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
3276 		PADDRH (nego_bad_phase),
3277 }/*-------------------------< SEND_PPR >-------------*/,{
3278 	/*
3279 	 *  Send the M_X_PPR_REQ
3280 	 */
3281 	SCR_MOVE_ABS (8) ^ SCR_MSG_OUT,
3282 		NADDR (msgout),
3283 	SCR_JUMP,
3284 		PADDRH (msg_out_done),
3285 }/*-------------------------< NEGO_BAD_PHASE >------------*/,{
3286 	SCR_INT,
3287 		SIR_NEGO_PROTO,
3288 	SCR_JUMP,
3289 		PADDR (dispatch),
3290 }/*-------------------------< MSG_OUT >-------------------*/,{
3291 	/*
3292 	 *  The target requests a message.
3293 	 *  We donnot send messages that may
3294 	 *  require the device to go to bus free.
3295 	 */
3296 	SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
3297 		NADDR (msgout),
3298 	/*
3299 	 *  ... wait for the next phase
3300 	 *  if it's a message out, send it again, ...
3301 	 */
3302 	SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)),
3303 		PADDRH (msg_out),
3304 }/*-------------------------< MSG_OUT_DONE >--------------*/,{
3305 	/*
3306 	 *  Let the C code be aware of the
3307 	 *  sent message and clear the message.
3308 	 */
3309 	SCR_INT,
3310 		SIR_MSG_OUT_DONE,
3311 	/*
3312 	 *  ... and process the next phase
3313 	 */
3314 	SCR_JUMP,
3315 		PADDR (dispatch),
3316 
3317 }/*-------------------------< DATA_OVRUN >--------------------*/,{
3318 	/*
3319 	 *  The target may want to transfer too much data.
3320 	 *
3321 	 *  If phase is DATA OUT write 1 byte and count it.
3322 	 */
3323 	SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_OUT)),
3324 		16,
3325 	SCR_CHMOV_ABS (1) ^ SCR_DATA_OUT,
3326 		NADDR (scratch),
3327 	SCR_JUMP,
3328 		PADDRH (data_ovrun1),
3329 	/*
3330 	 *  If WSR is set, clear this condition, and
3331 	 *  count this byte.
3332 	 */
3333 	SCR_FROM_REG (scntl2),
3334 		0,
3335 	SCR_JUMPR ^ IFFALSE (MASK (WSR, WSR)),
3336 		16,
3337 	SCR_REG_REG (scntl2, SCR_OR, WSR),
3338 		0,
3339 	SCR_JUMP,
3340 		PADDRH (data_ovrun1),
3341 	/*
3342 	 *  Finally check against DATA IN phase.
3343 	 *  Jump to dispatcher if not so.
3344 	 *  Read 1 byte otherwise and count it.
3345 	 */
3346 	SCR_JUMP ^ IFFALSE (IF (SCR_DATA_IN)),
3347 		PADDR (dispatch),
3348 	SCR_CHMOV_ABS (1) ^ SCR_DATA_IN,
3349 		NADDR (scratch),
3350 }/*-------------------------< DATA_OVRUN1 >--------------------*/,{
3351 	/*
3352 	 *  Set the extended error flag.
3353 	 */
3354 	SCR_REG_REG (HF_REG, SCR_OR, HF_EXT_ERR),
3355 		0,
3356 	SCR_LOAD_REL (scratcha, 1),
3357 		offsetof (struct sym_ccb, xerr_status),
3358 	SCR_REG_REG (scratcha,  SCR_OR,  XE_EXTRA_DATA),
3359 		0,
3360 	SCR_STORE_REL (scratcha, 1),
3361 		offsetof (struct sym_ccb, xerr_status),
3362 	/*
3363 	 *  Count this byte.
3364 	 *  This will allow to return a negative
3365 	 *  residual to user.
3366 	 */
3367 	SCR_LOAD_REL (scratcha, 4),
3368 		offsetof (struct sym_ccb, phys.extra_bytes),
3369 	SCR_REG_REG (scratcha,  SCR_ADD,  0x01),
3370 		0,
3371 	SCR_REG_REG (scratcha1, SCR_ADDC, 0),
3372 		0,
3373 	SCR_REG_REG (scratcha2, SCR_ADDC, 0),
3374 		0,
3375 	SCR_STORE_REL (scratcha, 4),
3376 		offsetof (struct sym_ccb, phys.extra_bytes),
3377 	/*
3378 	 *  .. and repeat as required.
3379 	 */
3380 	SCR_JUMP,
3381 		PADDRH (data_ovrun),
3382 
3383 }/*-------------------------< ABORT_RESEL >----------------*/,{
3384 	SCR_SET (SCR_ATN),
3385 		0,
3386 	SCR_CLR (SCR_ACK),
3387 		0,
3388 	/*
3389 	 *  send the abort/abortag/reset message
3390 	 *  we expect an immediate disconnect
3391 	 */
3392 	SCR_REG_REG (scntl2, SCR_AND, 0x7f),
3393 		0,
3394 	SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
3395 		NADDR (msgout),
3396 	SCR_CLR (SCR_ACK|SCR_ATN),
3397 		0,
3398 	SCR_WAIT_DISC,
3399 		0,
3400 	SCR_INT,
3401 		SIR_RESEL_ABORTED,
3402 	SCR_JUMP,
3403 		PADDR (start),
3404 }/*-------------------------< RESEND_IDENT >-------------------*/,{
3405 	/*
3406 	 *  The target stays in MSG OUT phase after having acked
3407 	 *  Identify [+ Tag [+ Extended message ]]. Targets shall
3408 	 *  behave this way on parity error.
3409 	 *  We must send it again all the messages.
3410 	 */
3411 	SCR_SET (SCR_ATN), /* Shall be asserted 2 deskew delays before the  */
3412 		0,         /* 1rst ACK = 90 ns. Hope the chip isn't too fast */
3413 	SCR_JUMP,
3414 		PADDR (send_ident),
3415 }/*-------------------------< IDENT_BREAK >-------------------*/,{
3416 	SCR_CLR (SCR_ATN),
3417 		0,
3418 	SCR_JUMP,
3419 		PADDR (select2),
3420 }/*-------------------------< IDENT_BREAK_ATN >----------------*/,{
3421 	SCR_SET (SCR_ATN),
3422 		0,
3423 	SCR_JUMP,
3424 		PADDR (select2),
3425 }/*-------------------------< SDATA_IN >-------------------*/,{
3426 	SCR_CHMOV_TBL ^ SCR_DATA_IN,
3427 		offsetof (struct dsb, sense),
3428 	SCR_CALL,
3429 		PADDR (datai_done),
3430 	SCR_JUMP,
3431 		PADDRH (data_ovrun),
3432 
3433 }/*-------------------------< RESEL_BAD_LUN >---------------*/,{
3434 	/*
3435 	 *  Message is an IDENTIFY, but lun is unknown.
3436 	 *  Signal problem to C code for logging the event.
3437 	 *  Send a M_ABORT to clear all pending tasks.
3438 	 */
3439 	SCR_INT,
3440 		SIR_RESEL_BAD_LUN,
3441 	SCR_JUMP,
3442 		PADDRH (abort_resel),
3443 }/*-------------------------< BAD_I_T_L >------------------*/,{
3444 	/*
3445 	 *  We donnot have a task for that I_T_L.
3446 	 *  Signal problem to C code for logging the event.
3447 	 *  Send a M_ABORT message.
3448 	 */
3449 	SCR_INT,
3450 		SIR_RESEL_BAD_I_T_L,
3451 	SCR_JUMP,
3452 		PADDRH (abort_resel),
3453 }/*-------------------------< BAD_I_T_L_Q >----------------*/,{
3454 	/*
3455 	 *  We donnot have a task that matches the tag.
3456 	 *  Signal problem to C code for logging the event.
3457 	 *  Send a M_ABORTTAG message.
3458 	 */
3459 	SCR_INT,
3460 		SIR_RESEL_BAD_I_T_L_Q,
3461 	SCR_JUMP,
3462 		PADDRH (abort_resel),
3463 }/*-------------------------< BAD_STATUS >-----------------*/,{
3464 	/*
3465 	 *  Anything different from INTERMEDIATE
3466 	 *  CONDITION MET should be a bad SCSI status,
3467 	 *  given that GOOD status has already been tested.
3468 	 *  Call the C code.
3469 	 */
3470 	SCR_LOAD_ABS (scratcha, 4),
3471 		PADDRH (startpos),
3472 	SCR_INT ^ IFFALSE (DATA (S_COND_MET)),
3473 		SIR_BAD_SCSI_STATUS,
3474 	SCR_RETURN,
3475 		0,
3476 
3477 }/*-------------------------< PM_HANDLE >------------------*/,{
3478 	/*
3479 	 *  Phase mismatch handling.
3480 	 *
3481 	 *  Since we have to deal with 2 SCSI data pointers
3482 	 *  (current and saved), we need at least 2 contexts.
3483 	 *  Each context (pm0 and pm1) has a saved area, a
3484 	 *  SAVE mini-script and a DATA phase mini-script.
3485 	 */
3486 	/*
3487 	 *  Get the PM handling flags.
3488 	 */
3489 	SCR_FROM_REG (HF_REG),
3490 		0,
3491 	/*
3492 	 *  If no flags (1rst PM for example), avoid
3493 	 *  all the below heavy flags testing.
3494 	 *  This makes the normal case a bit faster.
3495 	 */
3496 	SCR_JUMP ^ IFTRUE (MASK (0, (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED))),
3497 		PADDRH (pm_handle1),
3498 	/*
3499 	 *  If we received a SAVE DP, switch to the
3500 	 *  other PM context since the savep may point
3501 	 *  to the current PM context.
3502 	 */
3503 	SCR_JUMPR ^ IFFALSE (MASK (HF_DP_SAVED, HF_DP_SAVED)),
3504 		8,
3505 	SCR_REG_REG (sfbr, SCR_XOR, HF_ACT_PM),
3506 		0,
3507 	/*
3508 	 *  If we have been interrupt in a PM DATA mini-script,
3509 	 *  we take the return address from the corresponding
3510 	 *  saved area.
3511 	 *  This ensure the return address always points to the
3512 	 *  main DATA script for this transfer.
3513 	 */
3514 	SCR_JUMP ^ IFTRUE (MASK (0, (HF_IN_PM0 | HF_IN_PM1))),
3515 		PADDRH (pm_handle1),
3516 	SCR_JUMPR ^ IFFALSE (MASK (HF_IN_PM0, HF_IN_PM0)),
3517 		16,
3518 	SCR_LOAD_REL (ia, 4),
3519 		offsetof(struct sym_ccb, phys.pm0.ret),
3520 	SCR_JUMP,
3521 		PADDRH (pm_save),
3522 	SCR_LOAD_REL (ia, 4),
3523 		offsetof(struct sym_ccb, phys.pm1.ret),
3524 	SCR_JUMP,
3525 		PADDRH (pm_save),
3526 }/*-------------------------< PM_HANDLE1 >-----------------*/,{
3527 	/*
3528 	 *  Normal case.
3529 	 *  Update the return address so that it
3530 	 *  will point after the interrupted MOVE.
3531 	 */
3532 	SCR_REG_REG (ia, SCR_ADD, 8),
3533 		0,
3534 	SCR_REG_REG (ia1, SCR_ADDC, 0),
3535 		0,
3536 }/*-------------------------< PM_SAVE >--------------------*/,{
3537 	/*
3538 	 *  Clear all the flags that told us if we were
3539 	 *  interrupted in a PM DATA mini-script and/or
3540 	 *  we received a SAVE DP.
3541 	 */
3542 	SCR_SFBR_REG (HF_REG, SCR_AND, (~(HF_IN_PM0|HF_IN_PM1|HF_DP_SAVED))),
3543 		0,
3544 	/*
3545 	 *  Choose the current PM context.
3546 	 */
3547 	SCR_JUMP ^ IFTRUE (MASK (HF_ACT_PM, HF_ACT_PM)),
3548 		PADDRH (pm1_save),
3549 }/*-------------------------< PM0_SAVE >-------------------*/,{
3550 	SCR_STORE_REL (ia, 4),
3551 		offsetof(struct sym_ccb, phys.pm0.ret),
3552 	/*
3553 	 *  If WSR bit is set, either UA and RBC may
3554 	 *  have to be changed whether the device wants
3555 	 *  to ignore this residue or not.
3556 	 */
3557 	SCR_FROM_REG (scntl2),
3558 		0,
3559 	SCR_CALL ^ IFTRUE (MASK (WSR, WSR)),
3560 		PADDRH (pm_wsr_handle),
3561 	/*
3562 	 *  Save the remaining byte count, the updated
3563 	 *  address and the return address.
3564 	 */
3565 	SCR_STORE_REL (rbc, 4),
3566 		offsetof(struct sym_ccb, phys.pm0.sg.size),
3567 	SCR_STORE_REL (ua, 4),
3568 		offsetof(struct sym_ccb, phys.pm0.sg.addr),
3569 	/*
3570 	 *  Set the current pointer at the PM0 DATA mini-script.
3571 	 */
3572 	SCR_LOAD_ABS (temp, 4),
3573 		PADDRH (pm0_data_addr),
3574 	SCR_JUMP,
3575 		PADDR (dispatch),
3576 }/*-------------------------< PM1_SAVE >-------------------*/,{
3577 	SCR_STORE_REL (ia, 4),
3578 		offsetof(struct sym_ccb, phys.pm1.ret),
3579 	/*
3580 	 *  If WSR bit is set, either UA and RBC may
3581 	 *  have to be changed whether the device wants
3582 	 *  to ignore this residue or not.
3583 	 */
3584 	SCR_FROM_REG (scntl2),
3585 		0,
3586 	SCR_CALL ^ IFTRUE (MASK (WSR, WSR)),
3587 		PADDRH (pm_wsr_handle),
3588 	/*
3589 	 *  Save the remaining byte count, the updated
3590 	 *  address and the return address.
3591 	 */
3592 	SCR_STORE_REL (rbc, 4),
3593 		offsetof(struct sym_ccb, phys.pm1.sg.size),
3594 	SCR_STORE_REL (ua, 4),
3595 		offsetof(struct sym_ccb, phys.pm1.sg.addr),
3596 	/*
3597 	 *  Set the current pointer at the PM1 DATA mini-script.
3598 	 */
3599 	SCR_LOAD_ABS (temp, 4),
3600 		PADDRH (pm1_data_addr),
3601 	SCR_JUMP,
3602 		PADDR (dispatch),
3603 
3604 }/*--------------------------< PM_WSR_HANDLE >-----------------------*/,{
3605 	/*
3606 	 *  Phase mismatch handling from SCRIPT with WSR set.
3607 	 *  Such a condition can occur if the chip wants to
3608 	 *  execute a CHMOV(size > 1) when the WSR bit is
3609 	 *  set and the target changes PHASE.
3610 	 *
3611 	 *  We must move the residual byte to memory.
3612 	 *
3613 	 *  UA contains bit 0..31 of the address to
3614 	 *  move the residual byte.
3615 	 *  Move it to the table indirect.
3616 	 */
3617 	SCR_STORE_REL (ua, 4),
3618 		offsetof (struct sym_ccb, phys.wresid.addr),
3619 	/*
3620 	 *  Increment UA (move address to next position).
3621 	 */
3622 	SCR_REG_REG (ua, SCR_ADD, 1),
3623 		0,
3624 	SCR_REG_REG (ua1, SCR_ADDC, 0),
3625 		0,
3626 	SCR_REG_REG (ua2, SCR_ADDC, 0),
3627 		0,
3628 	SCR_REG_REG (ua3, SCR_ADDC, 0),
3629 		0,
3630 	/*
3631 	 *  Compute SCRATCHA as:
3632 	 *  - size to transfer = 1 byte.
3633 	 *  - bit 24..31 = high address bit [32...39].
3634 	 */
3635 	SCR_LOAD_ABS (scratcha, 4),
3636 		PADDRH (zero),
3637 	SCR_REG_REG (scratcha, SCR_OR, 1),
3638 		0,
3639 	SCR_FROM_REG (rbc3),
3640 		0,
3641 	SCR_TO_REG (scratcha3),
3642 		0,
3643 	/*
3644 	 *  Move this value to the table indirect.
3645 	 */
3646 	SCR_STORE_REL (scratcha, 4),
3647 		offsetof (struct sym_ccb, phys.wresid.size),
3648 	/*
3649 	 *  Wait for a valid phase.
3650 	 *  While testing with bogus QUANTUM drives, the C1010
3651 	 *  sometimes raised a spurious phase mismatch with
3652 	 *  WSR and the CHMOV(1) triggered another PM.
3653 	 *  Waiting explicitely for the PHASE seemed to avoid
3654 	 *  the nested phase mismatch. Btw, this didn't happen
3655 	 *  using my IBM drives.
3656 	 */
3657 	SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_IN)),
3658 		0,
3659 	/*
3660 	 *  Perform the move of the residual byte.
3661 	 */
3662 	SCR_CHMOV_TBL ^ SCR_DATA_IN,
3663 		offsetof (struct sym_ccb, phys.wresid),
3664 	/*
3665 	 *  We can now handle the phase mismatch with UA fixed.
3666 	 *  RBC[0..23]=0 is a special case that does not require
3667 	 *  a PM context. The C code also checks against this.
3668 	 */
3669 	SCR_FROM_REG (rbc),
3670 		0,
3671 	SCR_RETURN ^ IFFALSE (DATA (0)),
3672 		0,
3673 	SCR_FROM_REG (rbc1),
3674 		0,
3675 	SCR_RETURN ^ IFFALSE (DATA (0)),
3676 		0,
3677 	SCR_FROM_REG (rbc2),
3678 		0,
3679 	SCR_RETURN ^ IFFALSE (DATA (0)),
3680 		0,
3681 	/*
3682 	 *  RBC[0..23]=0.
3683 	 *  Not only we donnot need a PM context, but this would
3684 	 *  lead to a bogus CHMOV(0). This condition means that
3685 	 *  the residual was the last byte to move from this CHMOV.
3686 	 *  So, we just have to move the current data script pointer
3687 	 *  (i.e. TEMP) to the SCRIPTS address following the
3688 	 *  interrupted CHMOV and jump to dispatcher.
3689 	 */
3690 	SCR_STORE_ABS (ia, 4),
3691 		PADDRH (scratch),
3692 	SCR_LOAD_ABS (temp, 4),
3693 		PADDRH (scratch),
3694 	SCR_JUMP,
3695 		PADDR (dispatch),
3696 }/*--------------------------< WSR_MA_HELPER >-----------------------*/,{
3697 	/*
3698 	 *  Helper for the C code when WSR bit is set.
3699 	 *  Perform the move of the residual byte.
3700 	 */
3701 	SCR_CHMOV_TBL ^ SCR_DATA_IN,
3702 		offsetof (struct sym_ccb, phys.wresid),
3703 	SCR_JUMP,
3704 		PADDR (dispatch),
3705 
3706 }/*-------------------------< ZERO >------------------------*/,{
3707 	SCR_DATA_ZERO,
3708 }/*-------------------------< SCRATCH >---------------------*/,{
3709 	SCR_DATA_ZERO,
3710 }/*-------------------------< PM0_DATA_ADDR >---------------*/,{
3711 	SCR_DATA_ZERO,
3712 }/*-------------------------< PM1_DATA_ADDR >---------------*/,{
3713 	SCR_DATA_ZERO,
3714 }/*-------------------------< SAVED_DSA >-------------------*/,{
3715 	SCR_DATA_ZERO,
3716 }/*-------------------------< SAVED_DRS >-------------------*/,{
3717 	SCR_DATA_ZERO,
3718 }/*-------------------------< DONE_POS >--------------------*/,{
3719 	SCR_DATA_ZERO,
3720 }/*-------------------------< STARTPOS >--------------------*/,{
3721 	SCR_DATA_ZERO,
3722 }/*-------------------------< TARGTBL >---------------------*/,{
3723 	SCR_DATA_ZERO,
3724 
3725 }/*-------------------------< SNOOPTEST >-------------------*/,{
3726 	/*
3727 	 *  Read the variable.
3728 	 */
3729 	SCR_LOAD_REL (scratcha, 4),
3730 		offsetof(struct sym_hcb, cache),
3731 	SCR_STORE_REL (temp, 4),
3732 		offsetof(struct sym_hcb, cache),
3733 	SCR_LOAD_REL (temp, 4),
3734 		offsetof(struct sym_hcb, cache),
3735 }/*-------------------------< SNOOPEND >-------------------*/,{
3736 	/*
3737 	 *  And stop.
3738 	 */
3739 	SCR_INT,
3740 		99,
3741 }/*--------------------------------------------------------*/
3742 };
3743 
3744 /*
3745  *  Fill in #define dependent parts of the scripts
3746  */
3747 static void sym_fill_scripts (script_p scr, scripth_p scrh)
3748 {
3749 	int	i;
3750 	u32	*p;
3751 
3752 	p = scr->data_in;
3753 	for (i=0; i<SYM_CONF_MAX_SG; i++) {
3754 		*p++ =SCR_CHMOV_TBL ^ SCR_DATA_IN;
3755 		*p++ =offsetof (struct dsb, data[i]);
3756 	};
3757 	assert ((u_long)p == (u_long)&scr->data_in + sizeof (scr->data_in));
3758 
3759 	p = scr->data_out;
3760 	for (i=0; i<SYM_CONF_MAX_SG; i++) {
3761 		*p++ =SCR_CHMOV_TBL ^ SCR_DATA_OUT;
3762 		*p++ =offsetof (struct dsb, data[i]);
3763 	};
3764 	assert ((u_long)p == (u_long)&scr->data_out + sizeof (scr->data_out));
3765 }
3766 
3767 /*
3768  *  Copy and bind a script.
3769  */
3770 static void sym_bind_script (hcb_p np, u32 *src, u32 *dst, int len)
3771 {
3772 	u32 opcode, new, old, tmp1, tmp2;
3773 	u32 *start, *end;
3774 	int relocs;
3775 	int opchanged = 0;
3776 
3777 	start = src;
3778 	end = src + len/4;
3779 
3780 	while (src < end) {
3781 
3782 		opcode = *src++;
3783 		*dst++ = cpu_to_scr(opcode);
3784 
3785 		/*
3786 		 *  If we forget to change the length
3787 		 *  in scripts, a field will be
3788 		 *  padded with 0. This is an illegal
3789 		 *  command.
3790 		 */
3791 		if (opcode == 0) {
3792 			printf ("%s: ERROR0 IN SCRIPT at %d.\n",
3793 				sym_name(np), (int) (src-start-1));
3794 			MDELAY (10000);
3795 			continue;
3796 		};
3797 
3798 		/*
3799 		 *  We use the bogus value 0xf00ff00f ;-)
3800 		 *  to reserve data area in SCRIPTS.
3801 		 */
3802 		if (opcode == SCR_DATA_ZERO) {
3803 			dst[-1] = 0;
3804 			continue;
3805 		}
3806 
3807 		if (DEBUG_FLAGS & DEBUG_SCRIPT)
3808 			printf ("%p:  <%x>\n", (src-1), (unsigned)opcode);
3809 
3810 		/*
3811 		 *  We don't have to decode ALL commands
3812 		 */
3813 		switch (opcode >> 28) {
3814 		case 0xf:
3815 			/*
3816 			 *  LOAD / STORE DSA relative, don't relocate.
3817 			 */
3818 			relocs = 0;
3819 			break;
3820 		case 0xe:
3821 			/*
3822 			 *  LOAD / STORE absolute.
3823 			 */
3824 			relocs = 1;
3825 			break;
3826 		case 0xc:
3827 			/*
3828 			 *  COPY has TWO arguments.
3829 			 */
3830 			relocs = 2;
3831 			tmp1 = src[0];
3832 			tmp2 = src[1];
3833 #ifdef	RELOC_KVAR
3834 			if ((tmp1 & RELOC_MASK) == RELOC_KVAR)
3835 				tmp1 = 0;
3836 			if ((tmp2 & RELOC_MASK) == RELOC_KVAR)
3837 				tmp2 = 0;
3838 #endif
3839 			if ((tmp1 ^ tmp2) & 3) {
3840 				printf ("%s: ERROR1 IN SCRIPT at %d.\n",
3841 					sym_name(np), (int) (src-start-1));
3842 				MDELAY (1000);
3843 			}
3844 			/*
3845 			 *  If PREFETCH feature not enabled, remove
3846 			 *  the NO FLUSH bit if present.
3847 			 */
3848 			if ((opcode & SCR_NO_FLUSH) &&
3849 			    !(np->features & FE_PFEN)) {
3850 				dst[-1] = cpu_to_scr(opcode & ~SCR_NO_FLUSH);
3851 				++opchanged;
3852 			}
3853 			break;
3854 		case 0x0:
3855 			/*
3856 			 *  MOVE/CHMOV (absolute address)
3857 			 */
3858 			if (!(np->features & FE_WIDE))
3859 				dst[-1] = cpu_to_scr(opcode | OPC_MOVE);
3860 			relocs = 1;
3861 			break;
3862 		case 0x1:
3863 			/*
3864 			 *  MOVE/CHMOV (table indirect)
3865 			 */
3866 			if (!(np->features & FE_WIDE))
3867 				dst[-1] = cpu_to_scr(opcode | OPC_MOVE);
3868 			relocs = 0;
3869 			break;
3870 		case 0x8:
3871 			/*
3872 			 *  JUMP / CALL
3873 			 *  dont't relocate if relative :-)
3874 			 */
3875 			if (opcode & 0x00800000)
3876 				relocs = 0;
3877 			else if ((opcode & 0xf8400000) == 0x80400000)/*JUMP64*/
3878 				relocs = 2;
3879 			else
3880 				relocs = 1;
3881 			break;
3882 		case 0x4:
3883 		case 0x5:
3884 		case 0x6:
3885 		case 0x7:
3886 			relocs = 1;
3887 			break;
3888 		default:
3889 			relocs = 0;
3890 			break;
3891 		};
3892 
3893 		if (!relocs) {
3894 			*dst++ = cpu_to_scr(*src++);
3895 			continue;
3896 		}
3897 		while (relocs--) {
3898 			old = *src++;
3899 
3900 			switch (old & RELOC_MASK) {
3901 			case RELOC_REGISTER:
3902 				new = (old & ~RELOC_MASK) + np->mmio_ba;
3903 				break;
3904 			case RELOC_LABEL:
3905 				new = (old & ~RELOC_MASK) + np->script_ba;
3906 				break;
3907 			case RELOC_LABELH:
3908 				new = (old & ~RELOC_MASK) + np->scripth_ba;
3909 				break;
3910 			case RELOC_SOFTC:
3911 				new = (old & ~RELOC_MASK) + np->hcb_ba;
3912 				break;
3913 #ifdef	RELOC_KVAR
3914 			case RELOC_KVAR:
3915 				if (((old & ~RELOC_MASK) < SCRIPT_KVAR_FIRST) ||
3916 				    ((old & ~RELOC_MASK) > SCRIPT_KVAR_LAST))
3917 					panic("KVAR out of range");
3918 				new = vtobus(script_kvars[old & ~RELOC_MASK]);
3919 #endif
3920 				break;
3921 			case 0:
3922 				/* Don't relocate a 0 address. */
3923 				if (old == 0) {
3924 					new = old;
3925 					break;
3926 				}
3927 				/* fall through */
3928 			default:
3929 				new = 0;	/* For 'cc' not to complain */
3930 				panic("sym_bind_script: "
3931 				      "weird relocation %x\n", old);
3932 				break;
3933 			}
3934 
3935 			*dst++ = cpu_to_scr(new);
3936 		}
3937 	};
3938 }
3939 
3940 /*
3941  *  Print something which allows to retrieve the controler type,
3942  *  unit, target, lun concerned by a kernel message.
3943  */
3944 static void PRINT_TARGET (hcb_p np, int target)
3945 {
3946 	printf ("%s:%d:", sym_name(np), target);
3947 }
3948 
3949 static void PRINT_LUN(hcb_p np, int target, int lun)
3950 {
3951 	printf ("%s:%d:%d:", sym_name(np), target, lun);
3952 }
3953 
3954 static void PRINT_ADDR (ccb_p cp)
3955 {
3956 	if (cp && cp->cam_ccb)
3957 		xpt_print_path(cp->cam_ccb->ccb_h.path);
3958 }
3959 
3960 /*
3961  *  Take into account this ccb in the freeze count.
3962  *  The flag that tells user about avoids doing that
3963  *  more than once for a ccb.
3964  */
3965 static void sym_freeze_cam_ccb(union ccb *ccb)
3966 {
3967 	if (!(ccb->ccb_h.flags & CAM_DEV_QFRZDIS)) {
3968 		if (!(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
3969 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
3970 			xpt_freeze_devq(ccb->ccb_h.path, 1);
3971 		}
3972 	}
3973 }
3974 
3975 /*
3976  *  Set the status field of a CAM CCB.
3977  */
3978 static __inline void sym_set_cam_status(union ccb *ccb, cam_status status)
3979 {
3980 	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3981 	ccb->ccb_h.status |= status;
3982 }
3983 
3984 /*
3985  *  Get the status field of a CAM CCB.
3986  */
3987 static __inline int sym_get_cam_status(union ccb *ccb)
3988 {
3989 	return ccb->ccb_h.status & CAM_STATUS_MASK;
3990 }
3991 
3992 /*
3993  *  Enqueue a CAM CCB.
3994  */
3995 static void sym_enqueue_cam_ccb(hcb_p np, union ccb *ccb)
3996 {
3997 	assert(!(ccb->ccb_h.status & CAM_SIM_QUEUED));
3998 	ccb->ccb_h.status = CAM_REQ_INPROG;
3999 
4000 	ccb->ccb_h.timeout_ch = timeout(sym_timeout, (caddr_t) ccb,
4001 				       ccb->ccb_h.timeout*hz/1000);
4002 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
4003 	ccb->ccb_h.sym_hcb_ptr = np;
4004 
4005 	sym_insque_tail(sym_qptr(&ccb->ccb_h.sim_links), &np->cam_ccbq);
4006 }
4007 
4008 /*
4009  *  Complete a pending CAM CCB.
4010  */
4011 static void sym_xpt_done(hcb_p np, union ccb *ccb)
4012 {
4013 	if (ccb->ccb_h.status & CAM_SIM_QUEUED) {
4014 		untimeout(sym_timeout, (caddr_t) ccb, ccb->ccb_h.timeout_ch);
4015 		sym_remque(sym_qptr(&ccb->ccb_h.sim_links));
4016 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4017 		ccb->ccb_h.sym_hcb_ptr = 0;
4018 	}
4019 	if (ccb->ccb_h.flags & CAM_DEV_QFREEZE)
4020 		sym_freeze_cam_ccb(ccb);
4021 	xpt_done(ccb);
4022 }
4023 
4024 static void sym_xpt_done2(hcb_p np, union ccb *ccb, int cam_status)
4025 {
4026 	sym_set_cam_status(ccb, cam_status);
4027 	sym_xpt_done(np, ccb);
4028 }
4029 
4030 /*
4031  *  SYMBIOS chip clock divisor table.
4032  *
4033  *  Divisors are multiplied by 10,000,000 in order to make
4034  *  calculations more simple.
4035  */
4036 #define _5M 5000000
4037 static u_long div_10M[] =
4038 	{2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M};
4039 
4040 /*
4041  *  SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64,
4042  *  128 transfers. All chips support at least 16 transfers
4043  *  bursts. The 825A, 875 and 895 chips support bursts of up
4044  *  to 128 transfers and the 895A and 896 support bursts of up
4045  *  to 64 transfers. All other chips support up to 16
4046  *  transfers bursts.
4047  *
4048  *  For PCI 32 bit data transfers each transfer is a DWORD.
4049  *  It is a QUADWORD (8 bytes) for PCI 64 bit data transfers.
4050  *  Only the 896 is able to perform 64 bit data transfers.
4051  *
4052  *  We use log base 2 (burst length) as internal code, with
4053  *  value 0 meaning "burst disabled".
4054  */
4055 
4056 /*
4057  *  Burst length from burst code.
4058  */
4059 #define burst_length(bc) (!(bc))? 0 : 1 << (bc)
4060 
4061 /*
4062  *  Burst code from io register bits.
4063  */
4064 #define burst_code(dmode, ctest4, ctest5) \
4065 	(ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1
4066 
4067 /*
4068  *  Set initial io register bits from burst code.
4069  */
4070 static __inline void sym_init_burst(hcb_p np, u_char bc)
4071 {
4072 	np->rv_ctest4	&= ~0x80;
4073 	np->rv_dmode	&= ~(0x3 << 6);
4074 	np->rv_ctest5	&= ~0x4;
4075 
4076 	if (!bc) {
4077 		np->rv_ctest4	|= 0x80;
4078 	}
4079 	else {
4080 		--bc;
4081 		np->rv_dmode	|= ((bc & 0x3) << 6);
4082 		np->rv_ctest5	|= (bc & 0x4);
4083 	}
4084 }
4085 
4086 
4087 /*
4088  * Print out the list of targets that have some flag disabled by user.
4089  */
4090 static void sym_print_targets_flag(hcb_p np, int mask, char *msg)
4091 {
4092 	int cnt;
4093 	int i;
4094 
4095 	for (cnt = 0, i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
4096 		if (i == np->myaddr)
4097 			continue;
4098 		if (np->target[i].usrflags & mask) {
4099 			if (!cnt++)
4100 				printf("%s: %s disabled for targets",
4101 					sym_name(np), msg);
4102 			printf(" %d", i);
4103 		}
4104 	}
4105 	if (cnt)
4106 		printf(".\n");
4107 }
4108 
4109 /*
4110  *  Save initial settings of some IO registers.
4111  *  Assumed to have been set by BIOS.
4112  *  We cannot reset the chip prior to reading the
4113  *  IO registers, since informations will be lost.
4114  *  Since the SCRIPTS processor may be running, this
4115  *  is not safe on paper, but it seems to work quite
4116  *  well. :)
4117  */
4118 static void sym_save_initial_setting (hcb_p np)
4119 {
4120 	np->sv_scntl0	= INB(nc_scntl0) & 0x0a;
4121 	np->sv_scntl3	= INB(nc_scntl3) & 0x07;
4122 	np->sv_dmode	= INB(nc_dmode)  & 0xce;
4123 	np->sv_dcntl	= INB(nc_dcntl)  & 0xa8;
4124 	np->sv_ctest3	= INB(nc_ctest3) & 0x01;
4125 	np->sv_ctest4	= INB(nc_ctest4) & 0x80;
4126 	np->sv_gpcntl	= INB(nc_gpcntl);
4127 	np->sv_stest1	= INB(nc_stest1);
4128 	np->sv_stest2	= INB(nc_stest2) & 0x20;
4129 	np->sv_stest4	= INB(nc_stest4);
4130 	if (np->features & FE_C10) {	/* Always large DMA fifo + ultra3 */
4131 		np->sv_scntl4	= INB(nc_scntl4);
4132 		np->sv_ctest5	= INB(nc_ctest5) & 0x04;
4133 	}
4134 	else
4135 		np->sv_ctest5	= INB(nc_ctest5) & 0x24;
4136 }
4137 
4138 /*
4139  *  Prepare io register values used by sym_init() according
4140  *  to selected and supported features.
4141  */
4142 static int sym_prepare_setting(hcb_p np, struct sym_nvram *nvram)
4143 {
4144 	u_char	burst_max;
4145 	u_long	period;
4146 	int i;
4147 
4148 	/*
4149 	 *  Wide ?
4150 	 */
4151 	np->maxwide	= (np->features & FE_WIDE)? 1 : 0;
4152 
4153 	/*
4154 	 *  Get the frequency of the chip's clock.
4155 	 */
4156 	if	(np->features & FE_QUAD)
4157 		np->multiplier	= 4;
4158 	else if	(np->features & FE_DBLR)
4159 		np->multiplier	= 2;
4160 	else
4161 		np->multiplier	= 1;
4162 
4163 	np->clock_khz	= (np->features & FE_CLK80)? 80000 : 40000;
4164 	np->clock_khz	*= np->multiplier;
4165 
4166 	if (np->clock_khz != 40000)
4167 		sym_getclock(np, np->multiplier);
4168 
4169 	/*
4170 	 * Divisor to be used for async (timer pre-scaler).
4171 	 */
4172 	i = np->clock_divn - 1;
4173 	while (--i >= 0) {
4174 		if (10ul * SYM_CONF_MIN_ASYNC * np->clock_khz > div_10M[i]) {
4175 			++i;
4176 			break;
4177 		}
4178 	}
4179 	np->rv_scntl3 = i+1;
4180 
4181 	/*
4182 	 * The C1010 uses hardwired divisors for async.
4183 	 * So, we just throw away, the async. divisor.:-)
4184 	 */
4185 	if (np->features & FE_C10)
4186 		np->rv_scntl3 = 0;
4187 
4188 	/*
4189 	 * Minimum synchronous period factor supported by the chip.
4190 	 * Btw, 'period' is in tenths of nanoseconds.
4191 	 */
4192 	period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz;
4193 	if	(period <= 250)		np->minsync = 10;
4194 	else if	(period <= 303)		np->minsync = 11;
4195 	else if	(period <= 500)		np->minsync = 12;
4196 	else				np->minsync = (period + 40 - 1) / 40;
4197 
4198 	/*
4199 	 * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2).
4200 	 */
4201 	if	(np->minsync < 25 &&
4202 		 !(np->features & (FE_ULTRA|FE_ULTRA2|FE_ULTRA3)))
4203 		np->minsync = 25;
4204 	else if	(np->minsync < 12 &&
4205 		 !(np->features & (FE_ULTRA2|FE_ULTRA3)))
4206 		np->minsync = 12;
4207 
4208 	/*
4209 	 * Maximum synchronous period factor supported by the chip.
4210 	 */
4211 	period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz);
4212 	np->maxsync = period > 2540 ? 254 : period / 10;
4213 
4214 	/*
4215 	 * If chip is a C1010, guess the sync limits in DT mode.
4216 	 */
4217 	if ((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) {
4218 		if (np->clock_khz == 160000) {
4219 			np->minsync_dt = 9;
4220 			np->maxsync_dt = 50;
4221 		}
4222 	}
4223 
4224 	/*
4225 	 *  64 bit (53C895A or 53C896) ?
4226 	 */
4227 	if (np->features & FE_64BIT)
4228 #if BITS_PER_LONG > 32
4229 		np->rv_ccntl1	|= (XTIMOD | EXTIBMV);
4230 #else
4231 		np->rv_ccntl1	|= (DDAC);
4232 #endif
4233 
4234 	/*
4235 	 *  Phase mismatch handled by SCRIPTS (895A/896/1010) ?
4236   	 */
4237 	if (np->features & FE_NOPM)
4238 		np->rv_ccntl0	|= (ENPMJ);
4239 
4240  	/*
4241 	 *  C1010 Errata.
4242 	 *  In dual channel mode, contention occurs if internal cycles
4243 	 *  are used. Disable internal cycles.
4244 	 */
4245 	if (np->device_id == PCI_ID_LSI53C1010 && np->revision_id < 0x45)
4246 		np->rv_ccntl0	|=  DILS;
4247 
4248 	/*
4249 	 *  Select burst length (dwords)
4250 	 */
4251 	burst_max	= SYM_SETUP_BURST_ORDER;
4252 	if (burst_max == 255)
4253 		burst_max = burst_code(np->sv_dmode, np->sv_ctest4,
4254 				       np->sv_ctest5);
4255 	if (burst_max > 7)
4256 		burst_max = 7;
4257 	if (burst_max > np->maxburst)
4258 		burst_max = np->maxburst;
4259 
4260 	/*
4261 	 *  DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2.
4262 	 *  This chip and the 860 Rev 1 may wrongly use PCI cache line
4263 	 *  based transactions on LOAD/STORE instructions. So we have
4264 	 *  to prevent these chips from using such PCI transactions in
4265 	 *  this driver. The generic ncr driver that does not use
4266 	 *  LOAD/STORE instructions does not need this work-around.
4267 	 */
4268 	if ((np->device_id == PCI_ID_SYM53C810 &&
4269 	     np->revision_id >= 0x10 && np->revision_id <= 0x11) ||
4270 	    (np->device_id == PCI_ID_SYM53C860 &&
4271 	     np->revision_id <= 0x1))
4272 		np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP);
4273 
4274 	/*
4275 	 *  Select all supported special features.
4276 	 *  If we are using on-board RAM for scripts, prefetch (PFEN)
4277 	 *  does not help, but burst op fetch (BOF) does.
4278 	 *  Disabling PFEN makes sure BOF will be used.
4279 	 */
4280 	if (np->features & FE_ERL)
4281 		np->rv_dmode	|= ERL;		/* Enable Read Line */
4282 	if (np->features & FE_BOF)
4283 		np->rv_dmode	|= BOF;		/* Burst Opcode Fetch */
4284 	if (np->features & FE_ERMP)
4285 		np->rv_dmode	|= ERMP;	/* Enable Read Multiple */
4286 #if 1
4287 	if ((np->features & FE_PFEN) && !np->ram_ba)
4288 #else
4289 	if (np->features & FE_PFEN)
4290 #endif
4291 		np->rv_dcntl	|= PFEN;	/* Prefetch Enable */
4292 	if (np->features & FE_CLSE)
4293 		np->rv_dcntl	|= CLSE;	/* Cache Line Size Enable */
4294 	if (np->features & FE_WRIE)
4295 		np->rv_ctest3	|= WRIE;	/* Write and Invalidate */
4296 	if (np->features & FE_DFS)
4297 		np->rv_ctest5	|= DFS;		/* Dma Fifo Size */
4298 
4299 	/*
4300 	 *  Select some other
4301 	 */
4302 	if (SYM_SETUP_PCI_PARITY)
4303 		np->rv_ctest4	|= MPEE; /* Master parity checking */
4304 	if (SYM_SETUP_SCSI_PARITY)
4305 		np->rv_scntl0	|= 0x0a; /*  full arb., ena parity, par->ATN  */
4306 
4307 	/*
4308 	 *  Get parity checking, host ID and verbose mode from NVRAM
4309 	 */
4310 	np->myaddr = 255;
4311 	sym_nvram_setup_host (np, nvram);
4312 
4313 	/*
4314 	 *  Get SCSI addr of host adapter (set by bios?).
4315 	 */
4316 	if (np->myaddr == 255) {
4317 		np->myaddr = INB(nc_scid) & 0x07;
4318 		if (!np->myaddr)
4319 			np->myaddr = SYM_SETUP_HOST_ID;
4320 	}
4321 
4322 	/*
4323 	 *  Prepare initial io register bits for burst length
4324 	 */
4325 	sym_init_burst(np, burst_max);
4326 
4327 	/*
4328 	 *  Set SCSI BUS mode.
4329 	 *  - LVD capable chips (895/895A/896/1010) report the
4330 	 *    current BUS mode through the STEST4 IO register.
4331 	 *  - For previous generation chips (825/825A/875),
4332 	 *    user has to tell us how to check against HVD,
4333 	 *    since a 100% safe algorithm is not possible.
4334 	 */
4335 	np->scsi_mode = SMODE_SE;
4336 	if (np->features & (FE_ULTRA2|FE_ULTRA3))
4337 		np->scsi_mode = (np->sv_stest4 & SMODE);
4338 	else if	(np->features & FE_DIFF) {
4339 		if (SYM_SETUP_SCSI_DIFF == 1) {
4340 			if (np->sv_scntl3) {
4341 				if (np->sv_stest2 & 0x20)
4342 					np->scsi_mode = SMODE_HVD;
4343 			}
4344 			else if (nvram->type == SYM_SYMBIOS_NVRAM) {
4345 				if (INB(nc_gpreg) & 0x08)
4346 					np->scsi_mode = SMODE_HVD;
4347 			}
4348 		}
4349 		else if	(SYM_SETUP_SCSI_DIFF == 2)
4350 			np->scsi_mode = SMODE_HVD;
4351 	}
4352 	if (np->scsi_mode == SMODE_HVD)
4353 		np->rv_stest2 |= 0x20;
4354 
4355 	/*
4356 	 *  Set LED support from SCRIPTS.
4357 	 *  Ignore this feature for boards known to use a
4358 	 *  specific GPIO wiring and for the 895A or 896
4359 	 *  that drive the LED directly.
4360 	 */
4361 	if ((SYM_SETUP_SCSI_LED || nvram->type == SYM_SYMBIOS_NVRAM) &&
4362 	    !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01))
4363 		np->features |= FE_LED0;
4364 
4365 	/*
4366 	 *  Set irq mode.
4367 	 */
4368 	switch(SYM_SETUP_IRQ_MODE & 3) {
4369 	case 2:
4370 		np->rv_dcntl	|= IRQM;
4371 		break;
4372 	case 1:
4373 		np->rv_dcntl	|= (np->sv_dcntl & IRQM);
4374 		break;
4375 	default:
4376 		break;
4377 	}
4378 
4379 	/*
4380 	 *  Configure targets according to driver setup.
4381 	 *  If NVRAM present get targets setup from NVRAM.
4382 	 */
4383 	for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
4384 		tcb_p tp = &np->target[i];
4385 
4386 		tp->tinfo.user.period = np->minsync;
4387 		tp->tinfo.user.offset = np->maxoffs;
4388 		tp->tinfo.user.width  = np->maxwide ? BUS_16_BIT : BUS_8_BIT;
4389 		tp->usrflags |= (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);
4390 		tp->usrtags = SYM_SETUP_MAX_TAG;
4391 
4392 		sym_nvram_setup_target (np, i, nvram);
4393 
4394 		if (!tp->usrtags)
4395 			tp->usrflags &= ~SYM_TAGS_ENABLED;
4396 	}
4397 
4398 	/*
4399 	 *  Let user know about the settings.
4400 	 */
4401 	i = nvram->type;
4402 	printf("%s: %s NVRAM, ID %d, Fast-%d, %s, %s\n", sym_name(np),
4403 		i  == SYM_SYMBIOS_NVRAM ? "Symbios" :
4404 		(i == SYM_TEKRAM_NVRAM  ? "Tekram" : "No"),
4405 		np->myaddr,
4406 		(np->features & FE_ULTRA3) ? 80 :
4407 		(np->features & FE_ULTRA2) ? 40 :
4408 		(np->features & FE_ULTRA)  ? 20 : 10,
4409 		sym_scsi_bus_mode(np->scsi_mode),
4410 		(np->rv_scntl0 & 0xa)	? "parity checking" : "NO parity");
4411 	/*
4412 	 *  Tell him more on demand.
4413 	 */
4414 	if (sym_verbose) {
4415 		printf("%s: %s IRQ line driver%s\n",
4416 			sym_name(np),
4417 			np->rv_dcntl & IRQM ? "totem pole" : "open drain",
4418 			np->ram_ba ? ", using on-chip SRAM" : "");
4419 		if (np->features & FE_NOPM)
4420 			printf("%s: handling phase mismatch from SCRIPTS.\n",
4421 			       sym_name(np));
4422 	}
4423 	/*
4424 	 *  And still more.
4425 	 */
4426 	if (sym_verbose > 1) {
4427 		printf ("%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
4428 			"(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
4429 			sym_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl,
4430 			np->sv_ctest3, np->sv_ctest4, np->sv_ctest5);
4431 
4432 		printf ("%s: final   SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
4433 			"(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
4434 			sym_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl,
4435 			np->rv_ctest3, np->rv_ctest4, np->rv_ctest5);
4436 	}
4437 	/*
4438 	 *  Let user be aware of targets that have some disable flags set.
4439 	 */
4440 	sym_print_targets_flag(np, SYM_SCAN_BOOT_DISABLED, "SCAN AT BOOT");
4441 	if (sym_verbose)
4442 		sym_print_targets_flag(np, SYM_SCAN_LUNS_DISABLED,
4443 				       "SCAN FOR LUNS");
4444 
4445 	return 0;
4446 }
4447 
4448 /*
4449  *  Prepare the next negotiation message if needed.
4450  *
4451  *  Fill in the part of message buffer that contains the
4452  *  negotiation and the nego_status field of the CCB.
4453  *  Returns the size of the message in bytes.
4454  */
4455 
4456 static int sym_prepare_nego(hcb_p np, ccb_p cp, int nego, u_char *msgptr)
4457 {
4458 	tcb_p tp = &np->target[cp->target];
4459 	int msglen = 0;
4460 
4461 #if 1
4462 	/*
4463 	 *  For now, only use PPR with DT option if period factor = 9.
4464 	 */
4465 	if (tp->tinfo.goal.period == 9) {
4466 		tp->tinfo.goal.width = BUS_16_BIT;
4467 		tp->tinfo.goal.options |= PPR_OPT_DT;
4468 	}
4469 	else
4470 		tp->tinfo.goal.options &= ~PPR_OPT_DT;
4471 #endif
4472 	/*
4473 	 *  Early C1010 chips need a work-around for DT
4474 	 *  data transfer to work.
4475 	 */
4476 	if (!(np->features & FE_U3EN))
4477 		tp->tinfo.goal.options = 0;
4478 	/*
4479 	 *  negotiate using PPR ?
4480 	 */
4481 	if (tp->tinfo.goal.options & PPR_OPT_MASK)
4482 		nego = NS_PPR;
4483 	/*
4484 	 *  negotiate wide transfers ?
4485 	 */
4486 	else if (tp->tinfo.current.width != tp->tinfo.goal.width)
4487 		nego = NS_WIDE;
4488 	/*
4489 	 *  negotiate synchronous transfers?
4490 	 */
4491 	else if (tp->tinfo.current.period != tp->tinfo.goal.period ||
4492 		 tp->tinfo.current.offset != tp->tinfo.goal.offset)
4493 		nego = NS_SYNC;
4494 
4495 	switch (nego) {
4496 	case NS_SYNC:
4497 		msgptr[msglen++] = M_EXTENDED;
4498 		msgptr[msglen++] = 3;
4499 		msgptr[msglen++] = M_X_SYNC_REQ;
4500 		msgptr[msglen++] = tp->tinfo.goal.period;
4501 		msgptr[msglen++] = tp->tinfo.goal.offset;
4502 		break;
4503 	case NS_WIDE:
4504 		msgptr[msglen++] = M_EXTENDED;
4505 		msgptr[msglen++] = 2;
4506 		msgptr[msglen++] = M_X_WIDE_REQ;
4507 		msgptr[msglen++] = tp->tinfo.goal.width;
4508 		break;
4509 	case NS_PPR:
4510 		msgptr[msglen++] = M_EXTENDED;
4511 		msgptr[msglen++] = 6;
4512 		msgptr[msglen++] = M_X_PPR_REQ;
4513 		msgptr[msglen++] = tp->tinfo.goal.period;
4514 		msgptr[msglen++] = 0;
4515 		msgptr[msglen++] = tp->tinfo.goal.offset;
4516 		msgptr[msglen++] = tp->tinfo.goal.width;
4517 		msgptr[msglen++] = tp->tinfo.goal.options & PPR_OPT_DT;
4518 		break;
4519 	};
4520 
4521 	cp->nego_status = nego;
4522 
4523 	if (nego) {
4524 		tp->nego_cp = cp; /* Keep track a nego will be performed */
4525 		if (DEBUG_FLAGS & DEBUG_NEGO) {
4526 			sym_print_msg(cp, nego == NS_SYNC ? "sync msgout" :
4527 					  nego == NS_WIDE ? "wide msgout" :
4528 					  "ppr msgout", msgptr);
4529 		};
4530 	};
4531 
4532 	return msglen;
4533 }
4534 
4535 /*
4536  *  Insert a job into the start queue.
4537  */
4538 static void sym_put_start_queue(hcb_p np, ccb_p cp)
4539 {
4540 	u_short	qidx;
4541 
4542 #ifdef SYM_CONF_IARB_SUPPORT
4543 	/*
4544 	 *  If the previously queued CCB is not yet done,
4545 	 *  set the IARB hint. The SCRIPTS will go with IARB
4546 	 *  for this job when starting the previous one.
4547 	 *  We leave devices a chance to win arbitration by
4548 	 *  not using more than 'iarb_max' consecutive
4549 	 *  immediate arbitrations.
4550 	 */
4551 	if (np->last_cp && np->iarb_count < np->iarb_max) {
4552 		np->last_cp->host_flags |= HF_HINT_IARB;
4553 		++np->iarb_count;
4554 	}
4555 	else
4556 		np->iarb_count = 0;
4557 	np->last_cp = cp;
4558 #endif
4559 
4560 	/*
4561 	 *  Insert first the idle task and then our job.
4562 	 *  The MB should ensure proper ordering.
4563 	 */
4564 	qidx = np->squeueput + 2;
4565 	if (qidx >= MAX_QUEUE*2) qidx = 0;
4566 
4567 	np->squeue [qidx]	   = cpu_to_scr(np->idletask_ba);
4568 	MEMORY_BARRIER();
4569 	np->squeue [np->squeueput] = cpu_to_scr(cp->ccb_ba);
4570 
4571 	np->squeueput = qidx;
4572 
4573 	if (DEBUG_FLAGS & DEBUG_QUEUE)
4574 		printf ("%s: queuepos=%d.\n", sym_name (np), np->squeueput);
4575 
4576 	/*
4577 	 *  Script processor may be waiting for reselect.
4578 	 *  Wake it up.
4579 	 */
4580 	MEMORY_BARRIER();
4581 	OUTB (nc_istat, SIGP|np->istat_sem);
4582 }
4583 
4584 
4585 /*
4586  *  Soft reset the chip.
4587  *
4588  *  Raising SRST when the chip is running may cause
4589  *  problems on dual function chips (see below).
4590  *  On the other hand, LVD devices need some delay
4591  *  to settle and report actual BUS mode in STEST4.
4592  */
4593 static void sym_chip_reset (hcb_p np)
4594 {
4595 	OUTB (nc_istat, SRST);
4596 	UDELAY (10);
4597 	OUTB (nc_istat, 0);
4598 	UDELAY(2000);	/* For BUS MODE to settle */
4599 }
4600 
4601 /*
4602  *  Soft reset the chip.
4603  *
4604  *  Some 896 and 876 chip revisions may hang-up if we set
4605  *  the SRST (soft reset) bit at the wrong time when SCRIPTS
4606  *  are running.
4607  *  So, we need to abort the current operation prior to
4608  *  soft resetting the chip.
4609  */
4610 static void sym_soft_reset (hcb_p np)
4611 {
4612 	u_char istat;
4613 	int i;
4614 
4615 	OUTB (nc_istat, CABRT);
4616 	for (i = 1000000 ; i ; --i) {
4617 		istat = INB (nc_istat);
4618 		if (istat & SIP) {
4619 			INW (nc_sist);
4620 			continue;
4621 		}
4622 		if (istat & DIP) {
4623 			OUTB (nc_istat, 0);
4624 			INB (nc_dstat);
4625 			break;
4626 		}
4627 	}
4628 	if (!i)
4629 		printf("%s: unable to abort current chip operation.\n",
4630 			sym_name(np));
4631 	sym_chip_reset (np);
4632 }
4633 
4634 /*
4635  *  Start reset process.
4636  *
4637  *  The interrupt handler will reinitialize the chip.
4638  */
4639 static void sym_start_reset(hcb_p np)
4640 {
4641 	(void) sym_reset_scsi_bus(np, 1);
4642 }
4643 
4644 static int sym_reset_scsi_bus(hcb_p np, int enab_int)
4645 {
4646 	u32 term;
4647 	int retv = 0;
4648 
4649 	sym_soft_reset(np);	/* Soft reset the chip */
4650 	if (enab_int)
4651 		OUTW (nc_sien, RST);
4652 	/*
4653 	 *  Enable Tolerant, reset IRQD if present and
4654 	 *  properly set IRQ mode, prior to resetting the bus.
4655 	 */
4656 	OUTB (nc_stest3, TE);
4657 	OUTB (nc_dcntl, (np->rv_dcntl & IRQM));
4658 	OUTB (nc_scntl1, CRST);
4659 	UDELAY (200);
4660 
4661 	if (!SYM_SETUP_SCSI_BUS_CHECK)
4662 		goto out;
4663 	/*
4664 	 *  Check for no terminators or SCSI bus shorts to ground.
4665 	 *  Read SCSI data bus, data parity bits and control signals.
4666 	 *  We are expecting RESET to be TRUE and other signals to be
4667 	 *  FALSE.
4668 	 */
4669 	term =	INB(nc_sstat0);
4670 	term =	((term & 2) << 7) + ((term & 1) << 17);	/* rst sdp0 */
4671 	term |= ((INB(nc_sstat2) & 0x01) << 26) |	/* sdp1     */
4672 		((INW(nc_sbdl) & 0xff)   << 9)  |	/* d7-0     */
4673 		((INW(nc_sbdl) & 0xff00) << 10) |	/* d15-8    */
4674 		INB(nc_sbcl);	/* req ack bsy sel atn msg cd io    */
4675 
4676 	if (!(np->features & FE_WIDE))
4677 		term &= 0x3ffff;
4678 
4679 	if (term != (2<<7)) {
4680 		printf("%s: suspicious SCSI data while resetting the BUS.\n",
4681 			sym_name(np));
4682 		printf("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = "
4683 			"0x%lx, expecting 0x%lx\n",
4684 			sym_name(np),
4685 			(np->features & FE_WIDE) ? "dp1,d15-8," : "",
4686 			(u_long)term, (u_long)(2<<7));
4687 		if (SYM_SETUP_SCSI_BUS_CHECK == 1)
4688 			retv = 1;
4689 	}
4690 out:
4691 	OUTB (nc_scntl1, 0);
4692 	/* MDELAY(100); */
4693 	return retv;
4694 }
4695 
4696 /*
4697  *  The chip may have completed jobs. Look at the DONE QUEUE.
4698  */
4699 static int sym_wakeup_done (hcb_p np)
4700 {
4701 	ccb_p cp;
4702 	int i, n;
4703 	u_long dsa;
4704 
4705 	n = 0;
4706 	i = np->dqueueget;
4707 	while (1) {
4708 		dsa = scr_to_cpu(np->dqueue[i]);
4709 		if (!dsa)
4710 			break;
4711 		np->dqueue[i] = 0;
4712 		if ((i = i+2) >= MAX_QUEUE*2)
4713 			i = 0;
4714 
4715 		cp = sym_ccb_from_dsa(np, dsa);
4716 		if (cp) {
4717 			sym_complete_ok (np, cp);
4718 			++n;
4719 		}
4720 		else
4721 			printf ("%s: bad DSA (%lx) in done queue.\n",
4722 				sym_name(np), dsa);
4723 	}
4724 	np->dqueueget = i;
4725 
4726 	return n;
4727 }
4728 
4729 /*
4730  *  Complete all active CCBs with error.
4731  *  Used on CHIP/SCSI RESET.
4732  */
4733 static void sym_flush_busy_queue (hcb_p np, int cam_status)
4734 {
4735 	/*
4736 	 *  Move all active CCBs to the COMP queue
4737 	 *  and flush this queue.
4738 	 */
4739 	sym_que_splice(&np->busy_ccbq, &np->comp_ccbq);
4740 	sym_que_init(&np->busy_ccbq);
4741 	sym_flush_comp_queue(np, cam_status);
4742 }
4743 
4744 /*
4745  *  Start chip.
4746  *
4747  *  'reason' means:
4748  *     0: initialisation.
4749  *     1: SCSI BUS RESET delivered or received.
4750  *     2: SCSI BUS MODE changed.
4751  */
4752 static void sym_init (hcb_p np, int reason)
4753 {
4754  	int	i;
4755 	u_long	phys;
4756 
4757  	/*
4758 	 *  Reset chip if asked, otherwise just clear fifos.
4759  	 */
4760 	if (reason == 1)
4761 		sym_soft_reset(np);
4762 	else {
4763 		OUTB (nc_stest3, TE|CSF);
4764 		OUTONB (nc_ctest3, CLF);
4765 	}
4766 
4767 	/*
4768 	 *  Clear Start Queue
4769 	 */
4770 	phys = np->squeue_ba;
4771 	for (i = 0; i < MAX_QUEUE*2; i += 2) {
4772 		np->squeue[i]   = cpu_to_scr(np->idletask_ba);
4773 		np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4);
4774 	}
4775 	np->squeue[MAX_QUEUE*2-1] = cpu_to_scr(phys);
4776 
4777 	/*
4778 	 *  Start at first entry.
4779 	 */
4780 	np->squeueput = 0;
4781 	np->scripth0->startpos[0] = cpu_to_scr(phys);
4782 
4783 	/*
4784 	 *  Clear Done Queue
4785 	 */
4786 	phys = vtobus(np->dqueue);
4787 	for (i = 0; i < MAX_QUEUE*2; i += 2) {
4788 		np->dqueue[i]   = 0;
4789 		np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4);
4790 	}
4791 	np->dqueue[MAX_QUEUE*2-1] = cpu_to_scr(phys);
4792 
4793 	/*
4794 	 *  Start at first entry.
4795 	 */
4796 	np->scripth0->done_pos[0] = cpu_to_scr(phys);
4797 	np->dqueueget = 0;
4798 
4799 	/*
4800 	 *  Wakeup all pending jobs.
4801 	 */
4802 	sym_flush_busy_queue(np, CAM_SCSI_BUS_RESET);
4803 
4804 	/*
4805 	 *  Init chip.
4806 	 */
4807 	OUTB (nc_istat,  0x00   );	/*  Remove Reset, abort */
4808 	UDELAY (2000);	/* The 895 needs time for the bus mode to settle */
4809 
4810 	OUTB (nc_scntl0, np->rv_scntl0 | 0xc0);
4811 					/*  full arb., ena parity, par->ATN  */
4812 	OUTB (nc_scntl1, 0x00);		/*  odd parity, and remove CRST!! */
4813 
4814 	sym_selectclock(np, np->rv_scntl3);	/* Select SCSI clock */
4815 
4816 	OUTB (nc_scid  , RRE|np->myaddr);	/* Adapter SCSI address */
4817 	OUTW (nc_respid, 1ul<<np->myaddr);	/* Id to respond to */
4818 	OUTB (nc_istat , SIGP	);		/*  Signal Process */
4819 	OUTB (nc_dmode , np->rv_dmode);		/* Burst length, dma mode */
4820 	OUTB (nc_ctest5, np->rv_ctest5);	/* Large fifo + large burst */
4821 
4822 	OUTB (nc_dcntl , NOCOM|np->rv_dcntl);	/* Protect SFBR */
4823 	OUTB (nc_ctest3, np->rv_ctest3);	/* Write and invalidate */
4824 	OUTB (nc_ctest4, np->rv_ctest4);	/* Master parity checking */
4825 
4826 	/* Extended Sreq/Sack filtering not supported on the C10 */
4827 	if (np->features & FE_C10)
4828 		OUTB (nc_stest2, np->rv_stest2);
4829 	else
4830 		OUTB (nc_stest2, EXT|np->rv_stest2);
4831 
4832 	OUTB (nc_stest3, TE);			/* TolerANT enable */
4833 	OUTB (nc_stime0, 0x0c);			/* HTH disabled  STO 0.25 sec */
4834 
4835 	/*
4836 	 *  C10101 Errata.
4837 	 *  Errant SGE's when in narrow. Write bits 4 & 5 of
4838 	 *  STEST1 register to disable SGE. We probably should do
4839 	 *  that from SCRIPTS for each selection/reselection, but
4840 	 *  I just don't want. :)
4841 	 */
4842 	if (np->device_id == PCI_ID_LSI53C1010 && np->revision_id < 0x45)
4843 		OUTB (nc_stest1, INB(nc_stest1) | 0x30);
4844 
4845 	/*
4846 	 *  DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2.
4847 	 *  Disable overlapped arbitration for some dual function devices,
4848 	 *  regardless revision id (kind of post-chip-design feature. ;-))
4849 	 */
4850 	if (np->device_id == PCI_ID_SYM53C875)
4851 		OUTB (nc_ctest0, (1<<5));
4852 	else if (np->device_id == PCI_ID_SYM53C896)
4853 		np->rv_ccntl0 |= DPR;
4854 
4855 	/*
4856 	 *  If 64 bit (895A/896/1010) write CCNTL1 to enable 40 bit
4857 	 *  address table indirect addressing for MOVE.
4858 	 *  Also write CCNTL0 if 64 bit chip, since this register seems
4859 	 *  to only be used by 64 bit cores.
4860 	 */
4861 	if (np->features & FE_64BIT) {
4862 		OUTB (nc_ccntl0, np->rv_ccntl0);
4863 		OUTB (nc_ccntl1, np->rv_ccntl1);
4864 	}
4865 
4866 	/*
4867 	 *  If phase mismatch handled by scripts (895A/896/1010),
4868 	 *  set PM jump addresses.
4869 	 */
4870 	if (np->features & FE_NOPM) {
4871 		OUTL (nc_pmjad1, SCRIPTH_BA (np, pm_handle));
4872 		OUTL (nc_pmjad2, SCRIPTH_BA (np, pm_handle));
4873 	}
4874 
4875 	/*
4876 	 *    Enable GPIO0 pin for writing if LED support from SCRIPTS.
4877 	 *    Also set GPIO5 and clear GPIO6 if hardware LED control.
4878 	 */
4879 	if (np->features & FE_LED0)
4880 		OUTB(nc_gpcntl, INB(nc_gpcntl) & ~0x01);
4881 	else if (np->features & FE_LEDC)
4882 		OUTB(nc_gpcntl, (INB(nc_gpcntl) & ~0x41) | 0x20);
4883 
4884 	/*
4885 	 *      enable ints
4886 	 */
4887 	OUTW (nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR);
4888 	OUTB (nc_dien , MDPE|BF|SSI|SIR|IID);
4889 
4890 	/*
4891 	 *  For 895/6 enable SBMC interrupt and save current SCSI bus mode.
4892 	 *  Try to eat the spurious SBMC interrupt that may occur when
4893 	 *  we reset the chip but not the SCSI BUS (at initialization).
4894 	 */
4895 	if (np->features & (FE_ULTRA2|FE_ULTRA3)) {
4896 		OUTONW (nc_sien, SBMC);
4897 		if (reason == 0) {
4898 			MDELAY(100);
4899 			INW (nc_sist);
4900 		}
4901 		np->scsi_mode = INB (nc_stest4) & SMODE;
4902 	}
4903 
4904 	/*
4905 	 *  Fill in target structure.
4906 	 *  Reinitialize usrsync.
4907 	 *  Reinitialize usrwide.
4908 	 *  Prepare sync negotiation according to actual SCSI bus mode.
4909 	 */
4910 	for (i=0;i<SYM_CONF_MAX_TARGET;i++) {
4911 		tcb_p tp = &np->target[i];
4912 
4913 		tp->to_reset = 0;
4914 		tp->sval    = 0;
4915 		tp->wval    = np->rv_scntl3;
4916 		tp->uval    = 0;
4917 
4918 		tp->tinfo.current.period = 0;
4919 		tp->tinfo.current.offset = 0;
4920 		tp->tinfo.current.width  = BUS_8_BIT;
4921 		tp->tinfo.current.options = 0;
4922 	}
4923 
4924 	/*
4925 	 *  Download SCSI SCRIPTS to on-chip RAM if present,
4926 	 *  and start script processor.
4927 	 */
4928 	if (np->ram_ba) {
4929 		if (sym_verbose > 1)
4930 			printf ("%s: Downloading SCSI SCRIPTS.\n",
4931 				sym_name(np));
4932 		if (np->ram_ws == 8192) {
4933 			memcpy_to_pci(np->ram_va + 4096,
4934 					np->scripth0, sizeof(struct sym_scrh));
4935 			OUTL (nc_mmws, np->scr_ram_seg);
4936 			OUTL (nc_mmrs, np->scr_ram_seg);
4937 			OUTL (nc_sfs,  np->scr_ram_seg);
4938 			phys = SCRIPTH_BA (np, start64);
4939 		}
4940 		else
4941 			phys = SCRIPT_BA (np, init);
4942 		memcpy_to_pci(np->ram_va,np->script0,sizeof(struct sym_scr));
4943 	}
4944 	else
4945 		phys = SCRIPT_BA (np, init);
4946 
4947 	np->istat_sem = 0;
4948 
4949 	MEMORY_BARRIER();
4950 	OUTL (nc_dsa, np->hcb_ba);
4951 	OUTL (nc_dsp, phys);
4952 
4953 	/*
4954 	 *  Notify the XPT about the RESET condition.
4955 	 */
4956 	if (reason != 0)
4957 		xpt_async(AC_BUS_RESET, np->path, NULL);
4958 }
4959 
4960 /*
4961  *  Get clock factor and sync divisor for a given
4962  *  synchronous factor period.
4963  */
4964 static int
4965 sym_getsync(hcb_p np, u_char dt, u_char sfac, u_char *divp, u_char *fakp)
4966 {
4967 	u32	clk = np->clock_khz;	/* SCSI clock frequency in kHz	*/
4968 	int	div = np->clock_divn;	/* Number of divisors supported	*/
4969 	u32	fak;			/* Sync factor in sxfer		*/
4970 	u32	per;			/* Period in tenths of ns	*/
4971 	u32	kpc;			/* (per * clk)			*/
4972 	int	ret;
4973 
4974 	/*
4975 	 *  Compute the synchronous period in tenths of nano-seconds
4976 	 */
4977 	if (dt && sfac <= 9)	per = 125;
4978 	else if	(sfac <= 10)	per = 250;
4979 	else if	(sfac == 11)	per = 303;
4980 	else if	(sfac == 12)	per = 500;
4981 	else			per = 40 * sfac;
4982 	ret = per;
4983 
4984 	kpc = per * clk;
4985 	if (dt)
4986 		kpc <<= 1;
4987 
4988 	/*
4989 	 *  For earliest C10, the extra clocks does not apply
4990 	 *  to CRC cycles, so it may be safe not to use them.
4991 	 *  Note that this limits the lowest sync data transfer
4992 	 *  to 5 Mega-transfers per second and may result in
4993 	 *  using higher clock divisors.
4994 	 */
4995 #if 1
4996 	if ((np->features & (FE_C10|FE_U3EN)) == FE_C10) {
4997 		/*
4998 		 *  Look for the lowest clock divisor that allows an
4999 		 *  output speed not faster than the period.
5000 		 */
5001 		while (div > 0) {
5002 			--div;
5003 			if (kpc > (div_10M[div] << 2)) {
5004 				++div;
5005 				break;
5006 			}
5007 		}
5008 		fak = 0;			/* No extra clocks */
5009 		if (div == np->clock_divn) {	/* Are we too fast ? */
5010 			ret = -1;
5011 		}
5012 		*divp = div;
5013 		*fakp = fak;
5014 		return ret;
5015 	}
5016 #endif
5017 
5018 	/*
5019 	 *  Look for the greatest clock divisor that allows an
5020 	 *  input speed faster than the period.
5021 	 */
5022 	while (div-- > 0)
5023 		if (kpc >= (div_10M[div] << 2)) break;
5024 
5025 	/*
5026 	 *  Calculate the lowest clock factor that allows an output
5027 	 *  speed not faster than the period, and the max output speed.
5028 	 *  If fak >= 1 we will set both XCLKH_ST and XCLKH_DT.
5029 	 *  If fak >= 2 we will also set XCLKS_ST and XCLKS_DT.
5030 	 */
5031 	if (dt) {
5032 		fak = (kpc - 1) / (div_10M[div] << 1) + 1 - 2;
5033 		/* ret = ((2+fak)*div_10M[div])/np->clock_khz; */
5034 	}
5035 	else {
5036 		fak = (kpc - 1) / div_10M[div] + 1 - 4;
5037 		/* ret = ((4+fak)*div_10M[div])/np->clock_khz; */
5038 	}
5039 
5040 	/*
5041 	 *  Check against our hardware limits, or bugs :).
5042 	 */
5043 	if (fak < 0)	{fak = 0; ret = -1;}
5044 	if (fak > 2)	{fak = 2; ret = -1;}
5045 
5046 	/*
5047 	 *  Compute and return sync parameters.
5048 	 */
5049 	*divp = div;
5050 	*fakp = fak;
5051 
5052 	return ret;
5053 }
5054 
5055 /*
5056  *  We received a WDTR.
5057  *  Let everything be aware of the changes.
5058  */
5059 static void sym_setwide(hcb_p np, ccb_p cp, u_char wide)
5060 {
5061 	struct	ccb_trans_settings neg;
5062 	union ccb *ccb = cp->cam_ccb;
5063 	tcb_p tp = &np->target[cp->target];
5064 
5065 	sym_settrans(np, cp, 0, 0, 0, wide, 0, 0);
5066 
5067 	/*
5068 	 *  Tell the SCSI layer about the new transfer parameters.
5069 	 */
5070 	tp->tinfo.goal.width = tp->tinfo.current.width = wide;
5071 	tp->tinfo.current.offset = 0;
5072 	tp->tinfo.current.period = 0;
5073 	tp->tinfo.current.options = 0;
5074 	neg.bus_width = wide ? BUS_16_BIT : BUS_8_BIT;
5075 	neg.sync_period = tp->tinfo.current.period;
5076 	neg.sync_offset = tp->tinfo.current.offset;
5077 	neg.valid = CCB_TRANS_BUS_WIDTH_VALID
5078 		  | CCB_TRANS_SYNC_RATE_VALID
5079 		  | CCB_TRANS_SYNC_OFFSET_VALID;
5080 	xpt_setup_ccb(&neg.ccb_h, ccb->ccb_h.path, /*priority*/1);
5081 	xpt_async(AC_TRANSFER_NEG, ccb->ccb_h.path, &neg);
5082 }
5083 
5084 /*
5085  *  We received a SDTR.
5086  *  Let everything be aware of the changes.
5087  */
5088 static void
5089 sym_setsync(hcb_p np, ccb_p cp, u_char ofs, u_char per, u_char div, u_char fak)
5090 {
5091 	struct	ccb_trans_settings neg;
5092 	union ccb *ccb = cp->cam_ccb;
5093 	tcb_p tp = &np->target[cp->target];
5094 	u_char wide = (cp->phys.select.sel_scntl3 & EWS) ? 1 : 0;
5095 
5096 	sym_settrans(np, cp, 0, ofs, per, wide, div, fak);
5097 
5098 	/*
5099 	 *  Tell the SCSI layer about the new transfer parameters.
5100 	 */
5101 	tp->tinfo.goal.period	= tp->tinfo.current.period  = per;
5102 	tp->tinfo.goal.offset	= tp->tinfo.current.offset  = ofs;
5103 	tp->tinfo.goal.options	= tp->tinfo.current.options = 0;
5104 	neg.sync_period = tp->tinfo.current.period;
5105 	neg.sync_offset = tp->tinfo.current.offset;
5106 	neg.valid = CCB_TRANS_SYNC_RATE_VALID
5107 		  | CCB_TRANS_SYNC_OFFSET_VALID;
5108 	xpt_setup_ccb(&neg.ccb_h, ccb->ccb_h.path, /*priority*/1);
5109 	xpt_async(AC_TRANSFER_NEG, ccb->ccb_h.path, &neg);
5110 }
5111 
5112 /*
5113  *  We received a PPR.
5114  *  Let everything be aware of the changes.
5115  */
5116 static void sym_setpprot(hcb_p np, ccb_p cp, u_char dt, u_char ofs,
5117 			 u_char per, u_char wide, u_char div, u_char fak)
5118 {
5119 	struct	ccb_trans_settings neg;
5120 	union ccb *ccb = cp->cam_ccb;
5121 	tcb_p tp = &np->target[cp->target];
5122 
5123 	sym_settrans(np, cp, dt, ofs, per, wide, div, fak);
5124 
5125 	/*
5126 	 *  Tell the SCSI layer about the new transfer parameters.
5127 	 */
5128 	tp->tinfo.goal.width	= tp->tinfo.current.width  = wide;
5129 	tp->tinfo.goal.period	= tp->tinfo.current.period = per;
5130 	tp->tinfo.goal.offset	= tp->tinfo.current.offset = ofs;
5131 	tp->tinfo.goal.options	= tp->tinfo.current.options = dt;
5132 	neg.sync_period = tp->tinfo.current.period;
5133 	neg.sync_offset = tp->tinfo.current.offset;
5134 	neg.bus_width = wide ? BUS_16_BIT : BUS_8_BIT;
5135 	neg.valid = CCB_TRANS_BUS_WIDTH_VALID
5136 		  | CCB_TRANS_SYNC_RATE_VALID
5137 		  | CCB_TRANS_SYNC_OFFSET_VALID;
5138 	xpt_setup_ccb(&neg.ccb_h, ccb->ccb_h.path, /*priority*/1);
5139 	xpt_async(AC_TRANSFER_NEG, ccb->ccb_h.path, &neg);
5140 }
5141 
5142 /*
5143  *  Switch trans mode for current job and it's target.
5144  */
5145 static void sym_settrans(hcb_p np, ccb_p cp, u_char dt, u_char ofs,
5146 			 u_char per, u_char wide, u_char div, u_char fak)
5147 {
5148 	SYM_QUEHEAD *qp;
5149 	union	ccb *ccb;
5150 	tcb_p tp;
5151 	u_char target = INB (nc_sdid) & 0x0f;
5152 	u_char sval, wval, uval;
5153 
5154 	assert (cp);
5155 	if (!cp) return;
5156 	ccb = cp->cam_ccb;
5157 	assert (ccb);
5158 	if (!ccb) return;
5159 	assert (target == (cp->target & 0xf));
5160 	tp = &np->target[target];
5161 
5162 	sval = tp->sval;
5163 	wval = tp->wval;
5164 	uval = tp->uval;
5165 
5166 #if 0
5167 	printf("XXXX sval=%x wval=%x uval=%x (%x)\n",
5168 		sval, wval, uval, np->rv_scntl3);
5169 #endif
5170 	/*
5171 	 *  Set the offset.
5172 	 */
5173 	if (!(np->features & FE_C10))
5174 		sval = (sval & ~0x1f) | ofs;
5175 	else
5176 		sval = (sval & ~0x3f) | ofs;
5177 
5178 	/*
5179 	 *  Set the sync divisor and extra clock factor.
5180 	 */
5181 	if (ofs != 0) {
5182 		wval = (wval & ~0x70) | ((div+1) << 4);
5183 		if (!(np->features & FE_C10))
5184 			sval = (sval & ~0xe0) | (fak << 5);
5185 		else {
5186 			uval = uval & ~(XCLKH_ST|XCLKH_DT|XCLKS_ST|XCLKS_DT);
5187 			if (fak >= 1) uval |= (XCLKH_ST|XCLKH_DT);
5188 			if (fak >= 2) uval |= (XCLKS_ST|XCLKS_DT);
5189 		}
5190 	}
5191 
5192 	/*
5193 	 *  Set the bus width.
5194 	 */
5195 	wval = wval & ~EWS;
5196 	if (wide != 0)
5197 		wval |= EWS;
5198 
5199 	/*
5200 	 *  Set misc. ultra enable bits.
5201 	 */
5202 	if (np->features & FE_C10) {
5203 		uval = uval & ~U3EN;
5204 		if (dt)	{
5205 			assert(np->features & FE_U3EN);
5206 			uval |= U3EN;
5207 		}
5208 	}
5209 	else {
5210 		wval = wval & ~ULTRA;
5211 		if (per <= 12)	wval |= ULTRA;
5212 	}
5213 
5214 	/*
5215 	 *   Stop there if sync parameters are unchanged.
5216 	 */
5217 	if (tp->sval == sval && tp->wval == wval && tp->uval == uval) return;
5218 	tp->sval = sval;
5219 	tp->wval = wval;
5220 	tp->uval = uval;
5221 
5222 	/*
5223 	 *  Disable extended Sreq/Sack filtering if per < 50.
5224 	 *  Not supported on the C1010.
5225 	 */
5226 	if (per < 50 && !(np->features & FE_C10))
5227 		OUTOFFB (nc_stest2, EXT);
5228 
5229 	/*
5230 	 *  set actual value and sync_status
5231 	 */
5232 	OUTB (nc_sxfer, tp->sval);
5233 	OUTB (nc_scntl3, tp->wval);
5234 
5235 	if (np->features & FE_C10) {
5236 		OUTB (nc_scntl4, tp->uval);
5237 	}
5238 
5239 	/*
5240 	 *  patch ALL busy ccbs of this target.
5241 	 */
5242 	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
5243 		cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
5244 		if (cp->target != target)
5245 			continue;
5246 		cp->phys.select.sel_scntl3 = tp->wval;
5247 		cp->phys.select.sel_sxfer  = tp->sval;
5248 		if (np->features & FE_C10) {
5249 			cp->phys.select.sel_scntl4 = tp->uval;
5250 		}
5251 	}
5252 }
5253 
5254 /*
5255  *  log message for real hard errors
5256  *
5257  *  sym0 targ 0?: ERROR (ds:si) (so-si-sd) (sxfer/scntl3) @ name (dsp:dbc).
5258  *  	      reg: r0 r1 r2 r3 r4 r5 r6 ..... rf.
5259  *
5260  *  exception register:
5261  *  	ds:	dstat
5262  *  	si:	sist
5263  *
5264  *  SCSI bus lines:
5265  *  	so:	control lines as driven by chip.
5266  *  	si:	control lines as seen by chip.
5267  *  	sd:	scsi data lines as seen by chip.
5268  *
5269  *  wide/fastmode:
5270  *  	sxfer:	(see the manual)
5271  *  	scntl3:	(see the manual)
5272  *
5273  *  current script command:
5274  *  	dsp:	script adress (relative to start of script).
5275  *  	dbc:	first word of script command.
5276  *
5277  *  First 24 register of the chip:
5278  *  	r0..rf
5279  */
5280 static void sym_log_hard_error(hcb_p np, u_short sist, u_char dstat)
5281 {
5282 	u32	dsp;
5283 	int	script_ofs;
5284 	int	script_size;
5285 	char	*script_name;
5286 	u_char	*script_base;
5287 	int	i;
5288 
5289 	dsp	= INL (nc_dsp);
5290 
5291 	if (dsp > np->script_ba &&
5292 	    dsp <= np->script_ba + sizeof(struct sym_scr)) {
5293 		script_ofs	= dsp - np->script_ba;
5294 		script_size	= sizeof(struct sym_scr);
5295 		script_base	= (u_char *) np->script0;
5296 		script_name	= "script";
5297 	}
5298 	else if (np->scripth_ba < dsp &&
5299 		 dsp <= np->scripth_ba + sizeof(struct sym_scrh)) {
5300 		script_ofs	= dsp - np->scripth_ba;
5301 		script_size	= sizeof(struct sym_scrh);
5302 		script_base	= (u_char *) np->scripth0;
5303 		script_name	= "scripth";
5304 	} else {
5305 		script_ofs	= dsp;
5306 		script_size	= 0;
5307 		script_base	= 0;
5308 		script_name	= "mem";
5309 	}
5310 
5311 	printf ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x) @ (%s %x:%08x).\n",
5312 		sym_name (np), (unsigned)INB (nc_sdid)&0x0f, dstat, sist,
5313 		(unsigned)INB (nc_socl), (unsigned)INB (nc_sbcl),
5314 		(unsigned)INB (nc_sbdl), (unsigned)INB (nc_sxfer),
5315 		(unsigned)INB (nc_scntl3), script_name, script_ofs,
5316 		(unsigned)INL (nc_dbc));
5317 
5318 	if (((script_ofs & 3) == 0) &&
5319 	    (unsigned)script_ofs < script_size) {
5320 		printf ("%s: script cmd = %08x\n", sym_name(np),
5321 			scr_to_cpu((int) *(u32 *)(script_base + script_ofs)));
5322 	}
5323 
5324         printf ("%s: regdump:", sym_name(np));
5325         for (i=0; i<24;i++)
5326             printf (" %02x", (unsigned)INB_OFF(i));
5327         printf (".\n");
5328 
5329 	/*
5330 	 *  PCI BUS error, read the PCI ststus register.
5331 	 */
5332 	if (dstat & (MDPE|BF)) {
5333 		u_short pci_sts;
5334 #ifdef FreeBSD_4_Bus
5335 		pci_sts = pci_read_config(np->device, PCIR_STATUS, 2);
5336 #else
5337 		pci_sts = pci_cfgread(np->pci_tag, PCIR_STATUS, 2);
5338 #endif
5339 		if (pci_sts & 0xf900) {
5340 #ifdef FreeBSD_4_Bus
5341 			pci_write_config(np->device, PCIR_STATUS, pci_sts, 2);
5342 #else
5343 			pci_cfgwrite(np->pci_tag, PCIR_STATUS, pci_sts, 2);
5344 #endif
5345 			printf("%s: PCI STATUS = 0x%04x\n",
5346 				sym_name(np), pci_sts & 0xf900);
5347 		}
5348 	}
5349 }
5350 
5351 /*
5352  *  chip interrupt handler
5353  *
5354  *  In normal situations, interrupt conditions occur one at
5355  *  a time. But when something bad happens on the SCSI BUS,
5356  *  the chip may raise several interrupt flags before
5357  *  stopping and interrupting the CPU. The additionnal
5358  *  interrupt flags are stacked in some extra registers
5359  *  after the SIP and/or DIP flag has been raised in the
5360  *  ISTAT. After the CPU has read the interrupt condition
5361  *  flag from SIST or DSTAT, the chip unstacks the other
5362  *  interrupt flags and sets the corresponding bits in
5363  *  SIST or DSTAT. Since the chip starts stacking once the
5364  *  SIP or DIP flag is set, there is a small window of time
5365  *  where the stacking does not occur.
5366  *
5367  *  Typically, multiple interrupt conditions may happen in
5368  *  the following situations:
5369  *
5370  *  - SCSI parity error + Phase mismatch  (PAR|MA)
5371  *    When an parity error is detected in input phase
5372  *    and the device switches to msg-in phase inside a
5373  *    block MOV.
5374  *  - SCSI parity error + Unexpected disconnect (PAR|UDC)
5375  *    When a stupid device does not want to handle the
5376  *    recovery of an SCSI parity error.
5377  *  - Some combinations of STO, PAR, UDC, ...
5378  *    When using non compliant SCSI stuff, when user is
5379  *    doing non compliant hot tampering on the BUS, when
5380  *    something really bad happens to a device, etc ...
5381  *
5382  *  The heuristic suggested by SYMBIOS to handle
5383  *  multiple interrupts is to try unstacking all
5384  *  interrupts conditions and to handle them on some
5385  *  priority based on error severity.
5386  *  This will work when the unstacking has been
5387  *  successful, but we cannot be 100 % sure of that,
5388  *  since the CPU may have been faster to unstack than
5389  *  the chip is able to stack. Hmmm ... But it seems that
5390  *  such a situation is very unlikely to happen.
5391  *
5392  *  If this happen, for example STO caught by the CPU
5393  *  then UDC happenning before the CPU have restarted
5394  *  the SCRIPTS, the driver may wrongly complete the
5395  *  same command on UDC, since the SCRIPTS didn't restart
5396  *  and the DSA still points to the same command.
5397  *  We avoid this situation by setting the DSA to an
5398  *  invalid value when the CCB is completed and before
5399  *  restarting the SCRIPTS.
5400  *
5401  *  Another issue is that we need some section of our
5402  *  recovery procedures to be somehow uninterruptible but
5403  *  the SCRIPTS processor does not provides such a
5404  *  feature. For this reason, we handle recovery preferently
5405  *  from the C code and check against some SCRIPTS critical
5406  *  sections from the C code.
5407  *
5408  *  Hopefully, the interrupt handling of the driver is now
5409  *  able to resist to weird BUS error conditions, but donnot
5410  *  ask me for any guarantee that it will never fail. :-)
5411  *  Use at your own decision and risk.
5412  */
5413 
5414 static void sym_intr1 (hcb_p np)
5415 {
5416 	u_char	istat, istatc;
5417 	u_char	dstat;
5418 	u_short	sist;
5419 
5420 	/*
5421 	 *  interrupt on the fly ?
5422 	 */
5423 	istat = INB (nc_istat);
5424 	if (istat & INTF) {
5425 		OUTB (nc_istat, (istat & SIGP) | INTF | np->istat_sem);
5426 #if 1
5427 		istat = INB (nc_istat);		/* DUMMY READ */
5428 #endif
5429 		if (DEBUG_FLAGS & DEBUG_TINY) printf ("F ");
5430 		(void)sym_wakeup_done (np);
5431 	};
5432 
5433 	if (!(istat & (SIP|DIP)))
5434 		return;
5435 
5436 #if 0	/* We should never get this one */
5437 	if (istat & CABRT)
5438 		OUTB (nc_istat, CABRT);
5439 #endif
5440 
5441 	/*
5442 	 *  PAR and MA interrupts may occur at the same time,
5443 	 *  and we need to know of both in order to handle
5444 	 *  this situation properly. We try to unstack SCSI
5445 	 *  interrupts for that reason. BTW, I dislike a LOT
5446 	 *  such a loop inside the interrupt routine.
5447 	 *  Even if DMA interrupt stacking is very unlikely to
5448 	 *  happen, we also try unstacking these ones, since
5449 	 *  this has no performance impact.
5450 	 */
5451 	sist	= 0;
5452 	dstat	= 0;
5453 	istatc	= istat;
5454 	do {
5455 		if (istatc & SIP)
5456 			sist  |= INW (nc_sist);
5457 		if (istatc & DIP)
5458 			dstat |= INB (nc_dstat);
5459 		istatc = INB (nc_istat);
5460 		istat |= istatc;
5461 	} while (istatc & (SIP|DIP));
5462 
5463 	if (DEBUG_FLAGS & DEBUG_TINY)
5464 		printf ("<%d|%x:%x|%x:%x>",
5465 			(int)INB(nc_scr0),
5466 			dstat,sist,
5467 			(unsigned)INL(nc_dsp),
5468 			(unsigned)INL(nc_dbc));
5469 	/*
5470 	 *  First, interrupts we want to service cleanly.
5471 	 *
5472 	 *  Phase mismatch (MA) is the most frequent interrupt
5473 	 *  for chip earlier than the 896 and so we have to service
5474 	 *  it as quickly as possible.
5475 	 *  A SCSI parity error (PAR) may be combined with a phase
5476 	 *  mismatch condition (MA).
5477 	 *  Programmed interrupts (SIR) are used to call the C code
5478 	 *  from SCRIPTS.
5479 	 *  The single step interrupt (SSI) is not used in this
5480 	 *  driver.
5481 	 */
5482 	if (!(sist  & (STO|GEN|HTH|SGE|UDC|SBMC|RST)) &&
5483 	    !(dstat & (MDPE|BF|ABRT|IID))) {
5484 		if	(sist & PAR)	sym_int_par (np, sist);
5485 		else if (sist & MA)	sym_int_ma (np);
5486 		else if (dstat & SIR)	sym_int_sir (np);
5487 		else if (dstat & SSI)	OUTONB (nc_dcntl, (STD|NOCOM));
5488 		else			goto unknown_int;
5489 		return;
5490 	};
5491 
5492 	/*
5493 	 *  Now, interrupts that donnot happen in normal
5494 	 *  situations and that we may need to recover from.
5495 	 *
5496 	 *  On SCSI RESET (RST), we reset everything.
5497 	 *  On SCSI BUS MODE CHANGE (SBMC), we complete all
5498 	 *  active CCBs with RESET status, prepare all devices
5499 	 *  for negotiating again and restart the SCRIPTS.
5500 	 *  On STO and UDC, we complete the CCB with the corres-
5501 	 *  ponding status and restart the SCRIPTS.
5502 	 */
5503 	if (sist & RST) {
5504 		xpt_print_path(np->path);
5505 		printf("SCSI BUS reset detected.\n");
5506 		sym_init (np, 1);
5507 		return;
5508 	};
5509 
5510 	OUTB (nc_ctest3, np->rv_ctest3 | CLF);	/* clear dma fifo  */
5511 	OUTB (nc_stest3, TE|CSF);		/* clear scsi fifo */
5512 
5513 	if (!(sist  & (GEN|HTH|SGE)) &&
5514 	    !(dstat & (MDPE|BF|ABRT|IID))) {
5515 		if	(sist & SBMC)	sym_int_sbmc (np);
5516 		else if (sist & STO)	sym_int_sto (np);
5517 		else if (sist & UDC)	sym_int_udc (np);
5518 		else			goto unknown_int;
5519 		return;
5520 	};
5521 
5522 	/*
5523 	 *  Now, interrupts we are not able to recover cleanly.
5524 	 *
5525 	 *  Log message for hard errors.
5526 	 *  Reset everything.
5527 	 */
5528 
5529 	sym_log_hard_error(np, sist, dstat);
5530 
5531 	if ((sist & (GEN|HTH|SGE)) ||
5532 		(dstat & (MDPE|BF|ABRT|IID))) {
5533 		sym_start_reset(np);
5534 		return;
5535 	};
5536 
5537 unknown_int:
5538 	/*
5539 	 *  We just miss the cause of the interrupt. :(
5540 	 *  Print a message. The timeout will do the real work.
5541 	 */
5542 	printf(	"%s: unknown interrupt(s) ignored, "
5543 		"ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n",
5544 		sym_name(np), istat, dstat, sist);
5545 }
5546 
5547 static void sym_intr(void *arg)
5548 {
5549 	if (DEBUG_FLAGS & DEBUG_TINY) printf ("[");
5550 	sym_intr1((hcb_p) arg);
5551 	if (DEBUG_FLAGS & DEBUG_TINY) printf ("]");
5552 	return;
5553 }
5554 
5555 static void sym_poll(struct cam_sim *sim)
5556 {
5557 	int s = splcam();
5558 	sym_intr(cam_sim_softc(sim));
5559 	splx(s);
5560 }
5561 
5562 
5563 /*
5564  *  generic recovery from scsi interrupt
5565  *
5566  *  The doc says that when the chip gets an SCSI interrupt,
5567  *  it tries to stop in an orderly fashion, by completing
5568  *  an instruction fetch that had started or by flushing
5569  *  the DMA fifo for a write to memory that was executing.
5570  *  Such a fashion is not enough to know if the instruction
5571  *  that was just before the current DSP value has been
5572  *  executed or not.
5573  *
5574  *  There are some small SCRIPTS sections that deal with
5575  *  the start queue and the done queue that may break any
5576  *  assomption from the C code if we are interrupted
5577  *  inside, so we reset if this happens. Btw, since these
5578  *  SCRIPTS sections are executed while the SCRIPTS hasn't
5579  *  started SCSI operations, it is very unlikely to happen.
5580  *
5581  *  All the driver data structures are supposed to be
5582  *  allocated from the same 4 GB memory window, so there
5583  *  is a 1 to 1 relationship between DSA and driver data
5584  *  structures. Since we are careful :) to invalidate the
5585  *  DSA when we complete a command or when the SCRIPTS
5586  *  pushes a DSA into a queue, we can trust it when it
5587  *  points to a CCB.
5588  */
5589 static void sym_recover_scsi_int (hcb_p np, u_char hsts)
5590 {
5591 	u32	dsp	= INL (nc_dsp);
5592 	u32	dsa	= INL (nc_dsa);
5593 	ccb_p cp	= sym_ccb_from_dsa(np, dsa);
5594 
5595 	/*
5596 	 *  If we haven't been interrupted inside the SCRIPTS
5597 	 *  critical pathes, we can safely restart the SCRIPTS
5598 	 *  and trust the DSA value if it matches a CCB.
5599 	 */
5600 	if ((!(dsp > SCRIPT_BA (np, getjob_begin) &&
5601 	       dsp < SCRIPT_BA (np, getjob_end) + 1)) &&
5602 	    (!(dsp > SCRIPT_BA (np, ungetjob) &&
5603 	       dsp < SCRIPT_BA (np, reselect) + 1)) &&
5604 	    (!(dsp > SCRIPTH_BA (np, sel_for_abort) &&
5605 	       dsp < SCRIPTH_BA (np, sel_for_abort_1) + 1)) &&
5606 	    (!(dsp > SCRIPT_BA (np, done) &&
5607 	       dsp < SCRIPT_BA (np, done_end) + 1))) {
5608 		OUTB (nc_ctest3, np->rv_ctest3 | CLF);	/* clear dma fifo  */
5609 		OUTB (nc_stest3, TE|CSF);		/* clear scsi fifo */
5610 		/*
5611 		 *  If we have a CCB, let the SCRIPTS call us back for
5612 		 *  the handling of the error with SCRATCHA filled with
5613 		 *  STARTPOS. This way, we will be able to freeze the
5614 		 *  device queue and requeue awaiting IOs.
5615 		 */
5616 		if (cp) {
5617 			cp->host_status = hsts;
5618 			OUTL (nc_dsp, SCRIPT_BA (np, complete_error));
5619 		}
5620 		/*
5621 		 *  Otherwise just restart the SCRIPTS.
5622 		 */
5623 		else {
5624 			OUTL (nc_dsa, 0xffffff);
5625 			OUTL (nc_dsp, SCRIPT_BA (np, start));
5626 		}
5627 	}
5628 	else
5629 		goto reset_all;
5630 
5631 	return;
5632 
5633 reset_all:
5634 	sym_start_reset(np);
5635 }
5636 
5637 /*
5638  *  chip exception handler for selection timeout
5639  */
5640 void sym_int_sto (hcb_p np)
5641 {
5642 	u32 dsp	= INL (nc_dsp);
5643 
5644 	if (DEBUG_FLAGS & DEBUG_TINY) printf ("T");
5645 
5646 	if (dsp == SCRIPT_BA (np, wf_sel_done) + 8)
5647 		sym_recover_scsi_int(np, HS_SEL_TIMEOUT);
5648 	else
5649 		sym_start_reset(np);
5650 }
5651 
5652 /*
5653  *  chip exception handler for unexpected disconnect
5654  */
5655 void sym_int_udc (hcb_p np)
5656 {
5657 	printf ("%s: unexpected disconnect\n", sym_name(np));
5658 	sym_recover_scsi_int(np, HS_UNEXPECTED);
5659 }
5660 
5661 /*
5662  *  chip exception handler for SCSI bus mode change
5663  *
5664  *  spi2-r12 11.2.3 says a transceiver mode change must
5665  *  generate a reset event and a device that detects a reset
5666  *  event shall initiate a hard reset. It says also that a
5667  *  device that detects a mode change shall set data transfer
5668  *  mode to eight bit asynchronous, etc...
5669  *  So, just reinitializing all except chip should be enough.
5670  */
5671 static void sym_int_sbmc (hcb_p np)
5672 {
5673 	u_char scsi_mode = INB (nc_stest4) & SMODE;
5674 
5675 	/*
5676 	 *  Notify user.
5677 	 */
5678 	xpt_print_path(np->path);
5679 	printf("SCSI BUS mode change from %s to %s.\n",
5680 		sym_scsi_bus_mode(np->scsi_mode), sym_scsi_bus_mode(scsi_mode));
5681 
5682 	/*
5683 	 *  Should suspend command processing for a few seconds and
5684 	 *  reinitialize all except the chip.
5685 	 */
5686 	sym_init (np, 2);
5687 }
5688 
5689 /*
5690  *  chip exception handler for SCSI parity error.
5691  *
5692  *  When the chip detects a SCSI parity error and is
5693  *  currently executing a (CH)MOV instruction, it does
5694  *  not interrupt immediately, but tries to finish the
5695  *  transfer of the current scatter entry before
5696  *  interrupting. The following situations may occur:
5697  *
5698  *  - The complete scatter entry has been transferred
5699  *    without the device having changed phase.
5700  *    The chip will then interrupt with the DSP pointing
5701  *    to the instruction that follows the MOV.
5702  *
5703  *  - A phase mismatch occurs before the MOV finished
5704  *    and phase errors are to be handled by the C code.
5705  *    The chip will then interrupt with both PAR and MA
5706  *    conditions set.
5707  *
5708  *  - A phase mismatch occurs before the MOV finished and
5709  *    phase errors are to be handled by SCRIPTS.
5710  *    The chip will load the DSP with the phase mismatch
5711  *    JUMP address and interrupt the host processor.
5712  */
5713 static void sym_int_par (hcb_p np, u_short sist)
5714 {
5715 	u_char	hsts	= INB (HS_PRT);
5716 	u32	dsp	= INL (nc_dsp);
5717 	u32	dbc	= INL (nc_dbc);
5718 	u32	dsa	= INL (nc_dsa);
5719 	u_char	sbcl	= INB (nc_sbcl);
5720 	u_char	cmd	= dbc >> 24;
5721 	int phase	= cmd & 7;
5722 	ccb_p	cp	= sym_ccb_from_dsa(np, dsa);
5723 
5724 	printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n",
5725 		sym_name(np), hsts, dbc, sbcl);
5726 
5727 	/*
5728 	 *  Check that the chip is connected to the SCSI BUS.
5729 	 */
5730 	if (!(INB (nc_scntl1) & ISCON)) {
5731 		sym_recover_scsi_int(np, HS_UNEXPECTED);
5732 		return;
5733 	}
5734 
5735 	/*
5736 	 *  If the nexus is not clearly identified, reset the bus.
5737 	 *  We will try to do better later.
5738 	 */
5739 	if (!cp)
5740 		goto reset_all;
5741 
5742 	/*
5743 	 *  Check instruction was a MOV, direction was INPUT and
5744 	 *  ATN is asserted.
5745 	 */
5746 	if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8))
5747 		goto reset_all;
5748 
5749 	/*
5750 	 *  Keep track of the parity error.
5751 	 */
5752 	OUTONB (HF_PRT, HF_EXT_ERR);
5753 	cp->xerr_status |= XE_PARITY_ERR;
5754 
5755 	/*
5756 	 *  Prepare the message to send to the device.
5757 	 */
5758 	np->msgout[0] = (phase == 7) ? M_PARITY : M_ID_ERROR;
5759 
5760 	/*
5761 	 *  If the old phase was DATA IN phase, we have to deal with
5762 	 *  the 3 situations described above.
5763 	 *  For other input phases (MSG IN and STATUS), the device
5764 	 *  must resend the whole thing that failed parity checking
5765 	 *  or signal error. So, jumping to dispatcher should be OK.
5766 	 */
5767 	if (phase == 1) {
5768 		/* Phase mismatch handled by SCRIPTS */
5769 		if (dsp == SCRIPTH_BA (np, pm_handle))
5770 			OUTL (nc_dsp, dsp);
5771 		/* Phase mismatch handled by the C code */
5772 		else if (sist & MA)
5773 			sym_int_ma (np);
5774 		/* No phase mismatch occurred */
5775 		else {
5776 			OUTL (nc_temp, dsp);
5777 			OUTL (nc_dsp, SCRIPT_BA (np, dispatch));
5778 		}
5779 	}
5780 	else
5781 		OUTL (nc_dsp, SCRIPT_BA (np, clrack));
5782 	return;
5783 
5784 reset_all:
5785 	sym_start_reset(np);
5786 	return;
5787 }
5788 
5789 /*
5790  *  chip exception handler for phase errors.
5791  *
5792  *  We have to construct a new transfer descriptor,
5793  *  to transfer the rest of the current block.
5794  */
5795 static void sym_int_ma (hcb_p np)
5796 {
5797 	u32	dbc;
5798 	u32	rest;
5799 	u32	dsp;
5800 	u32	dsa;
5801 	u32	nxtdsp;
5802 	u32	*vdsp;
5803 	u32	oadr, olen;
5804 	u32	*tblp;
5805         u32	newcmd;
5806 	u_int	delta;
5807 	u_char	cmd;
5808 	u_char	hflags, hflags0;
5809 	struct	sym_pmc *pm;
5810 	ccb_p	cp;
5811 
5812 	dsp	= INL (nc_dsp);
5813 	dbc	= INL (nc_dbc);
5814 	dsa	= INL (nc_dsa);
5815 
5816 	cmd	= dbc >> 24;
5817 	rest	= dbc & 0xffffff;
5818 	delta	= 0;
5819 
5820 	/*
5821 	 *  locate matching cp if any.
5822 	 */
5823 	cp = sym_ccb_from_dsa(np, dsa);
5824 
5825 	/*
5826 	 *  Donnot take into account dma fifo and various buffers in
5827 	 *  INPUT phase since the chip flushes everything before
5828 	 *  raising the MA interrupt for interrupted INPUT phases.
5829 	 *  For DATA IN phase, we will check for the SWIDE later.
5830 	 */
5831 	if ((cmd & 7) != 1) {
5832 		u_char ss0, ss2;
5833 
5834 		if (np->features & FE_DFBC)
5835 			delta = INW (nc_dfbc);
5836 		else {
5837 			u32 dfifo;
5838 
5839 			/*
5840 			 * Read DFIFO, CTEST[4-6] using 1 PCI bus ownership.
5841 			 */
5842 			dfifo = INL(nc_dfifo);
5843 
5844 			/*
5845 			 *  Calculate remaining bytes in DMA fifo.
5846 			 *  (CTEST5 = dfifo >> 16)
5847 			 */
5848 			if (dfifo & (DFS << 16))
5849 				delta = ((((dfifo >> 8) & 0x300) |
5850 				          (dfifo & 0xff)) - rest) & 0x3ff;
5851 			else
5852 				delta = ((dfifo & 0xff) - rest) & 0x7f;
5853 		}
5854 
5855 		/*
5856 		 *  The data in the dma fifo has not been transfered to
5857 		 *  the target -> add the amount to the rest
5858 		 *  and clear the data.
5859 		 *  Check the sstat2 register in case of wide transfer.
5860 		 */
5861 		rest += delta;
5862 		ss0  = INB (nc_sstat0);
5863 		if (ss0 & OLF) rest++;
5864 		if (!(np->features & FE_C10))
5865 			if (ss0 & ORF) rest++;
5866 		if (cp && (cp->phys.select.sel_scntl3 & EWS)) {
5867 			ss2 = INB (nc_sstat2);
5868 			if (ss2 & OLF1) rest++;
5869 			if (!(np->features & FE_C10))
5870 				if (ss2 & ORF1) rest++;
5871 		};
5872 
5873 		/*
5874 		 *  Clear fifos.
5875 		 */
5876 		OUTB (nc_ctest3, np->rv_ctest3 | CLF);	/* dma fifo  */
5877 		OUTB (nc_stest3, TE|CSF);		/* scsi fifo */
5878 	}
5879 
5880 	/*
5881 	 *  log the information
5882 	 */
5883 	if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE))
5884 		printf ("P%x%x RL=%d D=%d ", cmd&7, INB(nc_sbcl)&7,
5885 			(unsigned) rest, (unsigned) delta);
5886 
5887 	/*
5888 	 *  try to find the interrupted script command,
5889 	 *  and the address at which to continue.
5890 	 */
5891 	vdsp	= 0;
5892 	nxtdsp	= 0;
5893 	if	(dsp >  np->script_ba &&
5894 		 dsp <= np->script_ba + sizeof(struct sym_scr)) {
5895 		vdsp = (u32 *)((char*)np->script0 + (dsp-np->script_ba-8));
5896 		nxtdsp = dsp;
5897 	}
5898 	else if	(dsp >  np->scripth_ba &&
5899 		 dsp <= np->scripth_ba + sizeof(struct sym_scrh)) {
5900 		vdsp = (u32 *)((char*)np->scripth0 + (dsp-np->scripth_ba-8));
5901 		nxtdsp = dsp;
5902 	}
5903 
5904 	/*
5905 	 *  log the information
5906 	 */
5907 	if (DEBUG_FLAGS & DEBUG_PHASE) {
5908 		printf ("\nCP=%p DSP=%x NXT=%x VDSP=%p CMD=%x ",
5909 			cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd);
5910 	};
5911 
5912 	if (!vdsp) {
5913 		printf ("%s: interrupted SCRIPT address not found.\n",
5914 			sym_name (np));
5915 		goto reset_all;
5916 	}
5917 
5918 	if (!cp) {
5919 		printf ("%s: SCSI phase error fixup: CCB already dequeued.\n",
5920 			sym_name (np));
5921 		goto reset_all;
5922 	}
5923 
5924 	/*
5925 	 *  get old startaddress and old length.
5926 	 */
5927 	oadr = scr_to_cpu(vdsp[1]);
5928 
5929 	if (cmd & 0x10) {	/* Table indirect */
5930 		tblp = (u32 *) ((char*) &cp->phys + oadr);
5931 		olen = scr_to_cpu(tblp[0]);
5932 		oadr = scr_to_cpu(tblp[1]);
5933 	} else {
5934 		tblp = (u32 *) 0;
5935 		olen = scr_to_cpu(vdsp[0]) & 0xffffff;
5936 	};
5937 
5938 	if (DEBUG_FLAGS & DEBUG_PHASE) {
5939 		printf ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n",
5940 			(unsigned) (scr_to_cpu(vdsp[0]) >> 24),
5941 			tblp,
5942 			(unsigned) olen,
5943 			(unsigned) oadr);
5944 	};
5945 
5946 	/*
5947 	 *  check cmd against assumed interrupted script command.
5948 	 */
5949 	if (cmd != (scr_to_cpu(vdsp[0]) >> 24)) {
5950 		PRINT_ADDR(cp);
5951 		printf ("internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n",
5952 			(unsigned)cmd, (unsigned)scr_to_cpu(vdsp[0]) >> 24);
5953 
5954 		goto reset_all;
5955 	};
5956 
5957 	/*
5958 	 *  if old phase not dataphase, leave here.
5959 	 */
5960 	if ((cmd & 5) != (cmd & 7)) {
5961 		PRINT_ADDR(cp);
5962 		printf ("phase change %x-%x %d@%08x resid=%d.\n",
5963 			cmd&7, INB(nc_sbcl)&7, (unsigned)olen,
5964 			(unsigned)oadr, (unsigned)rest);
5965 		goto unexpected_phase;
5966 	};
5967 
5968 	/*
5969 	 *  Choose the correct PM save area.
5970 	 *
5971 	 *  Look at the PM_SAVE SCRIPT if you want to understand
5972 	 *  this stuff. The equivalent code is implemented in
5973 	 *  SCRIPTS for the 895A and 896 that are able to handle
5974 	 *  PM from the SCRIPTS processor.
5975 	 */
5976 	hflags0 = INB (HF_PRT);
5977 	hflags = hflags0;
5978 
5979 	if (hflags & (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)) {
5980 		if (hflags & HF_IN_PM0)
5981 			nxtdsp = scr_to_cpu(cp->phys.pm0.ret);
5982 		else if	(hflags & HF_IN_PM1)
5983 			nxtdsp = scr_to_cpu(cp->phys.pm1.ret);
5984 
5985 		if (hflags & HF_DP_SAVED)
5986 			hflags ^= HF_ACT_PM;
5987 	}
5988 
5989 	if (!(hflags & HF_ACT_PM)) {
5990 		pm = &cp->phys.pm0;
5991 		newcmd = SCRIPT_BA(np, pm0_data);
5992 	}
5993 	else {
5994 		pm = &cp->phys.pm1;
5995 		newcmd = SCRIPT_BA(np, pm1_data);
5996 	}
5997 
5998 	hflags &= ~(HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED);
5999 	if (hflags != hflags0)
6000 		OUTB (HF_PRT, hflags);
6001 
6002 	/*
6003 	 *  fillin the phase mismatch context
6004 	 */
6005 	pm->sg.addr = cpu_to_scr(oadr + olen - rest);
6006 	pm->sg.size = cpu_to_scr(rest);
6007 	pm->ret     = cpu_to_scr(nxtdsp);
6008 
6009 	/*
6010 	 *  If we have a SWIDE,
6011 	 *  - prepare the address to write the SWIDE from SCRIPTS,
6012 	 *  - compute the SCRIPTS address to restart from,
6013 	 *  - move current data pointer context by one byte.
6014 	 */
6015 	nxtdsp = SCRIPT_BA (np, dispatch);
6016 	if ((cmd & 7) == 1 && cp && (cp->phys.select.sel_scntl3 & EWS) &&
6017 	    (INB (nc_scntl2) & WSR)) {
6018 		u32 tmp;
6019 
6020 		/*
6021 		 *  Set up the table indirect for the MOVE
6022 		 *  of the residual byte and adjust the data
6023 		 *  pointer context.
6024 		 */
6025 		tmp = scr_to_cpu(pm->sg.addr);
6026 		cp->phys.wresid.addr = cpu_to_scr(tmp);
6027 		pm->sg.addr = cpu_to_scr(tmp + 1);
6028 		tmp = scr_to_cpu(pm->sg.size);
6029 		cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1);
6030 		pm->sg.size = cpu_to_scr(tmp - 1);
6031 
6032 		/*
6033 		 *  If only the residual byte is to be moved,
6034 		 *  no PM context is needed.
6035 		 */
6036 		if ((tmp&0xffffff) == 1)
6037 			newcmd = pm->ret;
6038 
6039 		/*
6040 		 *  Prepare the address of SCRIPTS that will
6041 		 *  move the residual byte to memory.
6042 		 */
6043 		nxtdsp = SCRIPTH_BA (np, wsr_ma_helper);
6044 	}
6045 
6046 	if (DEBUG_FLAGS & DEBUG_PHASE) {
6047 		PRINT_ADDR(cp);
6048 		printf ("PM %x %x %x / %x %x %x.\n",
6049 			hflags0, hflags, newcmd,
6050 			(unsigned)scr_to_cpu(pm->sg.addr),
6051 			(unsigned)scr_to_cpu(pm->sg.size),
6052 			(unsigned)scr_to_cpu(pm->ret));
6053 	}
6054 
6055 	/*
6056 	 *  Restart the SCRIPTS processor.
6057 	 */
6058 	OUTL (nc_temp, newcmd);
6059 	OUTL (nc_dsp,  nxtdsp);
6060 	return;
6061 
6062 	/*
6063 	 *  Unexpected phase changes that occurs when the current phase
6064 	 *  is not a DATA IN or DATA OUT phase are due to error conditions.
6065 	 *  Such event may only happen when the SCRIPTS is using a
6066 	 *  multibyte SCSI MOVE.
6067 	 *
6068 	 *  Phase change		Some possible cause
6069 	 *
6070 	 *  COMMAND  --> MSG IN	SCSI parity error detected by target.
6071 	 *  COMMAND  --> STATUS	Bad command or refused by target.
6072 	 *  MSG OUT  --> MSG IN     Message rejected by target.
6073 	 *  MSG OUT  --> COMMAND    Bogus target that discards extended
6074 	 *  			negotiation messages.
6075 	 *
6076 	 *  The code below does not care of the new phase and so
6077 	 *  trusts the target. Why to annoy it ?
6078 	 *  If the interrupted phase is COMMAND phase, we restart at
6079 	 *  dispatcher.
6080 	 *  If a target does not get all the messages after selection,
6081 	 *  the code assumes blindly that the target discards extended
6082 	 *  messages and clears the negotiation status.
6083 	 *  If the target does not want all our response to negotiation,
6084 	 *  we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids
6085 	 *  bloat for such a should_not_happen situation).
6086 	 *  In all other situation, we reset the BUS.
6087 	 *  Are these assumptions reasonnable ? (Wait and see ...)
6088 	 */
6089 unexpected_phase:
6090 	dsp -= 8;
6091 	nxtdsp = 0;
6092 
6093 	switch (cmd & 7) {
6094 	case 2:	/* COMMAND phase */
6095 		nxtdsp = SCRIPT_BA (np, dispatch);
6096 		break;
6097 #if 0
6098 	case 3:	/* STATUS  phase */
6099 		nxtdsp = SCRIPT_BA (np, dispatch);
6100 		break;
6101 #endif
6102 	case 6:	/* MSG OUT phase */
6103 		/*
6104 		 *  If the device may want to use untagged when we want
6105 		 *  tagged, we prepare an IDENTIFY without disc. granted,
6106 		 *  since we will not be able to handle reselect.
6107 		 *  Otherwise, we just don't care.
6108 		 */
6109 		if	(dsp == SCRIPT_BA (np, send_ident)) {
6110 			if (cp->tag != NO_TAG && olen - rest <= 3) {
6111 				cp->host_status = HS_BUSY;
6112 				np->msgout[0] = M_IDENTIFY | cp->lun;
6113 				nxtdsp = SCRIPTH_BA (np, ident_break_atn);
6114 			}
6115 			else
6116 				nxtdsp = SCRIPTH_BA (np, ident_break);
6117 		}
6118 		else if	(dsp == SCRIPTH_BA (np, send_wdtr) ||
6119 			 dsp == SCRIPTH_BA (np, send_sdtr) ||
6120 			 dsp == SCRIPTH_BA (np, send_ppr)) {
6121 			nxtdsp = SCRIPTH_BA (np, nego_bad_phase);
6122 		}
6123 		break;
6124 #if 0
6125 	case 7:	/* MSG IN  phase */
6126 		nxtdsp = SCRIPT_BA (np, clrack);
6127 		break;
6128 #endif
6129 	}
6130 
6131 	if (nxtdsp) {
6132 		OUTL (nc_dsp, nxtdsp);
6133 		return;
6134 	}
6135 
6136 reset_all:
6137 	sym_start_reset(np);
6138 }
6139 
6140 /*
6141  *  Dequeue from the START queue all CCBs that match
6142  *  a given target/lun/task condition (-1 means all),
6143  *  and move them from the BUSY queue to the COMP queue
6144  *  with CAM_REQUEUE_REQ status condition.
6145  *  This function is used during error handling/recovery.
6146  *  It is called with SCRIPTS not running.
6147  */
6148 static int
6149 sym_dequeue_from_squeue(hcb_p np, int i, int target, int lun, int task)
6150 {
6151 	int j;
6152 	ccb_p cp;
6153 
6154 	/*
6155 	 *  Make sure the starting index is within range.
6156 	 */
6157 	assert((i >= 0) && (i < 2*MAX_QUEUE));
6158 
6159 	/*
6160 	 *  Walk until end of START queue and dequeue every job
6161 	 *  that matches the target/lun/task condition.
6162 	 */
6163 	j = i;
6164 	while (i != np->squeueput) {
6165 		cp = sym_ccb_from_dsa(np, scr_to_cpu(np->squeue[i]));
6166 		assert(cp);
6167 #ifdef SYM_CONF_IARB_SUPPORT
6168 		/* Forget hints for IARB, they may be no longer relevant */
6169 		cp->host_flags &= ~HF_HINT_IARB;
6170 #endif
6171 		if ((target == -1 || cp->target == target) &&
6172 		    (lun    == -1 || cp->lun    == lun)    &&
6173 		    (task   == -1 || cp->tag    == task)) {
6174 			sym_set_cam_status(cp->cam_ccb, CAM_REQUEUE_REQ);
6175 			sym_remque(&cp->link_ccbq);
6176 			sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
6177 		}
6178 		else {
6179 			if (i != j)
6180 				np->squeue[j] = np->squeue[i];
6181 			if ((j += 2) >= MAX_QUEUE*2) j = 0;
6182 		}
6183 		if ((i += 2) >= MAX_QUEUE*2) i = 0;
6184 	}
6185 	if (i != j)		/* Copy back the idle task if needed */
6186 		np->squeue[j] = np->squeue[i];
6187 	np->squeueput = j;	/* Update our current start queue pointer */
6188 
6189 	return (i - j) / 2;
6190 }
6191 
6192 /*
6193  *  Complete all CCBs queued to the COMP queue.
6194  *
6195  *  These CCBs are assumed:
6196  *  - Not to be referenced either by devices or
6197  *    SCRIPTS-related queues and datas.
6198  *  - To have to be completed with an error condition
6199  *    or requeued.
6200  *
6201  *  The device queue freeze count is incremented
6202  *  for each CCB that does not prevent this.
6203  *  This function is called when all CCBs involved
6204  *  in error handling/recovery have been reaped.
6205  */
6206 static void
6207 sym_flush_comp_queue(hcb_p np, int cam_status)
6208 {
6209 	SYM_QUEHEAD *qp;
6210 	ccb_p cp;
6211 
6212 	while ((qp = sym_remque_head(&np->comp_ccbq)) != 0) {
6213 		union ccb *ccb;
6214 		cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
6215 		sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
6216 		/* Leave quiet CCBs waiting for resources */
6217 		if (cp->host_status == HS_WAIT)
6218 			continue;
6219 		ccb = cp->cam_ccb;
6220 		if (cam_status)
6221 			sym_set_cam_status(ccb, cam_status);
6222 		sym_free_ccb(np, cp);
6223 		sym_freeze_cam_ccb(ccb);
6224 		sym_xpt_done(np, ccb);
6225 	}
6226 }
6227 
6228 /*
6229  *  chip handler for bad SCSI status condition
6230  *
6231  *  In case of bad SCSI status, we unqueue all the tasks
6232  *  currently queued to the controller but not yet started
6233  *  and then restart the SCRIPTS processor immediately.
6234  *
6235  *  QUEUE FULL and BUSY conditions are handled the same way.
6236  *  Basically all the not yet started tasks are requeued in
6237  *  device queue and the queue is frozen until a completion.
6238  *
6239  *  For CHECK CONDITION and COMMAND TERMINATED status, we use
6240  *  the CCB of the failed command to prepare a REQUEST SENSE
6241  *  SCSI command and queue it to the controller queue.
6242  *
6243  *  SCRATCHA is assumed to have been loaded with STARTPOS
6244  *  before the SCRIPTS called the C code.
6245  */
6246 static void sym_sir_bad_scsi_status(hcb_p np, int num, ccb_p cp)
6247 {
6248 	tcb_p tp	= &np->target[cp->target];
6249 	u32		startp;
6250 	u_char		s_status = cp->ssss_status;
6251 	u_char		h_flags  = cp->host_flags;
6252 	int		msglen;
6253 	int		nego;
6254 	int		i;
6255 
6256 	/*
6257 	 *  Compute the index of the next job to start from SCRIPTS.
6258 	 */
6259 	i = (INL (nc_scratcha) - np->squeue_ba) / 4;
6260 
6261 	/*
6262 	 *  The last CCB queued used for IARB hint may be
6263 	 *  no longer relevant. Forget it.
6264 	 */
6265 #ifdef SYM_CONF_IARB_SUPPORT
6266 	if (np->last_cp)
6267 		np->last_cp = 0;
6268 #endif
6269 
6270 	/*
6271 	 *  Now deal with the SCSI status.
6272 	 */
6273 	switch(s_status) {
6274 	case S_BUSY:
6275 	case S_QUEUE_FULL:
6276 		if (sym_verbose >= 2) {
6277 			PRINT_ADDR(cp);
6278 			printf (s_status == S_BUSY ? "BUSY" : "QUEUE FULL\n");
6279 		}
6280 	default:	/* S_INT, S_INT_COND_MET, S_CONFLICT */
6281 		sym_complete_error (np, cp);
6282 		break;
6283 	case S_TERMINATED:
6284 	case S_CHECK_COND:
6285 		/*
6286 		 *  If we get an SCSI error when requesting sense, give up.
6287 		 */
6288 		if (h_flags & HF_SENSE) {
6289 			sym_complete_error (np, cp);
6290 			break;
6291 		}
6292 
6293 		/*
6294 		 *  Dequeue all queued CCBs for that device not yet started,
6295 		 *  and restart the SCRIPTS processor immediately.
6296 		 */
6297 		(void) sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);
6298 		OUTL (nc_dsp, SCRIPT_BA (np, start));
6299 
6300  		/*
6301 		 *  Save some info of the actual IO.
6302 		 *  Compute the data residual.
6303 		 */
6304 		cp->sv_scsi_status = cp->ssss_status;
6305 		cp->sv_xerr_status = cp->xerr_status;
6306 		cp->sv_resid = sym_compute_residual(np, cp);
6307 
6308 		/*
6309 		 *  Prepare all needed data structures for
6310 		 *  requesting sense data.
6311 		 */
6312 
6313 		/*
6314 		 *  identify message
6315 		 */
6316 		cp->scsi_smsg2[0] = M_IDENTIFY | cp->lun;
6317 		msglen = 1;
6318 
6319 		/*
6320 		 *  If we are currently using anything different from
6321 		 *  async. 8 bit data transfers with that target,
6322 		 *  start a negotiation, since the device may want
6323 		 *  to report us a UNIT ATTENTION condition due to
6324 		 *  a cause we currently ignore, and we donnot want
6325 		 *  to be stuck with WIDE and/or SYNC data transfer.
6326 		 *
6327 		 *  cp->nego_status is filled by sym_prepare_nego().
6328 		 */
6329 		cp->nego_status = 0;
6330 		nego = 0;
6331 		if	(tp->tinfo.current.options & PPR_OPT_MASK)
6332 			nego = NS_PPR;
6333 		else if	(tp->tinfo.current.width != BUS_8_BIT)
6334 			nego = NS_WIDE;
6335 		else if (tp->tinfo.current.offset != 0)
6336 			nego = NS_SYNC;
6337 		if (nego)
6338 			msglen +=
6339 			sym_prepare_nego (np,cp, nego, &cp->scsi_smsg2[msglen]);
6340 		/*
6341 		 *  Message table indirect structure.
6342 		 */
6343 		cp->phys.smsg.addr	= cpu_to_scr(CCB_BA (cp, scsi_smsg2));
6344 		cp->phys.smsg.size	= cpu_to_scr(msglen);
6345 
6346 		/*
6347 		 *  sense command
6348 		 */
6349 		cp->phys.cmd.addr	= cpu_to_scr(CCB_BA (cp, sensecmd));
6350 		cp->phys.cmd.size	= cpu_to_scr(6);
6351 
6352 		/*
6353 		 *  patch requested size into sense command
6354 		 */
6355 		cp->sensecmd[0]		= 0x03;
6356 		cp->sensecmd[1]		= cp->lun << 5;
6357 		cp->sensecmd[4]		= SYM_SNS_BBUF_LEN;
6358 		cp->data_len		= SYM_SNS_BBUF_LEN;
6359 
6360 		/*
6361 		 *  sense data
6362 		 */
6363 		bzero(cp->sns_bbuf, SYM_SNS_BBUF_LEN);
6364 		cp->phys.sense.addr	= cpu_to_scr(vtobus(cp->sns_bbuf));
6365 		cp->phys.sense.size	= cpu_to_scr(SYM_SNS_BBUF_LEN);
6366 
6367 		/*
6368 		 *  requeue the command.
6369 		 */
6370 		startp = SCRIPTH_BA (np, sdata_in);
6371 
6372 		cp->phys.savep	= cpu_to_scr(startp);
6373 		cp->phys.goalp	= cpu_to_scr(startp + 16);
6374 		cp->phys.lastp	= cpu_to_scr(startp);
6375 		cp->startp	= cpu_to_scr(startp);
6376 
6377 		cp->actualquirks = SYM_QUIRK_AUTOSAVE;
6378 		cp->host_status	= cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
6379 		cp->ssss_status = S_ILLEGAL;
6380 		cp->host_flags	= (HF_SENSE|HF_DATA_IN);
6381 		cp->xerr_status = 0;
6382 		cp->phys.extra_bytes = 0;
6383 
6384 		cp->phys.go.start =
6385 			cpu_to_scr(SCRIPT_BA (np, select));
6386 
6387 		/*
6388 		 *  Requeue the command.
6389 		 */
6390 		sym_put_start_queue(np, cp);
6391 
6392 		/*
6393 		 *  Give back to upper layer everything we have dequeued.
6394 		 */
6395 		sym_flush_comp_queue(np, 0);
6396 		break;
6397 	}
6398 }
6399 
6400 /*
6401  *  After a device has accepted some management message
6402  *  as BUS DEVICE RESET, ABORT TASK, etc ..., or when
6403  *  a device signals a UNIT ATTENTION condition, some
6404  *  tasks are thrown away by the device. We are required
6405  *  to reflect that on our tasks list since the device
6406  *  will never complete these tasks.
6407  *
6408  *  This function move from the BUSY queue to the COMP
6409  *  queue all disconnected CCBs for a given target that
6410  *  match the following criteria:
6411  *  - lun=-1  means any logical UNIT otherwise a given one.
6412  *  - task=-1 means any task, otherwise a given one.
6413  */
6414 static int
6415 sym_clear_tasks(hcb_p np, int cam_status, int target, int lun, int task)
6416 {
6417 	SYM_QUEHEAD qtmp, *qp;
6418 	int i = 0;
6419 	ccb_p cp;
6420 
6421 	/*
6422 	 *  Move the entire BUSY queue to our temporary queue.
6423 	 */
6424 	sym_que_init(&qtmp);
6425 	sym_que_splice(&np->busy_ccbq, &qtmp);
6426 	sym_que_init(&np->busy_ccbq);
6427 
6428 	/*
6429 	 *  Put all CCBs that matches our criteria into
6430 	 *  the COMP queue and put back other ones into
6431 	 *  the BUSY queue.
6432 	 */
6433 	while ((qp = sym_remque_head(&qtmp)) != 0) {
6434 		union ccb *ccb;
6435 		cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
6436 		ccb = cp->cam_ccb;
6437 		if (cp->host_status != HS_DISCONNECT ||
6438 		    cp->target != target	     ||
6439 		    (lun  != -1 && cp->lun != lun)   ||
6440 		    (task != -1 &&
6441 			(cp->tag != NO_TAG && cp->scsi_smsg[2] != task))) {
6442 			sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
6443 			continue;
6444 		}
6445 		sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
6446 
6447 		/* Preserve the software timeout condition */
6448 		if (sym_get_cam_status(ccb) != CAM_CMD_TIMEOUT)
6449 			sym_set_cam_status(ccb, cam_status);
6450 		++i;
6451 #if 0
6452 printf("XXXX TASK @%p CLEARED\n", cp);
6453 #endif
6454 	}
6455 	return i;
6456 }
6457 
6458 /*
6459  *  chip handler for TASKS recovery
6460  *
6461  *  We cannot safely abort a command, while the SCRIPTS
6462  *  processor is running, since we just would be in race
6463  *  with it.
6464  *
6465  *  As long as we have tasks to abort, we keep the SEM
6466  *  bit set in the ISTAT. When this bit is set, the
6467  *  SCRIPTS processor interrupts (SIR_SCRIPT_STOPPED)
6468  *  each time it enters the scheduler.
6469  *
6470  *  If we have to reset a target, clear tasks of a unit,
6471  *  or to perform the abort of a disconnected job, we
6472  *  restart the SCRIPTS for selecting the target. Once
6473  *  selected, the SCRIPTS interrupts (SIR_TARGET_SELECTED).
6474  *  If it loses arbitration, the SCRIPTS will interrupt again
6475  *  the next time it will enter its scheduler, and so on ...
6476  *
6477  *  On SIR_TARGET_SELECTED, we scan for the more
6478  *  appropriate thing to do:
6479  *
6480  *  - If nothing, we just sent a M_ABORT message to the
6481  *    target to get rid of the useless SCSI bus ownership.
6482  *    According to the specs, no tasks shall be affected.
6483  *  - If the target is to be reset, we send it a M_RESET
6484  *    message.
6485  *  - If a logical UNIT is to be cleared , we send the
6486  *    IDENTIFY(lun) + M_ABORT.
6487  *  - If an untagged task is to be aborted, we send the
6488  *    IDENTIFY(lun) + M_ABORT.
6489  *  - If a tagged task is to be aborted, we send the
6490  *    IDENTIFY(lun) + task attributes + M_ABORT_TAG.
6491  *
6492  *  Once our 'kiss of death' :) message has been accepted
6493  *  by the target, the SCRIPTS interrupts again
6494  *  (SIR_ABORT_SENT). On this interrupt, we complete
6495  *  all the CCBs that should have been aborted by the
6496  *  target according to our message.
6497  */
6498 static void sym_sir_task_recovery(hcb_p np, int num)
6499 {
6500 	SYM_QUEHEAD *qp;
6501 	ccb_p cp;
6502 	tcb_p tp;
6503 	int target=-1, lun=-1, task;
6504 	int i, k;
6505 
6506 	switch(num) {
6507 	/*
6508 	 *  The SCRIPTS processor stopped before starting
6509 	 *  the next command in order to allow us to perform
6510 	 *  some task recovery.
6511 	 */
6512 	case SIR_SCRIPT_STOPPED:
6513 		/*
6514 		 *  Do we have any target to reset or unit to clear ?
6515 		 */
6516 		for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
6517 			tp = &np->target[i];
6518 			if (tp->to_reset ||
6519 			    (tp->lun0p && tp->lun0p->to_clear)) {
6520 				target = i;
6521 				break;
6522 			}
6523 			if (!tp->lunmp)
6524 				continue;
6525 			for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
6526 				if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
6527 					target	= i;
6528 					break;
6529 				}
6530 			}
6531 			if (target != -1)
6532 				break;
6533 		}
6534 
6535 		/*
6536 		 *  If not, walk the busy queue for any
6537 		 *  disconnected CCB to be aborted.
6538 		 */
6539 		if (target == -1) {
6540 			FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
6541 				cp = sym_que_entry(qp,struct sym_ccb,link_ccbq);
6542 				if (cp->host_status != HS_DISCONNECT)
6543 					continue;
6544 				if (cp->to_abort) {
6545 					target = cp->target;
6546 					break;
6547 				}
6548 			}
6549 		}
6550 
6551 		/*
6552 		 *  If some target is to be selected,
6553 		 *  prepare and start the selection.
6554 		 */
6555 		if (target != -1) {
6556 			tp = &np->target[target];
6557 			np->abrt_sel.sel_id	= target;
6558 			np->abrt_sel.sel_scntl3 = tp->wval;
6559 			np->abrt_sel.sel_sxfer  = tp->sval;
6560 			OUTL(nc_dsa, np->hcb_ba);
6561 			OUTL (nc_dsp, SCRIPTH_BA (np, sel_for_abort));
6562 			return;
6563 		}
6564 
6565 		/*
6566 		 *  Now look for a CCB to abort that haven't started yet.
6567 		 *  Btw, the SCRIPTS processor is still stopped, so
6568 		 *  we are not in race.
6569 		 */
6570 		i = 0;
6571 		cp = 0;
6572 		FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
6573 			cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
6574 			if (cp->host_status != HS_BUSY &&
6575 			    cp->host_status != HS_NEGOTIATE)
6576 				continue;
6577 			if (!cp->to_abort)
6578 				continue;
6579 #ifdef SYM_CONF_IARB_SUPPORT
6580 			/*
6581 			 *    If we are using IMMEDIATE ARBITRATION, we donnot
6582 			 *    want to cancel the last queued CCB, since the
6583 			 *    SCRIPTS may have anticipated the selection.
6584 			 */
6585 			if (cp == np->last_cp) {
6586 				cp->to_abort = 0;
6587 				continue;
6588 			}
6589 #endif
6590 			i = 1;	/* Means we have found some */
6591 			break;
6592 		}
6593 		if (!i) {
6594 			/*
6595 			 *  We are done, so we donnot need
6596 			 *  to synchronize with the SCRIPTS anylonger.
6597 			 *  Remove the SEM flag from the ISTAT.
6598 			 */
6599 			np->istat_sem = 0;
6600 			OUTB (nc_istat, SIGP);
6601 			break;
6602 		}
6603 		/*
6604 		 *  Compute index of next position in the start
6605 		 *  queue the SCRIPTS intends to start and dequeue
6606 		 *  all CCBs for that device that haven't been started.
6607 		 */
6608 		i = (INL (nc_scratcha) - np->squeue_ba) / 4;
6609 		i = sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);
6610 
6611 		/*
6612 		 *  Make sure at least our IO to abort has been dequeued.
6613 		 */
6614 		assert(i && sym_get_cam_status(cp->cam_ccb) == CAM_REQUEUE_REQ);
6615 
6616 		/*
6617 		 *  Keep track in cam status of the reason of the abort.
6618 		 */
6619 		if (cp->to_abort == 2)
6620 			sym_set_cam_status(cp->cam_ccb, CAM_CMD_TIMEOUT);
6621 		else
6622 			sym_set_cam_status(cp->cam_ccb, CAM_REQ_ABORTED);
6623 
6624 		/*
6625 		 *  Complete with error everything that we have dequeued.
6626 	 	 */
6627 		sym_flush_comp_queue(np, 0);
6628 		break;
6629 	/*
6630 	 *  The SCRIPTS processor has selected a target
6631 	 *  we may have some manual recovery to perform for.
6632 	 */
6633 	case SIR_TARGET_SELECTED:
6634 		target = (INB (nc_sdid) & 0xf);
6635 		tp = &np->target[target];
6636 
6637 		np->abrt_tbl.addr = vtobus(np->abrt_msg);
6638 
6639 		/*
6640 		 *  If the target is to be reset, prepare a
6641 		 *  M_RESET message and clear the to_reset flag
6642 		 *  since we donnot expect this operation to fail.
6643 		 */
6644 		if (tp->to_reset) {
6645 			np->abrt_msg[0] = M_RESET;
6646 			np->abrt_tbl.size = 1;
6647 			tp->to_reset = 0;
6648 			break;
6649 		}
6650 
6651 		/*
6652 		 *  Otherwise, look for some logical unit to be cleared.
6653 		 */
6654 		if (tp->lun0p && tp->lun0p->to_clear)
6655 			lun = 0;
6656 		else if (tp->lunmp) {
6657 			for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
6658 				if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
6659 					lun = k;
6660 					break;
6661 				}
6662 			}
6663 		}
6664 
6665 		/*
6666 		 *  If a logical unit is to be cleared, prepare
6667 		 *  an IDENTIFY(lun) + ABORT MESSAGE.
6668 		 */
6669 		if (lun != -1) {
6670 			lcb_p lp = sym_lp(np, tp, lun);
6671 			lp->to_clear = 0; /* We donnot expect to fail here */
6672 			np->abrt_msg[0] = M_IDENTIFY | lun;
6673 			np->abrt_msg[1] = M_ABORT;
6674 			np->abrt_tbl.size = 2;
6675 			break;
6676 		}
6677 
6678 		/*
6679 		 *  Otherwise, look for some disconnected job to
6680 		 *  abort for this target.
6681 		 */
6682 		i = 0;
6683 		cp = 0;
6684 		FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
6685 			cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
6686 			if (cp->host_status != HS_DISCONNECT)
6687 				continue;
6688 			if (cp->target != target)
6689 				continue;
6690 			if (!cp->to_abort)
6691 				continue;
6692 			i = 1;	/* Means we have some */
6693 			break;
6694 		}
6695 
6696 		/*
6697 		 *  If we have none, probably since the device has
6698 		 *  completed the command before we won abitration,
6699 		 *  send a M_ABORT message without IDENTIFY.
6700 		 *  According to the specs, the device must just
6701 		 *  disconnect the BUS and not abort any task.
6702 		 */
6703 		if (!i) {
6704 			np->abrt_msg[0] = M_ABORT;
6705 			np->abrt_tbl.size = 1;
6706 			break;
6707 		}
6708 
6709 		/*
6710 		 *  We have some task to abort.
6711 		 *  Set the IDENTIFY(lun)
6712 		 */
6713 		np->abrt_msg[0] = M_IDENTIFY | cp->lun;
6714 
6715 		/*
6716 		 *  If we want to abort an untagged command, we
6717 		 *  will send a IDENTIFY + M_ABORT.
6718 		 *  Otherwise (tagged command), we will send
6719 		 *  a IDENTITFY + task attributes + ABORT TAG.
6720 		 */
6721 		if (cp->tag == NO_TAG) {
6722 			np->abrt_msg[1] = M_ABORT;
6723 			np->abrt_tbl.size = 2;
6724 		}
6725 		else {
6726 			np->abrt_msg[1] = cp->scsi_smsg[1];
6727 			np->abrt_msg[2] = cp->scsi_smsg[2];
6728 			np->abrt_msg[3] = M_ABORT_TAG;
6729 			np->abrt_tbl.size = 4;
6730 		}
6731 		/*
6732 		 *  Keep track of software timeout condition, since the
6733 		 *  peripheral driver may not count retries on abort
6734 		 *  conditions not due to timeout.
6735 		 */
6736 		if (cp->to_abort == 2)
6737 			sym_set_cam_status(cp->cam_ccb, CAM_CMD_TIMEOUT);
6738 		cp->to_abort = 0; /* We donnot expect to fail here */
6739 		break;
6740 
6741 	/*
6742 	 *  The target has accepted our message and switched
6743 	 *  to BUS FREE phase as we expected.
6744 	 */
6745 	case SIR_ABORT_SENT:
6746 		target = (INB (nc_sdid) & 0xf);
6747 		tp = &np->target[target];
6748 
6749 		/*
6750 		**  If we didn't abort anything, leave here.
6751 		*/
6752 		if (np->abrt_msg[0] == M_ABORT)
6753 			break;
6754 
6755 		/*
6756 		 *  If we sent a M_RESET, then a hardware reset has
6757 		 *  been performed by the target.
6758 		 *  - Reset everything to async 8 bit
6759 		 *  - Tell ourself to negotiate next time :-)
6760 		 *  - Prepare to clear all disconnected CCBs for
6761 		 *    this target from our task list (lun=task=-1)
6762 		 */
6763 		lun = -1;
6764 		task = -1;
6765 		if (np->abrt_msg[0] == M_RESET) {
6766 			tp->sval = 0;
6767 			tp->wval = np->rv_scntl3;
6768 			tp->uval = 0;
6769 			tp->tinfo.current.period = 0;
6770 			tp->tinfo.current.offset = 0;
6771 			tp->tinfo.current.width  = BUS_8_BIT;
6772 			tp->tinfo.current.options = 0;
6773 		}
6774 
6775 		/*
6776 		 *  Otherwise, check for the LUN and TASK(s)
6777 		 *  concerned by the cancelation.
6778 		 *  If it is not ABORT_TAG then it is CLEAR_QUEUE
6779 		 *  or an ABORT message :-)
6780 		 */
6781 		else {
6782 			lun = np->abrt_msg[0] & 0x3f;
6783 			if (np->abrt_msg[1] == M_ABORT_TAG)
6784 				task = np->abrt_msg[2];
6785 		}
6786 
6787 		/*
6788 		 *  Complete all the CCBs the device should have
6789 		 *  aborted due to our 'kiss of death' message.
6790 		 */
6791 		i = (INL (nc_scratcha) - np->squeue_ba) / 4;
6792 		(void) sym_dequeue_from_squeue(np, i, target, lun, -1);
6793 		(void) sym_clear_tasks(np, CAM_REQ_ABORTED, target, lun, task);
6794 		sym_flush_comp_queue(np, 0);
6795 
6796 		/*
6797 		 *  If we sent a BDR, make uper layer aware of that.
6798 		 */
6799 		if (np->abrt_msg[0] == M_RESET)
6800 			xpt_async(AC_SENT_BDR, np->path, NULL);
6801 		break;
6802 	}
6803 
6804 	/*
6805 	 *  Print to the log the message we intend to send.
6806 	 */
6807 	if (num == SIR_TARGET_SELECTED) {
6808 		PRINT_TARGET(np, target);
6809 		sym_printl_hex("control msgout:", np->abrt_msg,
6810 			      np->abrt_tbl.size);
6811 		np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size);
6812 	}
6813 
6814 	/*
6815 	 *  Let the SCRIPTS processor continue.
6816 	 */
6817 	OUTONB (nc_dcntl, (STD|NOCOM));
6818 }
6819 
6820 /*
6821  *  Gerard's alchemy:) that deals with with the data
6822  *  pointer for both MDP and the residual calculation.
6823  *
6824  *  I didn't want to bloat the code by more than 200
6825  *  lignes for the handling of both MDP and the residual.
6826  *  This has been achieved by using a data pointer
6827  *  representation consisting in an index in the data
6828  *  array (dp_sg) and a negative offset (dp_ofs) that
6829  *  have the following meaning:
6830  *
6831  *  - dp_sg = SYM_CONF_MAX_SG
6832  *    we are at the end of the data script.
6833  *  - dp_sg < SYM_CONF_MAX_SG
6834  *    dp_sg points to the next entry of the scatter array
6835  *    we want to transfer.
6836  *  - dp_ofs < 0
6837  *    dp_ofs represents the residual of bytes of the
6838  *    previous entry scatter entry we will send first.
6839  *  - dp_ofs = 0
6840  *    no residual to send first.
6841  *
6842  *  The function sym_evaluate_dp() accepts an arbitray
6843  *  offset (basically from the MDP message) and returns
6844  *  the corresponding values of dp_sg and dp_ofs.
6845  */
6846 
6847 static int sym_evaluate_dp(hcb_p np, ccb_p cp, u32 scr, int *ofs)
6848 {
6849 	u32	dp_scr;
6850 	int	dp_ofs, dp_sg, dp_sgmin;
6851 	int	tmp;
6852 	struct sym_pmc *pm;
6853 
6854 	/*
6855 	 *  Compute the resulted data pointer in term of a script
6856 	 *  address within some DATA script and a signed byte offset.
6857 	 */
6858 	dp_scr = scr;
6859 	dp_ofs = *ofs;
6860 	if	(dp_scr == SCRIPT_BA (np, pm0_data))
6861 		pm = &cp->phys.pm0;
6862 	else if (dp_scr == SCRIPT_BA (np, pm1_data))
6863 		pm = &cp->phys.pm1;
6864 	else
6865 		pm = 0;
6866 
6867 	if (pm) {
6868 		dp_scr  = scr_to_cpu(pm->ret);
6869 		dp_ofs -= scr_to_cpu(pm->sg.size);
6870 	}
6871 
6872 	/*
6873 	 *  If we are auto-sensing, then we are done.
6874 	 */
6875 	if (cp->host_flags & HF_SENSE) {
6876 		*ofs = dp_ofs;
6877 		return 0;
6878 	}
6879 
6880 	/*
6881 	 *  Deduce the index of the sg entry.
6882 	 *  Keep track of the index of the first valid entry.
6883 	 *  If result is dp_sg = SYM_CONF_MAX_SG, then we are at the
6884 	 *  end of the data.
6885 	 */
6886 	tmp = scr_to_cpu(cp->phys.goalp);
6887 	dp_sg = SYM_CONF_MAX_SG;
6888 	if (dp_scr != tmp)
6889 		dp_sg -= (tmp - 8 - (int)dp_scr) / (2*4);
6890 	dp_sgmin = SYM_CONF_MAX_SG - cp->segments;
6891 
6892 	/*
6893 	 *  Move to the sg entry the data pointer belongs to.
6894 	 *
6895 	 *  If we are inside the data area, we expect result to be:
6896 	 *
6897 	 *  Either,
6898 	 *      dp_ofs = 0 and dp_sg is the index of the sg entry
6899 	 *      the data pointer belongs to (or the end of the data)
6900 	 *  Or,
6901 	 *      dp_ofs < 0 and dp_sg is the index of the sg entry
6902 	 *      the data pointer belongs to + 1.
6903 	 */
6904 	if (dp_ofs < 0) {
6905 		int n;
6906 		while (dp_sg > dp_sgmin) {
6907 			--dp_sg;
6908 			tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
6909 			n = dp_ofs + (tmp & 0xffffff);
6910 			if (n > 0) {
6911 				++dp_sg;
6912 				break;
6913 			}
6914 			dp_ofs = n;
6915 		}
6916 	}
6917 	else if (dp_ofs > 0) {
6918 		while (dp_sg < SYM_CONF_MAX_SG) {
6919 			tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
6920 			dp_ofs -= (tmp & 0xffffff);
6921 			++dp_sg;
6922 			if (dp_ofs <= 0)
6923 				break;
6924 		}
6925 	}
6926 
6927 	/*
6928 	 *  Make sure the data pointer is inside the data area.
6929 	 *  If not, return some error.
6930 	 */
6931 	if	(dp_sg < dp_sgmin || (dp_sg == dp_sgmin && dp_ofs < 0))
6932 		goto out_err;
6933 	else if	(dp_sg > SYM_CONF_MAX_SG ||
6934 		 (dp_sg == SYM_CONF_MAX_SG && dp_ofs > 0))
6935 		goto out_err;
6936 
6937 	/*
6938 	 *  Save the extreme pointer if needed.
6939 	 */
6940 	if (dp_sg > cp->ext_sg ||
6941             (dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) {
6942 		cp->ext_sg  = dp_sg;
6943 		cp->ext_ofs = dp_ofs;
6944 	}
6945 
6946 	/*
6947 	 *  Return data.
6948 	 */
6949 	*ofs = dp_ofs;
6950 	return dp_sg;
6951 
6952 out_err:
6953 	return -1;
6954 }
6955 
6956 /*
6957  *  chip handler for MODIFY DATA POINTER MESSAGE
6958  *
6959  *  We also call this function on IGNORE WIDE RESIDUE
6960  *  messages that do not match a SWIDE full condition.
6961  *  Btw, we assume in that situation that such a message
6962  *  is equivalent to a MODIFY DATA POINTER (offset=-1).
6963  */
6964 
6965 static void sym_modify_dp(hcb_p np, tcb_p tp, ccb_p cp, int ofs)
6966 {
6967 	int dp_ofs	= ofs;
6968 	u32	dp_scr	= INL (nc_temp);
6969 	u32	dp_ret;
6970 	u32	tmp;
6971 	u_char	hflags;
6972 	int	dp_sg;
6973 	struct	sym_pmc *pm;
6974 
6975 	/*
6976 	 *  Not supported for auto-sense.
6977 	 */
6978 	if (cp->host_flags & HF_SENSE)
6979 		goto out_reject;
6980 
6981 	/*
6982 	 *  Apply our alchemy:) (see comments in sym_evaluate_dp()),
6983 	 *  to the resulted data pointer.
6984 	 */
6985 	dp_sg = sym_evaluate_dp(np, cp, dp_scr, &dp_ofs);
6986 	if (dp_sg < 0)
6987 		goto out_reject;
6988 
6989 	/*
6990 	 *  And our alchemy:) allows to easily calculate the data
6991 	 *  script address we want to return for the next data phase.
6992 	 */
6993 	dp_ret = cpu_to_scr(cp->phys.goalp);
6994 	dp_ret = dp_ret - 8 - (SYM_CONF_MAX_SG - dp_sg) * (2*4);
6995 
6996 	/*
6997 	 *  If offset / scatter entry is zero we donnot need
6998 	 *  a context for the new current data pointer.
6999 	 */
7000 	if (dp_ofs == 0) {
7001 		dp_scr = dp_ret;
7002 		goto out_ok;
7003 	}
7004 
7005 	/*
7006 	 *  Get a context for the new current data pointer.
7007 	 */
7008 	hflags = INB (HF_PRT);
7009 
7010 	if (hflags & HF_DP_SAVED)
7011 		hflags ^= HF_ACT_PM;
7012 
7013 	if (!(hflags & HF_ACT_PM)) {
7014 		pm  = &cp->phys.pm0;
7015 		dp_scr = SCRIPT_BA (np, pm0_data);
7016 	}
7017 	else {
7018 		pm = &cp->phys.pm1;
7019 		dp_scr = SCRIPT_BA (np, pm1_data);
7020 	}
7021 
7022 	hflags &= ~(HF_DP_SAVED);
7023 
7024 	OUTB (HF_PRT, hflags);
7025 
7026 	/*
7027 	 *  Set up the new current data pointer.
7028 	 *  ofs < 0 there, and for the next data phase, we
7029 	 *  want to transfer part of the data of the sg entry
7030 	 *  corresponding to index dp_sg-1 prior to returning
7031 	 *  to the main data script.
7032 	 */
7033 	pm->ret = cpu_to_scr(dp_ret);
7034 	tmp  = scr_to_cpu(cp->phys.data[dp_sg-1].addr);
7035 	tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs;
7036 	pm->sg.addr = cpu_to_scr(tmp);
7037 	pm->sg.size = cpu_to_scr(-dp_ofs);
7038 
7039 out_ok:
7040 	OUTL (nc_temp, dp_scr);
7041 	OUTL (nc_dsp, SCRIPT_BA (np, clrack));
7042 	return;
7043 
7044 out_reject:
7045 	OUTL (nc_dsp, SCRIPTH_BA (np, msg_bad));
7046 }
7047 
7048 
7049 /*
7050  *  chip calculation of the data residual.
7051  *
7052  *  As I used to say, the requirement of data residual
7053  *  in SCSI is broken, useless and cannot be achieved
7054  *  without huge complexity.
7055  *  But most OSes and even the official CAM require it.
7056  *  When stupidity happens to be so widely spread inside
7057  *  a community, it gets hard to convince.
7058  *
7059  *  Anyway, I don't care, since I am not going to use
7060  *  any software that considers this data residual as
7061  *  a relevant information. :)
7062  */
7063 
7064 static int sym_compute_residual(hcb_p np, ccb_p cp)
7065 {
7066 	int dp_sg, dp_sgmin, resid = 0;
7067 	int dp_ofs = 0;
7068 
7069 	/*
7070 	 *  Check for some data lost or just thrown away.
7071 	 *  We are not required to be quite accurate in this
7072 	 *  situation. Btw, if we are odd for output and the
7073 	 *  device claims some more data, it may well happen
7074 	 *  than our residual be zero. :-)
7075 	 */
7076 	if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) {
7077 		if (cp->xerr_status & XE_EXTRA_DATA)
7078 			resid -= scr_to_cpu(cp->phys.extra_bytes);
7079 		if (cp->xerr_status & XE_SODL_UNRUN)
7080 			++resid;
7081 		if (cp->xerr_status & XE_SWIDE_OVRUN)
7082 			--resid;
7083 	}
7084 
7085 	/*
7086 	 *  If all data has been transferred,
7087 	 *  there is no residual.
7088 	 */
7089 	if (cp->phys.lastp == cp->phys.goalp)
7090 		return resid;
7091 
7092 	/*
7093 	 *  If no data transfer occurs, or if the data
7094 	 *  pointer is weird, return full residual.
7095 	 */
7096 	if (cp->startp == cp->phys.lastp ||
7097 	    sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.lastp), &dp_ofs) < 0) {
7098 		return cp->data_len;
7099 	}
7100 
7101 	/*
7102 	 *  If we were auto-sensing, then we are done.
7103 	 */
7104 	if (cp->host_flags & HF_SENSE) {
7105 		return -dp_ofs;
7106 	}
7107 
7108 	/*
7109 	 *  We are now full comfortable in the computation
7110 	 *  of the data residual (2's complement).
7111 	 */
7112 	dp_sgmin = SYM_CONF_MAX_SG - cp->segments;
7113 	resid = -cp->ext_ofs;
7114 	for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) {
7115 		u_long tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
7116 		resid += (tmp & 0xffffff);
7117 	}
7118 
7119 	/*
7120 	 *  Hopefully, the result is not too wrong.
7121 	 */
7122 	return resid;
7123 }
7124 
7125 /*
7126  *  Print out the containt of a SCSI message.
7127  */
7128 
7129 static int sym_show_msg (u_char * msg)
7130 {
7131 	u_char i;
7132 	printf ("%x",*msg);
7133 	if (*msg==M_EXTENDED) {
7134 		for (i=1;i<8;i++) {
7135 			if (i-1>msg[1]) break;
7136 			printf ("-%x",msg[i]);
7137 		};
7138 		return (i+1);
7139 	} else if ((*msg & 0xf0) == 0x20) {
7140 		printf ("-%x",msg[1]);
7141 		return (2);
7142 	};
7143 	return (1);
7144 }
7145 
7146 static void sym_print_msg (ccb_p cp, char *label, u_char *msg)
7147 {
7148 	PRINT_ADDR(cp);
7149 	if (label)
7150 		printf ("%s: ", label);
7151 
7152 	(void) sym_show_msg (msg);
7153 	printf (".\n");
7154 }
7155 
7156 /*
7157  *  Negotiation for WIDE and SYNCHRONOUS DATA TRANSFER.
7158  *
7159  *  When we try to negotiate, we append the negotiation message
7160  *  to the identify and (maybe) simple tag message.
7161  *  The host status field is set to HS_NEGOTIATE to mark this
7162  *  situation.
7163  *
7164  *  If the target doesn't answer this message immediately
7165  *  (as required by the standard), the SIR_NEGO_FAILED interrupt
7166  *  will be raised eventually.
7167  *  The handler removes the HS_NEGOTIATE status, and sets the
7168  *  negotiated value to the default (async / nowide).
7169  *
7170  *  If we receive a matching answer immediately, we check it
7171  *  for validity, and set the values.
7172  *
7173  *  If we receive a Reject message immediately, we assume the
7174  *  negotiation has failed, and fall back to standard values.
7175  *
7176  *  If we receive a negotiation message while not in HS_NEGOTIATE
7177  *  state, it's a target initiated negotiation. We prepare a
7178  *  (hopefully) valid answer, set our parameters, and send back
7179  *  this answer to the target.
7180  *
7181  *  If the target doesn't fetch the answer (no message out phase),
7182  *  we assume the negotiation has failed, and fall back to default
7183  *  settings (SIR_NEGO_PROTO interrupt).
7184  *
7185  *  When we set the values, we adjust them in all ccbs belonging
7186  *  to this target, in the controller's register, and in the "phys"
7187  *  field of the controller's struct sym_hcb.
7188  */
7189 
7190 /*
7191  *  chip handler for SYNCHRONOUS DATA TRANSFER REQUEST (SDTR) message.
7192  */
7193 static void sym_sync_nego(hcb_p np, tcb_p tp, ccb_p cp)
7194 {
7195 	u_char	chg, ofs, per, fak, div;
7196 	int	req = 1;
7197 
7198 	/*
7199 	 *  Synchronous request message received.
7200 	 */
7201 	if (DEBUG_FLAGS & DEBUG_NEGO) {
7202 		sym_print_msg(cp, "sync msgin", np->msgin);
7203 	};
7204 
7205 	/*
7206 	 * request or answer ?
7207 	 */
7208 	if (INB (HS_PRT) == HS_NEGOTIATE) {
7209 		OUTB (HS_PRT, HS_BUSY);
7210 		if (cp->nego_status && cp->nego_status != NS_SYNC)
7211 			goto reject_it;
7212 		req = 0;
7213 	}
7214 
7215 	/*
7216 	 *  get requested values.
7217 	 */
7218 	chg = 0;
7219 	per = np->msgin[3];
7220 	ofs = np->msgin[4];
7221 
7222 	/*
7223 	 *  check values against our limits.
7224 	 */
7225 	if (ofs) {
7226 		if (ofs > np->maxoffs)
7227 			{chg = 1; ofs = np->maxoffs;}
7228 		if (req) {
7229 			if (ofs > tp->tinfo.user.offset)
7230 				{chg = 1; ofs = tp->tinfo.user.offset;}
7231 		}
7232 	}
7233 
7234 	if (ofs) {
7235 		if (per < np->minsync)
7236 			{chg = 1; per = np->minsync;}
7237 		if (req) {
7238 			if (per < tp->tinfo.user.period)
7239 				{chg = 1; per = tp->tinfo.user.period;}
7240 		}
7241 	}
7242 
7243 	div = fak = 0;
7244 	if (ofs && sym_getsync(np, 0, per, &div, &fak) < 0)
7245 		goto reject_it;
7246 
7247 	if (DEBUG_FLAGS & DEBUG_NEGO) {
7248 		PRINT_ADDR(cp);
7249 		printf ("sdtr: ofs=%d per=%d div=%d fak=%d chg=%d.\n",
7250 			ofs, per, div, fak, chg);
7251 	}
7252 
7253 	/*
7254 	 *  This was an answer message
7255 	 */
7256 	if (req == 0) {
7257 		if (chg) 	/* Answer wasn't acceptable. */
7258 			goto reject_it;
7259 		sym_setsync (np, cp, ofs, per, div, fak);
7260 		OUTL (nc_dsp, SCRIPT_BA (np, clrack));
7261 		return;
7262 	}
7263 
7264 	/*
7265 	 *  It was a request. Set value and
7266 	 *  prepare an answer message
7267 	 */
7268 	sym_setsync (np, cp, ofs, per, div, fak);
7269 
7270 	np->msgout[0] = M_EXTENDED;
7271 	np->msgout[1] = 3;
7272 	np->msgout[2] = M_X_SYNC_REQ;
7273 	np->msgout[3] = per;
7274 	np->msgout[4] = ofs;
7275 
7276 	cp->nego_status = NS_SYNC;
7277 
7278 	if (DEBUG_FLAGS & DEBUG_NEGO) {
7279 		sym_print_msg(cp, "sync msgout", np->msgout);
7280 	}
7281 
7282 	np->msgin [0] = M_NOOP;
7283 
7284 	OUTL (nc_dsp, SCRIPTH_BA (np, sdtr_resp));
7285 	return;
7286 reject_it:
7287 	sym_setsync (np, cp, 0, 0, 0, 0);
7288 	OUTL (nc_dsp, SCRIPTH_BA (np, msg_bad));
7289 }
7290 
7291 /*
7292  *  chip handler for PARALLEL PROTOCOL REQUEST (PPR) message.
7293  */
7294 static void sym_ppr_nego(hcb_p np, tcb_p tp, ccb_p cp)
7295 {
7296 	u_char	chg, ofs, per, fak, dt, div, wide;
7297 	int	req = 1;
7298 
7299 	/*
7300 	 * Synchronous request message received.
7301 	 */
7302 	if (DEBUG_FLAGS & DEBUG_NEGO) {
7303 		sym_print_msg(cp, "ppr msgin", np->msgin);
7304 	};
7305 
7306 	/*
7307 	 * request or answer ?
7308 	 */
7309 	if (INB (HS_PRT) == HS_NEGOTIATE) {
7310 		OUTB (HS_PRT, HS_BUSY);
7311 		if (cp->nego_status && cp->nego_status != NS_PPR)
7312 			goto reject_it;
7313 		req = 0;
7314 	}
7315 
7316 	/*
7317 	 *  get requested values.
7318 	 */
7319 	chg  = 0;
7320 	per  = np->msgin[3];
7321 	ofs  = np->msgin[5];
7322 	wide = np->msgin[6];
7323 	dt   = np->msgin[7] & PPR_OPT_DT;
7324 
7325 	/*
7326 	 *  check values against our limits.
7327 	 */
7328 	if (wide > np->maxwide)
7329 		{chg = 1; wide = np->maxwide;}
7330 	if (!wide || !(np->features & FE_ULTRA3))
7331 		dt &= ~PPR_OPT_DT;
7332 	if (req) {
7333 		if (wide > tp->tinfo.user.width)
7334 			{chg = 1; wide = tp->tinfo.user.width;}
7335 	}
7336 
7337 	if (!(np->features & FE_U3EN))	/* Broken U3EN bit not supported */
7338 		dt &= ~PPR_OPT_DT;
7339 
7340 	if (dt != (np->msgin[7] & PPR_OPT_MASK)) chg = 1;
7341 
7342 	if (ofs) {
7343 		if (ofs > np->maxoffs)
7344 			{chg = 1; ofs = np->maxoffs;}
7345 		if (req) {
7346 			if (ofs > tp->tinfo.user.offset)
7347 				{chg = 1; ofs = tp->tinfo.user.offset;}
7348 		}
7349 	}
7350 
7351 	if (ofs) {
7352 		if (dt) {
7353 			if (per < np->minsync_dt)
7354 				{chg = 1; per = np->minsync_dt;}
7355 		}
7356 		else if (per < np->minsync)
7357 			{chg = 1; per = np->minsync;}
7358 		if (req) {
7359 			if (per < tp->tinfo.user.period)
7360 				{chg = 1; per = tp->tinfo.user.period;}
7361 		}
7362 	}
7363 
7364 	div = fak = 0;
7365 	if (ofs && sym_getsync(np, dt, per, &div, &fak) < 0)
7366 		goto reject_it;
7367 
7368 	if (DEBUG_FLAGS & DEBUG_NEGO) {
7369 		PRINT_ADDR(cp);
7370 		printf ("ppr: "
7371 			"dt=%x ofs=%d per=%d wide=%d div=%d fak=%d chg=%d.\n",
7372 			dt, ofs, per, wide, div, fak, chg);
7373 	}
7374 
7375 	/*
7376 	 *  It was an answer.
7377 	 */
7378 	if (req == 0) {
7379 		if (chg) 	/* Answer wasn't acceptable */
7380 			goto reject_it;
7381 		sym_setpprot (np, cp, dt, ofs, per, wide, div, fak);
7382 		OUTL (nc_dsp, SCRIPT_BA (np, clrack));
7383 		return;
7384 	}
7385 
7386 	/*
7387 	 *  It was a request. Set value and
7388 	 *  prepare an answer message
7389 	 */
7390 	sym_setpprot (np, cp, dt, ofs, per, wide, div, fak);
7391 
7392 	np->msgout[0] = M_EXTENDED;
7393 	np->msgout[1] = 6;
7394 	np->msgout[2] = M_X_PPR_REQ;
7395 	np->msgout[3] = per;
7396 	np->msgout[4] = 0;
7397 	np->msgout[5] = ofs;
7398 	np->msgout[6] = wide;
7399 	np->msgout[7] = dt;
7400 
7401 	cp->nego_status = NS_PPR;
7402 
7403 	if (DEBUG_FLAGS & DEBUG_NEGO) {
7404 		sym_print_msg(cp, "ppr msgout", np->msgout);
7405 	}
7406 
7407 	np->msgin [0] = M_NOOP;
7408 
7409 	OUTL (nc_dsp, SCRIPTH_BA (np, ppr_resp));
7410 	return;
7411 reject_it:
7412 	sym_setpprot (np, cp, 0, 0, 0, 0, 0, 0);
7413 	OUTL (nc_dsp, SCRIPTH_BA (np, msg_bad));
7414 }
7415 
7416 /*
7417  *  chip handler for WIDE DATA TRANSFER REQUEST (WDTR) message.
7418  */
7419 static void sym_wide_nego(hcb_p np, tcb_p tp, ccb_p cp)
7420 {
7421 	u_char	chg, wide;
7422 	int	req = 1;
7423 
7424 	/*
7425 	 *  Wide request message received.
7426 	 */
7427 	if (DEBUG_FLAGS & DEBUG_NEGO) {
7428 		sym_print_msg(cp, "wide msgin", np->msgin);
7429 	};
7430 
7431 	/*
7432 	 * Is it an request from the device?
7433 	 */
7434 	if (INB (HS_PRT) == HS_NEGOTIATE) {
7435 		OUTB (HS_PRT, HS_BUSY);
7436 		if (cp->nego_status && cp->nego_status != NS_WIDE)
7437 			goto reject_it;
7438 		req = 0;
7439 	}
7440 
7441 	/*
7442 	 *  get requested values.
7443 	 */
7444 	chg  = 0;
7445 	wide = np->msgin[3];
7446 
7447 	/*
7448 	 *  check values against driver limits.
7449 	 */
7450 	if (wide > np->maxoffs)
7451 		{chg = 1; wide = np->maxoffs;}
7452 	if (req) {
7453 		if (wide > tp->tinfo.user.width)
7454 			{chg = 1; wide = tp->tinfo.user.width;}
7455 	}
7456 
7457 	if (DEBUG_FLAGS & DEBUG_NEGO) {
7458 		PRINT_ADDR(cp);
7459 		printf ("wdtr: wide=%d chg=%d.\n", wide, chg);
7460 	}
7461 
7462 	/*
7463 	 * This was an answer message
7464 	 */
7465 	if (req == 0) {
7466 		if (chg)	/*  Answer wasn't acceptable. */
7467 			goto reject_it;
7468 		sym_setwide (np, cp, wide);
7469 #if 1
7470 		/*
7471 		 * Negotiate for SYNC immediately after WIDE response.
7472 		 * This allows to negotiate for both WIDE and SYNC on
7473 		 * a single SCSI command (Suggested by Justin Gibbs).
7474 		 */
7475 		if (tp->tinfo.goal.offset) {
7476 			np->msgout[0] = M_EXTENDED;
7477 			np->msgout[1] = 3;
7478 			np->msgout[2] = M_X_SYNC_REQ;
7479 			np->msgout[3] = tp->tinfo.goal.period;
7480 			np->msgout[4] = tp->tinfo.goal.offset;
7481 
7482 			if (DEBUG_FLAGS & DEBUG_NEGO) {
7483 				sym_print_msg(cp, "sync msgout", np->msgout);
7484 			}
7485 
7486 			cp->nego_status = NS_SYNC;
7487 			OUTB (HS_PRT, HS_NEGOTIATE);
7488 			OUTL (nc_dsp, SCRIPTH_BA (np, sdtr_resp));
7489 			return;
7490 		}
7491 #endif
7492 		OUTL (nc_dsp, SCRIPT_BA (np, clrack));
7493 		return;
7494 	};
7495 
7496 	/*
7497 	 *  It was a request, set value and
7498 	 *  prepare an answer message
7499 	 */
7500 	sym_setwide (np, cp, wide);
7501 
7502 	np->msgout[0] = M_EXTENDED;
7503 	np->msgout[1] = 2;
7504 	np->msgout[2] = M_X_WIDE_REQ;
7505 	np->msgout[3] = wide;
7506 
7507 	np->msgin [0] = M_NOOP;
7508 
7509 	cp->nego_status = NS_WIDE;
7510 
7511 	if (DEBUG_FLAGS & DEBUG_NEGO) {
7512 		sym_print_msg(cp, "wide msgout", np->msgout);
7513 	}
7514 
7515 	OUTL (nc_dsp, SCRIPTH_BA (np, wdtr_resp));
7516 	return;
7517 reject_it:
7518 	OUTL (nc_dsp, SCRIPTH_BA (np, msg_bad));
7519 }
7520 
7521 /*
7522  *  Reset SYNC or WIDE to default settings.
7523  *
7524  *  Called when a negotiation does not succeed either
7525  *  on rejection or on protocol error.
7526  */
7527 static void sym_nego_default(hcb_p np, tcb_p tp, ccb_p cp)
7528 {
7529 	/*
7530 	 *  any error in negotiation:
7531 	 *  fall back to default mode.
7532 	 */
7533 	switch (cp->nego_status) {
7534 	case NS_PPR:
7535 		sym_setpprot (np, cp, 0, 0, 0, 0, 0, 0);
7536 		break;
7537 	case NS_SYNC:
7538 		sym_setsync (np, cp, 0, 0, 0, 0);
7539 		break;
7540 	case NS_WIDE:
7541 		sym_setwide (np, cp, 0);
7542 		break;
7543 	};
7544 	np->msgin [0] = M_NOOP;
7545 	np->msgout[0] = M_NOOP;
7546 	cp->nego_status = 0;
7547 }
7548 
7549 /*
7550  *  chip handler for MESSAGE REJECT received in response to
7551  *  a WIDE or SYNCHRONOUS negotiation.
7552  */
7553 static void sym_nego_rejected(hcb_p np, tcb_p tp, ccb_p cp)
7554 {
7555 	sym_nego_default(np, tp, cp);
7556 	OUTB (HS_PRT, HS_BUSY);
7557 }
7558 
7559 /*
7560  *  chip exception handler for programmed interrupts.
7561  */
7562 void sym_int_sir (hcb_p np)
7563 {
7564 	u_char	num	= INB (nc_dsps);
7565 	u_long	dsa	= INL (nc_dsa);
7566 	ccb_p	cp	= sym_ccb_from_dsa(np, dsa);
7567 	u_char	target	= INB (nc_sdid) & 0x0f;
7568 	tcb_p	tp	= &np->target[target];
7569 	int	tmp;
7570 
7571 	if (DEBUG_FLAGS & DEBUG_TINY) printf ("I#%d", num);
7572 
7573 	switch (num) {
7574 	/*
7575 	 *  Command has been completed with error condition
7576 	 *  or has been auto-sensed.
7577 	 */
7578 	case SIR_COMPLETE_ERROR:
7579 		sym_complete_error(np, cp);
7580 		return;
7581 	/*
7582 	 *  The C code is currently trying to recover from something.
7583 	 *  Typically, user want to abort some command.
7584 	 */
7585 	case SIR_SCRIPT_STOPPED:
7586 	case SIR_TARGET_SELECTED:
7587 	case SIR_ABORT_SENT:
7588 		sym_sir_task_recovery(np, num);
7589 		return;
7590 	/*
7591 	 *  The device didn't go to MSG OUT phase after having
7592 	 *  been selected with ATN. We donnot want to handle
7593 	 *  that.
7594 	 */
7595 	case SIR_SEL_ATN_NO_MSG_OUT:
7596 		printf ("%s:%d: No MSG OUT phase after selection with ATN.\n",
7597 			sym_name (np), target);
7598 		goto out_stuck;
7599 	/*
7600 	 *  The device didn't switch to MSG IN phase after
7601 	 *  having reseleted the initiator.
7602 	 */
7603 	case SIR_RESEL_NO_MSG_IN:
7604 		printf ("%s:%d: No MSG IN phase after reselection.\n",
7605 			sym_name (np), target);
7606 		goto out_stuck;
7607 	/*
7608 	 *  After reselection, the device sent a message that wasn't
7609 	 *  an IDENTIFY.
7610 	 */
7611 	case SIR_RESEL_NO_IDENTIFY:
7612 		printf ("%s:%d: No IDENTIFY after reselection.\n",
7613 			sym_name (np), target);
7614 		goto out_stuck;
7615 	/*
7616 	 *  The device reselected a LUN we donnot know about.
7617 	 */
7618 	case SIR_RESEL_BAD_LUN:
7619 		np->msgout[0] = M_RESET;
7620 		goto out;
7621 	/*
7622 	 *  The device reselected for an untagged nexus and we
7623 	 *  haven't any.
7624 	 */
7625 	case SIR_RESEL_BAD_I_T_L:
7626 		np->msgout[0] = M_ABORT;
7627 		goto out;
7628 	/*
7629 	 *  The device reselected for a tagged nexus that we donnot
7630 	 *  have.
7631 	 */
7632 	case SIR_RESEL_BAD_I_T_L_Q:
7633 		np->msgout[0] = M_ABORT_TAG;
7634 		goto out;
7635 	/*
7636 	 *  The SCRIPTS let us know that the device has grabbed
7637 	 *  our message and will abort the job.
7638 	 */
7639 	case SIR_RESEL_ABORTED:
7640 		np->lastmsg = np->msgout[0];
7641 		np->msgout[0] = M_NOOP;
7642 		printf ("%s:%d: message %x sent on bad reselection.\n",
7643 			sym_name (np), target, np->lastmsg);
7644 		goto out;
7645 	/*
7646 	 *  The SCRIPTS let us know that a message has been
7647 	 *  successfully sent to the device.
7648 	 */
7649 	case SIR_MSG_OUT_DONE:
7650 		np->lastmsg = np->msgout[0];
7651 		np->msgout[0] = M_NOOP;
7652 		/* Should we really care of that */
7653 		if (np->lastmsg == M_PARITY || np->lastmsg == M_ID_ERROR) {
7654 			if (cp) {
7655 				cp->xerr_status &= ~XE_PARITY_ERR;
7656 				if (!cp->xerr_status)
7657 					OUTOFFB (HF_PRT, HF_EXT_ERR);
7658 			}
7659 		}
7660 		goto out;
7661 	/*
7662 	 *  The device didn't send a GOOD SCSI status.
7663 	 *  We may have some work to do prior to allow
7664 	 *  the SCRIPTS processor to continue.
7665 	 */
7666 	case SIR_BAD_SCSI_STATUS:
7667 		if (!cp)
7668 			goto out;
7669 		sym_sir_bad_scsi_status(np, num, cp);
7670 		return;
7671 	/*
7672 	 *  We are asked by the SCRIPTS to prepare a
7673 	 *  REJECT message.
7674 	 */
7675 	case SIR_REJECT_TO_SEND:
7676 		sym_print_msg(cp, "M_REJECT to send for ", np->msgin);
7677 		np->msgout[0] = M_REJECT;
7678 		goto out;
7679 	/*
7680 	 *  We have been ODD at the end of a DATA IN
7681 	 *  transfer and the device didn't send a
7682 	 *  IGNORE WIDE RESIDUE message.
7683 	 *  It is a data overrun condition.
7684 	 */
7685 	case SIR_SWIDE_OVERRUN:
7686 		if (cp) {
7687 			OUTONB (HF_PRT, HF_EXT_ERR);
7688 			cp->xerr_status |= XE_SWIDE_OVRUN;
7689 		}
7690 		goto out;
7691 	/*
7692 	 *  We have been ODD at the end of a DATA OUT
7693 	 *  transfer.
7694 	 *  It is a data underrun condition.
7695 	 */
7696 	case SIR_SODL_UNDERRUN:
7697 		if (cp) {
7698 			OUTONB (HF_PRT, HF_EXT_ERR);
7699 			cp->xerr_status |= XE_SODL_UNRUN;
7700 		}
7701 		goto out;
7702 	/*
7703 	 *  We received a message.
7704 	 */
7705 	case SIR_MSG_RECEIVED:
7706 		if (!cp)
7707 			goto out_stuck;
7708 		switch (np->msgin [0]) {
7709 		/*
7710 		 *  We received an extended message.
7711 		 *  We handle MODIFY DATA POINTER, SDTR, WDTR
7712 		 *  and reject all other extended messages.
7713 		 */
7714 		case M_EXTENDED:
7715 			switch (np->msgin [2]) {
7716 			case M_X_MODIFY_DP:
7717 				if (DEBUG_FLAGS & DEBUG_POINTER)
7718 					sym_print_msg(cp,"modify DP",np->msgin);
7719 				tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) +
7720 				      (np->msgin[5]<<8)  + (np->msgin[6]);
7721 				sym_modify_dp(np, tp, cp, tmp);
7722 				return;
7723 			case M_X_SYNC_REQ:
7724 				sym_sync_nego(np, tp, cp);
7725 				return;
7726 			case M_X_PPR_REQ:
7727 				sym_ppr_nego(np, tp, cp);
7728 				return;
7729 			case M_X_WIDE_REQ:
7730 				sym_wide_nego(np, tp, cp);
7731 				return;
7732 			default:
7733 				goto out_reject;
7734 			}
7735 			break;
7736 		/*
7737 		 *  We received a 1/2 byte message not handled from SCRIPTS.
7738 		 *  We are only expecting MESSAGE REJECT and IGNORE WIDE
7739 		 *  RESIDUE messages that haven't been anticipated by
7740 		 *  SCRIPTS on SWIDE full condition. Unanticipated IGNORE
7741 		 *  WIDE RESIDUE messages are aliased as MODIFY DP (-1).
7742 		 */
7743 		case M_IGN_RESIDUE:
7744 			if (DEBUG_FLAGS & DEBUG_POINTER)
7745 				sym_print_msg(cp,"ign wide residue", np->msgin);
7746 			sym_modify_dp(np, tp, cp, -1);
7747 			return;
7748 		case M_REJECT:
7749 			if (INB (HS_PRT) == HS_NEGOTIATE)
7750 				sym_nego_rejected(np, tp, cp);
7751 			else {
7752 				PRINT_ADDR(cp);
7753 				printf ("M_REJECT received (%x:%x).\n",
7754 					scr_to_cpu(np->lastmsg), np->msgout[0]);
7755 			}
7756 			goto out_clrack;
7757 			break;
7758 		default:
7759 			goto out_reject;
7760 		}
7761 		break;
7762 	/*
7763 	 *  We received an unknown message.
7764 	 *  Ignore all MSG IN phases and reject it.
7765 	 */
7766 	case SIR_MSG_WEIRD:
7767 		sym_print_msg(cp, "WEIRD message received", np->msgin);
7768 		OUTL (nc_dsp, SCRIPTH_BA (np, msg_weird));
7769 		return;
7770 	/*
7771 	 *  Negotiation failed.
7772 	 *  Target does not send us the reply.
7773 	 *  Remove the HS_NEGOTIATE status.
7774 	 */
7775 	case SIR_NEGO_FAILED:
7776 		OUTB (HS_PRT, HS_BUSY);
7777 	/*
7778 	 *  Negotiation failed.
7779 	 *  Target does not want answer message.
7780 	 */
7781 	case SIR_NEGO_PROTO:
7782 		sym_nego_default(np, tp, cp);
7783 		goto out;
7784 	};
7785 
7786 out:
7787 	OUTONB (nc_dcntl, (STD|NOCOM));
7788 	return;
7789 out_reject:
7790 	OUTL (nc_dsp, SCRIPTH_BA (np, msg_bad));
7791 	return;
7792 out_clrack:
7793 	OUTL (nc_dsp, SCRIPT_BA (np, clrack));
7794 	return;
7795 out_stuck:
7796 }
7797 
7798 /*
7799  *  Acquire a control block
7800  */
7801 static	ccb_p sym_get_ccb (hcb_p np, u_char tn, u_char ln, u_char tag_order)
7802 {
7803 	tcb_p tp = &np->target[tn];
7804 	lcb_p lp = sym_lp(np, tp, ln);
7805 	u_short tag = NO_TAG;
7806 	SYM_QUEHEAD *qp;
7807 	ccb_p cp = (ccb_p) 0;
7808 
7809 	/*
7810 	 *  Look for a free CCB
7811 	 */
7812 	if (sym_que_empty(&np->free_ccbq))
7813 		(void) sym_alloc_ccb(np);
7814 	qp = sym_remque_head(&np->free_ccbq);
7815 	if (!qp)
7816 		goto out;
7817 	cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
7818 
7819 	/*
7820 	 *  If the LCB is not yet available and the LUN
7821 	 *  has been probed ok, try to allocate the LCB.
7822 	 */
7823 	if (!lp && sym_is_bit(tp->lun_map, ln)) {
7824 		lp = sym_alloc_lcb(np, tn, ln);
7825 		if (!lp)
7826 			goto out_free;
7827 	}
7828 
7829 	/*
7830 	 *  If the LCB is not available here, then the
7831 	 *  logical unit is not yet discovered. For those
7832 	 *  ones only accept 1 SCSI IO per logical unit,
7833 	 *  since we cannot allow disconnections.
7834 	 */
7835 	if (!lp) {
7836 		if (!sym_is_bit(tp->busy0_map, ln))
7837 			sym_set_bit(tp->busy0_map, ln);
7838 		else
7839 			goto out_free;
7840 	} else {
7841 		/*
7842 		 *  If we have been asked for a tagged command.
7843 		 */
7844 		if (tag_order) {
7845 			/*
7846 			 *  Debugging purpose.
7847 			 */
7848 			assert(lp->busy_itl == 0);
7849 			/*
7850 			 *  Allocate resources for tags if not yet.
7851 			 */
7852 			if (!lp->cb_tags) {
7853 				sym_alloc_lcb_tags(np, tn, ln);
7854 				if (!lp->cb_tags)
7855 					goto out_free;
7856 			}
7857 			/*
7858 			 *  Get a tag for this SCSI IO and set up
7859 			 *  the CCB bus address for reselection,
7860 			 *  and count it for this LUN.
7861 			 *  Toggle reselect path to tagged.
7862 			 */
7863 			if (lp->busy_itlq < SYM_CONF_MAX_TASK) {
7864 				tag = lp->cb_tags[lp->ia_tag];
7865 				if (++lp->ia_tag == SYM_CONF_MAX_TASK)
7866 					lp->ia_tag = 0;
7867 				lp->itlq_tbl[tag] = cpu_to_scr(cp->ccb_ba);
7868 				++lp->busy_itlq;
7869 				lp->resel_sa =
7870 					cpu_to_scr(SCRIPT_BA (np, resel_tag));
7871 			}
7872 			else
7873 				goto out_free;
7874 		}
7875 		/*
7876 		 *  This command will not be tagged.
7877 		 *  If we already have either a tagged or untagged
7878 		 *  one, refuse to overlap this untagged one.
7879 		 */
7880 		else {
7881 			/*
7882 			 *  Debugging purpose.
7883 			 */
7884 			assert(lp->busy_itl == 0 && lp->busy_itlq == 0);
7885 			/*
7886 			 *  Count this nexus for this LUN.
7887 			 *  Set up the CCB bus address for reselection.
7888 			 *  Toggle reselect path to untagged.
7889 			 */
7890 			if (++lp->busy_itl == 1) {
7891 				lp->itl_task_sa = cpu_to_scr(cp->ccb_ba);
7892 				lp->resel_sa =
7893 					cpu_to_scr(SCRIPT_BA (np,resel_no_tag));
7894 			}
7895 			else
7896 				goto out_free;
7897 		}
7898 	}
7899 	/*
7900 	 *  Put the CCB into the busy queue.
7901 	 */
7902 	sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
7903 
7904 	/*
7905 	 *  Remember all informations needed to free this CCB.
7906 	 */
7907 	cp->to_abort = 0;
7908 	cp->tag	   = tag;
7909 	cp->target = tn;
7910 	cp->lun    = ln;
7911 
7912 	if (DEBUG_FLAGS & DEBUG_TAGS) {
7913 		PRINT_LUN(np, tn, ln);
7914 		printf ("ccb @%p using tag %d.\n", cp, tag);
7915 	}
7916 
7917 out:
7918 	return cp;
7919 out_free:
7920 	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
7921 	return (ccb_p) 0;
7922 }
7923 
7924 /*
7925  *  Release one control block
7926  */
7927 static void sym_free_ccb (hcb_p np, ccb_p cp)
7928 {
7929 	tcb_p tp = &np->target[cp->target];
7930 	lcb_p lp = sym_lp(np, tp, cp->lun);
7931 
7932 	if (DEBUG_FLAGS & DEBUG_TAGS) {
7933 		PRINT_LUN(np, cp->target, cp->lun);
7934 		printf ("ccb @%p freeing tag %d.\n", cp, cp->tag);
7935 	}
7936 
7937 	/*
7938 	 *  If LCB available,
7939 	 */
7940 	if (lp) {
7941 		/*
7942 		 *  If tagged, release the tag, set the relect path
7943 		 */
7944 		if (cp->tag != NO_TAG) {
7945 			/*
7946 			 *  Free the tag value.
7947 			 */
7948 			lp->cb_tags[lp->if_tag] = cp->tag;
7949 			if (++lp->if_tag == SYM_CONF_MAX_TASK)
7950 				lp->if_tag = 0;
7951 			/*
7952 			 *  Make the reselect path invalid,
7953 			 *  and uncount this CCB.
7954 			 */
7955 			lp->itlq_tbl[cp->tag] = cpu_to_scr(np->bad_itlq_ba);
7956 			--lp->busy_itlq;
7957 		} else {	/* Untagged */
7958 			/*
7959 			 *  Make the reselect path invalid,
7960 			 *  and uncount this CCB.
7961 			 */
7962 			lp->itl_task_sa = cpu_to_scr(np->bad_itl_ba);
7963 			--lp->busy_itl;
7964 		}
7965 		/*
7966 		 *  If no JOB active, make the LUN reselect path invalid.
7967 		 */
7968 		if (lp->busy_itlq == 0 && lp->busy_itl == 0)
7969 			lp->resel_sa = cpu_to_scr(SCRIPTH_BA(np,resel_bad_lun));
7970 	}
7971 	/*
7972 	 *  Otherwise, we only accept 1 IO per LUN.
7973 	 *  Clear the bit that keeps track of this IO.
7974 	 */
7975 	else
7976 		sym_clr_bit(tp->busy0_map, cp->lun);
7977 
7978 	/*
7979 	 *  We donnot queue more than 1 ccb per target
7980 	 *  with negotiation at any time. If this ccb was
7981 	 *  used for negotiation, clear this info in the tcb.
7982 	 */
7983 	if (cp == tp->nego_cp)
7984 		tp->nego_cp = 0;
7985 
7986 #ifdef SYM_CONF_IARB_SUPPORT
7987 	/*
7988 	 *  If we just complete the last queued CCB,
7989 	 *  clear this info that is no longer relevant.
7990 	 */
7991 	if (cp == np->last_cp)
7992 		np->last_cp = 0;
7993 #endif
7994 
7995 #ifdef	FreeBSD_Bus_Dma_Abstraction
7996 	/*
7997 	 *  Unmap user data from DMA map if needed.
7998 	 */
7999 	if (cp->dmamapped) {
8000 		bus_dmamap_unload(np->data_dmat, cp->dmamap);
8001 		cp->dmamapped = 0;
8002 	}
8003 #endif
8004 
8005 	/*
8006 	 *  Make this CCB available.
8007 	 */
8008 	cp->cam_ccb = 0;
8009 	cp->host_status = HS_IDLE;
8010 	sym_remque(&cp->link_ccbq);
8011 	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
8012 }
8013 
8014 /*
8015  *  Allocate a CCB from memory and initialize its fixed part.
8016  */
8017 static ccb_p sym_alloc_ccb(hcb_p np)
8018 {
8019 	ccb_p cp = 0;
8020 	int hcode;
8021 
8022 	/*
8023 	 *  Prevent from allocating more CCBs than we can
8024 	 *  queue to the controller.
8025 	 */
8026 	if (np->actccbs >= SYM_CONF_MAX_START)
8027 		return 0;
8028 
8029 	/*
8030 	 *  Allocate memory for this CCB.
8031 	 */
8032 	cp = sym_calloc_dma(sizeof(struct sym_ccb), "CCB");
8033 	if (!cp)
8034 		goto out_free;
8035 
8036 	/*
8037 	 *  Allocate a bounce buffer for sense data.
8038 	 */
8039 	cp->sns_bbuf = sym_calloc_dma(SYM_SNS_BBUF_LEN, "SNS_BBUF");
8040 	if (!cp->sns_bbuf)
8041 		goto out_free;
8042 
8043 	/*
8044 	 *  Allocate a map for the DMA of user data.
8045 	 */
8046 #ifdef	FreeBSD_Bus_Dma_Abstraction
8047 	if (bus_dmamap_create(np->data_dmat, 0, &cp->dmamap))
8048 		goto out_free;
8049 #endif
8050 	/*
8051 	 *  Count it.
8052 	 */
8053 	np->actccbs++;
8054 
8055 	/*
8056 	 *  Compute the bus address of this ccb.
8057 	 */
8058 	cp->ccb_ba = vtobus(cp);
8059 
8060 	/*
8061 	 *  Insert this ccb into the hashed list.
8062 	 */
8063 	hcode = CCB_HASH_CODE(cp->ccb_ba);
8064 	cp->link_ccbh = np->ccbh[hcode];
8065 	np->ccbh[hcode] = cp;
8066 
8067 	/*
8068 	 *  Initialyze the start and restart actions.
8069 	 */
8070 	cp->phys.go.start   = cpu_to_scr(SCRIPT_BA (np, idle));
8071 	cp->phys.go.restart = cpu_to_scr(SCRIPTH_BA(np, bad_i_t_l));
8072 
8073  	/*
8074 	 *  Initilialyze some other fields.
8075 	 */
8076 	cp->phys.smsg_ext.addr = cpu_to_scr(HCB_BA(np, msgin[2]));
8077 
8078 	/*
8079 	 *  Chain into free ccb queue.
8080 	 */
8081 	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
8082 
8083 	return cp;
8084 out_free:
8085 	if (cp) {
8086 		if (cp->sns_bbuf)
8087 			sym_mfree_dma(cp->sns_bbuf,SYM_SNS_BBUF_LEN,"SNS_BBUF");
8088 		sym_mfree_dma(cp, sizeof(*cp), "CCB");
8089 	}
8090 	return 0;
8091 }
8092 
8093 /*
8094  *  Look up a CCB from a DSA value.
8095  */
8096 static ccb_p sym_ccb_from_dsa(hcb_p np, u_long dsa)
8097 {
8098 	int hcode;
8099 	ccb_p cp;
8100 
8101 	hcode = CCB_HASH_CODE(dsa);
8102 	cp = np->ccbh[hcode];
8103 	while (cp) {
8104 		if (cp->ccb_ba == dsa)
8105 			break;
8106 		cp = cp->link_ccbh;
8107 	}
8108 
8109 	return cp;
8110 }
8111 
8112 /*
8113  *  Target control block initialisation.
8114  *  Nothing important to do at the moment.
8115  */
8116 static void sym_init_tcb (hcb_p np, u_char tn)
8117 {
8118 	/*
8119 	 *  Check some alignments required by the chip.
8120 	 */
8121 	assert (((offsetof(struct sym_reg, nc_sxfer) ^
8122 		offsetof(struct sym_tcb, sval)) &3) == 0);
8123 	assert (((offsetof(struct sym_reg, nc_scntl3) ^
8124 		offsetof(struct sym_tcb, wval)) &3) == 0);
8125 }
8126 
8127 /*
8128  *  Lun control block allocation and initialization.
8129  */
8130 static lcb_p sym_alloc_lcb (hcb_p np, u_char tn, u_char ln)
8131 {
8132 	tcb_p tp = &np->target[tn];
8133 	lcb_p lp = sym_lp(np, tp, ln);
8134 
8135 	/*
8136 	 *  Already done, just return.
8137 	 */
8138 	if (lp)
8139 		return lp;
8140 	/*
8141 	 *  Check against some race.
8142 	 */
8143 	assert(!sym_is_bit(tp->busy0_map, ln));
8144 
8145 	/*
8146 	 *  Initialize the target control block if not yet.
8147 	 */
8148 	sym_init_tcb (np, tn);
8149 
8150 	/*
8151 	 *  Allocate the LCB bus address array.
8152 	 *  Compute the bus address of this table.
8153 	 */
8154 	if (ln && !tp->luntbl) {
8155 		int i;
8156 
8157 		tp->luntbl = sym_calloc_dma(256, "LUNTBL");
8158 		if (!tp->luntbl)
8159 			goto fail;
8160 		for (i = 0 ; i < 64 ; i++)
8161 			tp->luntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa));
8162 		tp->luntbl_sa = cpu_to_scr(vtobus(tp->luntbl));
8163 	}
8164 
8165 	/*
8166 	 *  Allocate the table of pointers for LUN(s) > 0, if needed.
8167 	 */
8168 	if (ln && !tp->lunmp) {
8169 		tp->lunmp = sym_calloc(SYM_CONF_MAX_LUN * sizeof(lcb_p),
8170 				   "LUNMP");
8171 		if (!tp->lunmp)
8172 			goto fail;
8173 	}
8174 
8175 	/*
8176 	 *  Allocate the lcb.
8177 	 *  Make it available to the chip.
8178 	 */
8179 	lp = sym_calloc_dma(sizeof(struct sym_lcb), "LCB");
8180 	if (!lp)
8181 		goto fail;
8182 	if (ln) {
8183 		tp->lunmp[ln] = lp;
8184 		tp->luntbl[ln] = cpu_to_scr(vtobus(lp));
8185 	}
8186 	else {
8187 		tp->lun0p = lp;
8188 		tp->lun0_sa = cpu_to_scr(vtobus(lp));
8189 	}
8190 
8191 	/*
8192 	 *  Let the itl task point to error handling.
8193 	 */
8194 	lp->itl_task_sa = cpu_to_scr(np->bad_itl_ba);
8195 
8196 	/*
8197 	 *  Set the reselect pattern to our default. :)
8198 	 */
8199 	lp->resel_sa = cpu_to_scr(SCRIPTH_BA(np, resel_bad_lun));
8200 
8201 	/*
8202 	 *  Set user capabilities.
8203 	 */
8204 	lp->user_flags = tp->usrflags & (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);
8205 
8206 fail:
8207 	return lp;
8208 }
8209 
8210 /*
8211  *  Allocate LCB resources for tagged command queuing.
8212  */
8213 static void sym_alloc_lcb_tags (hcb_p np, u_char tn, u_char ln)
8214 {
8215 	tcb_p tp = &np->target[tn];
8216 	lcb_p lp = sym_lp(np, tp, ln);
8217 	int i;
8218 
8219 	/*
8220 	 *  If LCB not available, try to allocate it.
8221 	 */
8222 	if (!lp && !(lp = sym_alloc_lcb(np, tn, ln)))
8223 		goto fail;
8224 
8225 	/*
8226 	 *  Allocate the task table and and the tag allocation
8227 	 *  circular buffer. We want both or none.
8228 	 */
8229 	lp->itlq_tbl = sym_calloc_dma(SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
8230 	if (!lp->itlq_tbl)
8231 		goto fail;
8232 	lp->cb_tags = sym_calloc(SYM_CONF_MAX_TASK, "CB_TAGS");
8233 	if (!lp->cb_tags) {
8234 		sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
8235 		lp->itlq_tbl = 0;
8236 		goto fail;
8237 	}
8238 
8239 	/*
8240 	 *  Initialize the task table with invalid entries.
8241 	 */
8242 	for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
8243 		lp->itlq_tbl[i] = cpu_to_scr(np->notask_ba);
8244 
8245 	/*
8246 	 *  Fill up the tag buffer with tag numbers.
8247 	 */
8248 	for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
8249 		lp->cb_tags[i] = i;
8250 
8251 	/*
8252 	 *  Make the task table available to SCRIPTS,
8253 	 *  And accept tagged commands now.
8254 	 */
8255 	lp->itlq_tbl_sa = cpu_to_scr(vtobus(lp->itlq_tbl));
8256 
8257 	return;
8258 fail:
8259 }
8260 
8261 /*
8262  *  Test the pci bus snoop logic :-(
8263  *
8264  *  Has to be called with interrupts disabled.
8265  */
8266 #ifndef SYM_CONF_IOMAPPED
8267 static int sym_regtest (hcb_p np)
8268 {
8269 	register volatile u32 data;
8270 	/*
8271 	 *  chip registers may NOT be cached.
8272 	 *  write 0xffffffff to a read only register area,
8273 	 *  and try to read it back.
8274 	 */
8275 	data = 0xffffffff;
8276 	OUTL_OFF(offsetof(struct sym_reg, nc_dstat), data);
8277 	data = INL_OFF(offsetof(struct sym_reg, nc_dstat));
8278 #if 1
8279 	if (data == 0xffffffff) {
8280 #else
8281 	if ((data & 0xe2f0fffd) != 0x02000080) {
8282 #endif
8283 		printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n",
8284 			(unsigned) data);
8285 		return (0x10);
8286 	};
8287 	return (0);
8288 }
8289 #endif
8290 
8291 static int sym_snooptest (hcb_p np)
8292 {
8293 	u32	sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc;
8294 	int	i, err=0;
8295 #ifndef SYM_CONF_IOMAPPED
8296 	err |= sym_regtest (np);
8297 	if (err) return (err);
8298 #endif
8299 	/*
8300 	 *  init
8301 	 */
8302 	pc  = SCRIPTH0_BA (np, snooptest);
8303 	host_wr = 1;
8304 	sym_wr  = 2;
8305 	/*
8306 	 *  Set memory and register.
8307 	 */
8308 	np->cache = cpu_to_scr(host_wr);
8309 	OUTL (nc_temp, sym_wr);
8310 	/*
8311 	 *  Start script (exchange values)
8312 	 */
8313 	OUTL (nc_dsa, np->hcb_ba);
8314 	OUTL (nc_dsp, pc);
8315 	/*
8316 	 *  Wait 'til done (with timeout)
8317 	 */
8318 	for (i=0; i<SYM_SNOOP_TIMEOUT; i++)
8319 		if (INB(nc_istat) & (INTF|SIP|DIP))
8320 			break;
8321 	/*
8322 	 *  Save termination position.
8323 	 */
8324 	pc = INL (nc_dsp);
8325 	/*
8326 	 *  Read memory and register.
8327 	 */
8328 	host_rd = scr_to_cpu(np->cache);
8329 	sym_rd  = INL (nc_scratcha);
8330 	sym_bk  = INL (nc_temp);
8331 
8332 	/*
8333 	 *  check for timeout
8334 	 */
8335 	if (i>=SYM_SNOOP_TIMEOUT) {
8336 		printf ("CACHE TEST FAILED: timeout.\n");
8337 		return (0x20);
8338 	};
8339 	/*
8340 	 *  Check termination position.
8341 	 */
8342 	if (pc != SCRIPTH0_BA (np, snoopend)+8) {
8343 		printf ("CACHE TEST FAILED: script execution failed.\n");
8344 		printf ("start=%08lx, pc=%08lx, end=%08lx\n",
8345 			(u_long) SCRIPTH0_BA (np, snooptest), (u_long) pc,
8346 			(u_long) SCRIPTH0_BA (np, snoopend) +8);
8347 		return (0x40);
8348 	};
8349 	/*
8350 	 *  Show results.
8351 	 */
8352 	if (host_wr != sym_rd) {
8353 		printf ("CACHE TEST FAILED: host wrote %d, chip read %d.\n",
8354 			(int) host_wr, (int) sym_rd);
8355 		err |= 1;
8356 	};
8357 	if (host_rd != sym_wr) {
8358 		printf ("CACHE TEST FAILED: chip wrote %d, host read %d.\n",
8359 			(int) sym_wr, (int) host_rd);
8360 		err |= 2;
8361 	};
8362 	if (sym_bk != sym_wr) {
8363 		printf ("CACHE TEST FAILED: chip wrote %d, read back %d.\n",
8364 			(int) sym_wr, (int) sym_bk);
8365 		err |= 4;
8366 	};
8367 	return (err);
8368 }
8369 
8370 /*
8371  *  Determine the chip's clock frequency.
8372  *
8373  *  This is essential for the negotiation of the synchronous
8374  *  transfer rate.
8375  *
8376  *  Note: we have to return the correct value.
8377  *  THERE IS NO SAFE DEFAULT VALUE.
8378  *
8379  *  Most NCR/SYMBIOS boards are delivered with a 40 Mhz clock.
8380  *  53C860 and 53C875 rev. 1 support fast20 transfers but
8381  *  do not have a clock doubler and so are provided with a
8382  *  80 MHz clock. All other fast20 boards incorporate a doubler
8383  *  and so should be delivered with a 40 MHz clock.
8384  *  The recent fast40 chips (895/896/895A/1010) use a 40 Mhz base
8385  *  clock and provide a clock quadrupler (160 Mhz).
8386  */
8387 
8388 /*
8389  *  Select SCSI clock frequency
8390  */
8391 static void sym_selectclock(hcb_p np, u_char scntl3)
8392 {
8393 	/*
8394 	 *  If multiplier not present or not selected, leave here.
8395 	 */
8396 	if (np->multiplier <= 1) {
8397 		OUTB(nc_scntl3,	scntl3);
8398 		return;
8399 	}
8400 
8401 	if (sym_verbose >= 2)
8402 		printf ("%s: enabling clock multiplier\n", sym_name(np));
8403 
8404 	OUTB(nc_stest1, DBLEN);	   /* Enable clock multiplier		  */
8405 	/*
8406 	 *  Wait for the LCKFRQ bit to be set if supported by the chip.
8407 	 *  Otherwise wait 20 micro-seconds.
8408 	 */
8409 	if (np->features & FE_LCKFRQ) {
8410 		int i = 20;
8411 		while (!(INB(nc_stest4) & LCKFRQ) && --i > 0)
8412 			UDELAY (20);
8413 		if (!i)
8414 			printf("%s: the chip cannot lock the frequency\n",
8415 				sym_name(np));
8416 	} else
8417 		UDELAY (20);
8418 	OUTB(nc_stest3, HSC);		/* Halt the scsi clock		*/
8419 	OUTB(nc_scntl3,	scntl3);
8420 	OUTB(nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier	*/
8421 	OUTB(nc_stest3, 0x00);		/* Restart scsi clock 		*/
8422 }
8423 
8424 /*
8425  *  calculate SCSI clock frequency (in KHz)
8426  */
8427 static unsigned getfreq (hcb_p np, int gen)
8428 {
8429 	unsigned int ms = 0;
8430 	unsigned int f;
8431 
8432 	/*
8433 	 * Measure GEN timer delay in order
8434 	 * to calculate SCSI clock frequency
8435 	 *
8436 	 * This code will never execute too
8437 	 * many loop iterations (if DELAY is
8438 	 * reasonably correct). It could get
8439 	 * too low a delay (too high a freq.)
8440 	 * if the CPU is slow executing the
8441 	 * loop for some reason (an NMI, for
8442 	 * example). For this reason we will
8443 	 * if multiple measurements are to be
8444 	 * performed trust the higher delay
8445 	 * (lower frequency returned).
8446 	 */
8447 	OUTW (nc_sien , 0);	/* mask all scsi interrupts */
8448 	(void) INW (nc_sist);	/* clear pending scsi interrupt */
8449 	OUTB (nc_dien , 0);	/* mask all dma interrupts */
8450 	(void) INW (nc_sist);	/* another one, just to be sure :) */
8451 	OUTB (nc_scntl3, 4);	/* set pre-scaler to divide by 3 */
8452 	OUTB (nc_stime1, 0);	/* disable general purpose timer */
8453 	OUTB (nc_stime1, gen);	/* set to nominal delay of 1<<gen * 125us */
8454 	while (!(INW(nc_sist) & GEN) && ms++ < 100000)
8455 		UDELAY (1000);	/* count ms */
8456 	OUTB (nc_stime1, 0);	/* disable general purpose timer */
8457  	/*
8458  	 * set prescaler to divide by whatever 0 means
8459  	 * 0 ought to choose divide by 2, but appears
8460  	 * to set divide by 3.5 mode in my 53c810 ...
8461  	 */
8462  	OUTB (nc_scntl3, 0);
8463 
8464   	/*
8465  	 * adjust for prescaler, and convert into KHz
8466   	 */
8467 	f = ms ? ((1 << gen) * 4340) / ms : 0;
8468 
8469 	if (sym_verbose >= 2)
8470 		printf ("%s: Delay (GEN=%d): %u msec, %u KHz\n",
8471 			sym_name(np), gen, ms, f);
8472 
8473 	return f;
8474 }
8475 
8476 static unsigned sym_getfreq (hcb_p np)
8477 {
8478 	u_int f1, f2;
8479 	int gen = 11;
8480 
8481 	(void) getfreq (np, gen);	/* throw away first result */
8482 	f1 = getfreq (np, gen);
8483 	f2 = getfreq (np, gen);
8484 	if (f1 > f2) f1 = f2;		/* trust lower result	*/
8485 	return f1;
8486 }
8487 
8488 /*
8489  *  Get/probe chip SCSI clock frequency
8490  */
8491 static void sym_getclock (hcb_p np, int mult)
8492 {
8493 	unsigned char scntl3 = np->sv_scntl3;
8494 	unsigned char stest1 = np->sv_stest1;
8495 	unsigned f1;
8496 
8497 	/*
8498 	 *  For the C10 core, assume 40 MHz.
8499 	 */
8500 	if (np->features & FE_C10) {
8501 		np->multiplier = mult;
8502 		np->clock_khz = 40000 * mult;
8503 		return;
8504 	}
8505 
8506 	np->multiplier = 1;
8507 	f1 = 40000;
8508 	/*
8509 	 *  True with 875/895/896/895A with clock multiplier selected
8510 	 */
8511 	if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) {
8512 		if (sym_verbose >= 2)
8513 			printf ("%s: clock multiplier found\n", sym_name(np));
8514 		np->multiplier = mult;
8515 	}
8516 
8517 	/*
8518 	 *  If multiplier not found or scntl3 not 7,5,3,
8519 	 *  reset chip and get frequency from general purpose timer.
8520 	 *  Otherwise trust scntl3 BIOS setting.
8521 	 */
8522 	if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) {
8523 		OUTB (nc_stest1, 0);		/* make sure doubler is OFF */
8524 		f1 = sym_getfreq (np);
8525 
8526 		if (sym_verbose)
8527 			printf ("%s: chip clock is %uKHz\n", sym_name(np), f1);
8528 
8529 		if	(f1 <	45000)		f1 =  40000;
8530 		else if (f1 <	55000)		f1 =  50000;
8531 		else				f1 =  80000;
8532 
8533 		if (f1 < 80000 && mult > 1) {
8534 			if (sym_verbose >= 2)
8535 				printf ("%s: clock multiplier assumed\n",
8536 					sym_name(np));
8537 			np->multiplier	= mult;
8538 		}
8539 	} else {
8540 		if	((scntl3 & 7) == 3)	f1 =  40000;
8541 		else if	((scntl3 & 7) == 5)	f1 =  80000;
8542 		else 				f1 = 160000;
8543 
8544 		f1 /= np->multiplier;
8545 	}
8546 
8547 	/*
8548 	 *  Compute controller synchronous parameters.
8549 	 */
8550 	f1		*= np->multiplier;
8551 	np->clock_khz	= f1;
8552 }
8553 
8554 /*
8555  *  Get/probe PCI clock frequency
8556  */
8557 static int sym_getpciclock (hcb_p np)
8558 {
8559 	static int f = 0;
8560 
8561 	/* For the C10, this will not work */
8562 	if (!f && !(np->features & FE_C10)) {
8563 		OUTB (nc_stest1, SCLK);	/* Use the PCI clock as SCSI clock */
8564 		f = (int) sym_getfreq (np);
8565 		OUTB (nc_stest1, 0);
8566 	}
8567 	return f;
8568 }
8569 
8570 /*============= DRIVER ACTION/COMPLETION ====================*/
8571 
8572 /*
8573  *  Print something that tells about extended errors.
8574  */
8575 static void sym_print_xerr(ccb_p cp, int x_status)
8576 {
8577 	if (x_status & XE_PARITY_ERR) {
8578 		PRINT_ADDR(cp);
8579 		printf ("unrecovered SCSI parity error.\n");
8580 	}
8581 	if (x_status & XE_EXTRA_DATA) {
8582 		PRINT_ADDR(cp);
8583 		printf ("extraneous data discarded.\n");
8584 	}
8585 	if (x_status & XE_BAD_PHASE) {
8586 		PRINT_ADDR(cp);
8587 		printf ("illegal scsi phase (4/5).\n");
8588 	}
8589 	if (x_status & XE_SODL_UNRUN) {
8590 		PRINT_ADDR(cp);
8591 		printf ("ODD transfer in DATA OUT phase.\n");
8592 	}
8593 	if (x_status & XE_SWIDE_OVRUN) {
8594 		PRINT_ADDR(cp);
8595 		printf ("ODD transfer in DATA IN phase.\n");
8596 	}
8597 }
8598 
8599 /*
8600  *  Choose the more appropriate CAM status if
8601  *  the IO encountered an extended error.
8602  */
8603 static int sym_xerr_cam_status(int cam_status, int x_status)
8604 {
8605 	if (x_status) {
8606 		if	(x_status & XE_PARITY_ERR)
8607 			cam_status = CAM_UNCOR_PARITY;
8608 		else if	(x_status &(XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN))
8609 			cam_status = CAM_DATA_RUN_ERR;
8610 		else if	(x_status & XE_BAD_PHASE)
8611 			cam_status = CAM_REQ_CMP_ERR;
8612 		else
8613 			cam_status = CAM_REQ_CMP_ERR;
8614 	}
8615 	return cam_status;
8616 }
8617 
8618 /*
8619  *  Complete execution of a SCSI command with extented
8620  *  error, SCSI status error, or having been auto-sensed.
8621  *
8622  *  The SCRIPTS processor is not running there, so we
8623  *  can safely access IO registers and remove JOBs from
8624  *  the START queue.
8625  *  SCRATCHA is assumed to have been loaded with STARTPOS
8626  *  before the SCRIPTS called the C code.
8627  */
8628 static void sym_complete_error (hcb_p np, ccb_p cp)
8629 {
8630 	struct ccb_scsiio *csio;
8631 	u_int cam_status;
8632 	int i;
8633 
8634 	/*
8635 	 *  Paranoid check. :)
8636 	 */
8637 	if (!cp || !cp->cam_ccb)
8638 		return;
8639 
8640 	if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_RESULT)) {
8641 		printf ("CCB=%lx STAT=%x/%x/%x DEV=%d/%d\n", (unsigned long)cp,
8642 			cp->host_status, cp->ssss_status, cp->host_flags,
8643 			cp->target, cp->lun);
8644 		MDELAY(100);
8645 	}
8646 
8647 	/*
8648 	 *  Get command, target and lun pointers.
8649 	 */
8650 	csio = &cp->cam_ccb->csio;
8651 
8652 	/*
8653 	 *  Check for extended errors.
8654 	 */
8655 	if (cp->xerr_status) {
8656 		if (sym_verbose)
8657 			sym_print_xerr(cp, cp->xerr_status);
8658 		if (cp->host_status == HS_COMPLETE)
8659 			cp->host_status = HS_COMP_ERR;
8660 	}
8661 
8662 	/*
8663 	 *  Calculate the residual.
8664 	 */
8665 	csio->sense_resid = 0;
8666 	csio->resid = sym_compute_residual(np, cp);
8667 
8668 	if (!SYM_CONF_RESIDUAL_SUPPORT) {/* If user does not want residuals */
8669 		csio->resid  = 0;	/* throw them away. :)		   */
8670 		cp->sv_resid = 0;
8671 	}
8672 
8673 	if (cp->host_flags & HF_SENSE) {		/* Auto sense     */
8674 		csio->scsi_status = cp->sv_scsi_status;	/* Restore status */
8675 		csio->sense_resid = csio->resid;	/* Swap residuals */
8676 		csio->resid       = cp->sv_resid;
8677 		cp->sv_resid	  = 0;
8678 		if (sym_verbose && cp->sv_xerr_status)
8679 			sym_print_xerr(cp, cp->sv_xerr_status);
8680 		if (cp->host_status == HS_COMPLETE &&
8681 		    cp->ssss_status == S_GOOD &&
8682 		    cp->xerr_status == 0) {
8683 			cam_status = sym_xerr_cam_status(CAM_SCSI_STATUS_ERROR,
8684 							 cp->sv_xerr_status);
8685 			cam_status |= CAM_AUTOSNS_VALID;
8686 			/*
8687 			 *  Bounce back the sense data to user and
8688 			 *  fix the residual.
8689 			 */
8690 			bzero(&csio->sense_data, csio->sense_len);
8691 			bcopy(cp->sns_bbuf, &csio->sense_data,
8692 			      MIN(csio->sense_len, SYM_SNS_BBUF_LEN));
8693 			csio->sense_resid += csio->sense_len;
8694 			csio->sense_resid -= SYM_SNS_BBUF_LEN;
8695 #if 0
8696 			/*
8697 			 *  If the device reports a UNIT ATTENTION condition
8698 			 *  due to a RESET condition, we should consider all
8699 			 *  disconnect CCBs for this unit as aborted.
8700 			 */
8701 			if (1) {
8702 				u_char *p;
8703 				p  = (u_char *) csio->sense_data;
8704 				if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29)
8705 					sym_clear_tasks(np, CAM_REQ_ABORTED,
8706 							cp->target,cp->lun, -1);
8707 			}
8708 #endif
8709 		}
8710 		else
8711 			cam_status = CAM_AUTOSENSE_FAIL;
8712 	}
8713 	else if (cp->host_status == HS_COMPLETE) {	/* Bad SCSI status */
8714 		csio->scsi_status = cp->ssss_status;
8715 		cam_status = CAM_SCSI_STATUS_ERROR;
8716 	}
8717 	else if (cp->host_status == HS_SEL_TIMEOUT)	/* Selection timeout */
8718 		cam_status = CAM_SEL_TIMEOUT;
8719 	else if (cp->host_status == HS_UNEXPECTED)	/* Unexpected BUS FREE*/
8720 		cam_status = CAM_UNEXP_BUSFREE;
8721 	else {						/* Extended error */
8722 		if (sym_verbose) {
8723 			PRINT_ADDR(cp);
8724 			printf ("COMMAND FAILED (%x %x %x).\n",
8725 				cp->host_status, cp->ssss_status,
8726 				cp->xerr_status);
8727 		}
8728 		csio->scsi_status = cp->ssss_status;
8729 		/*
8730 		 *  Set the most appropriate value for CAM status.
8731 		 */
8732 		cam_status = sym_xerr_cam_status(CAM_REQ_CMP_ERR,
8733 						 cp->xerr_status);
8734 	}
8735 
8736 	/*
8737 	 *  Dequeue all queued CCBs for that device
8738 	 *  not yet started by SCRIPTS.
8739 	 */
8740 	i = (INL (nc_scratcha) - np->squeue_ba) / 4;
8741 	(void) sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);
8742 
8743 	/*
8744 	 *  Restart the SCRIPTS processor.
8745 	 */
8746 	OUTL (nc_dsp, SCRIPT_BA (np, start));
8747 
8748 #ifdef	FreeBSD_Bus_Dma_Abstraction
8749 	/*
8750 	 *  Synchronize DMA map if needed.
8751 	 */
8752 	if (cp->dmamapped) {
8753 		bus_dmamap_sync(np->data_dmat, cp->dmamap,
8754 			(bus_dmasync_op_t)(cp->dmamapped == SYM_DMA_READ ?
8755 				BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
8756 	}
8757 #endif
8758 	/*
8759 	 *  Add this one to the COMP queue.
8760 	 *  Complete all those commands with either error
8761 	 *  or requeue condition.
8762 	 */
8763 	sym_set_cam_status((union ccb *) csio, cam_status);
8764 	sym_remque(&cp->link_ccbq);
8765 	sym_insque_head(&cp->link_ccbq, &np->comp_ccbq);
8766 	sym_flush_comp_queue(np, 0);
8767 }
8768 
8769 /*
8770  *  Complete execution of a successful SCSI command.
8771  *
8772  *  Only successful commands go to the DONE queue,
8773  *  since we need to have the SCRIPTS processor
8774  *  stopped on any error condition.
8775  *  The SCRIPTS processor is running while we are
8776  *  completing successful commands.
8777  */
8778 static void sym_complete_ok (hcb_p np, ccb_p cp)
8779 {
8780 	struct ccb_scsiio *csio;
8781 	tcb_p tp;
8782 	lcb_p lp;
8783 
8784 	/*
8785 	 *  Paranoid check. :)
8786 	 */
8787 	if (!cp || !cp->cam_ccb)
8788 		return;
8789 	assert (cp->host_status == HS_COMPLETE);
8790 
8791 	/*
8792 	 *  Get command, target and lun pointers.
8793 	 */
8794 	csio = &cp->cam_ccb->csio;
8795 	tp = &np->target[cp->target];
8796 	lp = sym_lp(np, tp, cp->lun);
8797 
8798 	/*
8799 	 *  Assume device discovered on first success.
8800 	 */
8801 	if (!lp)
8802 		sym_set_bit(tp->lun_map, cp->lun);
8803 
8804 	/*
8805 	 *  If all data have been transferred, given than no
8806 	 *  extended error did occur, there is no residual.
8807 	 */
8808 	csio->resid = 0;
8809 	if (cp->phys.lastp != cp->phys.goalp)
8810 		csio->resid = sym_compute_residual(np, cp);
8811 
8812 	/*
8813 	 *  Wrong transfer residuals may be worse than just always
8814 	 *  returning zero. User can disable this feature from
8815 	 *  sym_conf.h. Residual support is enabled by default.
8816 	 */
8817 	if (!SYM_CONF_RESIDUAL_SUPPORT)
8818 		csio->resid  = 0;
8819 
8820 #ifdef	FreeBSD_Bus_Dma_Abstraction
8821 	/*
8822 	 *  Synchronize DMA map if needed.
8823 	 */
8824 	if (cp->dmamapped) {
8825 		bus_dmamap_sync(np->data_dmat, cp->dmamap,
8826 			(bus_dmasync_op_t)(cp->dmamapped == SYM_DMA_READ ?
8827 				BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
8828 	}
8829 #endif
8830 	/*
8831 	 *  Set status and complete the command.
8832 	 */
8833 	csio->scsi_status = cp->ssss_status;
8834 	sym_set_cam_status((union ccb *) csio, CAM_REQ_CMP);
8835 	sym_free_ccb (np, cp);
8836 	sym_xpt_done(np, (union ccb *) csio);
8837 }
8838 
8839 /*
8840  *  Our timeout handler.
8841  */
8842 static void sym_timeout1(void *arg)
8843 {
8844 	union ccb *ccb = (union ccb *) arg;
8845 	hcb_p np = ccb->ccb_h.sym_hcb_ptr;
8846 
8847 	/*
8848 	 *  Check that the CAM CCB is still queued.
8849 	 */
8850 	if (!np)
8851 		return;
8852 
8853 	switch(ccb->ccb_h.func_code) {
8854 	case XPT_SCSI_IO:
8855 		(void) sym_abort_scsiio(np, ccb, 1);
8856 		break;
8857 	default:
8858 		break;
8859 	}
8860 }
8861 
8862 static void sym_timeout(void *arg)
8863 {
8864 	int s = splcam();
8865 	sym_timeout1(arg);
8866 	splx(s);
8867 }
8868 
8869 /*
8870  *  Abort an SCSI IO.
8871  */
8872 static int sym_abort_scsiio(hcb_p np, union ccb *ccb, int timed_out)
8873 {
8874 	ccb_p cp;
8875 	SYM_QUEHEAD *qp;
8876 
8877 	/*
8878 	 *  Look up our CCB control block.
8879 	 */
8880 	cp = 0;
8881 	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
8882 		ccb_p cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq);
8883 		if (cp2->cam_ccb == ccb) {
8884 			cp = cp2;
8885 			break;
8886 		}
8887 	}
8888 	if (!cp || cp->host_status == HS_WAIT)
8889 		return -1;
8890 
8891 	/*
8892 	 *  If a previous abort didn't succeed in time,
8893 	 *  perform a BUS reset.
8894 	 */
8895 	if (cp->to_abort) {
8896 		sym_reset_scsi_bus(np, 1);
8897 		return 0;
8898 	}
8899 
8900 	/*
8901 	 *  Mark the CCB for abort and allow time for.
8902 	 */
8903 	cp->to_abort = timed_out ? 2 : 1;
8904 	ccb->ccb_h.timeout_ch = timeout(sym_timeout, (caddr_t) ccb, 10*hz);
8905 
8906 	/*
8907 	 *  Tell the SCRIPTS processor to stop and synchronize with us.
8908 	 */
8909 	np->istat_sem = SEM;
8910 	OUTB (nc_istat, SIGP|SEM);
8911 	return 0;
8912 }
8913 
8914 /*
8915  *  Reset a SCSI device (all LUNs of a target).
8916  */
8917 static void sym_reset_dev(hcb_p np, union ccb *ccb)
8918 {
8919 	tcb_p tp;
8920 	struct ccb_hdr *ccb_h = &ccb->ccb_h;
8921 
8922 	if (ccb_h->target_id   == np->myaddr ||
8923 	    ccb_h->target_id   >= SYM_CONF_MAX_TARGET ||
8924 	    ccb_h->target_lun  >= SYM_CONF_MAX_LUN) {
8925 		sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE);
8926 		return;
8927 	}
8928 
8929 	tp = &np->target[ccb_h->target_id];
8930 
8931 	tp->to_reset = 1;
8932 	sym_xpt_done2(np, ccb, CAM_REQ_CMP);
8933 
8934 	np->istat_sem = SEM;
8935 	OUTB (nc_istat, SIGP|SEM);
8936 	return;
8937 }
8938 
8939 /*
8940  *  SIM action entry point.
8941  */
8942 static void sym_action(struct cam_sim *sim, union ccb *ccb)
8943 {
8944 	int s = splcam();
8945 	sym_action1(sim, ccb);
8946 	splx(s);
8947 }
8948 
8949 static void sym_action1(struct cam_sim *sim, union ccb *ccb)
8950 {
8951 	hcb_p	np;
8952 	tcb_p	tp;
8953 	lcb_p	lp;
8954 	ccb_p	cp;
8955 	int 	tmp;
8956 	u_char	idmsg, *msgptr;
8957 	u_int   msglen;
8958 	struct	ccb_scsiio *csio;
8959 	struct	ccb_hdr  *ccb_h;
8960 
8961 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("sym_action\n"));
8962 
8963 	/*
8964 	 *  Retrieve our controller data structure.
8965 	 */
8966 	np = (hcb_p) cam_sim_softc(sim);
8967 
8968 	/*
8969 	 *  The common case is SCSI IO.
8970 	 *  We deal with other ones elsewhere.
8971 	 */
8972 	if (ccb->ccb_h.func_code != XPT_SCSI_IO) {
8973 		sym_action2(sim, ccb);
8974 		return;
8975 	}
8976 	csio  = &ccb->csio;
8977 	ccb_h = &csio->ccb_h;
8978 
8979 	/*
8980 	 *  Work around races.
8981 	 */
8982 	if ((ccb_h->status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
8983 		xpt_done(ccb);
8984 		return;
8985 	}
8986 
8987 	/*
8988 	 *  Minimal checkings, so that we will not
8989 	 *  go outside our tables.
8990 	 */
8991 	if (ccb_h->target_id   == np->myaddr ||
8992 	    ccb_h->target_id   >= SYM_CONF_MAX_TARGET ||
8993 	    ccb_h->target_lun  >= SYM_CONF_MAX_LUN) {
8994 		sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE);
8995 		return;
8996         }
8997 
8998 	/*
8999 	 *  Retreive the target and lun descriptors.
9000 	 */
9001 	tp = &np->target[ccb_h->target_id];
9002 	lp = sym_lp(np, tp, ccb_h->target_lun);
9003 
9004 	/*
9005 	 *  Complete the 1st INQUIRY command with error
9006 	 *  condition if the device is flagged NOSCAN
9007 	 *  at BOOT in the NVRAM. This may speed up
9008 	 *  the boot and maintain coherency with BIOS
9009 	 *  device numbering. Clearing the flag allows
9010 	 *  user to rescan skipped devices later.
9011 	 *  We also return error for devices not flagged
9012 	 *  for SCAN LUNS in the NVRAM since some mono-lun
9013 	 *  devices behave badly when asked for some non
9014 	 *  zero LUN. Btw, this is an absolute hack.:-)
9015 	 */
9016 	if (!(ccb_h->flags & CAM_CDB_PHYS) &&
9017 	    (0x12 == ((ccb_h->flags & CAM_CDB_POINTER) ?
9018 		  csio->cdb_io.cdb_ptr[0] : csio->cdb_io.cdb_bytes[0]))) {
9019 		if ((tp->usrflags & SYM_SCAN_BOOT_DISABLED) ||
9020 		    ((tp->usrflags & SYM_SCAN_LUNS_DISABLED) &&
9021 		     ccb_h->target_lun != 0)) {
9022 			tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED;
9023 			sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE);
9024 			return;
9025 		}
9026 	}
9027 
9028 	/*
9029 	 *  Get a control block for this IO.
9030 	 */
9031 	tmp = ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0);
9032 	cp = sym_get_ccb(np, ccb_h->target_id, ccb_h->target_lun, tmp);
9033 	if (!cp) {
9034 		sym_xpt_done2(np, ccb, CAM_RESRC_UNAVAIL);
9035 		return;
9036 	}
9037 
9038 	/*
9039 	 *  Keep track of the IO in our CCB.
9040 	 */
9041 	cp->cam_ccb = ccb;
9042 
9043 	/*
9044 	 *  Build the IDENTIFY message.
9045 	 */
9046 	idmsg = M_IDENTIFY | cp->lun;
9047 	if (cp->tag != NO_TAG || (lp && (lp->current_flags & SYM_DISC_ENABLED)))
9048 		idmsg |= 0x40;
9049 
9050 	msgptr = cp->scsi_smsg;
9051 	msglen = 0;
9052 	msgptr[msglen++] = idmsg;
9053 
9054 	/*
9055 	 *  Build the tag message if present.
9056 	 */
9057 	if (cp->tag != NO_TAG) {
9058 		u_char order = csio->tag_action;
9059 
9060 		switch(order) {
9061 		case M_ORDERED_TAG:
9062 			break;
9063 		case M_HEAD_TAG:
9064 			break;
9065 		default:
9066 			order = M_SIMPLE_TAG;
9067 		}
9068 		msgptr[msglen++] = order;
9069 
9070 		/*
9071 		 *  For less than 128 tags, actual tags are numbered
9072 		 *  1,3,5,..2*MAXTAGS+1,since we may have to deal
9073 		 *  with devices that have problems with #TAG 0 or too
9074 		 *  great #TAG numbers. For more tags (up to 256),
9075 		 *  we use directly our tag number.
9076 		 */
9077 #if SYM_CONF_MAX_TASK > (512/4)
9078 		msgptr[msglen++] = cp->tag;
9079 #else
9080 		msgptr[msglen++] = (cp->tag << 1) + 1;
9081 #endif
9082 	}
9083 
9084 	/*
9085 	 *  Build a negotiation message if needed.
9086 	 *  (nego_status is filled by sym_prepare_nego())
9087 	 */
9088 	cp->nego_status = 0;
9089 	if (tp->tinfo.current.width   != tp->tinfo.goal.width  ||
9090 	    tp->tinfo.current.period  != tp->tinfo.goal.period ||
9091 	    tp->tinfo.current.offset  != tp->tinfo.goal.offset ||
9092 #if 0 /* For now only renegotiate, based on width, period and offset */
9093 	    tp->tinfo.current.options != tp->tinfo.goal.options) {
9094 #else
9095 	    0) {
9096 #endif
9097 		if (!tp->nego_cp && lp)
9098 			msglen += sym_prepare_nego(np, cp, 0, msgptr + msglen);
9099 	}
9100 
9101 	/*
9102 	 *  Fill in our ccb
9103 	 */
9104 
9105 	/*
9106 	 *  Startqueue
9107 	 */
9108 	cp->phys.go.start   = cpu_to_scr(SCRIPT_BA (np, select));
9109 	cp->phys.go.restart = cpu_to_scr(SCRIPT_BA (np, resel_dsa));
9110 
9111 	/*
9112 	 *  select
9113 	 */
9114 	cp->phys.select.sel_id		= cp->target;
9115 	cp->phys.select.sel_scntl3	= tp->wval;
9116 	cp->phys.select.sel_sxfer	= tp->sval;
9117 	cp->phys.select.sel_scntl4	= tp->uval;
9118 
9119 	/*
9120 	 *  message
9121 	 */
9122 	cp->phys.smsg.addr	= cpu_to_scr(CCB_BA (cp, scsi_smsg));
9123 	cp->phys.smsg.size	= cpu_to_scr(msglen);
9124 
9125 	/*
9126 	 *  command
9127 	 */
9128 	if (sym_setup_cdb(np, csio, cp) < 0) {
9129 		sym_free_ccb(np, cp);
9130 		sym_xpt_done(np, ccb);
9131 		return;
9132 	}
9133 
9134 	/*
9135 	 *  status
9136 	 */
9137 #if	0	/* Provision */
9138 	cp->actualquirks	= tp->quirks;
9139 #endif
9140 	cp->actualquirks	= SYM_QUIRK_AUTOSAVE;
9141 	cp->host_status		= cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
9142 	cp->ssss_status		= S_ILLEGAL;
9143 	cp->xerr_status		= 0;
9144 	cp->host_flags		= 0;
9145 	cp->phys.extra_bytes	= 0;
9146 
9147 	/*
9148 	 *  extreme data pointer.
9149 	 *  shall be positive, so -1 is lower than lowest.:)
9150 	 */
9151 	cp->ext_sg  = -1;
9152 	cp->ext_ofs = 0;
9153 
9154 	/*
9155 	 *  Build the data descriptor block
9156 	 *  and start the IO.
9157 	 */
9158 	sym_setup_data_and_start(np, csio, cp);
9159 }
9160 
9161 /*
9162  *  Setup buffers and pointers that address the CDB.
9163  *  I bet, physical CDBs will never be used on the planet,
9164  *  since they can be bounced without significant overhead.
9165  */
9166 static int sym_setup_cdb(hcb_p np, struct ccb_scsiio *csio, ccb_p cp)
9167 {
9168 	struct ccb_hdr *ccb_h;
9169 	u32	cmd_ba;
9170 	int	cmd_len;
9171 
9172 	ccb_h = &csio->ccb_h;
9173 
9174 	/*
9175 	 *  CDB is 16 bytes max.
9176 	 */
9177 	if (csio->cdb_len > sizeof(cp->cdb_buf)) {
9178 		sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
9179 		return -1;
9180 	}
9181 	cmd_len = csio->cdb_len;
9182 
9183 	if (ccb_h->flags & CAM_CDB_POINTER) {
9184 		/* CDB is a pointer */
9185 		if (!(ccb_h->flags & CAM_CDB_PHYS)) {
9186 			/* CDB pointer is virtual */
9187 			bcopy(csio->cdb_io.cdb_ptr, cp->cdb_buf, cmd_len);
9188 			cmd_ba = CCB_BA (cp, cdb_buf[0]);
9189 		} else {
9190 			/* CDB pointer is physical */
9191 #if 0
9192 			cmd_ba = ((u32)csio->cdb_io.cdb_ptr) & 0xffffffff;
9193 #else
9194 			sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
9195 			return -1;
9196 #endif
9197 		}
9198 	} else {
9199 		/* CDB is in the CAM ccb (buffer) */
9200 		bcopy(csio->cdb_io.cdb_bytes, cp->cdb_buf, cmd_len);
9201 		cmd_ba = CCB_BA (cp, cdb_buf[0]);
9202 	}
9203 
9204 	cp->phys.cmd.addr	= cpu_to_scr(cmd_ba);
9205 	cp->phys.cmd.size	= cpu_to_scr(cmd_len);
9206 
9207 	return 0;
9208 }
9209 
9210 /*
9211  *  Set up data pointers used by SCRIPTS.
9212  */
9213 static void __inline__
9214 sym_setup_data_pointers(hcb_p np, ccb_p cp, int dir)
9215 {
9216 	u32 lastp, goalp;
9217 
9218 	/*
9219 	 *  No segments means no data.
9220 	 */
9221 	if (!cp->segments)
9222 		dir = CAM_DIR_NONE;
9223 
9224 	/*
9225 	 *  Set the data pointer.
9226 	 */
9227 	switch(dir) {
9228 	case CAM_DIR_OUT:
9229 		goalp = SCRIPT_BA (np, data_out2) + 8;
9230 		lastp = goalp - 8 - (cp->segments * (2*4));
9231 		break;
9232 	case CAM_DIR_IN:
9233 		cp->host_flags |= HF_DATA_IN;
9234 		goalp = SCRIPT_BA (np, data_in2) + 8;
9235 		lastp = goalp - 8 - (cp->segments * (2*4));
9236 		break;
9237 	case CAM_DIR_NONE:
9238 	default:
9239 		lastp = goalp = SCRIPTH_BA (np, no_data);
9240 		break;
9241 	}
9242 
9243 	cp->phys.lastp = cpu_to_scr(lastp);
9244 	cp->phys.goalp = cpu_to_scr(goalp);
9245 	cp->phys.savep = cpu_to_scr(lastp);
9246 	cp->startp     = cp->phys.savep;
9247 }
9248 
9249 
9250 #ifdef	FreeBSD_Bus_Dma_Abstraction
9251 /*
9252  *  Call back routine for the DMA map service.
9253  *  If bounce buffers are used (why ?), we may sleep and then
9254  *  be called there in another context.
9255  */
9256 static void
9257 sym_execute_ccb(void *arg, bus_dma_segment_t *psegs, int nsegs, int error)
9258 {
9259 	ccb_p	cp;
9260 	hcb_p	np;
9261 	union	ccb *ccb;
9262 	int	s;
9263 
9264 	s = splcam();
9265 
9266 	cp  = (ccb_p) arg;
9267 	ccb = cp->cam_ccb;
9268 	np  = (hcb_p) cp->arg;
9269 
9270 	/*
9271 	 *  Deal with weird races.
9272 	 */
9273 	if (sym_get_cam_status(ccb) != CAM_REQ_INPROG)
9274 		goto out_abort;
9275 
9276 	/*
9277 	 *  Deal with weird errors.
9278 	 */
9279 	if (error) {
9280 		cp->dmamapped = 0;
9281 		sym_set_cam_status(cp->cam_ccb, CAM_REQ_ABORTED);
9282 		goto out_abort;
9283 	}
9284 
9285 	/*
9286 	 *  Build the data descriptor for the chip.
9287 	 */
9288 	if (nsegs) {
9289 		int retv;
9290 		/* 896 rev 1 requires to be careful about boundaries */
9291 		if (np->device_id == PCI_ID_SYM53C896 && np->revision_id <= 1)
9292 			retv = sym_scatter_sg_physical(np, cp, psegs, nsegs);
9293 		else
9294 			retv = sym_fast_scatter_sg_physical(np,cp, psegs,nsegs);
9295 		if (retv < 0) {
9296 			sym_set_cam_status(cp->cam_ccb, CAM_REQ_TOO_BIG);
9297 			goto out_abort;
9298 		}
9299 	}
9300 
9301 	/*
9302 	 *  Synchronize the DMA map only if we have
9303 	 *  actually mapped the data.
9304 	 */
9305 	if (cp->dmamapped) {
9306 		bus_dmamap_sync(np->data_dmat, cp->dmamap,
9307 			(bus_dmasync_op_t)(cp->dmamapped == SYM_DMA_READ ?
9308 				BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
9309 	}
9310 
9311 	/*
9312 	 *  Set host status to busy state.
9313 	 *  May have been set back to HS_WAIT to avoid a race.
9314 	 */
9315 	cp->host_status	= cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
9316 
9317 	/*
9318 	 *  Set data pointers.
9319 	 */
9320 	sym_setup_data_pointers(np, cp,  (ccb->ccb_h.flags & CAM_DIR_MASK));
9321 
9322 	/*
9323 	 *  Enqueue this IO in our pending queue.
9324 	 */
9325 	sym_enqueue_cam_ccb(np, ccb);
9326 
9327 #if 0
9328 	switch (cp->cdb_buf[0]) {
9329 	case 0x0A: case 0x2A: case 0xAA:
9330 		panic("XXXXXXXXXXXXX WRITE NOT YET ALLOWED XXXXXXXXXXXXXX\n");
9331 		MDELAY(10000);
9332 		break;
9333 	default:
9334 		break;
9335 	}
9336 #endif
9337 	/*
9338 	 *  Activate this job.
9339 	 */
9340 	sym_put_start_queue(np, cp);
9341 out:
9342 	splx(s);
9343 	return;
9344 out_abort:
9345 	sym_free_ccb(np, cp);
9346 	sym_xpt_done(np, ccb);
9347 	goto out;
9348 }
9349 
9350 /*
9351  *  How complex it gets to deal with the data in CAM.
9352  *  The Bus Dma stuff makes things still more complex.
9353  */
9354 static void
9355 sym_setup_data_and_start(hcb_p np, struct ccb_scsiio *csio, ccb_p cp)
9356 {
9357 	struct ccb_hdr *ccb_h;
9358 	int dir, retv;
9359 
9360 	ccb_h = &csio->ccb_h;
9361 
9362 	/*
9363 	 *  Now deal with the data.
9364 	 */
9365 	cp->data_len = csio->dxfer_len;
9366 	cp->arg      = np;
9367 
9368 	/*
9369 	 *  No direction means no data.
9370 	 */
9371 	dir = (ccb_h->flags & CAM_DIR_MASK);
9372 	if (dir == CAM_DIR_NONE) {
9373 		sym_execute_ccb(cp, NULL, 0, 0);
9374 		return;
9375 	}
9376 
9377 	if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
9378 		/* Single buffer */
9379 		if (!(ccb_h->flags & CAM_DATA_PHYS)) {
9380 			/* Buffer is virtual */
9381 			int s;
9382 
9383 			cp->dmamapped = (dir == CAM_DIR_IN) ?
9384 						SYM_DMA_READ : SYM_DMA_WRITE;
9385 			s = splsoftvm();
9386 			retv = bus_dmamap_load(np->data_dmat, cp->dmamap,
9387 					       csio->data_ptr, csio->dxfer_len,
9388 					       sym_execute_ccb, cp, 0);
9389 			if (retv == EINPROGRESS) {
9390 				cp->host_status	= HS_WAIT;
9391 				xpt_freeze_simq(np->sim, 1);
9392 				csio->ccb_h.status |= CAM_RELEASE_SIMQ;
9393 			}
9394 			splx(s);
9395 		} else {
9396 			/* Buffer is physical */
9397 			struct bus_dma_segment seg;
9398 
9399 			seg.ds_addr = (bus_addr_t) csio->data_ptr;
9400 			sym_execute_ccb(cp, &seg, 1, 0);
9401 		}
9402 	} else {
9403 		/* Scatter/gather list */
9404 		struct bus_dma_segment *segs;
9405 
9406 		if ((ccb_h->flags & CAM_SG_LIST_PHYS) != 0) {
9407 			/* The SG list pointer is physical */
9408 			sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
9409 			goto out_abort;
9410 		}
9411 
9412 		if (!(ccb_h->flags & CAM_DATA_PHYS)) {
9413 			/* SG buffer pointers are virtual */
9414 			sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
9415 			goto out_abort;
9416 		}
9417 
9418 		/* SG buffer pointers are physical */
9419 		segs  = (struct bus_dma_segment *)csio->data_ptr;
9420 		sym_execute_ccb(cp, segs, csio->sglist_cnt, 0);
9421 	}
9422 	return;
9423 out_abort:
9424 	sym_free_ccb(np, cp);
9425 	sym_xpt_done(np, (union ccb *) csio);
9426 }
9427 
9428 /*
9429  *  Move the scatter list to our data block.
9430  */
9431 static int
9432 sym_fast_scatter_sg_physical(hcb_p np, ccb_p cp,
9433 			     bus_dma_segment_t *psegs, int nsegs)
9434 {
9435 	struct sym_tblmove *data;
9436 	bus_dma_segment_t *psegs2;
9437 
9438 	if (nsegs > SYM_CONF_MAX_SG)
9439 		return -1;
9440 
9441 	data   = &cp->phys.data[SYM_CONF_MAX_SG-1];
9442 	psegs2 = &psegs[nsegs-1];
9443 	cp->segments = nsegs;
9444 
9445 	while (1) {
9446 		data->addr = cpu_to_scr(psegs2->ds_addr);
9447 		data->size = cpu_to_scr(psegs2->ds_len);
9448 		if (DEBUG_FLAGS & DEBUG_SCATTER) {
9449 			printf ("%s scatter: paddr=%lx len=%ld\n",
9450 				sym_name(np), (long) psegs2->ds_addr,
9451 				(long) psegs2->ds_len);
9452 		}
9453 		if (psegs2 != psegs) {
9454 			--data;
9455 			--psegs2;
9456 			continue;
9457 		}
9458 		break;
9459 	}
9460 	return 0;
9461 }
9462 
9463 #else	/* FreeBSD_Bus_Dma_Abstraction */
9464 
9465 /*
9466  *  How complex it gets to deal with the data in CAM.
9467  *  Variant without the Bus Dma Abstraction option.
9468  */
9469 static void
9470 sym_setup_data_and_start(hcb_p np, struct ccb_scsiio *csio, ccb_p cp)
9471 {
9472 	struct ccb_hdr *ccb_h;
9473 	int dir, retv;
9474 
9475 	ccb_h = &csio->ccb_h;
9476 
9477 	/*
9478 	 *  Now deal with the data.
9479 	 */
9480 	cp->data_len = 0;
9481 	cp->segments = 0;
9482 
9483 	/*
9484 	 *  No direction means no data.
9485 	 */
9486 	dir = (ccb_h->flags & CAM_DIR_MASK);
9487 	if (dir == CAM_DIR_NONE)
9488 		goto end_scatter;
9489 
9490 	if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
9491 		/* Single buffer */
9492 		if (!(ccb_h->flags & CAM_DATA_PHYS)) {
9493 			/* Buffer is virtual */
9494 			retv = sym_scatter_virtual(np, cp,
9495 						(vm_offset_t) csio->data_ptr,
9496 						(vm_size_t) csio->dxfer_len);
9497 		} else {
9498 			/* Buffer is physical */
9499 			retv = sym_scatter_physical(np, cp,
9500 						(vm_offset_t) csio->data_ptr,
9501 						(vm_size_t) csio->dxfer_len);
9502 		}
9503 	} else {
9504 		/* Scatter/gather list */
9505 		int nsegs;
9506 		struct bus_dma_segment *segs;
9507 		segs  = (struct bus_dma_segment *)csio->data_ptr;
9508 		nsegs = csio->sglist_cnt;
9509 
9510 		if ((ccb_h->flags & CAM_SG_LIST_PHYS) != 0) {
9511 			/* The SG list pointer is physical */
9512 			sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
9513 			goto out_abort;
9514 		}
9515 		if (!(ccb_h->flags & CAM_DATA_PHYS)) {
9516 			/* SG buffer pointers are virtual */
9517 			retv = sym_scatter_sg_virtual(np, cp, segs, nsegs);
9518 		} else {
9519 			/* SG buffer pointers are physical */
9520 			retv = sym_scatter_sg_physical(np, cp, segs, nsegs);
9521 		}
9522 	}
9523 	if (retv < 0) {
9524 		sym_set_cam_status(cp->cam_ccb, CAM_REQ_TOO_BIG);
9525 		goto out_abort;
9526 	}
9527 
9528 end_scatter:
9529 	/*
9530 	 *  Set data pointers.
9531 	 */
9532 	sym_setup_data_pointers(np, cp, dir);
9533 
9534 	/*
9535 	 *  Enqueue this IO in our pending queue.
9536 	 */
9537 	sym_enqueue_cam_ccb(np, (union ccb *) csio);
9538 
9539 	/*
9540 	 *  Activate this job.
9541 	 */
9542 	sym_put_start_queue(np, cp);
9543 
9544 	/*
9545 	 *  Command is successfully queued.
9546 	 */
9547 	return;
9548 out_abort:
9549 	sym_free_ccb(np, cp);
9550 	sym_xpt_done(np, (union ccb *) csio);
9551 }
9552 
9553 /*
9554  *  Scatter a virtual buffer into bus addressable chunks.
9555  */
9556 static int
9557 sym_scatter_virtual(hcb_p np, ccb_p cp, vm_offset_t vaddr, vm_size_t len)
9558 {
9559 	u_long	pe, pn;
9560 	u_long	n, k;
9561 	int s;
9562 
9563 	cp->data_len += len;
9564 
9565 	pe = vaddr + len;
9566 	n  = len;
9567 	s  = SYM_CONF_MAX_SG - 1 - cp->segments;
9568 
9569 	while (n && s >= 0) {
9570 		pn = (pe - 1) & ~PAGE_MASK;
9571 		k = pe - pn;
9572 		if (k > n) {
9573 			k  = n;
9574 			pn = pe - n;
9575 		}
9576 		if (DEBUG_FLAGS & DEBUG_SCATTER) {
9577 			printf ("%s scatter: va=%lx pa=%lx siz=%ld\n",
9578 				sym_name(np), pn, (u_long) vtobus(pn), k);
9579 		}
9580 		cp->phys.data[s].addr = cpu_to_scr(vtobus(pn));
9581 		cp->phys.data[s].size = cpu_to_scr(k);
9582 		pe = pn;
9583 		n -= k;
9584 		--s;
9585 	}
9586 	cp->segments = SYM_CONF_MAX_SG - 1 - s;
9587 
9588 	return n ? -1 : 0;
9589 }
9590 
9591 /*
9592  *  Scatter a SG list with virtual addresses into bus addressable chunks.
9593  */
9594 static int
9595 sym_scatter_sg_virtual(hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs)
9596 {
9597 	int i, retv = 0;
9598 
9599 	for (i = nsegs - 1 ;  i >= 0 ; --i) {
9600 		retv = sym_scatter_virtual(np, cp,
9601 					   psegs[i].ds_addr, psegs[i].ds_len);
9602 		if (retv < 0)
9603 			break;
9604 	}
9605 	return retv;
9606 }
9607 
9608 /*
9609  *  Scatter a physical buffer into bus addressable chunks.
9610  */
9611 static int
9612 sym_scatter_physical(hcb_p np, ccb_p cp, vm_offset_t paddr, vm_size_t len)
9613 {
9614 	struct bus_dma_segment seg;
9615 
9616 	seg.ds_addr = paddr;
9617 	seg.ds_len  = len;
9618 	return sym_scatter_sg_physical(np, cp, &seg, 1);
9619 }
9620 
9621 #endif	/* FreeBSD_Bus_Dma_Abstraction */
9622 
9623 /*
9624  *  Scatter a SG list with physical addresses into bus addressable chunks.
9625  *  We need to ensure 16MB boundaries not to be crossed during DMA of
9626  *  each segment, due to some chips being flawed.
9627  */
9628 #define BOUND_MASK ((1UL<<24)-1)
9629 static int
9630 sym_scatter_sg_physical(hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs)
9631 {
9632 	u_long	ps, pe, pn;
9633 	u_long	k;
9634 	int s, t;
9635 
9636 #ifndef	FreeBSD_Bus_Dma_Abstraction
9637 	s  = SYM_CONF_MAX_SG - 1 - cp->segments;
9638 #else
9639 	s  = SYM_CONF_MAX_SG - 1;
9640 #endif
9641 	t  = nsegs - 1;
9642 	ps = psegs[t].ds_addr;
9643 	pe = ps + psegs[t].ds_len;
9644 
9645 	while (s >= 0) {
9646 		pn = (pe - 1) & ~BOUND_MASK;
9647 		if (pn <= ps)
9648 			pn = ps;
9649 		k = pe - pn;
9650 		if (DEBUG_FLAGS & DEBUG_SCATTER) {
9651 			printf ("%s scatter: paddr=%lx len=%ld\n",
9652 				sym_name(np), pn, k);
9653 		}
9654 		cp->phys.data[s].addr = cpu_to_scr(pn);
9655 		cp->phys.data[s].size = cpu_to_scr(k);
9656 #ifndef	FreeBSD_Bus_Dma_Abstraction
9657 		cp->data_len += k;
9658 #endif
9659 		--s;
9660 		if (pn == ps) {
9661 			if (--t < 0)
9662 				break;
9663 			ps = psegs[t].ds_addr;
9664 			pe = ps + psegs[t].ds_len;
9665 		}
9666 		else
9667 			pe = pn;
9668 	}
9669 
9670 	cp->segments = SYM_CONF_MAX_SG - 1 - s;
9671 
9672 	return t >= 0 ? -1 : 0;
9673 }
9674 #undef BOUND_MASK
9675 
9676 /*
9677  *  SIM action for non performance critical stuff.
9678  */
9679 static void sym_action2(struct cam_sim *sim, union ccb *ccb)
9680 {
9681 	hcb_p	np;
9682 	tcb_p	tp;
9683 	lcb_p	lp;
9684 	struct	ccb_hdr  *ccb_h;
9685 
9686 	/*
9687 	 *  Retrieve our controller data structure.
9688 	 */
9689 	np = (hcb_p) cam_sim_softc(sim);
9690 
9691 	ccb_h = &ccb->ccb_h;
9692 
9693 	switch (ccb_h->func_code) {
9694 	case XPT_SET_TRAN_SETTINGS:
9695 	{
9696 		struct ccb_trans_settings *cts;
9697 
9698 		cts  = &ccb->cts;
9699 		tp = &np->target[ccb_h->target_id];
9700 
9701 		/*
9702 		 *  Update our transfer settings (basically WIDE/SYNC).
9703 		 *  These features are to be handled in a per target
9704 		 *  basis according to SCSI specifications.
9705 		 */
9706 		if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0)
9707 			sym_update_trans(np, tp, &tp->tinfo.user, cts);
9708 
9709 		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
9710 			sym_update_trans(np, tp, &tp->tinfo.goal, cts);
9711 
9712 		/*
9713 		 *  Update our disconnect and tag settings.
9714 		 *  SCSI requires CmdQue feature to be handled in a per
9715 		 *  device (logical unit) basis.
9716 		 */
9717 		lp = sym_lp(np, tp, ccb_h->target_lun);
9718 		if (lp) {
9719 			if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0)
9720 				sym_update_dflags(np, &lp->user_flags, cts);
9721 			if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
9722 				sym_update_dflags(np, &lp->current_flags, cts);
9723 		}
9724 
9725 		sym_xpt_done2(np, ccb, CAM_REQ_CMP);
9726 		break;
9727 	}
9728 	case XPT_GET_TRAN_SETTINGS:
9729 	{
9730 		struct ccb_trans_settings *cts;
9731 		struct sym_trans *tip;
9732 		u_char dflags;
9733 
9734 		cts = &ccb->cts;
9735 		tp = &np->target[ccb_h->target_id];
9736 		lp = sym_lp(np, tp, ccb_h->target_lun);
9737 
9738 		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
9739 			tip = &tp->tinfo.current;
9740 			dflags = lp ? lp->current_flags : 0;
9741 		}
9742 		else {
9743 			tip = &tp->tinfo.user;
9744 			dflags = lp ? lp->user_flags : tp->usrflags;
9745 		}
9746 
9747 		cts->sync_period = tip->period;
9748 		cts->sync_offset = tip->offset;
9749 		cts->bus_width   = tip->width;
9750 
9751 		cts->valid = CCB_TRANS_SYNC_RATE_VALID
9752 			   | CCB_TRANS_SYNC_OFFSET_VALID
9753 			   | CCB_TRANS_BUS_WIDTH_VALID;
9754 
9755 		if (lp) {
9756 			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
9757 
9758 			if (dflags & SYM_DISC_ENABLED)
9759 				cts->flags |= CCB_TRANS_DISC_ENB;
9760 
9761 			if (dflags & SYM_TAGS_ENABLED)
9762 				cts->flags |= CCB_TRANS_TAG_ENB;
9763 
9764 			cts->valid |= CCB_TRANS_DISC_VALID;
9765 			cts->valid |= CCB_TRANS_TQ_VALID;
9766 		}
9767 
9768 		sym_xpt_done2(np, ccb, CAM_REQ_CMP);
9769 		break;
9770 	}
9771 	case XPT_CALC_GEOMETRY:
9772 	{
9773 		struct ccb_calc_geometry *ccg;
9774 		u32 size_mb;
9775 		u32 secs_per_cylinder;
9776 		int extended;
9777 
9778 		/*
9779 		 *  Silly DOS geometry.
9780 		 */
9781 		ccg = &ccb->ccg;
9782 		size_mb = ccg->volume_size
9783 			/ ((1024L * 1024L) / ccg->block_size);
9784 		extended = 1;
9785 
9786 		if (size_mb > 1024 && extended) {
9787 			ccg->heads = 255;
9788 			ccg->secs_per_track = 63;
9789 		} else {
9790 			ccg->heads = 64;
9791 			ccg->secs_per_track = 32;
9792 		}
9793 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
9794 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
9795 		sym_xpt_done2(np, ccb, CAM_REQ_CMP);
9796 		break;
9797 	}
9798 	case XPT_PATH_INQ:
9799 	{
9800 		struct ccb_pathinq *cpi = &ccb->cpi;
9801 		cpi->version_num = 1;
9802 		cpi->hba_inquiry = PI_MDP_ABLE|PI_SDTR_ABLE|PI_TAG_ABLE;
9803 		if ((np->features & FE_WIDE) != 0)
9804 			cpi->hba_inquiry |= PI_WIDE_16;
9805 		cpi->target_sprt = 0;
9806 		cpi->hba_misc = 0;
9807 		if (np->usrflags & SYM_SCAN_TARGETS_HILO)
9808 			cpi->hba_misc |= PIM_SCANHILO;
9809 		if (np->usrflags & SYM_AVOID_BUS_RESET)
9810 			cpi->hba_misc |= PIM_NOBUSRESET;
9811 		cpi->hba_eng_cnt = 0;
9812 		cpi->max_target = (np->features & FE_WIDE) ? 15 : 7;
9813 		/* Semantic problem:)LUN number max = max number of LUNs - 1 */
9814 		cpi->max_lun = SYM_CONF_MAX_LUN-1;
9815 		if (SYM_SETUP_MAX_LUN < SYM_CONF_MAX_LUN)
9816 			cpi->max_lun = SYM_SETUP_MAX_LUN-1;
9817 		cpi->bus_id = cam_sim_bus(sim);
9818 		cpi->initiator_id = np->myaddr;
9819 		cpi->base_transfer_speed = 3300;
9820 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
9821 		strncpy(cpi->hba_vid, "Symbios", HBA_IDLEN);
9822 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
9823 		cpi->unit_number = cam_sim_unit(sim);
9824 		sym_xpt_done2(np, ccb, CAM_REQ_CMP);
9825 		break;
9826 	}
9827 	case XPT_ABORT:
9828 	{
9829 		union ccb *abort_ccb = ccb->cab.abort_ccb;
9830 		switch(abort_ccb->ccb_h.func_code) {
9831 		case XPT_SCSI_IO:
9832 			if (sym_abort_scsiio(np, abort_ccb, 0) == 0) {
9833 				sym_xpt_done2(np, ccb, CAM_REQ_CMP);
9834 				break;
9835 			}
9836 		default:
9837 			sym_xpt_done2(np, ccb, CAM_UA_ABORT);
9838 			break;
9839 		}
9840 		break;
9841 	}
9842 	case XPT_RESET_DEV:
9843 	{
9844 		sym_reset_dev(np, ccb);
9845 		break;
9846 	}
9847 	case XPT_RESET_BUS:
9848 	{
9849 		sym_reset_scsi_bus(np, 0);
9850 		if (sym_verbose) {
9851 			xpt_print_path(np->path);
9852 			printf("SCSI BUS reset delivered.\n");
9853 		}
9854 		sym_init (np, 1);
9855 		sym_xpt_done2(np, ccb, CAM_REQ_CMP);
9856 		break;
9857 	}
9858 	case XPT_ACCEPT_TARGET_IO:
9859 	case XPT_CONT_TARGET_IO:
9860 	case XPT_EN_LUN:
9861 	case XPT_NOTIFY_ACK:
9862 	case XPT_IMMED_NOTIFY:
9863 	case XPT_TERM_IO:
9864 	default:
9865 		sym_xpt_done2(np, ccb, CAM_REQ_INVALID);
9866 		break;
9867 	}
9868 }
9869 
9870 /*
9871  *  Update transfer settings of a target.
9872  */
9873 static void sym_update_trans(hcb_p np, tcb_p tp, struct sym_trans *tip,
9874 			    struct ccb_trans_settings *cts)
9875 {
9876 	/*
9877 	 *  Update the infos.
9878 	 */
9879 	if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
9880 		tip->width = cts->bus_width;
9881 	if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)
9882 		tip->offset = cts->sync_offset;
9883 	if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
9884 		tip->period = cts->sync_period;
9885 
9886 	/*
9887 	 *  Scale against out limits.
9888 	 */
9889 	if (tip->width  > SYM_SETUP_MAX_WIDE)	tip->width  =SYM_SETUP_MAX_WIDE;
9890 	if (tip->width  > np->maxwide)		tip->width  = np->maxwide;
9891 	if (tip->offset > SYM_SETUP_MAX_OFFS)	tip->offset =SYM_SETUP_MAX_OFFS;
9892 	if (tip->offset > np->maxoffs)		tip->offset = np->maxoffs;
9893 	if (tip->period) {
9894 		if (tip->period < SYM_SETUP_MIN_SYNC)
9895 			tip->period = SYM_SETUP_MIN_SYNC;
9896 		if (np->features & FE_ULTRA3) {
9897 			if (tip->period < np->minsync_dt)
9898 				tip->period = np->minsync_dt;
9899 		}
9900 		else {
9901 			if (tip->period < np->minsync)
9902 				tip->period = np->minsync;
9903 		}
9904 		if (tip->period > np->maxsync)
9905 			tip->period = np->maxsync;
9906 	}
9907 }
9908 
9909 /*
9910  *  Update flags for a device (logical unit).
9911  */
9912 static void
9913 sym_update_dflags(hcb_p np, u_char *flags, struct ccb_trans_settings *cts)
9914 {
9915 	if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
9916 		if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
9917 			*flags |= SYM_DISC_ENABLED;
9918 		else
9919 			*flags &= ~SYM_DISC_ENABLED;
9920 	}
9921 
9922 	if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
9923 		if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
9924 			*flags |= SYM_TAGS_ENABLED;
9925 		else
9926 			*flags &= ~SYM_TAGS_ENABLED;
9927 	}
9928 }
9929 
9930 
9931 /*============= DRIVER INITIALISATION ==================*/
9932 
9933 #ifdef FreeBSD_4_Bus
9934 
9935 static device_method_t sym_pci_methods[] = {
9936 	DEVMETHOD(device_probe,	 sym_pci_probe),
9937 	DEVMETHOD(device_attach, sym_pci_attach),
9938 	{ 0, 0 }
9939 };
9940 
9941 static driver_t sym_pci_driver = {
9942 	"sym",
9943 	sym_pci_methods,
9944 	sizeof(struct sym_hcb)
9945 };
9946 
9947 static devclass_t sym_devclass;
9948 
9949 DRIVER_MODULE(sym, pci, sym_pci_driver, sym_devclass, 0, 0);
9950 
9951 #else	/* Pre-FreeBSD_4_Bus */
9952 
9953 static u_long sym_unit;
9954 
9955 static struct	pci_device sym_pci_driver = {
9956 	"sym",
9957 	sym_pci_probe,
9958 	sym_pci_attach,
9959 	&sym_unit,
9960 	NULL
9961 };
9962 
9963 #if 	__FreeBSD_version >= 400000
9964 COMPAT_PCI_DRIVER (sym, sym_pci_driver);
9965 #else
9966 DATA_SET (pcidevice_set, sym_pci_driver);
9967 #endif
9968 
9969 #endif /* FreeBSD_4_Bus */
9970 
9971 static struct sym_pci_chip sym_pci_dev_table[] = {
9972  {PCI_ID_SYM53C810, 0x0f, "810", 4, 8, 4, 0,
9973  FE_ERL}
9974  ,
9975  {PCI_ID_SYM53C810, 0xff, "810a", 4,  8, 4, 1,
9976  FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF}
9977  ,
9978  {PCI_ID_SYM53C825, 0x0f, "825", 6,  8, 4, 0,
9979  FE_WIDE|FE_BOF|FE_ERL|FE_DIFF}
9980  ,
9981  {PCI_ID_SYM53C825, 0xff, "825a", 6,  8, 4, 2,
9982  FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM|FE_DIFF}
9983  ,
9984  {PCI_ID_SYM53C860, 0xff, "860", 4,  8, 5, 1,
9985  FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN}
9986  ,
9987  {PCI_ID_SYM53C875, 0x01, "875", 6, 16, 5, 2,
9988  FE_WIDE|FE_ULTRA|FE_CLK80|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
9989  FE_RAM|FE_DIFF}
9990  ,
9991  {PCI_ID_SYM53C875, 0xff, "875", 6, 16, 5, 2,
9992  FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
9993  FE_RAM|FE_DIFF}
9994  ,
9995  {PCI_ID_SYM53C875_2, 0xff, "875", 6, 16, 5, 2,
9996  FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
9997  FE_RAM|FE_DIFF}
9998  ,
9999  {PCI_ID_SYM53C885, 0xff, "885", 6, 16, 5, 2,
10000  FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
10001  FE_RAM|FE_DIFF}
10002  ,
10003  {PCI_ID_SYM53C895, 0xff, "895", 6, 31, 7, 2,
10004  FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
10005  FE_RAM|FE_LCKFRQ}
10006  ,
10007  {PCI_ID_SYM53C896, 0xff, "896", 6, 31, 7, 4,
10008  FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
10009  FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
10010  ,
10011  {PCI_ID_SYM53C895A, 0xff, "895a", 6, 31, 7, 4,
10012  FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
10013  FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
10014  ,
10015  {PCI_ID_LSI53C1010, 0x00, "1010", 6, 62, 7, 8,
10016  FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
10017  FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC|FE_PCI66|FE_CRC|
10018  FE_C10}
10019  ,
10020  {PCI_ID_LSI53C1010, 0xff, "1010", 6, 62, 7, 8,
10021  FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
10022  FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC|
10023  FE_C10|FE_U3EN}
10024  ,
10025  {PCI_ID_LSI53C1010_2, 0xff, "1010", 6, 62, 7, 8,
10026  FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
10027  FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC|FE_PCI66|FE_CRC|
10028  FE_C10|FE_U3EN}
10029  ,
10030  {PCI_ID_LSI53C1510D, 0xff, "1510d", 6, 31, 7, 4,
10031  FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
10032  FE_RAM|FE_IO256|FE_LEDC}
10033 };
10034 
10035 #define sym_pci_num_devs \
10036 	(sizeof(sym_pci_dev_table) / sizeof(sym_pci_dev_table[0]))
10037 
10038 /*
10039  *  Look up the chip table.
10040  *
10041  *  Return a pointer to the chip entry if found,
10042  *  zero otherwise.
10043  */
10044 static struct sym_pci_chip *
10045 #ifdef FreeBSD_4_Bus
10046 sym_find_pci_chip(device_t dev)
10047 #else
10048 sym_find_pci_chip(pcici_t pci_tag)
10049 #endif
10050 {
10051 	struct	sym_pci_chip *chip;
10052 	int	i;
10053 	u_short	device_id;
10054 	u_char	revision;
10055 
10056 #ifdef FreeBSD_4_Bus
10057 	if (pci_get_vendor(dev) != PCI_VENDOR_NCR)
10058 		return 0;
10059 
10060 	device_id = pci_get_device(dev);
10061 	revision  = pci_get_revid(dev);
10062 #else
10063 	if (pci_cfgread(pci_tag, PCIR_VENDOR, 2) != PCI_VENDOR_NCR)
10064 		return 0;
10065 
10066 	device_id = pci_cfgread(pci_tag, PCIR_DEVICE, 2);
10067 	revision  = pci_cfgread(pci_tag, PCIR_REVID,  1);
10068 #endif
10069 
10070 	for (i = 0; i < sym_pci_num_devs; i++) {
10071 		chip = &sym_pci_dev_table[i];
10072 		if (device_id != chip->device_id)
10073 			continue;
10074 		if (revision > chip->revision_id)
10075 			continue;
10076 		if (FE_LDSTR & chip->features)
10077 			return chip;
10078 		break;
10079 	}
10080 
10081 	return 0;
10082 }
10083 
10084 /*
10085  *  Tell upper layer if the chip is supported.
10086  */
10087 #ifdef FreeBSD_4_Bus
10088 static int
10089 sym_pci_probe(device_t dev)
10090 {
10091 	struct	sym_pci_chip *chip;
10092 
10093 	chip = sym_find_pci_chip(dev);
10094 	if (chip) {
10095 		device_set_desc(dev, chip->name);
10096 		return (chip->lp_probe_bit & SYM_SETUP_LP_PROBE_MAP)? -2000 : 0;
10097 	}
10098 	return ENXIO;
10099 }
10100 #else /* Pre-FreeBSD_4_Bus */
10101 static const char *
10102 sym_pci_probe(pcici_t pci_tag, pcidi_t type)
10103 {
10104 	struct	sym_pci_chip *chip;
10105 
10106 	chip = sym_find_pci_chip(pci_tag);
10107 #if NNCR > 0
10108 	/* Only claim chips we are allowed to take precedence over the ncr */
10109 	if (chip && !(chip->lp_probe_bit & SYM_SETUP_LP_PROBE_MAP))
10110 #else
10111 	if (chip)
10112 #endif
10113 		return chip->name;
10114 	return 0;
10115 }
10116 #endif
10117 
10118 /*
10119  *  Attach a sym53c8xx device.
10120  */
10121 #ifdef FreeBSD_4_Bus
10122 static int
10123 sym_pci_attach(device_t dev)
10124 #else
10125 static void
10126 sym_pci_attach(pcici_t pci_tag, int unit)
10127 {
10128 	int err = sym_pci_attach2(pci_tag, unit);
10129 	if (err)
10130 		printf("sym: failed to attach unit %d - err=%d.\n", unit, err);
10131 }
10132 static int
10133 sym_pci_attach2(pcici_t pci_tag, int unit)
10134 #endif
10135 {
10136 	struct	sym_pci_chip *chip;
10137 	u_short	command;
10138 	u_char	cachelnsz;
10139 	struct	sym_hcb *np = 0;
10140 	struct	sym_nvram nvram;
10141 	int 	i;
10142 #ifdef	FreeBSD_Bus_Dma_Abstraction
10143 	bus_dma_tag_t	bus_dmat;
10144 
10145 	/*
10146 	 *  I expected to be told about a parent
10147 	 *  DMA tag, but didn't find any.
10148 	 */
10149 	bus_dmat = NULL;
10150 #endif
10151 
10152 	/*
10153 	 *  Only probed devices should be attached.
10154 	 *  We just enjoy being paranoid. :)
10155 	 */
10156 #ifdef FreeBSD_4_Bus
10157 	chip = sym_find_pci_chip(dev);
10158 #else
10159 	chip = sym_find_pci_chip(pci_tag);
10160 #endif
10161 	if (chip == NULL)
10162 		return (ENXIO);
10163 
10164 	/*
10165 	 *  Allocate immediately the host control block,
10166 	 *  since we are only expecting to succeed. :)
10167 	 *  We keep track in the HCB of all the resources that
10168 	 *  are to be released on error.
10169 	 */
10170 #ifdef	FreeBSD_Bus_Dma_Abstraction
10171 	np = __sym_calloc_dma(bus_dmat, sizeof(*np), "HCB");
10172 	if (np)
10173 		np->bus_dmat = bus_dmat;
10174 	else
10175 		goto attach_failed;
10176 #else
10177 	np = sym_calloc_dma(sizeof(*np), "HCB");
10178 	if (!np)
10179 		goto attach_failed;
10180 #endif
10181 
10182 	/*
10183 	 *  Copy some useful infos to the HCB.
10184 	 */
10185 	np->hcb_ba	 = vtobus(np);
10186 	np->verbose	 = bootverbose;
10187 #ifdef FreeBSD_4_Bus
10188 	np->device	 = dev;
10189 	np->unit	 = device_get_unit(dev);
10190 	np->device_id	 = pci_get_device(dev);
10191 	np->revision_id  = pci_get_revid(dev);
10192 #else
10193 	np->pci_tag	 = pci_tag;
10194 	np->unit	 = unit;
10195 	np->device_id	 = pci_cfgread(pci_tag, PCIR_DEVICE, 2);
10196 	np->revision_id  = pci_cfgread(pci_tag, PCIR_REVID,  1);
10197 #endif
10198 	np->features	 = chip->features;
10199 	np->clock_divn	 = chip->nr_divisor;
10200 	np->maxoffs	 = chip->offset_max;
10201 	np->maxburst	 = chip->burst_max;
10202 
10203 	/*
10204 	 * Edit its name.
10205 	 */
10206 	snprintf(np->inst_name, sizeof(np->inst_name), "sym%d", np->unit);
10207 
10208 	/*
10209 	 *  Allocate a tag for the DMA of user data.
10210 	 */
10211 #ifdef	FreeBSD_Bus_Dma_Abstraction
10212 	if (bus_dma_tag_create(np->bus_dmat, 1, (1<<24),
10213 				BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
10214 				NULL, NULL,
10215 				BUS_SPACE_MAXSIZE, SYM_CONF_MAX_SG,
10216 				(1<<24), 0, &np->data_dmat)) {
10217 		device_printf(dev, "failed to create DMA tag.\n");
10218 		goto attach_failed;
10219 	}
10220 #endif
10221 	/*
10222 	 *  Read and apply some fix-ups to the PCI COMMAND
10223 	 *  register. We want the chip to be enabled for:
10224 	 *  - BUS mastering
10225 	 *  - PCI parity checking (reporting would also be fine)
10226 	 *  - Write And Invalidate.
10227 	 */
10228 #ifdef FreeBSD_4_Bus
10229 	command = pci_read_config(dev, PCIR_COMMAND, 2);
10230 #else
10231 	command = pci_cfgread(pci_tag, PCIR_COMMAND, 2);
10232 #endif
10233 	command |= PCIM_CMD_BUSMASTEREN;
10234 	command |= PCIM_CMD_PERRESPEN;
10235 	command |= /* PCIM_CMD_MWIEN */ 0x0010;
10236 #ifdef FreeBSD_4_Bus
10237 	pci_write_config(dev, PCIR_COMMAND, command, 2);
10238 #else
10239 	pci_cfgwrite(pci_tag, PCIR_COMMAND, command, 2);
10240 #endif
10241 
10242 	/*
10243 	 *  Let the device know about the cache line size,
10244 	 *  if it doesn't yet.
10245 	 */
10246 #ifdef FreeBSD_4_Bus
10247 	cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
10248 #else
10249 	cachelnsz = pci_cfgread(pci_tag, PCIR_CACHELNSZ, 1);
10250 #endif
10251 	if (!cachelnsz) {
10252 		cachelnsz = 8;
10253 #ifdef FreeBSD_4_Bus
10254 		pci_write_config(dev, PCIR_CACHELNSZ, cachelnsz, 1);
10255 #else
10256 		pci_cfgwrite(pci_tag, PCIR_CACHELNSZ, cachelnsz, 1);
10257 #endif
10258 	}
10259 
10260 	/*
10261 	 *  Alloc/get/map/retrieve everything that deals with MMIO.
10262 	 */
10263 #ifdef FreeBSD_4_Bus
10264 	if ((command & PCIM_CMD_MEMEN) != 0) {
10265 		int regs_id = SYM_PCI_MMIO;
10266 		np->mmio_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &regs_id,
10267 						  0, ~0, 1, RF_ACTIVE);
10268 	}
10269 	if (!np->mmio_res) {
10270 		device_printf(dev, "failed to allocate MMIO resources\n");
10271 		goto attach_failed;
10272 	}
10273 	np->mmio_bsh = rman_get_bushandle(np->mmio_res);
10274 	np->mmio_tag = rman_get_bustag(np->mmio_res);
10275 	np->mmio_pa  = rman_get_start(np->mmio_res);
10276 	np->mmio_va  = (vm_offset_t) rman_get_virtual(np->mmio_res);
10277 	np->mmio_ba  = np->mmio_pa;
10278 #else
10279 	if ((command & PCIM_CMD_MEMEN) != 0) {
10280 		vm_offset_t vaddr, paddr;
10281 		if (!pci_map_mem(pci_tag, SYM_PCI_MMIO, &vaddr, &paddr)) {
10282 			printf("%s: failed to map MMIO window\n", sym_name(np));
10283 			goto attach_failed;
10284 		}
10285 		np->mmio_va = vaddr;
10286 		np->mmio_pa = paddr;
10287 		np->mmio_ba = paddr;
10288 	}
10289 #endif
10290 
10291 	/*
10292 	 *  Allocate the IRQ.
10293 	 */
10294 #ifdef FreeBSD_4_Bus
10295 	i = 0;
10296 	np->irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &i,
10297 					 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE);
10298 	if (!np->irq_res) {
10299 		device_printf(dev, "failed to allocate IRQ resource\n");
10300 		goto attach_failed;
10301 	}
10302 #endif
10303 
10304 #ifdef	SYM_CONF_IOMAPPED
10305 	/*
10306 	 *  User want us to use normal IO with PCI.
10307 	 *  Alloc/get/map/retrieve everything that deals with IO.
10308 	 */
10309 #ifdef FreeBSD_4_Bus
10310 	if ((command & PCI_COMMAND_IO_ENABLE) != 0) {
10311 		int regs_id = SYM_PCI_IO;
10312 		np->io_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &regs_id,
10313 						0, ~0, 1, RF_ACTIVE);
10314 	}
10315 	if (!np->io_res) {
10316 		device_printf(dev, "failed to allocate IO resources\n");
10317 		goto attach_failed;
10318 	}
10319 	np->io_bsh  = rman_get_bushandle(np->io_res);
10320 	np->io_tag  = rman_get_bustag(np->io_res);
10321 	np->io_port = rman_get_start(np->io_res);
10322 #else
10323 	if ((command & PCI_COMMAND_IO_ENABLE) != 0) {
10324 		pci_port_t io_port;
10325 		if (!pci_map_port (pci_tag, SYM_PCI_IO, &io_port)) {
10326 			printf("%s: failed to map IO window\n", sym_name(np));
10327 			goto attach_failed;
10328 		}
10329 		np->io_port = io_port;
10330 	}
10331 #endif
10332 
10333 #endif /* SYM_CONF_IOMAPPED */
10334 
10335 	/*
10336 	 *  If the chip has RAM.
10337 	 *  Alloc/get/map/retrieve the corresponding resources.
10338 	 */
10339 	if ((np->features & (FE_RAM|FE_RAM8K)) &&
10340 	    (command & PCIM_CMD_MEMEN) != 0) {
10341 #ifdef FreeBSD_4_Bus
10342 		int regs_id = SYM_PCI_RAM;
10343 		if (np->features & FE_64BIT)
10344 			regs_id = SYM_PCI_RAM64;
10345 		np->ram_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &regs_id,
10346 						 0, ~0, 1, RF_ACTIVE);
10347 		if (!np->ram_res) {
10348 			device_printf(dev,"failed to allocate RAM resources\n");
10349 			goto attach_failed;
10350 		}
10351 		np->ram_id  = regs_id;
10352 		np->ram_bsh = rman_get_bushandle(np->ram_res);
10353 		np->ram_tag = rman_get_bustag(np->ram_res);
10354 		np->ram_pa  = rman_get_start(np->ram_res);
10355 		np->ram_va  = (vm_offset_t) rman_get_virtual(np->ram_res);
10356 		np->ram_ba  = np->ram_pa;
10357 #else
10358 		vm_offset_t vaddr, paddr;
10359 		int regs_id = SYM_PCI_RAM;
10360 		if (np->features & FE_64BIT)
10361 			regs_id = SYM_PCI_RAM64;
10362 		if (!pci_map_mem(pci_tag, regs_id, &vaddr, &paddr)) {
10363 			printf("%s: failed to map RAM window\n", sym_name(np));
10364 			goto attach_failed;
10365 		}
10366 		np->ram_va = vaddr;
10367 		np->ram_pa = paddr;
10368 		np->ram_ba = paddr;
10369 #endif
10370 	}
10371 
10372 	/*
10373 	 *  Save setting of some IO registers, so we will
10374 	 *  be able to probe specific implementations.
10375 	 */
10376 	sym_save_initial_setting (np);
10377 
10378 	/*
10379 	 *  Reset the chip now, since it has been reported
10380 	 *  that SCSI clock calibration may not work properly
10381 	 *  if the chip is currently active.
10382 	 */
10383 	sym_chip_reset (np);
10384 
10385 	/*
10386 	 *  Try to read the user set-up.
10387 	 */
10388 	(void) sym_read_nvram(np, &nvram);
10389 
10390 	/*
10391 	 *  Prepare controller and devices settings, according
10392 	 *  to chip features, user set-up and driver set-up.
10393 	 */
10394 	(void) sym_prepare_setting(np, &nvram);
10395 
10396 	/*
10397 	 *  Check the PCI clock frequency.
10398 	 *  Must be performed after prepare_setting since it destroys
10399 	 *  STEST1 that is used to probe for the clock doubler.
10400 	 */
10401 	i = sym_getpciclock(np);
10402 	if (i > 37000)
10403 #ifdef FreeBSD_4_Bus
10404 		device_printf(dev, "PCI BUS clock seems too high: %u KHz.\n",i);
10405 #else
10406 		printf("%s: PCI BUS clock seems too high: %u KHz.\n",
10407 			sym_name(np), i);
10408 #endif
10409 
10410 	/*
10411 	 *  Allocate the start queue.
10412 	 */
10413 	np->squeue = (u32 *) sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"SQUEUE");
10414 	if (!np->squeue)
10415 		goto attach_failed;
10416 	np->squeue_ba = vtobus(np->squeue);
10417 
10418 	/*
10419 	 *  Allocate the done queue.
10420 	 */
10421 	np->dqueue = (u32 *) sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"DQUEUE");
10422 	if (!np->dqueue)
10423 		goto attach_failed;
10424 
10425 	/*
10426 	 *  Allocate the target bus address array.
10427 	 */
10428 	np->targtbl = (u32 *) sym_calloc_dma(256, "TARGTBL");
10429 	if (!np->targtbl)
10430 		goto attach_failed;
10431 
10432 	/*
10433 	 *  Allocate SCRIPTS areas.
10434 	 */
10435 	np->script0  = (struct sym_scr *)
10436 			sym_calloc_dma(sizeof(struct sym_scr), "SCRIPT0");
10437 	np->scripth0 = (struct sym_scrh *)
10438 			sym_calloc_dma(sizeof(struct sym_scrh), "SCRIPTH0");
10439 	if (!np->script0 || !np->scripth0)
10440 		goto attach_failed;
10441 
10442 	/*
10443 	 *  Initialyze the CCB free and busy queues.
10444 	 *  Allocate some CCB. We need at least ONE.
10445 	 */
10446 	sym_que_init(&np->free_ccbq);
10447 	sym_que_init(&np->busy_ccbq);
10448 	sym_que_init(&np->comp_ccbq);
10449 	if (!sym_alloc_ccb(np))
10450 		goto attach_failed;
10451 
10452 	/*
10453 	 * Initialyze the CAM CCB pending queue.
10454 	 */
10455 	sym_que_init(&np->cam_ccbq);
10456 
10457 	/*
10458 	 *  Fill-up variable-size parts of the SCRIPTS.
10459 	 */
10460 	sym_fill_scripts(&script0, &scripth0);
10461 
10462 	/*
10463 	 *  Calculate BUS addresses where we are going
10464 	 *  to load the SCRIPTS.
10465 	 */
10466 	np->script_ba	= vtobus(np->script0);
10467 	np->scripth_ba	= vtobus(np->scripth0);
10468 	np->scripth0_ba	= np->scripth_ba;
10469 
10470 	if (np->ram_ba) {
10471 		np->script_ba	= np->ram_ba;
10472 		if (np->features & FE_RAM8K) {
10473 			np->ram_ws = 8192;
10474 			np->scripth_ba = np->script_ba + 4096;
10475 #if BITS_PER_LONG > 32
10476 			np->scr_ram_seg = cpu_to_scr(np->script_ba >> 32);
10477 #endif
10478 		}
10479 		else
10480 			np->ram_ws = 4096;
10481 	}
10482 
10483 	/*
10484 	 *  Bind SCRIPTS with physical addresses usable by the
10485 	 *  SCRIPTS processor (as seen from the BUS = BUS addresses).
10486 	 */
10487 	sym_bind_script(np, (u32 *) &script0,
10488 			    (u32 *) np->script0, sizeof(struct sym_scr));
10489 	sym_bind_script(np, (u32 *) &scripth0,
10490 			    (u32 *) np->scripth0, sizeof(struct sym_scrh));
10491 
10492 	/*
10493 	 *  Patch some variables in SCRIPTS.
10494 	 *  These ones are loaded by the SCRIPTS processor.
10495 	 */
10496 	np->scripth0->pm0_data_addr[0] = cpu_to_scr(SCRIPT_BA(np,pm0_data));
10497 	np->scripth0->pm1_data_addr[0] = cpu_to_scr(SCRIPT_BA(np,pm1_data));
10498 
10499 
10500 	/*
10501 	 *  Still some for LED support.
10502 	 */
10503 	if (np->features & FE_LED0) {
10504 		np->script0->idle[0]  =
10505 				cpu_to_scr(SCR_REG_REG(gpreg, SCR_OR,  0x01));
10506 		np->script0->reselected[0] =
10507 				cpu_to_scr(SCR_REG_REG(gpreg, SCR_AND, 0xfe));
10508 		np->script0->start[0] =
10509 				cpu_to_scr(SCR_REG_REG(gpreg, SCR_AND, 0xfe));
10510 	}
10511 
10512 	/*
10513 	 *  Load SCNTL4 on reselection for the C10.
10514 	 */
10515 	if (np->features & FE_C10) {
10516 		np->script0->resel_scntl4[0] =
10517 				cpu_to_scr(SCR_LOAD_REL (scntl4, 1));
10518 		np->script0->resel_scntl4[1] =
10519 				cpu_to_scr(offsetof(struct sym_tcb, uval));
10520 	}
10521 
10522 #ifdef SYM_CONF_IARB_SUPPORT
10523 	/*
10524 	 *    If user does not want to use IMMEDIATE ARBITRATION
10525 	 *    when we are reselected while attempting to arbitrate,
10526 	 *    patch the SCRIPTS accordingly with a SCRIPT NO_OP.
10527 	 */
10528 	if (!SYM_CONF_SET_IARB_ON_ARB_LOST)
10529 		np->script0->ungetjob[0] = cpu_to_scr(SCR_NO_OP);
10530 
10531 	/*
10532 	 *    If user wants IARB to be set when we win arbitration
10533 	 *    and have other jobs, compute the max number of consecutive
10534 	 *    settings of IARB hints before we leave devices a chance to
10535 	 *    arbitrate for reselection.
10536 	 */
10537 #ifdef	SYM_SETUP_IARB_MAX
10538 	np->iarb_max = SYM_SETUP_IARB_MAX;
10539 #else
10540 	np->iarb_max = 4;
10541 #endif
10542 #endif
10543 
10544 	/*
10545 	 *  Prepare the idle and invalid task actions.
10546 	 */
10547 	np->idletask.start	= cpu_to_scr(SCRIPT_BA(np, idle));
10548 	np->idletask.restart	= cpu_to_scr(SCRIPTH_BA(np, bad_i_t_l));
10549 	np->idletask_ba		= vtobus(&np->idletask);
10550 
10551 	np->notask.start	= cpu_to_scr(SCRIPT_BA(np, idle));
10552 	np->notask.restart	= cpu_to_scr(SCRIPTH_BA(np, bad_i_t_l));
10553 	np->notask_ba		= vtobus(&np->notask);
10554 
10555 	np->bad_itl.start	= cpu_to_scr(SCRIPT_BA(np, idle));
10556 	np->bad_itl.restart	= cpu_to_scr(SCRIPTH_BA(np, bad_i_t_l));
10557 	np->bad_itl_ba		= vtobus(&np->bad_itl);
10558 
10559 	np->bad_itlq.start	= cpu_to_scr(SCRIPT_BA(np, idle));
10560 	np->bad_itlq.restart	= cpu_to_scr(SCRIPTH_BA (np,bad_i_t_l_q));
10561 	np->bad_itlq_ba		= vtobus(&np->bad_itlq);
10562 
10563 	/*
10564 	 *  Allocate and prepare the lun JUMP table that is used
10565 	 *  for a target prior the probing of devices (bad lun table).
10566 	 *  A private table will be allocated for the target on the
10567 	 *  first INQUIRY response received.
10568 	 */
10569 	np->badluntbl = sym_calloc_dma(256, "BADLUNTBL");
10570 	if (!np->badluntbl)
10571 		goto attach_failed;
10572 
10573 	np->badlun_sa = cpu_to_scr(SCRIPTH_BA(np, resel_bad_lun));
10574 	for (i = 0 ; i < 64 ; i++)	/* 64 luns/target, no less */
10575 		np->badluntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa));
10576 
10577 	/*
10578 	 *  Prepare the bus address array that contains the bus
10579 	 *  address of each target control bloc.
10580 	 *  For now, assume all logical unit are wrong. :)
10581 	 */
10582 	np->scripth0->targtbl[0] = cpu_to_scr(vtobus(np->targtbl));
10583 	for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
10584 		np->targtbl[i] = cpu_to_scr(vtobus(&np->target[i]));
10585 		np->target[i].luntbl_sa = cpu_to_scr(vtobus(np->badluntbl));
10586 		np->target[i].lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa));
10587 	}
10588 
10589 	/*
10590 	 *  Now check the cache handling of the pci chipset.
10591 	 */
10592 	if (sym_snooptest (np)) {
10593 #ifdef FreeBSD_4_Bus
10594 		device_printf(dev, "CACHE INCORRECTLY CONFIGURED.\n");
10595 #else
10596 		printf("%s: CACHE INCORRECTLY CONFIGURED.\n", sym_name(np));
10597 #endif
10598 		goto attach_failed;
10599 	};
10600 
10601 	/*
10602 	 *  Now deal with CAM.
10603 	 *  Hopefully, we will succeed with that one.:)
10604 	 */
10605 	if (!sym_cam_attach(np))
10606 		goto attach_failed;
10607 
10608 	/*
10609 	 *  Sigh! we are done.
10610 	 */
10611 	return 0;
10612 
10613 	/*
10614 	 *  We have failed.
10615 	 *  We will try to free all the resources we have
10616 	 *  allocated, but if we are a boot device, this
10617 	 *  will not help that much.;)
10618 	 */
10619 attach_failed:
10620 	if (np)
10621 		sym_pci_free(np);
10622 	return ENXIO;
10623 }
10624 
10625 /*
10626  *  Free everything that have been allocated for this device.
10627  */
10628 static void sym_pci_free(hcb_p np)
10629 {
10630 	SYM_QUEHEAD *qp;
10631 	ccb_p cp;
10632 	tcb_p tp;
10633 	lcb_p lp;
10634 	int target, lun;
10635 	int s;
10636 
10637 	/*
10638 	 *  First free CAM resources.
10639 	 */
10640 	s = splcam();
10641 	sym_cam_free(np);
10642 	splx(s);
10643 
10644 	/*
10645 	 *  Now every should be quiet for us to
10646 	 *  free other resources.
10647 	 */
10648 #ifdef FreeBSD_4_Bus
10649 	if (np->ram_res)
10650 		bus_release_resource(np->device, SYS_RES_MEMORY,
10651 				     np->ram_id, np->ram_res);
10652 	if (np->mmio_res)
10653 		bus_release_resource(np->device, SYS_RES_MEMORY,
10654 				     SYM_PCI_MMIO, np->mmio_res);
10655 	if (np->io_res)
10656 		bus_release_resource(np->device, SYS_RES_IOPORT,
10657 				     SYM_PCI_IO, np->io_res);
10658 	if (np->irq_res)
10659 		bus_release_resource(np->device, SYS_RES_IRQ,
10660 				     0, np->irq_res);
10661 #else
10662 	/*
10663 	 *  YEAH!!!
10664 	 *  It seems there is no means to free MMIO resources.
10665 	 */
10666 #endif
10667 
10668 	if (np->scripth0)
10669 		sym_mfree_dma(np->scripth0, sizeof(struct sym_scrh),"SCRIPTH0");
10670 	if (np->script0)
10671 		sym_mfree_dma(np->script0, sizeof(struct sym_scr), "SCRIPT0");
10672 	if (np->squeue)
10673 		sym_mfree_dma(np->squeue, sizeof(u32)*(MAX_QUEUE*2), "SQUEUE");
10674 	if (np->dqueue)
10675 		sym_mfree_dma(np->dqueue, sizeof(u32)*(MAX_QUEUE*2), "DQUEUE");
10676 
10677 	while ((qp = sym_remque_head(&np->free_ccbq)) != 0) {
10678 		cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
10679 #ifdef	FreeBSD_Bus_Dma_Abstraction
10680 		bus_dmamap_destroy(np->data_dmat, cp->dmamap);
10681 #endif
10682 		sym_mfree_dma(cp->sns_bbuf, SYM_SNS_BBUF_LEN, "SNS_BBUF");
10683 		sym_mfree_dma(cp, sizeof(*cp), "CCB");
10684 	}
10685 
10686 	if (np->badluntbl)
10687 		sym_mfree_dma(np->badluntbl, 256,"BADLUNTBL");
10688 
10689 	for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) {
10690 		tp = &np->target[target];
10691 		for (lun = 0 ; lun < SYM_CONF_MAX_LUN ; lun++) {
10692 			lp = sym_lp(np, tp, lun);
10693 			if (!lp)
10694 				continue;
10695 			if (lp->itlq_tbl)
10696 				sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4,
10697 				       "ITLQ_TBL");
10698 			if (lp->cb_tags)
10699 				sym_mfree(lp->cb_tags, SYM_CONF_MAX_TASK,
10700 				       "CB_TAGS");
10701 			sym_mfree_dma(lp, sizeof(*lp), "LCB");
10702 		}
10703 #if SYM_CONF_MAX_LUN > 1
10704 		if (tp->lunmp)
10705 			sym_mfree(tp->lunmp, SYM_CONF_MAX_LUN*sizeof(lcb_p),
10706 			       "LUNMP");
10707 #endif
10708 	}
10709 	if (np->targtbl)
10710 		sym_mfree_dma(np->targtbl, 256, "TARGTBL");
10711 #ifdef	FreeBSD_Bus_Dma_Abstraction
10712 	if (np->data_dmat)
10713 		bus_dma_tag_destroy(np->data_dmat);
10714 #endif
10715 	sym_mfree_dma(np, sizeof(*np), "HCB");
10716 }
10717 
10718 /*
10719  *  Allocate CAM resources and register a bus to CAM.
10720  */
10721 int sym_cam_attach(hcb_p np)
10722 {
10723 	struct cam_devq *devq = 0;
10724 	struct cam_sim *sim = 0;
10725 	struct cam_path *path = 0;
10726 	int err, s;
10727 
10728 	s = splcam();
10729 
10730 	/*
10731 	 *  Establish our interrupt handler.
10732 	 */
10733 #ifdef FreeBSD_4_Bus
10734 	err = bus_setup_intr(np->device, np->irq_res, INTR_TYPE_CAM,
10735 			     sym_intr, np, &np->intr);
10736 	if (err) {
10737 		device_printf(np->device, "bus_setup_intr() failed: %d\n",
10738 			      err);
10739 		goto fail;
10740 	}
10741 #else
10742 	err = 0;
10743 	if (!pci_map_int (np->pci_tag, sym_intr, np, &cam_imask)) {
10744 		printf("%s: failed to map interrupt\n", sym_name(np));
10745 		goto fail;
10746 	}
10747 #endif
10748 
10749 	/*
10750 	 *  Create the device queue for our sym SIM.
10751 	 */
10752 	devq = cam_simq_alloc(SYM_CONF_MAX_START);
10753 	if (!devq)
10754 		goto fail;
10755 
10756 	/*
10757 	 *  Construct our SIM entry.
10758 	 */
10759 	sim = cam_sim_alloc(sym_action, sym_poll, "sym", np, np->unit,
10760 			    1, SYM_SETUP_MAX_TAG, devq);
10761 	if (!sim)
10762 		goto fail;
10763 	devq = 0;
10764 
10765 	if (xpt_bus_register(sim, 0) != CAM_SUCCESS)
10766 		goto fail;
10767 	np->sim = sim;
10768 	sim = 0;
10769 
10770 	if (xpt_create_path(&path, 0,
10771 			    cam_sim_path(np->sim), CAM_TARGET_WILDCARD,
10772 			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
10773 		goto fail;
10774 	}
10775 	np->path = path;
10776 
10777 	/*
10778 	 *  Hmmm... This should be useful, but I donnot want to
10779 	 *  know about.
10780 	 */
10781 #if 	__FreeBSD_version < 400000
10782 #ifdef	__alpha__
10783 #ifdef	FreeBSD_4_Bus
10784 	alpha_register_pci_scsi(pci_get_bus(np->device),
10785 				pci_get_slot(np->device), np->sim);
10786 #else
10787 	alpha_register_pci_scsi(pci_tag->bus, pci_tag->slot, np->sim);
10788 #endif
10789 #endif
10790 #endif
10791 
10792 #if 0
10793 	/*
10794 	 *  Establish our async notification handler.
10795 	 */
10796 	{
10797 	struct ccb_setasync csa;
10798 	xpt_setup_ccb(&csa.ccb_h, np->path, 5);
10799 	csa.ccb_h.func_code = XPT_SASYNC_CB;
10800 	csa.event_enable    = AC_LOST_DEVICE;
10801 	csa.callback	    = sym_async;
10802 	csa.callback_arg    = np->sim;
10803 	xpt_action((union ccb *)&csa);
10804 	}
10805 #endif
10806 	/*
10807 	 *  Start the chip now, without resetting the BUS, since
10808 	 *  it seems that this must stay under control of CAM.
10809 	 *  With LVD/SE capable chips and BUS in SE mode, we may
10810 	 *  get a spurious SMBC interrupt.
10811 	 */
10812 	sym_init (np, 0);
10813 
10814 	splx(s);
10815 	return 1;
10816 fail:
10817 	if (sim)
10818 		cam_sim_free(sim, FALSE);
10819 	if (devq)
10820 		cam_simq_free(devq);
10821 
10822 	sym_cam_free(np);
10823 
10824 	splx(s);
10825 	return 0;
10826 }
10827 
10828 /*
10829  *  Free everything that deals with CAM.
10830  */
10831 void sym_cam_free(hcb_p np)
10832 {
10833 #ifdef FreeBSD_4_Bus
10834 	if (np->intr)
10835 		bus_teardown_intr(np->device, np->irq_res, np->intr);
10836 #else
10837 	/* pci_unmap_int(np->pci_tag); */	/* Does nothing */
10838 #endif
10839 
10840 	if (np->sim) {
10841 		xpt_bus_deregister(cam_sim_path(np->sim));
10842 		cam_sim_free(np->sim, /*free_devq*/ TRUE);
10843 	}
10844 	if (np->path)
10845 		xpt_free_path(np->path);
10846 }
10847 
10848 /*============ OPTIONNAL NVRAM SUPPORT =================*/
10849 
10850 /*
10851  *  Get host setup from NVRAM.
10852  */
10853 static void sym_nvram_setup_host (hcb_p np, struct sym_nvram *nvram)
10854 {
10855 #ifdef SYM_CONF_NVRAM_SUPPORT
10856 	/*
10857 	 *  Get parity checking, host ID, verbose mode
10858 	 *  and miscellaneous host flags from NVRAM.
10859 	 */
10860 	switch(nvram->type) {
10861 	case SYM_SYMBIOS_NVRAM:
10862 		if (!(nvram->data.Symbios.flags & SYMBIOS_PARITY_ENABLE))
10863 			np->rv_scntl0  &= ~0x0a;
10864 		np->myaddr = nvram->data.Symbios.host_id & 0x0f;
10865 		if (nvram->data.Symbios.flags & SYMBIOS_VERBOSE_MSGS)
10866 			np->verbose += 1;
10867 		if (nvram->data.Symbios.flags1 & SYMBIOS_SCAN_HI_LO)
10868 			np->usrflags |= SYM_SCAN_TARGETS_HILO;
10869 		if (nvram->data.Symbios.flags2 & SYMBIOS_AVOID_BUS_RESET)
10870 			np->usrflags |= SYM_AVOID_BUS_RESET;
10871 		break;
10872 	case SYM_TEKRAM_NVRAM:
10873 		np->myaddr = nvram->data.Tekram.host_id & 0x0f;
10874 		break;
10875 	default:
10876 		break;
10877 	}
10878 #endif
10879 }
10880 
10881 /*
10882  *  Get target setup from NVRAM.
10883  */
10884 #ifdef SYM_CONF_NVRAM_SUPPORT
10885 static void sym_Symbios_setup_target(hcb_p np,int target, Symbios_nvram *nvram);
10886 static void sym_Tekram_setup_target(hcb_p np,int target, Tekram_nvram *nvram);
10887 #endif
10888 
10889 static void
10890 sym_nvram_setup_target (hcb_p np, int target, struct sym_nvram *nvp)
10891 {
10892 #ifdef SYM_CONF_NVRAM_SUPPORT
10893 	switch(nvp->type) {
10894 	case SYM_SYMBIOS_NVRAM:
10895 		sym_Symbios_setup_target (np, target, &nvp->data.Symbios);
10896 		break;
10897 	case SYM_TEKRAM_NVRAM:
10898 		sym_Tekram_setup_target (np, target, &nvp->data.Tekram);
10899 		break;
10900 	default:
10901 		break;
10902 	}
10903 #endif
10904 }
10905 
10906 #ifdef SYM_CONF_NVRAM_SUPPORT
10907 /*
10908  *  Get target set-up from Symbios format NVRAM.
10909  */
10910 static void
10911 sym_Symbios_setup_target(hcb_p np, int target, Symbios_nvram *nvram)
10912 {
10913 	tcb_p tp = &np->target[target];
10914 	Symbios_target *tn = &nvram->target[target];
10915 
10916 	tp->tinfo.user.period = tn->sync_period ? (tn->sync_period + 3) / 4 : 0;
10917 	tp->tinfo.user.width  = tn->bus_width == 0x10 ? BUS_16_BIT : BUS_8_BIT;
10918 	tp->usrtags =
10919 		(tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? SYM_SETUP_MAX_TAG : 0;
10920 
10921 	if (!(tn->flags & SYMBIOS_DISCONNECT_ENABLE))
10922 		tp->usrflags &= ~SYM_DISC_ENABLED;
10923 	if (!(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME))
10924 		tp->usrflags |= SYM_SCAN_BOOT_DISABLED;
10925 	if (!(tn->flags & SYMBIOS_SCAN_LUNS))
10926 		tp->usrflags |= SYM_SCAN_LUNS_DISABLED;
10927 }
10928 
10929 /*
10930  *  Get target set-up from Tekram format NVRAM.
10931  */
10932 static void
10933 sym_Tekram_setup_target(hcb_p np, int target, Tekram_nvram *nvram)
10934 {
10935 	tcb_p tp = &np->target[target];
10936 	struct Tekram_target *tn = &nvram->target[target];
10937 	int i;
10938 
10939 	if (tn->flags & TEKRAM_SYNC_NEGO) {
10940 		i = tn->sync_index & 0xf;
10941 		tp->tinfo.user.period = Tekram_sync[i];
10942 	}
10943 
10944 	tp->tinfo.user.width =
10945 		(tn->flags & TEKRAM_WIDE_NEGO) ? BUS_16_BIT : BUS_8_BIT;
10946 
10947 	if (tn->flags & TEKRAM_TAGGED_COMMANDS) {
10948 		tp->usrtags = 2 << nvram->max_tags_index;
10949 	}
10950 
10951 	if (tn->flags & TEKRAM_DISCONNECT_ENABLE)
10952 		tp->usrflags |= SYM_DISC_ENABLED;
10953 
10954 	/* If any device does not support parity, we will not use this option */
10955 	if (!(tn->flags & TEKRAM_PARITY_CHECK))
10956 		np->rv_scntl0  &= ~0x0a; /* SCSI parity checking disabled */
10957 }
10958 
10959 #ifdef	SYM_CONF_DEBUG_NVRAM
10960 /*
10961  *  Dump Symbios format NVRAM for debugging purpose.
10962  */
10963 void sym_display_Symbios_nvram(hcb_p np, Symbios_nvram *nvram)
10964 {
10965 	int i;
10966 
10967 	/* display Symbios nvram host data */
10968 	printf("%s: HOST ID=%d%s%s%s%s%s%s\n",
10969 		sym_name(np), nvram->host_id & 0x0f,
10970 		(nvram->flags  & SYMBIOS_SCAM_ENABLE)	? " SCAM"	:"",
10971 		(nvram->flags  & SYMBIOS_PARITY_ENABLE)	? " PARITY"	:"",
10972 		(nvram->flags  & SYMBIOS_VERBOSE_MSGS)	? " VERBOSE"	:"",
10973 		(nvram->flags  & SYMBIOS_CHS_MAPPING)	? " CHS_ALT"	:"",
10974 		(nvram->flags2 & SYMBIOS_AVOID_BUS_RESET)?" NO_RESET"	:"",
10975 		(nvram->flags1 & SYMBIOS_SCAN_HI_LO)	? " HI_LO"	:"");
10976 
10977 	/* display Symbios nvram drive data */
10978 	for (i = 0 ; i < 15 ; i++) {
10979 		struct Symbios_target *tn = &nvram->target[i];
10980 		printf("%s-%d:%s%s%s%s WIDTH=%d SYNC=%d TMO=%d\n",
10981 		sym_name(np), i,
10982 		(tn->flags & SYMBIOS_DISCONNECT_ENABLE)	? " DISC"	: "",
10983 		(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME)	? " SCAN_BOOT"	: "",
10984 		(tn->flags & SYMBIOS_SCAN_LUNS)		? " SCAN_LUNS"	: "",
10985 		(tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? " TCQ"	: "",
10986 		tn->bus_width,
10987 		tn->sync_period / 4,
10988 		tn->timeout);
10989 	}
10990 }
10991 
10992 /*
10993  *  Dump TEKRAM format NVRAM for debugging purpose.
10994  */
10995 static u_char Tekram_boot_delay[7] __initdata = {3, 5, 10, 20, 30, 60, 120};
10996 void sym_display_Tekram_nvram(hcb_p np, Tekram_nvram *nvram)
10997 {
10998 	int i, tags, boot_delay;
10999 	char *rem;
11000 
11001 	/* display Tekram nvram host data */
11002 	tags = 2 << nvram->max_tags_index;
11003 	boot_delay = 0;
11004 	if (nvram->boot_delay_index < 6)
11005 		boot_delay = Tekram_boot_delay[nvram->boot_delay_index];
11006 	switch((nvram->flags & TEKRAM_REMOVABLE_FLAGS) >> 6) {
11007 	default:
11008 	case 0:	rem = "";			break;
11009 	case 1: rem = " REMOVABLE=boot device";	break;
11010 	case 2: rem = " REMOVABLE=all";		break;
11011 	}
11012 
11013 	printf("%s: HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n",
11014 		sym_name(np), nvram->host_id & 0x0f,
11015 		(nvram->flags1 & SYMBIOS_SCAM_ENABLE)	? " SCAM"	:"",
11016 		(nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES"	:"",
11017 		(nvram->flags & TEKRAM_DRIVES_SUP_1GB)	? " >1GB"	:"",
11018 		(nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET"	:"",
11019 		(nvram->flags & TEKRAM_ACTIVE_NEGATION)	? " ACT_NEG"	:"",
11020 		(nvram->flags & TEKRAM_IMMEDIATE_SEEK)	? " IMM_SEEK"	:"",
11021 		(nvram->flags & TEKRAM_SCAN_LUNS)	? " SCAN_LUNS"	:"",
11022 		(nvram->flags1 & TEKRAM_F2_F6_ENABLED)	? " F2_F6"	:"",
11023 		rem, boot_delay, tags);
11024 
11025 	/* display Tekram nvram drive data */
11026 	for (i = 0; i <= 15; i++) {
11027 		int sync, j;
11028 		struct Tekram_target *tn = &nvram->target[i];
11029 		j = tn->sync_index & 0xf;
11030 		sync = Tekram_sync[j];
11031 		printf("%s-%d:%s%s%s%s%s%s PERIOD=%d\n",
11032 		sym_name(np), i,
11033 		(tn->flags & TEKRAM_PARITY_CHECK)	? " PARITY"	: "",
11034 		(tn->flags & TEKRAM_SYNC_NEGO)		? " SYNC"	: "",
11035 		(tn->flags & TEKRAM_DISCONNECT_ENABLE)	? " DISC"	: "",
11036 		(tn->flags & TEKRAM_START_CMD)		? " START"	: "",
11037 		(tn->flags & TEKRAM_TAGGED_COMMANDS)	? " TCQ"	: "",
11038 		(tn->flags & TEKRAM_WIDE_NEGO)		? " WIDE"	: "",
11039 		sync);
11040 	}
11041 }
11042 #endif	/* SYM_CONF_DEBUG_NVRAM */
11043 #endif	/* SYM_CONF_NVRAM_SUPPORT */
11044 
11045 
11046 /*
11047  *  Try reading Symbios or Tekram NVRAM
11048  */
11049 #ifdef SYM_CONF_NVRAM_SUPPORT
11050 static int sym_read_Symbios_nvram (hcb_p np, Symbios_nvram *nvram);
11051 static int sym_read_Tekram_nvram  (hcb_p np, Tekram_nvram *nvram);
11052 #endif
11053 
11054 int sym_read_nvram(hcb_p np, struct sym_nvram *nvp)
11055 {
11056 #ifdef SYM_CONF_NVRAM_SUPPORT
11057 	/*
11058 	 *  Try to read SYMBIOS nvram.
11059 	 *  Try to read TEKRAM nvram if Symbios nvram not found.
11060 	 */
11061 	if	(SYM_SETUP_SYMBIOS_NVRAM &&
11062 		 !sym_read_Symbios_nvram (np, &nvp->data.Symbios))
11063 		nvp->type = SYM_SYMBIOS_NVRAM;
11064 	else if	(SYM_SETUP_TEKRAM_NVRAM &&
11065 		 !sym_read_Tekram_nvram (np, &nvp->data.Tekram))
11066 		nvp->type = SYM_TEKRAM_NVRAM;
11067 	else
11068 		nvp->type = 0;
11069 #else
11070 	nvp->type = 0;
11071 #endif
11072 	return nvp->type;
11073 }
11074 
11075 
11076 #ifdef SYM_CONF_NVRAM_SUPPORT
11077 /*
11078  *  24C16 EEPROM reading.
11079  *
11080  *  GPOI0 - data in/data out
11081  *  GPIO1 - clock
11082  *  Symbios NVRAM wiring now also used by Tekram.
11083  */
11084 
11085 #define SET_BIT 0
11086 #define CLR_BIT 1
11087 #define SET_CLK 2
11088 #define CLR_CLK 3
11089 
11090 /*
11091  *  Set/clear data/clock bit in GPIO0
11092  */
11093 static void S24C16_set_bit(hcb_p np, u_char write_bit, u_char *gpreg,
11094 			  int bit_mode)
11095 {
11096 	UDELAY (5);
11097 	switch (bit_mode){
11098 	case SET_BIT:
11099 		*gpreg |= write_bit;
11100 		break;
11101 	case CLR_BIT:
11102 		*gpreg &= 0xfe;
11103 		break;
11104 	case SET_CLK:
11105 		*gpreg |= 0x02;
11106 		break;
11107 	case CLR_CLK:
11108 		*gpreg &= 0xfd;
11109 		break;
11110 
11111 	}
11112 	OUTB (nc_gpreg, *gpreg);
11113 	UDELAY (5);
11114 }
11115 
11116 /*
11117  *  Send START condition to NVRAM to wake it up.
11118  */
11119 static void S24C16_start(hcb_p np, u_char *gpreg)
11120 {
11121 	S24C16_set_bit(np, 1, gpreg, SET_BIT);
11122 	S24C16_set_bit(np, 0, gpreg, SET_CLK);
11123 	S24C16_set_bit(np, 0, gpreg, CLR_BIT);
11124 	S24C16_set_bit(np, 0, gpreg, CLR_CLK);
11125 }
11126 
11127 /*
11128  *  Send STOP condition to NVRAM - puts NVRAM to sleep... ZZzzzz!!
11129  */
11130 static void S24C16_stop(hcb_p np, u_char *gpreg)
11131 {
11132 	S24C16_set_bit(np, 0, gpreg, SET_CLK);
11133 	S24C16_set_bit(np, 1, gpreg, SET_BIT);
11134 }
11135 
11136 /*
11137  *  Read or write a bit to the NVRAM,
11138  *  read if GPIO0 input else write if GPIO0 output
11139  */
11140 static void S24C16_do_bit(hcb_p np, u_char *read_bit, u_char write_bit,
11141 			 u_char *gpreg)
11142 {
11143 	S24C16_set_bit(np, write_bit, gpreg, SET_BIT);
11144 	S24C16_set_bit(np, 0, gpreg, SET_CLK);
11145 	if (read_bit)
11146 		*read_bit = INB (nc_gpreg);
11147 	S24C16_set_bit(np, 0, gpreg, CLR_CLK);
11148 	S24C16_set_bit(np, 0, gpreg, CLR_BIT);
11149 }
11150 
11151 /*
11152  *  Output an ACK to the NVRAM after reading,
11153  *  change GPIO0 to output and when done back to an input
11154  */
11155 static void S24C16_write_ack(hcb_p np, u_char write_bit, u_char *gpreg,
11156 			    u_char *gpcntl)
11157 {
11158 	OUTB (nc_gpcntl, *gpcntl & 0xfe);
11159 	S24C16_do_bit(np, 0, write_bit, gpreg);
11160 	OUTB (nc_gpcntl, *gpcntl);
11161 }
11162 
11163 /*
11164  *  Input an ACK from NVRAM after writing,
11165  *  change GPIO0 to input and when done back to an output
11166  */
11167 static void S24C16_read_ack(hcb_p np, u_char *read_bit, u_char *gpreg,
11168 			   u_char *gpcntl)
11169 {
11170 	OUTB (nc_gpcntl, *gpcntl | 0x01);
11171 	S24C16_do_bit(np, read_bit, 1, gpreg);
11172 	OUTB (nc_gpcntl, *gpcntl);
11173 }
11174 
11175 /*
11176  *  WRITE a byte to the NVRAM and then get an ACK to see it was accepted OK,
11177  *  GPIO0 must already be set as an output
11178  */
11179 static void S24C16_write_byte(hcb_p np, u_char *ack_data, u_char write_data,
11180 			     u_char *gpreg, u_char *gpcntl)
11181 {
11182 	int x;
11183 
11184 	for (x = 0; x < 8; x++)
11185 		S24C16_do_bit(np, 0, (write_data >> (7 - x)) & 0x01, gpreg);
11186 
11187 	S24C16_read_ack(np, ack_data, gpreg, gpcntl);
11188 }
11189 
11190 /*
11191  *  READ a byte from the NVRAM and then send an ACK to say we have got it,
11192  *  GPIO0 must already be set as an input
11193  */
11194 static void S24C16_read_byte(hcb_p np, u_char *read_data, u_char ack_data,
11195 			    u_char *gpreg, u_char *gpcntl)
11196 {
11197 	int x;
11198 	u_char read_bit;
11199 
11200 	*read_data = 0;
11201 	for (x = 0; x < 8; x++) {
11202 		S24C16_do_bit(np, &read_bit, 1, gpreg);
11203 		*read_data |= ((read_bit & 0x01) << (7 - x));
11204 	}
11205 
11206 	S24C16_write_ack(np, ack_data, gpreg, gpcntl);
11207 }
11208 
11209 /*
11210  *  Read 'len' bytes starting at 'offset'.
11211  */
11212 static int sym_read_S24C16_nvram (hcb_p np, int offset, u_char *data, int len)
11213 {
11214 	u_char	gpcntl, gpreg;
11215 	u_char	old_gpcntl, old_gpreg;
11216 	u_char	ack_data;
11217 	int	retv = 1;
11218 	int	x;
11219 
11220 	/* save current state of GPCNTL and GPREG */
11221 	old_gpreg	= INB (nc_gpreg);
11222 	old_gpcntl	= INB (nc_gpcntl);
11223 	gpcntl		= old_gpcntl & 0xfc;
11224 
11225 	/* set up GPREG & GPCNTL to set GPIO0 and GPIO1 in to known state */
11226 	OUTB (nc_gpreg,  old_gpreg);
11227 	OUTB (nc_gpcntl, gpcntl);
11228 
11229 	/* this is to set NVRAM into a known state with GPIO0/1 both low */
11230 	gpreg = old_gpreg;
11231 	S24C16_set_bit(np, 0, &gpreg, CLR_CLK);
11232 	S24C16_set_bit(np, 0, &gpreg, CLR_BIT);
11233 
11234 	/* now set NVRAM inactive with GPIO0/1 both high */
11235 	S24C16_stop(np, &gpreg);
11236 
11237 	/* activate NVRAM */
11238 	S24C16_start(np, &gpreg);
11239 
11240 	/* write device code and random address MSB */
11241 	S24C16_write_byte(np, &ack_data,
11242 		0xa0 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
11243 	if (ack_data & 0x01)
11244 		goto out;
11245 
11246 	/* write random address LSB */
11247 	S24C16_write_byte(np, &ack_data,
11248 		offset & 0xff, &gpreg, &gpcntl);
11249 	if (ack_data & 0x01)
11250 		goto out;
11251 
11252 	/* regenerate START state to set up for reading */
11253 	S24C16_start(np, &gpreg);
11254 
11255 	/* rewrite device code and address MSB with read bit set (lsb = 0x01) */
11256 	S24C16_write_byte(np, &ack_data,
11257 		0xa1 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
11258 	if (ack_data & 0x01)
11259 		goto out;
11260 
11261 	/* now set up GPIO0 for inputting data */
11262 	gpcntl |= 0x01;
11263 	OUTB (nc_gpcntl, gpcntl);
11264 
11265 	/* input all requested data - only part of total NVRAM */
11266 	for (x = 0; x < len; x++)
11267 		S24C16_read_byte(np, &data[x], (x == (len-1)), &gpreg, &gpcntl);
11268 
11269 	/* finally put NVRAM back in inactive mode */
11270 	gpcntl &= 0xfe;
11271 	OUTB (nc_gpcntl, gpcntl);
11272 	S24C16_stop(np, &gpreg);
11273 	retv = 0;
11274 out:
11275 	/* return GPIO0/1 to original states after having accessed NVRAM */
11276 	OUTB (nc_gpcntl, old_gpcntl);
11277 	OUTB (nc_gpreg,  old_gpreg);
11278 
11279 	return retv;
11280 }
11281 
11282 #undef SET_BIT 0
11283 #undef CLR_BIT 1
11284 #undef SET_CLK 2
11285 #undef CLR_CLK 3
11286 
11287 /*
11288  *  Try reading Symbios NVRAM.
11289  *  Return 0 if OK.
11290  */
11291 static int sym_read_Symbios_nvram (hcb_p np, Symbios_nvram *nvram)
11292 {
11293 	static u_char Symbios_trailer[6] = {0xfe, 0xfe, 0, 0, 0, 0};
11294 	u_char *data = (u_char *) nvram;
11295 	int len  = sizeof(*nvram);
11296 	u_short	csum;
11297 	int x;
11298 
11299 	/* probe the 24c16 and read the SYMBIOS 24c16 area */
11300 	if (sym_read_S24C16_nvram (np, SYMBIOS_NVRAM_ADDRESS, data, len))
11301 		return 1;
11302 
11303 	/* check valid NVRAM signature, verify byte count and checksum */
11304 	if (nvram->type != 0 ||
11305 	    bcmp(nvram->trailer, Symbios_trailer, 6) ||
11306 	    nvram->byte_count != len - 12)
11307 		return 1;
11308 
11309 	/* verify checksum */
11310 	for (x = 6, csum = 0; x < len - 6; x++)
11311 		csum += data[x];
11312 	if (csum != nvram->checksum)
11313 		return 1;
11314 
11315 	return 0;
11316 }
11317 
11318 /*
11319  *  93C46 EEPROM reading.
11320  *
11321  *  GPOI0 - data in
11322  *  GPIO1 - data out
11323  *  GPIO2 - clock
11324  *  GPIO4 - chip select
11325  *
11326  *  Used by Tekram.
11327  */
11328 
11329 /*
11330  *  Pulse clock bit in GPIO0
11331  */
11332 static void T93C46_Clk(hcb_p np, u_char *gpreg)
11333 {
11334 	OUTB (nc_gpreg, *gpreg | 0x04);
11335 	UDELAY (2);
11336 	OUTB (nc_gpreg, *gpreg);
11337 }
11338 
11339 /*
11340  *  Read bit from NVRAM
11341  */
11342 static void T93C46_Read_Bit(hcb_p np, u_char *read_bit, u_char *gpreg)
11343 {
11344 	UDELAY (2);
11345 	T93C46_Clk(np, gpreg);
11346 	*read_bit = INB (nc_gpreg);
11347 }
11348 
11349 /*
11350  *  Write bit to GPIO0
11351  */
11352 static void T93C46_Write_Bit(hcb_p np, u_char write_bit, u_char *gpreg)
11353 {
11354 	if (write_bit & 0x01)
11355 		*gpreg |= 0x02;
11356 	else
11357 		*gpreg &= 0xfd;
11358 
11359 	*gpreg |= 0x10;
11360 
11361 	OUTB (nc_gpreg, *gpreg);
11362 	UDELAY (2);
11363 
11364 	T93C46_Clk(np, gpreg);
11365 }
11366 
11367 /*
11368  *  Send STOP condition to NVRAM - puts NVRAM to sleep... ZZZzzz!!
11369  */
11370 static void T93C46_Stop(hcb_p np, u_char *gpreg)
11371 {
11372 	*gpreg &= 0xef;
11373 	OUTB (nc_gpreg, *gpreg);
11374 	UDELAY (2);
11375 
11376 	T93C46_Clk(np, gpreg);
11377 }
11378 
11379 /*
11380  *  Send read command and address to NVRAM
11381  */
11382 static void T93C46_Send_Command(hcb_p np, u_short write_data,
11383 				u_char *read_bit, u_char *gpreg)
11384 {
11385 	int x;
11386 
11387 	/* send 9 bits, start bit (1), command (2), address (6)  */
11388 	for (x = 0; x < 9; x++)
11389 		T93C46_Write_Bit(np, (u_char) (write_data >> (8 - x)), gpreg);
11390 
11391 	*read_bit = INB (nc_gpreg);
11392 }
11393 
11394 /*
11395  *  READ 2 bytes from the NVRAM
11396  */
11397 static void T93C46_Read_Word(hcb_p np, u_short *nvram_data, u_char *gpreg)
11398 {
11399 	int x;
11400 	u_char read_bit;
11401 
11402 	*nvram_data = 0;
11403 	for (x = 0; x < 16; x++) {
11404 		T93C46_Read_Bit(np, &read_bit, gpreg);
11405 
11406 		if (read_bit & 0x01)
11407 			*nvram_data |=  (0x01 << (15 - x));
11408 		else
11409 			*nvram_data &= ~(0x01 << (15 - x));
11410 	}
11411 }
11412 
11413 /*
11414  *  Read Tekram NvRAM data.
11415  */
11416 static int T93C46_Read_Data(hcb_p np, u_short *data,int len,u_char *gpreg)
11417 {
11418 	u_char	read_bit;
11419 	int	x;
11420 
11421 	for (x = 0; x < len; x++)  {
11422 
11423 		/* output read command and address */
11424 		T93C46_Send_Command(np, 0x180 | x, &read_bit, gpreg);
11425 		if (read_bit & 0x01)
11426 			return 1; /* Bad */
11427 		T93C46_Read_Word(np, &data[x], gpreg);
11428 		T93C46_Stop(np, gpreg);
11429 	}
11430 
11431 	return 0;
11432 }
11433 
11434 /*
11435  *  Try reading 93C46 Tekram NVRAM.
11436  */
11437 static int sym_read_T93C46_nvram (hcb_p np, Tekram_nvram *nvram)
11438 {
11439 	u_char gpcntl, gpreg;
11440 	u_char old_gpcntl, old_gpreg;
11441 	int retv = 1;
11442 
11443 	/* save current state of GPCNTL and GPREG */
11444 	old_gpreg	= INB (nc_gpreg);
11445 	old_gpcntl	= INB (nc_gpcntl);
11446 
11447 	/* set up GPREG & GPCNTL to set GPIO0/1/2/4 in to known state, 0 in,
11448 	   1/2/4 out */
11449 	gpreg = old_gpreg & 0xe9;
11450 	OUTB (nc_gpreg, gpreg);
11451 	gpcntl = (old_gpcntl & 0xe9) | 0x09;
11452 	OUTB (nc_gpcntl, gpcntl);
11453 
11454 	/* input all of NVRAM, 64 words */
11455 	retv = T93C46_Read_Data(np, (u_short *) nvram,
11456 				sizeof(*nvram) / sizeof(short), &gpreg);
11457 
11458 	/* return GPIO0/1/2/4 to original states after having accessed NVRAM */
11459 	OUTB (nc_gpcntl, old_gpcntl);
11460 	OUTB (nc_gpreg,  old_gpreg);
11461 
11462 	return retv;
11463 }
11464 
11465 /*
11466  *  Try reading Tekram NVRAM.
11467  *  Return 0 if OK.
11468  */
11469 static int sym_read_Tekram_nvram (hcb_p np, Tekram_nvram *nvram)
11470 {
11471 	u_char *data = (u_char *) nvram;
11472 	int len = sizeof(*nvram);
11473 	u_short	csum;
11474 	int x;
11475 
11476 	switch (np->device_id) {
11477 	case PCI_ID_SYM53C885:
11478 	case PCI_ID_SYM53C895:
11479 	case PCI_ID_SYM53C896:
11480 		x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
11481 					  data, len);
11482 		break;
11483 	case PCI_ID_SYM53C875:
11484 		x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
11485 					  data, len);
11486 		if (!x)
11487 			break;
11488 	default:
11489 		x = sym_read_T93C46_nvram(np, nvram);
11490 		break;
11491 	}
11492 	if (x)
11493 		return 1;
11494 
11495 	/* verify checksum */
11496 	for (x = 0, csum = 0; x < len - 1; x += 2)
11497 		csum += data[x] + (data[x+1] << 8);
11498 	if (csum != 0x1234)
11499 		return 1;
11500 
11501 	return 0;
11502 }
11503 
11504 #endif	/* SYM_CONF_NVRAM_SUPPORT */
11505