1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Device driver optimized for the Symbios/LSI 53C896/53C895A/53C1010
5 * PCI-SCSI controllers.
6 *
7 * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
8 *
9 * This driver also supports the following Symbios/LSI PCI-SCSI chips:
10 * 53C810A, 53C825A, 53C860, 53C875, 53C876, 53C885, 53C895,
 *	53C810, 53C815, 53C825 and the 53C1510D in 53C8XX mode.
12 *
13 *
14 * This driver for FreeBSD-CAM is derived from the Linux sym53c8xx driver.
15 * Copyright (C) 1998-1999 Gerard Roudier
16 *
17 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
18 * a port of the FreeBSD ncr driver to Linux-1.2.13.
19 *
20 * The original ncr driver has been written for 386bsd and FreeBSD by
21 * Wolfgang Stanglmeier <wolf@cologne.de>
22 * Stefan Esser <se@mi.Uni-Koeln.de>
23 * Copyright (C) 1994 Wolfgang Stanglmeier
24 *
25 * The initialisation code, and part of the code that addresses
26 * FreeBSD-CAM services is based on the aic7xxx driver for FreeBSD-CAM
27 * written by Justin T. Gibbs.
28 *
29 * Other major contributions:
30 *
31 * NVRAM detection and reading.
32 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
33 *
34 *-----------------------------------------------------------------------------
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. The name of the author may not be used to endorse or promote products
45 * derived from this software without specific prior written permission.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
48 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
51 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57 * SUCH DAMAGE.
58 */
59
60 #include <sys/cdefs.h>
61
62 /* #define SYM_DEBUG_GENERIC_SUPPORT */
63
64 #include <sys/param.h>
65
66 /*
67 * Driver configuration options.
68 */
69 #include "opt_sym.h"
70 #include <dev/sym/sym_conf.h>
71
72 #include <sys/systm.h>
73 #include <sys/malloc.h>
74 #include <sys/endian.h>
75 #include <sys/kernel.h>
76 #include <sys/lock.h>
77 #include <sys/mutex.h>
78 #include <sys/module.h>
79 #include <sys/bus.h>
80
81 #include <sys/proc.h>
82
83 #include <dev/pci/pcireg.h>
84 #include <dev/pci/pcivar.h>
85
86 #include <machine/bus.h>
87 #include <machine/resource.h>
88 #include <machine/atomic.h>
89
90 #include <sys/rman.h>
91
92 #include <cam/cam.h>
93 #include <cam/cam_ccb.h>
94 #include <cam/cam_sim.h>
95 #include <cam/cam_xpt_sim.h>
96 #include <cam/cam_debug.h>
97
98 #include <cam/scsi/scsi_all.h>
99 #include <cam/scsi/scsi_message.h>
100
101 /* Short and quite clear integer types */
102 typedef int8_t s8;
103 typedef int16_t s16;
104 typedef int32_t s32;
105 typedef u_int8_t u8;
106 typedef u_int16_t u16;
107 typedef u_int32_t u32;
108
109 /*
110 * Driver definitions.
111 */
112 #include <dev/sym/sym_defs.h>
113 #include <dev/sym/sym_fw.h>
114
115 /*
116 * With uncacheable memory, x86 does not reorder STORES and prevents LOADS
117 * from passing STORES. For ensuring this program order, we still need to
118 * employ compiler barriers, though, when the ordering of LOADS and STORES
119 * matters.
120 * Other architectures may implement weaker ordering guarantees and, thus,
121 * require memory barriers (and also IO barriers) to be used.
122 */
123 #if defined __i386__ || defined __amd64__
124 #define MEMORY_BARRIER() __compiler_membar()
125 #elif defined __powerpc__
126 #define MEMORY_BARRIER() __asm__ volatile("eieio; sync" : : : "memory")
127 #elif defined __arm__
128 #define MEMORY_BARRIER() dmb()
129 #elif defined __aarch64__
130 #define MEMORY_BARRIER() dmb(sy)
131 #elif defined __riscv
132 #define MEMORY_BARRIER() fence()
133 #else
134 #error "Not supported platform"
135 #endif
136
137 /*
138 * A la VMS/CAM-3 queue management.
139 */
typedef struct sym_quehead {	/* Doubly linked, circular; empty when self-linked */
	struct sym_quehead *flink;	/* Forward  pointer */
	struct sym_quehead *blink;	/* Backward pointer */
} SYM_QUEHEAD;
144
145 #define sym_que_init(ptr) do { \
146 (ptr)->flink = (ptr); (ptr)->blink = (ptr); \
147 } while (0)
148
__sym_que_add(struct sym_quehead * new,struct sym_quehead * blink,struct sym_quehead * flink)149 static __inline void __sym_que_add(struct sym_quehead * new,
150 struct sym_quehead * blink,
151 struct sym_quehead * flink)
152 {
153 flink->blink = new;
154 new->flink = flink;
155 new->blink = blink;
156 blink->flink = new;
157 }
158
__sym_que_del(struct sym_quehead * blink,struct sym_quehead * flink)159 static __inline void __sym_que_del(struct sym_quehead * blink,
160 struct sym_quehead * flink)
161 {
162 flink->blink = blink;
163 blink->flink = flink;
164 }
165
sym_que_empty(struct sym_quehead * head)166 static __inline int sym_que_empty(struct sym_quehead *head)
167 {
168 return head->flink == head;
169 }
170
sym_que_splice(struct sym_quehead * list,struct sym_quehead * head)171 static __inline void sym_que_splice(struct sym_quehead *list,
172 struct sym_quehead *head)
173 {
174 struct sym_quehead *first = list->flink;
175
176 if (first != list) {
177 struct sym_quehead *last = list->blink;
178 struct sym_quehead *at = head->flink;
179
180 first->blink = head;
181 head->flink = first;
182
183 last->flink = at;
184 at->blink = last;
185 }
186 }
187
188 #define sym_que_entry(ptr, type, member) \
189 ((type *)((char *)(ptr)-(size_t)(&((type *)0)->member)))
190
191 #define sym_insque(new, pos) __sym_que_add(new, pos, (pos)->flink)
192
193 #define sym_remque(el) __sym_que_del((el)->blink, (el)->flink)
194
195 #define sym_insque_head(new, head) __sym_que_add(new, head, (head)->flink)
196
sym_remque_head(struct sym_quehead * head)197 static __inline struct sym_quehead *sym_remque_head(struct sym_quehead *head)
198 {
199 struct sym_quehead *elem = head->flink;
200
201 if (elem != head)
202 __sym_que_del(head, elem->flink);
203 else
204 elem = NULL;
205 return elem;
206 }
207
208 #define sym_insque_tail(new, head) __sym_que_add(new, (head)->blink, head)
209
210 /*
211 * This one may be useful.
212 */
213 #define FOR_EACH_QUEUED_ELEMENT(head, qp) \
214 for (qp = (head)->flink; qp != (head); qp = qp->flink)
215 /*
216 * FreeBSD does not offer our kind of queue in the CAM CCB.
217 * So, we have to cast.
218 */
219 #define sym_qptr(p) ((struct sym_quehead *) (p))
220
221 /*
222 * Simple bitmap operations.
223 */
224 #define sym_set_bit(p, n) (((u32 *)(p))[(n)>>5] |= (1<<((n)&0x1f)))
225 #define sym_clr_bit(p, n) (((u32 *)(p))[(n)>>5] &= ~(1<<((n)&0x1f)))
226 #define sym_is_bit(p, n) (((u32 *)(p))[(n)>>5] & (1<<((n)&0x1f)))
227
228 /*
229 * Number of tasks per device we want to handle.
230 */
231 #if SYM_CONF_MAX_TAG_ORDER > 8
232 #error "more than 256 tags per logical unit not allowed."
233 #endif
234 #define SYM_CONF_MAX_TASK (1<<SYM_CONF_MAX_TAG_ORDER)
235
/*
 *  Do not use more tasks than we can handle.
 */
239 #ifndef SYM_CONF_MAX_TAG
240 #define SYM_CONF_MAX_TAG SYM_CONF_MAX_TASK
241 #endif
242 #if SYM_CONF_MAX_TAG > SYM_CONF_MAX_TASK
243 #undef SYM_CONF_MAX_TAG
244 #define SYM_CONF_MAX_TAG SYM_CONF_MAX_TASK
245 #endif
246
247 /*
248 * This one means 'NO TAG for this job'
249 */
250 #define NO_TAG (256)
251
252 /*
253 * Number of SCSI targets.
254 */
255 #if SYM_CONF_MAX_TARGET > 16
256 #error "more than 16 targets not allowed."
257 #endif
258
259 /*
260 * Number of logical units per target.
261 */
262 #if SYM_CONF_MAX_LUN > 64
263 #error "more than 64 logical units per target not allowed."
264 #endif
265
266 /*
267 * Asynchronous pre-scaler (ns). Shall be 40 for
268 * the SCSI timings to be compliant.
269 */
270 #define SYM_CONF_MIN_ASYNC (40)
271
272 /*
273 * Number of entries in the START and DONE queues.
274 *
275 * We limit to 1 PAGE in order to succeed allocation of
276 * these queues. Each entry is 8 bytes long (2 DWORDS).
277 */
278 #ifdef SYM_CONF_MAX_START
279 #define SYM_CONF_MAX_QUEUE (SYM_CONF_MAX_START+2)
280 #else
281 #define SYM_CONF_MAX_QUEUE (7*SYM_CONF_MAX_TASK+2)
282 #define SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
283 #endif
284
285 #if SYM_CONF_MAX_QUEUE > PAGE_SIZE/8
286 #undef SYM_CONF_MAX_QUEUE
287 #define SYM_CONF_MAX_QUEUE PAGE_SIZE/8
288 #undef SYM_CONF_MAX_START
289 #define SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
290 #endif
291
292 /*
293 * For this one, we want a short name :-)
294 */
295 #define MAX_QUEUE SYM_CONF_MAX_QUEUE
296
297 /*
298 * Active debugging tags and verbosity.
299 */
300 #define DEBUG_ALLOC (0x0001)
301 #define DEBUG_PHASE (0x0002)
302 #define DEBUG_POLL (0x0004)
303 #define DEBUG_QUEUE (0x0008)
304 #define DEBUG_RESULT (0x0010)
305 #define DEBUG_SCATTER (0x0020)
306 #define DEBUG_SCRIPT (0x0040)
307 #define DEBUG_TINY (0x0080)
308 #define DEBUG_TIMING (0x0100)
309 #define DEBUG_NEGO (0x0200)
310 #define DEBUG_TAGS (0x0400)
311 #define DEBUG_POINTER (0x0800)
312
313 #if 0
314 static int sym_debug = 0;
315 #define DEBUG_FLAGS sym_debug
316 #else
317 /* #define DEBUG_FLAGS (0x0631) */
318 #define DEBUG_FLAGS (0x0000)
319
320 #endif
321 #define sym_verbose (np->verbose)
322
323 /*
324 * Insert a delay in micro-seconds and milli-seconds.
325 */
/* Busy-wait for `us' micro-seconds. */
static void
UDELAY(int us)
{
	DELAY(us);
}

/* Busy-wait for `ms' milli-seconds, as repeated 1 ms delays. */
static void
MDELAY(int ms)
{
	while (ms--)
		UDELAY(1000);
}
328
329 /*
330 * Simple power of two buddy-like allocator.
331 *
332 * This simple code is not intended to be fast, but to
333 * provide power of 2 aligned memory allocations.
334 * Since the SCRIPTS processor only supplies 8 bit arithmetic,
335 * this allocator allows simple and fast address calculations
336 * from the SCRIPTS code. In addition, cache line alignment
337 * is guaranteed for power of 2 cache line size.
338 *
339 * This allocator has been developed for the Linux sym53c8xx
340 * driver, since this O/S does not provide naturally aligned
341 * allocations.
342 * It has the advantage of allowing the driver to use private
343 * pages of memory that will be useful if we ever need to deal
344 * with IO MMUs for PCI.
345 */
346 #define MEMO_SHIFT 4 /* 16 bytes minimum memory chunk */
347 #define MEMO_PAGE_ORDER 0 /* 1 PAGE maximum */
348 #if 0
349 #define MEMO_FREE_UNUSED /* Free unused pages immediately */
350 #endif
351 #define MEMO_WARN 1
352 #define MEMO_CLUSTER_SHIFT (PAGE_SHIFT+MEMO_PAGE_ORDER)
353 #define MEMO_CLUSTER_SIZE (1UL << MEMO_CLUSTER_SHIFT)
354 #define MEMO_CLUSTER_MASK (MEMO_CLUSTER_SIZE-1)
355
356 #define get_pages() malloc(MEMO_CLUSTER_SIZE, M_DEVBUF, M_NOWAIT)
357 #define free_pages(p) free((p), M_DEVBUF)
358
359 typedef u_long m_addr_t; /* Enough bits to bit-hack addresses */
360
typedef struct m_link {		/* Link between free memory chunks */
	struct m_link *next;	/* Next free chunk on the same-size list */
} m_link_s;
364
typedef struct m_vtob {		/* Virtual to Bus address translation */
	struct m_vtob	*next;	/* Next entry in the same hash bucket */
	bus_dmamap_t	dmamap;	/* Map for this chunk */
	m_addr_t	vaddr;	/* Virtual address */
	m_addr_t	baddr;	/* Bus physical address */
} m_vtob_s;
371 /* Hash this stuff a bit to speed up translations */
372 #define VTOB_HASH_SHIFT 5
373 #define VTOB_HASH_SIZE (1UL << VTOB_HASH_SHIFT)
374 #define VTOB_HASH_MASK (VTOB_HASH_SIZE-1)
375 #define VTOB_HASH_CODE(m) \
376 ((((m_addr_t) (m)) >> MEMO_CLUSTER_SHIFT) & VTOB_HASH_MASK)
377
typedef struct m_pool {		/* Memory pool of a given kind */
	bus_dma_tag_t dev_dmat;	/* Identifies the pool (parent DMA tag) */
	bus_dma_tag_t dmat;	/* Tag for our fixed allocations */
	m_addr_t (*getp)(struct m_pool *);	/* Get one cluster from backend */
#ifdef	MEMO_FREE_UNUSED
	void (*freep)(struct m_pool *, m_addr_t);	/* Give one cluster back */
#endif
#define M_GETP()		mp->getp(mp)
#define M_FREEP(p)		mp->freep(mp, p)
	int nump;		/* Number of clusters currently held */
	m_vtob_s *(vtob[VTOB_HASH_SIZE]);	/* Virtual->bus hash table */
	struct m_pool *next;	/* Next pool in the global pool list */
	struct m_link h[MEMO_CLUSTER_SHIFT - MEMO_SHIFT + 1];	/* Free lists per size */
} m_pool_s;
392
/*
 * Allocate a power-of-2 sized chunk (16 bytes minimum, one cluster
 * maximum) from the given pool, buddy-style.  Returns NULL when the
 * request is too large or memory is exhausted.  Chunks are naturally
 * aligned on their own size.
 */
static void *___sym_malloc(m_pool_s *mp, int size)
{
	int i = 0;
	int s = (1 << MEMO_SHIFT);	/* Smallest chunk size */
	int j;
	m_addr_t a;
	m_link_s *h = mp->h;		/* Per-size free chunk lists */

	/* Never serve more than one cluster at a time. */
	if (size > MEMO_CLUSTER_SIZE)
		return NULL;

	/* Round the request up to a power of 2; i indexes h[]. */
	while (size > s) {
		s <<= 1;
		++i;
	}

	/*
	 * Find the smallest free chunk large enough.  If even the
	 * cluster-sized list is empty, fetch a fresh cluster from the
	 * pool's backend.
	 */
	j = i;
	while (!h[j].next) {
		if (s == MEMO_CLUSTER_SIZE) {
			h[j].next = (m_link_s *) M_GETP();
			if (h[j].next)
				h[j].next->next = NULL;
			break;
		}
		++j;
		s <<= 1;
	}
	/*
	 * Unlink the chunk found, then split it down to the requested
	 * size, returning each unused upper half (the "buddy") to the
	 * corresponding free list.
	 */
	a = (m_addr_t) h[j].next;
	if (a) {
		h[j].next = h[j].next->next;
		while (j > i) {
			j -= 1;
			s >>= 1;
			h[j].next = (m_link_s *) (a+s);
			h[j].next->next = NULL;
		}
	}
#ifdef DEBUG
	printf("___sym_malloc(%d) = %p\n", size, (void *) a);
#endif
	return (void *) a;
}
435
/*
 * Return a chunk obtained from ___sym_malloc() to the pool.  `size'
 * must match the size requested at allocation time.  Free buddies
 * are coalesced into larger chunks; with MEMO_FREE_UNUSED defined,
 * fully reassembled clusters are handed back to the pool's backend.
 */
static void ___sym_mfree(m_pool_s *mp, void *ptr, int size)
{
	int i = 0;
	int s = (1 << MEMO_SHIFT);	/* Smallest chunk size */
	m_link_s *q;
	m_addr_t a, b;
	m_link_s *h = mp->h;		/* Per-size free chunk lists */

#ifdef DEBUG
	printf("___sym_mfree(%p, %d)\n", ptr, size);
#endif

	/* Such a chunk cannot have come from this allocator. */
	if (size > MEMO_CLUSTER_SIZE)
		return;

	/* Round the size up to the power of 2 actually allocated. */
	while (size > s) {
		s <<= 1;
		++i;
	}

	a = (m_addr_t) ptr;

	while (1) {
#ifdef MEMO_FREE_UNUSED
		/* A whole cluster is free again: release it. */
		if (s == MEMO_CLUSTER_SIZE) {
			M_FREEP(a);
			break;
		}
#endif
		/*
		 * The buddy of a size-s chunk differs from it only in
		 * address bit s.  Search this size's free list for it.
		 */
		b = a ^ s;
		q = &h[i];
		while (q->next && q->next != (m_link_s *) b) {
			q = q->next;
		}
		if (!q->next) {
			/* Buddy is busy: just enqueue the chunk. */
			((m_link_s *) a)->next = h[i].next;
			h[i].next = (m_link_s *) a;
			break;
		}
		/* Buddy is free: unlink it and coalesce one level up. */
		q->next = q->next->next;
		a = a & b;	/* Base address of the merged chunk */
		s <<= 1;
		++i;
	}
}
481
/*
 * Allocate from a pool and zero-fill on success.  With MEMO_WARN in
 * `uflags', an allocation failure is reported on the console.
 */
static void *__sym_calloc2(m_pool_s *mp, int size, char *name, int uflags)
{
	void *chunk = ___sym_malloc(mp, size);

	if (DEBUG_FLAGS & DEBUG_ALLOC)
		printf ("new %-10s[%4d] @%p.\n", name, size, chunk);

	if (chunk != NULL)
		bzero(chunk, size);
	else if (uflags & MEMO_WARN)
		printf ("__sym_calloc2: failed to allocate %s[%d]\n", name, size);

	return chunk;
}
498
499 #define __sym_calloc(mp, s, n) __sym_calloc2(mp, s, n, MEMO_WARN)
500
/*
 * Free a named chunk back to its pool, with optional debug tracing.
 */
static void __sym_mfree(m_pool_s *mp, void *ptr, int size, char *name)
{
	if (DEBUG_FLAGS & DEBUG_ALLOC)
		printf ("freeing %-10s[%4d] @%p.\n", name, size, ptr);

	___sym_mfree(mp, ptr, size);
}
509
/*
 * Default memory pool we do not need to involve in DMA.
 */
/*
 * With the `bus dma abstraction', we use a separate pool for
 * memory we do not need to involve in DMA.
 */
___mp0_getp(m_pool_s * mp)517 static m_addr_t ___mp0_getp(m_pool_s *mp)
518 {
519 m_addr_t m = (m_addr_t) get_pages();
520 if (m)
521 ++mp->nump;
522 return m;
523 }
524
525 #ifdef MEMO_FREE_UNUSED
/*
 * Backend for the non-DMA pool: give one page cluster back.
 */
static void ___mp0_freep(m_pool_s *mp, m_addr_t m)
{
	mp->nump--;
	free_pages(m);
}
531 #endif
532
533 #ifdef MEMO_FREE_UNUSED
534 static m_pool_s mp0 = {0, 0, ___mp0_getp, ___mp0_freep};
535 #else
536 static m_pool_s mp0 = {0, 0, ___mp0_getp};
537 #endif
538
539 /*
540 * Actual memory allocation routine for non-DMAed memory.
541 */
sym_calloc(int size,char * name)542 static void *sym_calloc(int size, char *name)
543 {
544 void *m;
545 /* Lock */
546 m = __sym_calloc(&mp0, size, name);
547 /* Unlock */
548 return m;
549 }
550
551 /*
552 * Actual memory allocation routine for non-DMAed memory.
553 */
sym_mfree(void * ptr,int size,char * name)554 static void sym_mfree(void *ptr, int size, char *name)
555 {
556 /* Lock */
557 __sym_mfree(&mp0, ptr, size, name);
558 /* Unlock */
559 }
560
561 /*
562 * DMAable pools.
563 */
564 /*
565 * With `bus dma abstraction', we use a separate pool per parent
566 * BUS handle. A reverse table (hashed) is maintained for virtual
567 * to BUS address translation.
568 */
/*
 * bus_dmamap_load() callback: record the bus address of the single
 * DMA segment into *arg, or 0 when the load failed.
 */
static void getbaddrcb(void *arg, bus_dma_segment_t *segs, int nseg __diagused,
    int error)
{
	bus_addr_t *baddr = (bus_addr_t *)arg;

	KASSERT(nseg == 1, ("%s: too many DMA segments (%d)", __func__, nseg));

	*baddr = error ? 0 : segs->ds_addr;
}
582
/*
 * Backend for DMAable pools: allocate one DMA-safe cluster, load it
 * into a DMA map, and record the virtual-to-bus translation in the
 * pool's hash table.  Returns the cluster's virtual address, or 0
 * on failure.
 */
static m_addr_t ___dma_getp(m_pool_s *mp)
{
	m_vtob_s *vbp;
	void *vaddr = NULL;
	bus_addr_t baddr = 0;

	/* The translation entry itself comes from the non-DMA pool. */
	vbp = __sym_calloc(&mp0, sizeof(*vbp), "VTOB");
	if (!vbp)
		goto out_err;

	if (bus_dmamem_alloc(mp->dmat, &vaddr,
	    BUS_DMA_COHERENT | BUS_DMA_NOCACHE | BUS_DMA_WAITOK, &vbp->dmamap))
		goto out_err;
	/* getbaddrcb() stores the single segment's address into baddr. */
	bus_dmamap_load(mp->dmat, vbp->dmamap, vaddr, MEMO_CLUSTER_SIZE,
	    getbaddrcb, &baddr, BUS_DMA_NOWAIT);
	if (baddr) {
		int hc = VTOB_HASH_CODE(vaddr);
		vbp->vaddr = (m_addr_t) vaddr;
		vbp->baddr = (m_addr_t) baddr;
		vbp->next = mp->vtob[hc];
		mp->vtob[hc] = vbp;
		++mp->nump;
		return (m_addr_t) vaddr;
	}
out_err:
	if (vaddr)
		bus_dmamem_free(mp->dmat, vaddr, vbp->dmamap);
	if (vbp)
		__sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB");
	return 0;
}
614
615 #ifdef MEMO_FREE_UNUSED
/*
 * Backend for DMAable pools: release one cluster.  The cluster's
 * translation entry is looked up by virtual address, unhashed, and
 * its DMA map and memory are torn down.
 */
static void ___dma_freep(m_pool_s *mp, m_addr_t m)
{
	m_vtob_s **vbpp, *vbp;
	int hc = VTOB_HASH_CODE(m);

	/* Find the hash entry that owns this virtual address. */
	vbpp = &mp->vtob[hc];
	while (*vbpp && (*vbpp)->vaddr != m)
		vbpp = &(*vbpp)->next;
	if (*vbpp) {
		vbp = *vbpp;
		*vbpp = (*vbpp)->next;	/* Unlink from the hash chain */
		bus_dmamap_unload(mp->dmat, vbp->dmamap);
		bus_dmamem_free(mp->dmat, (void *) vbp->vaddr, vbp->dmamap);
		__sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB");
		--mp->nump;
	}
}
633 #endif
634
___get_dma_pool(bus_dma_tag_t dev_dmat)635 static __inline m_pool_s *___get_dma_pool(bus_dma_tag_t dev_dmat)
636 {
637 m_pool_s *mp;
638 for (mp = mp0.next; mp && mp->dev_dmat != dev_dmat; mp = mp->next);
639 return mp;
640 }
641
___cre_dma_pool(bus_dma_tag_t dev_dmat)642 static m_pool_s *___cre_dma_pool(bus_dma_tag_t dev_dmat)
643 {
644 m_pool_s *mp = NULL;
645
646 mp = __sym_calloc(&mp0, sizeof(*mp), "MPOOL");
647 if (mp) {
648 mp->dev_dmat = dev_dmat;
649 if (!bus_dma_tag_create(dev_dmat, 1, MEMO_CLUSTER_SIZE,
650 BUS_SPACE_MAXADDR_32BIT,
651 BUS_SPACE_MAXADDR,
652 NULL, NULL, MEMO_CLUSTER_SIZE, 1,
653 MEMO_CLUSTER_SIZE, 0,
654 NULL, NULL, &mp->dmat)) {
655 mp->getp = ___dma_getp;
656 #ifdef MEMO_FREE_UNUSED
657 mp->freep = ___dma_freep;
658 #endif
659 mp->next = mp0.next;
660 mp0.next = mp;
661 return mp;
662 }
663 }
664 if (mp)
665 __sym_mfree(&mp0, mp, sizeof(*mp), "MPOOL");
666 return NULL;
667 }
668
669 #ifdef MEMO_FREE_UNUSED
/*
 * Unlink a DMAable pool from the global list and destroy it.
 * Does nothing if the pool is not on the list.
 */
static void ___del_dma_pool(m_pool_s *p)
{
	struct m_pool **pp;

	for (pp = &mp0.next; *pp != NULL; pp = &(*pp)->next) {
		if (*pp == p) {
			*pp = p->next;
			bus_dma_tag_destroy(p->dmat);
			__sym_mfree(&mp0, p, sizeof(*p), "MPOOL");
			break;
		}
	}
}
682 #endif
683
/*
 * Allocate zeroed DMAable memory from the pool attached to the
 * given parent DMA tag, creating the pool on first use.
 */
static void *__sym_calloc_dma(bus_dma_tag_t dev_dmat, int size, char *name)
{
	struct m_pool *mp;
	void *result = NULL;

	/* Lock */
	mp = ___get_dma_pool(dev_dmat);
	if (mp == NULL)
		mp = ___cre_dma_pool(dev_dmat);
	if (mp != NULL)
		result = __sym_calloc(mp, size, name);
#ifdef	MEMO_FREE_UNUSED
	/* Drop the pool again if it holds no clusters. */
	if (mp != NULL && mp->nump == 0)
		___del_dma_pool(mp);
#endif
	/* Unlock */

	return result;
}
703
704 static void
__sym_mfree_dma(bus_dma_tag_t dev_dmat,void * m,int size,char * name)705 __sym_mfree_dma(bus_dma_tag_t dev_dmat, void *m, int size, char *name)
706 {
707 struct m_pool *mp;
708
709 /* Lock */
710 mp = ___get_dma_pool(dev_dmat);
711 if (mp)
712 __sym_mfree(mp, m, size, name);
713 #ifdef MEMO_FREE_UNUSED
714 if (mp && !mp->nump)
715 ___del_dma_pool(mp);
716 #endif
717 /* Unlock */
718 }
719
/*
 * Translate a virtual address inside a DMAable cluster to the
 * corresponding bus address, using the pool's hash table.  Panics
 * when the address does not belong to any registered cluster.
 */
static m_addr_t __vtobus(bus_dma_tag_t dev_dmat, void *m)
{
	m_pool_s *mp;
	int hc = VTOB_HASH_CODE(m);
	m_vtob_s *vp = NULL;
	m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK;	/* Cluster base */

	/* Lock */
	mp = ___get_dma_pool(dev_dmat);
	if (mp) {
		vp = mp->vtob[hc];
		while (vp && (m_addr_t) vp->vaddr != a)
			vp = vp->next;
	}
	/* Unlock */
	if (!vp)
		panic("sym: VTOBUS FAILED!\n");
	/* panic() does not return, so vp is non-NULL here; the
	 * conditional below is purely defensive. */
	return vp ? vp->baddr + (((m_addr_t) m) - a) : 0;
}
739
740 /*
741 * Verbs for DMAable memory handling.
742 * The _uvptv_ macro avoids a nasty warning about pointer to volatile
743 * being discarded.
744 */
745 #define _uvptv_(p) ((void *)((vm_offset_t)(p)))
746 #define _sym_calloc_dma(np, s, n) __sym_calloc_dma(np->bus_dmat, s, n)
747 #define _sym_mfree_dma(np, p, s, n) \
748 __sym_mfree_dma(np->bus_dmat, _uvptv_(p), s, n)
749 #define sym_calloc_dma(s, n) _sym_calloc_dma(np, s, n)
750 #define sym_mfree_dma(p, s, n) _sym_mfree_dma(np, p, s, n)
751 #define _vtobus(np, p) __vtobus(np->bus_dmat, _uvptv_(p))
752 #define vtobus(p) _vtobus(np, p)
753
754 /*
755 * Print a buffer in hexadecimal format.
756 */
/*
 * Print `n' bytes in hexadecimal, each preceded by a space.
 */
static void sym_printb_hex (u_char *p, int n)
{
	int i;

	for (i = 0; i < n; i++)
		printf (" %x", p[i]);
}
762
763 /*
764 * Same with a label at beginning and .\n at end.
765 */
/*
 * Print a label, then `n' bytes in hexadecimal, then ".\n".
 */
static void sym_printl_hex (char *label, u_char *p, int n)
{
	printf ("%s", label);
	while (n-- > 0)
		printf (" %x", *p++);
	printf (".\n");
}
772
773 /*
774 * Return a string for SCSI BUS mode.
775 */
sym_scsi_bus_mode(int mode)776 static const char *sym_scsi_bus_mode(int mode)
777 {
778 switch(mode) {
779 case SMODE_HVD: return "HVD";
780 case SMODE_SE: return "SE";
781 case SMODE_LVD: return "LVD";
782 }
783 return "??";
784 }
785
786 /*
787 * Some poor and bogus sync table that refers to Tekram NVRAM layout.
788 */
789 #ifdef SYM_CONF_NVRAM_SUPPORT
790 static const u_char Tekram_sync[16] =
791 {25,31,37,43, 50,62,75,125, 12,15,18,21, 6,7,9,10};
792 #endif
793
794 /*
795 * Union of supported NVRAM formats.
796 */
struct sym_nvram {
	int type;		/* One of the SYM_*_NVRAM codes below, 0 if none */
#define	SYM_SYMBIOS_NVRAM	(1)
#define	SYM_TEKRAM_NVRAM	(2)
#ifdef	SYM_CONF_NVRAM_SUPPORT
	union {
		Symbios_nvram Symbios;	/* Raw Symbios NVRAM image */
		Tekram_nvram Tekram;	/* Raw Tekram NVRAM image */
	} data;
#endif
};
808
809 /*
810 * This one is hopefully useless, but actually useful. :-)
811 */
812 #ifndef assert
813 #define assert(expression) { \
814 if (!(expression)) { \
815 (void)panic( \
816 "assertion \"%s\" failed: file \"%s\", line %d\n", \
817 #expression, \
818 __FILE__, __LINE__); \
819 } \
820 }
821 #endif
822
823 /*
824 * Some provision for a possible big endian mode supported by
825 * Symbios chips (never seen, by the way).
826 * For now, this stuff does not deserve any comments. :)
827 */
828 #define sym_offb(o) (o)
829 #define sym_offw(o) (o)
830
831 /*
832 * Some provision for support for BIG ENDIAN CPU.
833 */
834 #define cpu_to_scr(dw) htole32(dw)
835 #define scr_to_cpu(dw) le32toh(dw)
836
837 /*
838 * Access to the chip IO registers and on-chip RAM.
839 * We use the `bus space' interface under FreeBSD-4 and
840 * later kernel versions.
841 */
842 #if defined(SYM_CONF_IOMAPPED)
843
844 #define INB_OFF(o) bus_read_1(np->io_res, (o))
845 #define INW_OFF(o) bus_read_2(np->io_res, (o))
846 #define INL_OFF(o) bus_read_4(np->io_res, (o))
847
848 #define OUTB_OFF(o, v) bus_write_1(np->io_res, (o), (v))
849 #define OUTW_OFF(o, v) bus_write_2(np->io_res, (o), (v))
850 #define OUTL_OFF(o, v) bus_write_4(np->io_res, (o), (v))
851
852 #else /* Memory mapped IO */
853
854 #define INB_OFF(o) bus_read_1(np->mmio_res, (o))
855 #define INW_OFF(o) bus_read_2(np->mmio_res, (o))
856 #define INL_OFF(o) bus_read_4(np->mmio_res, (o))
857
858 #define OUTB_OFF(o, v) bus_write_1(np->mmio_res, (o), (v))
859 #define OUTW_OFF(o, v) bus_write_2(np->mmio_res, (o), (v))
860 #define OUTL_OFF(o, v) bus_write_4(np->mmio_res, (o), (v))
861
862 #endif /* SYM_CONF_IOMAPPED */
863
864 #define OUTRAM_OFF(o, a, l) \
865 bus_write_region_1(np->ram_res, (o), (a), (l))
866
867 /*
868 * Common definitions for both bus space and legacy IO methods.
869 */
870 #define INB(r) INB_OFF(offsetof(struct sym_reg,r))
871 #define INW(r) INW_OFF(offsetof(struct sym_reg,r))
872 #define INL(r) INL_OFF(offsetof(struct sym_reg,r))
873
874 #define OUTB(r, v) OUTB_OFF(offsetof(struct sym_reg,r), (v))
875 #define OUTW(r, v) OUTW_OFF(offsetof(struct sym_reg,r), (v))
876 #define OUTL(r, v) OUTL_OFF(offsetof(struct sym_reg,r), (v))
877
878 #define OUTONB(r, m) OUTB(r, INB(r) | (m))
879 #define OUTOFFB(r, m) OUTB(r, INB(r) & ~(m))
880 #define OUTONW(r, m) OUTW(r, INW(r) | (m))
881 #define OUTOFFW(r, m) OUTW(r, INW(r) & ~(m))
882 #define OUTONL(r, m) OUTL(r, INL(r) | (m))
883 #define OUTOFFL(r, m) OUTL(r, INL(r) & ~(m))
884
885 /*
886 * We normally want the chip to have a consistent view
887 * of driver internal data structures when we restart it.
888 * Thus these macros.
889 */
890 #define OUTL_DSP(v) \
891 do { \
892 MEMORY_BARRIER(); \
893 OUTL (nc_dsp, (v)); \
894 } while (0)
895
896 #define OUTONB_STD() \
897 do { \
898 MEMORY_BARRIER(); \
899 OUTONB (nc_dcntl, (STD|NOCOM)); \
900 } while (0)
901
902 /*
903 * Command control block states.
904 */
905 #define HS_IDLE (0)
906 #define HS_BUSY (1)
907 #define HS_NEGOTIATE (2) /* sync/wide data transfer*/
908 #define HS_DISCONNECT (3) /* Disconnected by target */
909 #define HS_WAIT (4) /* waiting for resource */
910
911 #define HS_DONEMASK (0x80)
912 #define HS_COMPLETE (4|HS_DONEMASK)
913 #define HS_SEL_TIMEOUT (5|HS_DONEMASK) /* Selection timeout */
914 #define HS_UNEXPECTED (6|HS_DONEMASK) /* Unexpected disconnect */
915 #define HS_COMP_ERR (7|HS_DONEMASK) /* Completed with error */
916
917 /*
918 * Software Interrupt Codes
919 */
920 #define SIR_BAD_SCSI_STATUS (1)
921 #define SIR_SEL_ATN_NO_MSG_OUT (2)
922 #define SIR_MSG_RECEIVED (3)
923 #define SIR_MSG_WEIRD (4)
924 #define SIR_NEGO_FAILED (5)
925 #define SIR_NEGO_PROTO (6)
926 #define SIR_SCRIPT_STOPPED (7)
927 #define SIR_REJECT_TO_SEND (8)
928 #define SIR_SWIDE_OVERRUN (9)
929 #define SIR_SODL_UNDERRUN (10)
930 #define SIR_RESEL_NO_MSG_IN (11)
931 #define SIR_RESEL_NO_IDENTIFY (12)
932 #define SIR_RESEL_BAD_LUN (13)
933 #define SIR_TARGET_SELECTED (14)
934 #define SIR_RESEL_BAD_I_T_L (15)
935 #define SIR_RESEL_BAD_I_T_L_Q (16)
936 #define SIR_ABORT_SENT (17)
937 #define SIR_RESEL_ABORTED (18)
938 #define SIR_MSG_OUT_DONE (19)
939 #define SIR_COMPLETE_ERROR (20)
940 #define SIR_DATA_OVERRUN (21)
941 #define SIR_BAD_PHASE (22)
942 #define SIR_MAX (22)
943
944 /*
945 * Extended error bit codes.
946 * xerr_status field of struct sym_ccb.
947 */
948 #define XE_EXTRA_DATA (1) /* unexpected data phase */
949 #define XE_BAD_PHASE (1<<1) /* illegal phase (4/5) */
950 #define XE_PARITY_ERR (1<<2) /* unrecovered SCSI parity error */
951 #define XE_SODL_UNRUN (1<<3) /* ODD transfer in DATA OUT phase */
952 #define XE_SWIDE_OVRUN (1<<4) /* ODD transfer in DATA IN phase */
953
954 /*
955 * Negotiation status.
956 * nego_status field of struct sym_ccb.
957 */
958 #define NS_SYNC (1)
959 #define NS_WIDE (2)
960 #define NS_PPR (3)
961
962 /*
963 * A CCB hashed table is used to retrieve CCB address
964 * from DSA value.
965 */
966 #define CCB_HASH_SHIFT 8
967 #define CCB_HASH_SIZE (1UL << CCB_HASH_SHIFT)
968 #define CCB_HASH_MASK (CCB_HASH_SIZE-1)
969 #define CCB_HASH_CODE(dsa) (((dsa) >> 9) & CCB_HASH_MASK)
970
971 /*
972 * Device flags.
973 */
974 #define SYM_DISC_ENABLED (1)
975 #define SYM_TAGS_ENABLED (1<<1)
976 #define SYM_SCAN_BOOT_DISABLED (1<<2)
977 #define SYM_SCAN_LUNS_DISABLED (1<<3)
978
979 /*
980 * Host adapter miscellaneous flags.
981 */
982 #define SYM_AVOID_BUS_RESET (1)
983 #define SYM_SCAN_TARGETS_HILO (1<<1)
984
985 /*
986 * Device quirks.
987 * Some devices, for example the CHEETAH 2 LVD, disconnects without
988 * saving the DATA POINTER then reselects and terminates the IO.
989 * On reselection, the automatic RESTORE DATA POINTER makes the
990 * CURRENT DATA POINTER not point at the end of the IO.
991 * This behaviour just breaks our calculation of the residual.
992 * For now, we just force an AUTO SAVE on disconnection and will
993 * fix that in a further driver version.
994 */
995 #define SYM_QUIRK_AUTOSAVE 1
996
997 /*
998 * Misc.
999 */
1000 #define SYM_LOCK() mtx_lock(&np->mtx)
1001 #define SYM_LOCK_ASSERT(_what) mtx_assert(&np->mtx, (_what))
1002 #define SYM_LOCK_DESTROY() mtx_destroy(&np->mtx)
1003 #define SYM_LOCK_INIT() mtx_init(&np->mtx, "sym_lock", NULL, MTX_DEF)
1004 #define SYM_LOCK_INITIALIZED() mtx_initialized(&np->mtx)
1005 #define SYM_UNLOCK() mtx_unlock(&np->mtx)
1006
1007 #define SYM_SNOOP_TIMEOUT (10000000)
1008 #define SYM_PCI_IO PCIR_BAR(0)
1009 #define SYM_PCI_MMIO PCIR_BAR(1)
1010 #define SYM_PCI_RAM PCIR_BAR(2)
1011 #define SYM_PCI_RAM64 PCIR_BAR(3)
1012
1013 /*
1014 * Back-pointer from the CAM CCB to our data structures.
1015 */
1016 #define sym_hcb_ptr spriv_ptr0
1017 /* #define sym_ccb_ptr spriv_ptr1 */
1018
1019 /*
1020 * We mostly have to deal with pointers.
1021 * Thus these typedef's.
1022 */
1023 typedef struct sym_tcb *tcb_p;
1024 typedef struct sym_lcb *lcb_p;
1025 typedef struct sym_ccb *ccb_p;
1026 typedef struct sym_hcb *hcb_p;
1027
1028 /*
1029 * Gather negotiable parameters value
1030 */
struct sym_trans {
	u8 scsi_version;	/* SCSI version */
	u8 spi_version;		/* SPI version */
	u8 period;		/* Synchronous period */
	u8 offset;		/* Synchronous offset */
	u8 width;		/* Bus width (see BUS_*_BIT below) */
	u8 options;		/* PPR options */
};
1039
struct sym_tinfo {
	struct sym_trans current;	/* Parameters currently in effect */
	struct sym_trans goal;		/* Parameters we want to negotiate */
	struct sym_trans user;		/* Parameters requested by the user */
};
1045
1046 #define BUS_8_BIT MSG_EXT_WDTR_BUS_8_BIT
1047 #define BUS_16_BIT MSG_EXT_WDTR_BUS_16_BIT
1048
1049 /*
1050 * Global TCB HEADER.
1051 *
1052 * Due to lack of indirect addressing on earlier NCR chips,
1053 * this substructure is copied from the TCB to a global
1054 * address after selection.
1055 * For SYMBIOS chips that support LOAD/STORE this copy is
1056 * not needed and thus not performed.
1057 */
struct sym_tcbh {
	/*
	 * Scripts bus addresses of LUN table accessed from scripts.
	 * LUN #0 is a special case, since multi-lun devices are rare,
	 * and we want to speed-up the general case and not waste
	 * resources.
	 */
	u32	luntbl_sa;	/* bus address of this table	*/
	u32	lun0_sa;	/* bus address of LCB #0	*/
	/*
	 * Actual SYNC/WIDE IO registers value for this target.
	 * 'sval', 'wval' and 'uval' are read from SCRIPTS and
	 * so have alignment constraints.
	 */
/*0*/	u_char	uval;		/* -> SCNTL4 register		*/
/*1*/	u_char	sval;		/* -> SXFER  io register	*/
/*2*/	u_char	filler1;
/*3*/	u_char	wval;		/* -> SCNTL3 io register	*/
};
1077
1078 /*
1079 * Target Control Block
1080 */
struct sym_tcb {
	/*
	 * TCB header.
	 * Assumed at offset 0.
	 */
/*0*/	struct sym_tcbh head;

	/*
	 * LUN table used by the SCRIPTS processor.
	 * An array of bus addresses is used on reselection.
	 */
	u32	*luntbl;	/* LCBs bus address table	*/

	/*
	 * LUN table used by the C code.
	 */
	lcb_p	lun0p;		/* LCB of LUN #0 (usual case)	*/
#if SYM_CONF_MAX_LUN > 1
	lcb_p	*lunmp;		/* Other LCBs [1..MAX_LUN]	*/
#endif

	/*
	 * Bitmap that tells about LUNs that succeeded at least
	 * 1 IO and therefore assumed to be a real device.
	 * Avoid useless allocation of the LCB structure.
	 */
	u32	lun_map[(SYM_CONF_MAX_LUN+31)/32];

	/*
	 * Bitmap that tells about LUNs that do not yet have an LCB
	 * allocated (not discovered or LCB allocation failed).
	 */
	u32	busy0_map[(SYM_CONF_MAX_LUN+31)/32];

	/*
	 * Transfer capabilities (SIP)
	 */
	struct sym_tinfo tinfo;

	/*
	 * Keep track of the CCB used for the negotiation in order
	 * to ensure that only 1 negotiation is queued at a time.
	 */
	ccb_p   nego_cp;	/* CCB used for the nego	*/

	/*
	 * Set when we want to reset the device.
	 */
	u_char	to_reset;

	/*
	 * Other user settable limits and options.
	 * These limits are read from the NVRAM if present.
	 */
	u_char	usrflags;
	u_short	usrtags;
};
1138
1139 /*
1140 * Assert some alignments required by the chip.
1141 */
1142 CTASSERT(((offsetof(struct sym_reg, nc_sxfer) ^
1143 offsetof(struct sym_tcb, head.sval)) &3) == 0);
1144 CTASSERT(((offsetof(struct sym_reg, nc_scntl3) ^
1145 offsetof(struct sym_tcb, head.wval)) &3) == 0);
1146
1147 /*
1148 * Global LCB HEADER.
1149 *
1150 * Due to lack of indirect addressing on earlier NCR chips,
1151 * this substructure is copied from the LCB to a global
1152 * address after selection.
1153 * For SYMBIOS chips that support LOAD/STORE this copy is
1154 * not needed and thus not performed.
1155 */
1156 struct sym_lcbh {
1157 /*
1158 * SCRIPTS address jumped by SCRIPTS on reselection.
1159 * For not probed logical units, this address points to
1160 * SCRIPTS that deal with bad LU handling (must be at
1161 * offset zero of the LCB for that reason).
1162 */
1163 /*0*/ u32 resel_sa;
1164
1165 /*
1166 * Task (bus address of a CCB) read from SCRIPTS that points
1167 * to the unique ITL nexus allowed to be disconnected.
1168 */
1169 u32 itl_task_sa;
1170
1171 /*
1172 * Task table bus address (read from SCRIPTS).
1173 */
1174 u32 itlq_tbl_sa;
1175 };
1176
1177 /*
1178 * Logical Unit Control Block
1179 */
1180 struct sym_lcb {
1181 /*
1182 * TCB header.
1183 * Assumed at offset 0.
1184 */
1185 /*0*/ struct sym_lcbh head;
1186
1187 /*
1188 * Task table read from SCRIPTS that contains pointers to
1189 * ITLQ nexuses. The bus address read from SCRIPTS is
1190 * inside the header.
1191 */
1192 u32 *itlq_tbl; /* Kernel virtual address */
1193
1194 /*
1195 * Busy CCBs management.
1196 */
1197 u_short busy_itlq; /* Number of busy tagged CCBs */
1198 u_short busy_itl; /* Number of busy untagged CCBs */
1199
1200 /*
1201 * Circular tag allocation buffer.
1202 */
1203 u_short ia_tag; /* Tag allocation index */
1204 u_short if_tag; /* Tag release index */
1205 u_char *cb_tags; /* Circular tags buffer */
1206
1207 /*
1208 * Set when we want to clear all tasks.
1209 */
1210 u_char to_clear;
1211
1212 /*
1213 * Capabilities.
1214 */
1215 u_char user_flags;
1216 u_char current_flags;
1217 };
1218
1219 /*
1220 * Action from SCRIPTS on a task.
1221 * Is part of the CCB, but is also used separately to plug
1222 * error handling action to perform from SCRIPTS.
1223 */
1224 struct sym_actscr {
1225 u32 start; /* Jumped by SCRIPTS after selection */
1226 u32 restart; /* Jumped by SCRIPTS on relection */
1227 };
1228
1229 /*
1230 * Phase mismatch context.
1231 *
1232 * It is part of the CCB and is used as parameters for the
1233 * DATA pointer. We need two contexts to handle correctly the
1234 * SAVED DATA POINTER.
1235 */
1236 struct sym_pmc {
1237 struct sym_tblmove sg; /* Updated interrupted SG block */
1238 u32 ret; /* SCRIPT return address */
1239 };
1240
1241 /*
1242 * LUN control block lookup.
1243 * We use a direct pointer for LUN #0, and a table of
1244 * pointers which is only allocated for devices that support
1245 * LUN(s) > 0.
1246 */
1247 #if SYM_CONF_MAX_LUN <= 1
1248 #define sym_lp(tp, lun) (!lun) ? (tp)->lun0p : 0
1249 #else
1250 #define sym_lp(tp, lun) \
1251 (!lun) ? (tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[(lun)] : 0
1252 #endif
1253
1254 /*
1255 * Status are used by the host and the script processor.
1256 *
1257 * The last four bytes (status[4]) are copied to the
1258 * scratchb register (declared as scr0..scr3) just after the
1259 * select/reselect, and copied back just after disconnecting.
1260 * Inside the script the XX_REG are used.
1261 */
1262
1263 /*
1264 * Last four bytes (script)
1265 */
1266 #define QU_REG scr0
1267 #define HS_REG scr1
1268 #define HS_PRT nc_scr1
1269 #define SS_REG scr2
1270 #define SS_PRT nc_scr2
1271 #define HF_REG scr3
1272 #define HF_PRT nc_scr3
1273
1274 /*
1275 * Last four bytes (host)
1276 */
1277 #define actualquirks phys.head.status[0]
1278 #define host_status phys.head.status[1]
1279 #define ssss_status phys.head.status[2]
1280 #define host_flags phys.head.status[3]
1281
1282 /*
1283 * Host flags
1284 */
1285 #define HF_IN_PM0 1u
1286 #define HF_IN_PM1 (1u<<1)
1287 #define HF_ACT_PM (1u<<2)
1288 #define HF_DP_SAVED (1u<<3)
1289 #define HF_SENSE (1u<<4)
1290 #define HF_EXT_ERR (1u<<5)
1291 #define HF_DATA_IN (1u<<6)
1292 #ifdef SYM_CONF_IARB_SUPPORT
1293 #define HF_HINT_IARB (1u<<7)
1294 #endif
1295
1296 /*
1297 * Global CCB HEADER.
1298 *
1299 * Due to lack of indirect addressing on earlier NCR chips,
1300 * this substructure is copied from the ccb to a global
1301 * address after selection (or reselection) and copied back
1302 * before disconnect.
1303 * For SYMBIOS chips that support LOAD/STORE this copy is
1304 * not needed and thus not performed.
1305 */
1306 struct sym_ccbh {
1307 /*
1308 * Start and restart SCRIPTS addresses (must be at 0).
1309 */
1310 /*0*/ struct sym_actscr go;
1311
1312 /*
1313 * SCRIPTS jump address that deal with data pointers.
1314 * 'savep' points to the position in the script responsible
1315 * for the actual transfer of data.
1316 * It's written on reception of a SAVE_DATA_POINTER message.
1317 */
1318 u32 savep; /* Jump address to saved data pointer */
1319 u32 lastp; /* SCRIPTS address at end of data */
1320 u32 goalp; /* Not accessed for now from SCRIPTS */
1321
1322 /*
1323 * Status fields.
1324 */
1325 u8 status[4];
1326 };
1327
1328 /*
1329 * Data Structure Block
1330 *
1331 * During execution of a ccb by the script processor, the
1332 * DSA (data structure address) register points to this
1333 * substructure of the ccb.
1334 */
1335 struct sym_dsb {
1336 /*
1337 * CCB header.
1338 * Also assumed at offset 0 of the sym_ccb structure.
1339 */
1340 /*0*/ struct sym_ccbh head;
1341
1342 /*
1343 * Phase mismatch contexts.
1344 * We need two to handle correctly the SAVED DATA POINTER.
1345 * MUST BOTH BE AT OFFSET < 256, due to using 8 bit arithmetic
1346 * for address calculation from SCRIPTS.
1347 */
1348 struct sym_pmc pm0;
1349 struct sym_pmc pm1;
1350
1351 /*
1352 * Table data for Script
1353 */
1354 struct sym_tblsel select;
1355 struct sym_tblmove smsg;
1356 struct sym_tblmove smsg_ext;
1357 struct sym_tblmove cmd;
1358 struct sym_tblmove sense;
1359 struct sym_tblmove wresid;
1360 struct sym_tblmove data [SYM_CONF_MAX_SG];
1361 };
1362
1363 /*
1364 * Our Command Control Block
1365 */
1366 struct sym_ccb {
1367 /*
1368 * This is the data structure which is pointed by the DSA
1369 * register when it is executed by the script processor.
1370 * It must be the first entry.
1371 */
1372 struct sym_dsb phys;
1373
1374 /*
1375 * Pointer to CAM ccb and related stuff.
1376 */
1377 struct callout ch; /* callout handle */
1378 union ccb *cam_ccb; /* CAM scsiio ccb */
1379 u8 cdb_buf[16]; /* Copy of CDB */
1380 u8 *sns_bbuf; /* Bounce buffer for sense data */
1381 #define SYM_SNS_BBUF_LEN sizeof(struct scsi_sense_data)
1382 int data_len; /* Total data length */
1383 int segments; /* Number of SG segments */
1384
1385 /*
1386 * Miscellaneous status'.
1387 */
1388 u_char nego_status; /* Negotiation status */
1389 u_char xerr_status; /* Extended error flags */
1390 u32 extra_bytes; /* Extraneous bytes transferred */
1391
1392 /*
1393 * Message areas.
1394 * We prepare a message to be sent after selection.
1395 * We may use a second one if the command is rescheduled
1396 * due to CHECK_CONDITION or COMMAND TERMINATED.
1397 * Contents are IDENTIFY and SIMPLE_TAG.
1398 * While negotiating sync or wide transfer,
1399 * a SDTR or WDTR message is appended.
1400 */
1401 u_char scsi_smsg [12];
1402 u_char scsi_smsg2[12];
1403
1404 /*
1405 * Auto request sense related fields.
1406 */
1407 u_char sensecmd[6]; /* Request Sense command */
1408 u_char sv_scsi_status; /* Saved SCSI status */
1409 u_char sv_xerr_status; /* Saved extended status */
1410 int sv_resid; /* Saved residual */
1411
1412 /*
1413 * Map for the DMA of user data.
1414 */
1415 void *arg; /* Argument for some callback */
1416 bus_dmamap_t dmamap; /* DMA map for user data */
1417 u_char dmamapped;
1418 #define SYM_DMA_NONE 0
1419 #define SYM_DMA_READ 1
1420 #define SYM_DMA_WRITE 2
1421 /*
1422 * Other fields.
1423 */
1424 u32 ccb_ba; /* BUS address of this CCB */
1425 u_short tag; /* Tag for this transfer */
1426 /* NO_TAG means no tag */
1427 u_char target;
1428 u_char lun;
1429 ccb_p link_ccbh; /* Host adapter CCB hash chain */
1430 SYM_QUEHEAD
1431 link_ccbq; /* Link to free/busy CCB queue */
1432 u32 startp; /* Initial data pointer */
1433 int ext_sg; /* Extreme data pointer, used */
1434 int ext_ofs; /* to calculate the residual. */
1435 u_char to_abort; /* Want this IO to be aborted */
1436 };
1437
/*
 * Bus address of field `lbl' of the CCB pointed to by `cp'.
 * `cp' is parenthesized so any pointer expression may be passed.
 */
#define CCB_BA(cp,lbl)	((cp)->ccb_ba + offsetof(struct sym_ccb, lbl))
1439
1440 /*
1441 * Host Control Block
1442 */
1443 struct sym_hcb {
1444 struct mtx mtx;
1445
1446 /*
1447 * Global headers.
1448 * Due to poorness of addressing capabilities, earlier
1449 * chips (810, 815, 825) copy part of the data structures
1450 * (CCB, TCB and LCB) in fixed areas.
1451 */
1452 #ifdef SYM_CONF_GENERIC_SUPPORT
1453 struct sym_ccbh ccb_head;
1454 struct sym_tcbh tcb_head;
1455 struct sym_lcbh lcb_head;
1456 #endif
1457 /*
1458 * Idle task and invalid task actions and
1459 * their bus addresses.
1460 */
1461 struct sym_actscr idletask, notask, bad_itl, bad_itlq;
1462 vm_offset_t idletask_ba, notask_ba, bad_itl_ba, bad_itlq_ba;
1463
1464 /*
1465 * Dummy lun table to protect us against target
1466 * returning bad lun number on reselection.
1467 */
1468 u32 *badluntbl; /* Table physical address */
1469 u32 badlun_sa; /* SCRIPT handler BUS address */
1470
1471 /*
1472 * Bus address of this host control block.
1473 */
1474 u32 hcb_ba;
1475
1476 /*
1477 * Bit 32-63 of the on-chip RAM bus address in LE format.
1478 * The START_RAM64 script loads the MMRS and MMWS from this
1479 * field.
1480 */
1481 u32 scr_ram_seg;
1482
1483 /*
1484 * Chip and controller identification.
1485 */
1486 device_t device;
1487
1488 /*
1489 * Initial value of some IO register bits.
1490 * These values are assumed to have been set by BIOS, and may
1491 * be used to probe adapter implementation differences.
1492 */
1493 u_char sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest3, sv_ctest4,
1494 sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4, sv_scntl4,
1495 sv_stest1;
1496
1497 /*
1498 * Actual initial value of IO register bits used by the
1499 * driver. They are loaded at initialisation according to
1500 * features that are to be enabled/disabled.
1501 */
1502 u_char rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest3, rv_ctest4,
1503 rv_ctest5, rv_stest2, rv_ccntl0, rv_ccntl1, rv_scntl4;
1504
1505 /*
1506 * Target data.
1507 */
1508 #ifdef __amd64__
1509 struct sym_tcb *target;
1510 #else
1511 struct sym_tcb target[SYM_CONF_MAX_TARGET];
1512 #endif
1513
1514 /*
1515 * Target control block bus address array used by the SCRIPT
1516 * on reselection.
1517 */
1518 u32 *targtbl;
1519 u32 targtbl_ba;
1520
1521 /*
1522 * CAM SIM information for this instance.
1523 */
1524 struct cam_sim *sim;
1525 struct cam_path *path;
1526
1527 /*
1528 * Allocated hardware resources.
1529 */
1530 struct resource *irq_res;
1531 struct resource *io_res;
1532 struct resource *mmio_res;
1533 struct resource *ram_res;
1534 void *intr;
1535
1536 /*
1537 * Bus stuff.
1538 *
1539 * My understanding of PCI is that all agents must share the
1540 * same addressing range and model.
1541 * But some hardware architecture guys provide complex and
1542 * brain-deaded stuff that makes shit.
1543 * This driver only support PCI compliant implementations and
1544 * deals with part of the BUS stuff complexity only to fit O/S
1545 * requirements.
1546 */
1547
1548 /*
1549 * DMA stuff.
1550 */
1551 bus_dma_tag_t bus_dmat; /* DMA tag from parent BUS */
1552 bus_dma_tag_t data_dmat; /* DMA tag for user data */
1553 /*
1554 * BUS addresses of the chip
1555 */
1556 vm_offset_t mmio_ba; /* MMIO BUS address */
1557 vm_offset_t ram_ba; /* RAM BUS address */
1558 int ram_ws; /* RAM window size */
1559
1560 /*
1561 * SCRIPTS virtual and physical bus addresses.
1562 * 'script' is loaded in the on-chip RAM if present.
1563 * 'scripth' stays in main memory for all chips except the
1564 * 53C895A, 53C896 and 53C1010 that provide 8K on-chip RAM.
1565 */
1566 u_char *scripta0; /* Copies of script and scripth */
1567 u_char *scriptb0; /* Copies of script and scripth */
1568 vm_offset_t scripta_ba; /* Actual script and scripth */
1569 vm_offset_t scriptb_ba; /* bus addresses. */
1570 vm_offset_t scriptb0_ba;
1571 u_short scripta_sz; /* Actual size of script A */
1572 u_short scriptb_sz; /* Actual size of script B */
1573
1574 /*
1575 * Bus addresses, setup and patch methods for
1576 * the selected firmware.
1577 */
1578 struct sym_fwa_ba fwa_bas; /* Useful SCRIPTA bus addresses */
1579 struct sym_fwb_ba fwb_bas; /* Useful SCRIPTB bus addresses */
1580 void (*fw_setup)(hcb_p np, const struct sym_fw *fw);
1581 void (*fw_patch)(hcb_p np);
1582 const char *fw_name;
1583
1584 /*
1585 * General controller parameters and configuration.
1586 */
1587 u_short device_id; /* PCI device id */
1588 u_char revision_id; /* PCI device revision id */
1589 u_int features; /* Chip features map */
1590 u_char myaddr; /* SCSI id of the adapter */
1591 u_char maxburst; /* log base 2 of dwords burst */
1592 u_char maxwide; /* Maximum transfer width */
1593 u_char minsync; /* Min sync period factor (ST) */
1594 u_char maxsync; /* Max sync period factor (ST) */
1595 u_char maxoffs; /* Max scsi offset (ST) */
1596 u_char minsync_dt; /* Min sync period factor (DT) */
1597 u_char maxsync_dt; /* Max sync period factor (DT) */
1598 u_char maxoffs_dt; /* Max scsi offset (DT) */
1599 u_char multiplier; /* Clock multiplier (1,2,4) */
1600 u_char clock_divn; /* Number of clock divisors */
1601 u32 clock_khz; /* SCSI clock frequency in KHz */
1602 u32 pciclk_khz; /* Estimated PCI clock in KHz */
1603 /*
1604 * Start queue management.
1605 * It is filled up by the host processor and accessed by the
1606 * SCRIPTS processor in order to start SCSI commands.
1607 */
1608 volatile /* Prevent code optimizations */
1609 u32 *squeue; /* Start queue virtual address */
1610 u32 squeue_ba; /* Start queue BUS address */
1611 u_short squeueput; /* Next free slot of the queue */
1612 u_short actccbs; /* Number of allocated CCBs */
1613
1614 /*
1615 * Command completion queue.
1616 * It is the same size as the start queue to avoid overflow.
1617 */
1618 u_short dqueueget; /* Next position to scan */
1619 volatile /* Prevent code optimizations */
1620 u32 *dqueue; /* Completion (done) queue */
1621 u32 dqueue_ba; /* Done queue BUS address */
1622
1623 /*
1624 * Miscellaneous buffers accessed by the scripts-processor.
1625 * They shall be DWORD aligned, because they may be read or
1626 * written with a script command.
1627 */
1628 u_char msgout[8]; /* Buffer for MESSAGE OUT */
1629 u_char msgin [8]; /* Buffer for MESSAGE IN */
1630 u32 lastmsg; /* Last SCSI message sent */
1631 u_char scratch; /* Scratch for SCSI receive */
1632
1633 /*
1634 * Miscellaneous configuration and status parameters.
1635 */
1636 u_char usrflags; /* Miscellaneous user flags */
1637 u_char scsi_mode; /* Current SCSI BUS mode */
1638 u_char verbose; /* Verbosity for this controller*/
1639 u32 cache; /* Used for cache test at init. */
1640
1641 /*
1642 * CCB lists and queue.
1643 */
1644 ccb_p ccbh[CCB_HASH_SIZE]; /* CCB hashed by DSA value */
1645 SYM_QUEHEAD free_ccbq; /* Queue of available CCBs */
1646 SYM_QUEHEAD busy_ccbq; /* Queue of busy CCBs */
1647
1648 /*
1649 * During error handling and/or recovery,
1650 * active CCBs that are to be completed with
1651 * error or requeued are moved from the busy_ccbq
1652 * to the comp_ccbq prior to completion.
1653 */
1654 SYM_QUEHEAD comp_ccbq;
1655
1656 /*
1657 * CAM CCB pending queue.
1658 */
1659 SYM_QUEHEAD cam_ccbq;
1660
1661 /*
1662 * IMMEDIATE ARBITRATION (IARB) control.
1663 *
1664 * We keep track in 'last_cp' of the last CCB that has been
1665 * queued to the SCRIPTS processor and clear 'last_cp' when
1666 * this CCB completes. If last_cp is not zero at the moment
1667 * we queue a new CCB, we set a flag in 'last_cp' that is
1668 * used by the SCRIPTS as a hint for setting IARB.
1669 * We donnot set more than 'iarb_max' consecutive hints for
1670 * IARB in order to leave devices a chance to reselect.
1671 * By the way, any non zero value of 'iarb_max' is unfair. :)
1672 */
1673 #ifdef SYM_CONF_IARB_SUPPORT
1674 u_short iarb_max; /* Max. # consecutive IARB hints*/
1675 u_short iarb_count; /* Actual # of these hints */
1676 ccb_p last_cp;
1677 #endif
1678
1679 /*
1680 * Command abort handling.
1681 * We need to synchronize tightly with the SCRIPTS
1682 * processor in order to handle things correctly.
1683 */
1684 u_char abrt_msg[4]; /* Message to send buffer */
1685 struct sym_tblmove abrt_tbl; /* Table for the MOV of it */
1686 struct sym_tblsel abrt_sel; /* Sync params for selection */
1687 u_char istat_sem; /* Tells the chip to stop (SEM) */
1688 };
1689
/*
 * Bus address of field `lbl' of the HCB pointed to by `np'.
 * `np' is parenthesized so any pointer expression may be passed.
 */
#define HCB_BA(np, lbl)	((np)->hcb_ba + offsetof(struct sym_hcb, lbl))
1691
1692 /*
1693 * Return the name of the controller.
1694 */
sym_name(hcb_p np)1695 static __inline const char *sym_name(hcb_p np)
1696 {
1697 return device_get_nameunit(np->device);
1698 }
1699
1700 /*--------------------------------------------------------------------------*/
1701 /*------------------------------ FIRMWARES ---------------------------------*/
1702 /*--------------------------------------------------------------------------*/
1703
1704 /*
1705 * This stuff will be moved to a separate source file when
1706 * the driver will be broken into several source modules.
1707 */
1708
1709 /*
1710 * Macros used for all firmwares.
1711 */
1712 #define SYM_GEN_A(s, label) ((short) offsetof(s, label)),
1713 #define SYM_GEN_B(s, label) ((short) offsetof(s, label)),
1714 #define PADDR_A(label) SYM_GEN_PADDR_A(struct SYM_FWA_SCR, label)
1715 #define PADDR_B(label) SYM_GEN_PADDR_B(struct SYM_FWB_SCR, label)
1716
#ifdef	SYM_CONF_GENERIC_SUPPORT
/*
 * Allocate firmware #1 script area.
 * (offset tables are generated by the SYM_GEN_FW_* macros from
 * the script layout declared in sym_fw1.h)
 */
#define	SYM_FWA_SCR		sym_fw1a_scr
#define	SYM_FWB_SCR		sym_fw1b_scr
#include <dev/sym/sym_fw1.h>
static const struct sym_fwa_ofs sym_fw1a_ofs = {
	SYM_GEN_FW_A(struct SYM_FWA_SCR)
};
static const struct sym_fwb_ofs sym_fw1b_ofs = {
	SYM_GEN_FW_B(struct SYM_FWB_SCR)
};
#undef	SYM_FWA_SCR
#undef	SYM_FWB_SCR
#endif	/* SYM_CONF_GENERIC_SUPPORT */
1733
1734 /*
1735 * Allocate firmware #2 script area.
1736 */
1737 #define SYM_FWA_SCR sym_fw2a_scr
1738 #define SYM_FWB_SCR sym_fw2b_scr
1739 #include <dev/sym/sym_fw2.h>
1740 static const struct sym_fwa_ofs sym_fw2a_ofs = {
1741 SYM_GEN_FW_A(struct SYM_FWA_SCR)
1742 };
1743 static const struct sym_fwb_ofs sym_fw2b_ofs = {
1744 SYM_GEN_FW_B(struct SYM_FWB_SCR)
1745 SYM_GEN_B(struct SYM_FWB_SCR, start64)
1746 SYM_GEN_B(struct SYM_FWB_SCR, pm_handle)
1747 };
1748 #undef SYM_FWA_SCR
1749 #undef SYM_FWB_SCR
1750
1751 #undef SYM_GEN_A
1752 #undef SYM_GEN_B
1753 #undef PADDR_A
1754 #undef PADDR_B
1755
#ifdef	SYM_CONF_GENERIC_SUPPORT
/*
 * Patch routine for firmware #1.
 * Neutralizes optional SCRIPTS instructions and plugs actual
 * bus addresses into the script data.
 */
static void
sym_fw1_patch(hcb_p np)
{
	struct sym_fw1a_scr *scripta0 = (struct sym_fw1a_scr *)np->scripta0;
	struct sym_fw1b_scr *scriptb0 = (struct sym_fw1b_scr *)np->scriptb0;

	/*
	 * Without LED support, turn the LED-driving SCRIPTS
	 * instructions into NO_OPs.
	 */
	if (!(np->features & FE_LED0)) {
		scripta0->idle[0]	= cpu_to_scr(SCR_NO_OP);
		scripta0->reselected[0]	= cpu_to_scr(SCR_NO_OP);
		scripta0->start[0]	= cpu_to_scr(SCR_NO_OP);
	}

#ifdef SYM_CONF_IARB_SUPPORT
	/*
	 * If the user does not want IMMEDIATE ARBITRATION when we
	 * are reselected while attempting to arbitrate, turn the
	 * corresponding SCRIPTS instruction into a NO_OP.
	 */
	if (!SYM_CONF_SET_IARB_ON_ARB_LOST)
		scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP);
#endif
	/*
	 * Plug the initial bus addresses of the start and done
	 * queues and of the target bus address table.
	 */
	scriptb0->startpos[0]	= cpu_to_scr(np->squeue_ba);
	scriptb0->done_pos[0]	= cpu_to_scr(np->dqueue_ba);
	scriptb0->targtbl[0]	= cpu_to_scr(np->targtbl_ba);
}
#endif	/* SYM_CONF_GENERIC_SUPPORT */
1797
1798 /*
1799 * Patch routine for firmware #2.
1800 */
1801 static void
sym_fw2_patch(hcb_p np)1802 sym_fw2_patch(hcb_p np)
1803 {
1804 struct sym_fw2a_scr *scripta0;
1805 struct sym_fw2b_scr *scriptb0;
1806
1807 scripta0 = (struct sym_fw2a_scr *) np->scripta0;
1808 scriptb0 = (struct sym_fw2b_scr *) np->scriptb0;
1809
1810 /*
1811 * Remove LED support if not needed.
1812 */
1813 if (!(np->features & FE_LED0)) {
1814 scripta0->idle[0] = cpu_to_scr(SCR_NO_OP);
1815 scripta0->reselected[0] = cpu_to_scr(SCR_NO_OP);
1816 scripta0->start[0] = cpu_to_scr(SCR_NO_OP);
1817 }
1818
1819 #ifdef SYM_CONF_IARB_SUPPORT
1820 /*
1821 * If user does not want to use IMMEDIATE ARBITRATION
1822 * when we are reselected while attempting to arbitrate,
1823 * patch the SCRIPTS accordingly with a SCRIPT NO_OP.
1824 */
1825 if (!SYM_CONF_SET_IARB_ON_ARB_LOST)
1826 scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP);
1827 #endif
1828 /*
1829 * Patch some variable in SCRIPTS.
1830 * - start and done queue initial bus address.
1831 * - target bus address table bus address.
1832 */
1833 scriptb0->startpos[0] = cpu_to_scr(np->squeue_ba);
1834 scriptb0->done_pos[0] = cpu_to_scr(np->dqueue_ba);
1835 scriptb0->targtbl[0] = cpu_to_scr(np->targtbl_ba);
1836
1837 /*
1838 * Remove the load of SCNTL4 on reselection if not a C10.
1839 */
1840 if (!(np->features & FE_C10)) {
1841 scripta0->resel_scntl4[0] = cpu_to_scr(SCR_NO_OP);
1842 scripta0->resel_scntl4[1] = cpu_to_scr(0);
1843 }
1844
1845 /*
1846 * Remove a couple of work-arounds specific to C1010 if
1847 * they are not desirable. See `sym_fw2.h' for more details.
1848 */
1849 if (!(np->device_id == PCI_ID_LSI53C1010_2 &&
1850 np->revision_id < 0x1 &&
1851 np->pciclk_khz < 60000)) {
1852 scripta0->datao_phase[0] = cpu_to_scr(SCR_NO_OP);
1853 scripta0->datao_phase[1] = cpu_to_scr(0);
1854 }
1855 if (!(np->device_id == PCI_ID_LSI53C1010 &&
1856 /* np->revision_id < 0xff */ 1)) {
1857 scripta0->sel_done[0] = cpu_to_scr(SCR_NO_OP);
1858 scripta0->sel_done[1] = cpu_to_scr(0);
1859 }
1860
1861 /*
1862 * Patch some other variables in SCRIPTS.
1863 * These ones are loaded by the SCRIPTS processor.
1864 */
1865 scriptb0->pm0_data_addr[0] =
1866 cpu_to_scr(np->scripta_ba +
1867 offsetof(struct sym_fw2a_scr, pm0_data));
1868 scriptb0->pm1_data_addr[0] =
1869 cpu_to_scr(np->scripta_ba +
1870 offsetof(struct sym_fw2a_scr, pm1_data));
1871 }
1872
1873 /*
1874 * Fill the data area in scripts.
1875 * To be done for all firmwares.
1876 */
1877 static void
sym_fw_fill_data(u32 * in,u32 * out)1878 sym_fw_fill_data (u32 *in, u32 *out)
1879 {
1880 int i;
1881
1882 for (i = 0; i < SYM_CONF_MAX_SG; i++) {
1883 *in++ = SCR_CHMOV_TBL ^ SCR_DATA_IN;
1884 *in++ = offsetof (struct sym_dsb, data[i]);
1885 *out++ = SCR_CHMOV_TBL ^ SCR_DATA_OUT;
1886 *out++ = offsetof (struct sym_dsb, data[i]);
1887 }
1888 }
1889
1890 /*
1891 * Setup useful script bus addresses.
1892 * To be done for all firmwares.
1893 */
1894 static void
sym_fw_setup_bus_addresses(hcb_p np,const struct sym_fw * fw)1895 sym_fw_setup_bus_addresses(hcb_p np, const struct sym_fw *fw)
1896 {
1897 u32 *pa;
1898 const u_short *po;
1899 int i;
1900
1901 /*
1902 * Build the bus address table for script A
1903 * from the script A offset table.
1904 */
1905 po = (const u_short *) fw->a_ofs;
1906 pa = (u32 *) &np->fwa_bas;
1907 for (i = 0 ; i < sizeof(np->fwa_bas)/sizeof(u32) ; i++)
1908 pa[i] = np->scripta_ba + po[i];
1909
1910 /*
1911 * Same for script B.
1912 */
1913 po = (const u_short *) fw->b_ofs;
1914 pa = (u32 *) &np->fwb_bas;
1915 for (i = 0 ; i < sizeof(np->fwb_bas)/sizeof(u32) ; i++)
1916 pa[i] = np->scriptb_ba + po[i];
1917 }
1918
#ifdef	SYM_CONF_GENERIC_SUPPORT
/*
 * Setup routine for firmware #1.
 */
static void
sym_fw1_setup(hcb_p np, const struct sym_fw *fw)
{
	struct sym_fw1a_scr *scripta0 =
	    (struct sym_fw1a_scr *)np->scripta0;

	/* Fill the variable parts of the scripts. */
	sym_fw_fill_data(scripta0->data_in, scripta0->data_out);

	/* Compute the script bus addresses used from the C code. */
	sym_fw_setup_bus_addresses(np, fw);
}
#endif	/* SYM_CONF_GENERIC_SUPPORT */
1941
1942 /*
1943 * Setup routine for firmware #2.
1944 */
1945 static void
sym_fw2_setup(hcb_p np,const struct sym_fw * fw)1946 sym_fw2_setup(hcb_p np, const struct sym_fw *fw)
1947 {
1948 struct sym_fw2a_scr *scripta0;
1949
1950 scripta0 = (struct sym_fw2a_scr *) np->scripta0;
1951
1952 /*
1953 * Fill variable parts in scripts.
1954 */
1955 sym_fw_fill_data(scripta0->data_in, scripta0->data_out);
1956
1957 /*
1958 * Setup bus addresses used from the C code..
1959 */
1960 sym_fw_setup_bus_addresses(np, fw);
1961 }
1962
1963 /*
1964 * Allocate firmware descriptors.
1965 */
1966 #ifdef SYM_CONF_GENERIC_SUPPORT
1967 static const struct sym_fw sym_fw1 = SYM_FW_ENTRY(sym_fw1, "NCR-generic");
1968 #endif /* SYM_CONF_GENERIC_SUPPORT */
1969 static const struct sym_fw sym_fw2 = SYM_FW_ENTRY(sym_fw2, "LOAD/STORE-based");
1970
1971 /*
1972 * Find the most appropriate firmware for a chip.
1973 */
1974 static const struct sym_fw *
sym_find_firmware(const struct sym_pci_chip * chip)1975 sym_find_firmware(const struct sym_pci_chip *chip)
1976 {
1977 if (chip->features & FE_LDSTR)
1978 return &sym_fw2;
1979 #ifdef SYM_CONF_GENERIC_SUPPORT
1980 else if (!(chip->features & (FE_PFEN|FE_NOPM|FE_DAC)))
1981 return &sym_fw1;
1982 #endif
1983 else
1984 return NULL;
1985 }
1986
1987 /*
1988 * Bind a script to physical addresses.
1989 */
sym_fw_bind_script(hcb_p np,u32 * start,int len)1990 static void sym_fw_bind_script (hcb_p np, u32 *start, int len)
1991 {
1992 u32 opcode, new, old, tmp1, tmp2;
1993 u32 *end, *cur;
1994 int relocs;
1995
1996 cur = start;
1997 end = start + len/4;
1998
1999 while (cur < end) {
2000 opcode = *cur;
2001
2002 /*
2003 * If we forget to change the length
2004 * in scripts, a field will be
2005 * padded with 0. This is an illegal
2006 * command.
2007 */
2008 if (opcode == 0) {
2009 device_printf(np->device, "ERROR0 IN SCRIPT at %d.\n",
2010 (int)(cur-start));
2011 MDELAY (10000);
2012 ++cur;
2013 continue;
2014 }
2015
2016 /*
2017 * We use the bogus value 0xf00ff00f ;-)
2018 * to reserve data area in SCRIPTS.
2019 */
2020 if (opcode == SCR_DATA_ZERO) {
2021 *cur++ = 0;
2022 continue;
2023 }
2024
2025 if (DEBUG_FLAGS & DEBUG_SCRIPT)
2026 printf ("%d: <%x>\n", (int) (cur-start),
2027 (unsigned)opcode);
2028
2029 /*
2030 * We don't have to decode ALL commands
2031 */
2032 switch (opcode >> 28) {
2033 case 0xf:
2034 /*
2035 * LOAD / STORE DSA relative, don't relocate.
2036 */
2037 relocs = 0;
2038 break;
2039 case 0xe:
2040 /*
2041 * LOAD / STORE absolute.
2042 */
2043 relocs = 1;
2044 break;
2045 case 0xc:
2046 /*
2047 * COPY has TWO arguments.
2048 */
2049 relocs = 2;
2050 tmp1 = cur[1];
2051 tmp2 = cur[2];
2052 if ((tmp1 ^ tmp2) & 3) {
2053 device_printf(np->device,
2054 "ERROR1 IN SCRIPT at %d.\n",
2055 (int)(cur-start));
2056 MDELAY (10000);
2057 }
2058 /*
2059 * If PREFETCH feature not enabled, remove
2060 * the NO FLUSH bit if present.
2061 */
2062 if ((opcode & SCR_NO_FLUSH) &&
2063 !(np->features & FE_PFEN)) {
2064 opcode = (opcode & ~SCR_NO_FLUSH);
2065 }
2066 break;
2067 case 0x0:
2068 /*
2069 * MOVE/CHMOV (absolute address)
2070 */
2071 if (!(np->features & FE_WIDE))
2072 opcode = (opcode | OPC_MOVE);
2073 relocs = 1;
2074 break;
2075 case 0x1:
2076 /*
2077 * MOVE/CHMOV (table indirect)
2078 */
2079 if (!(np->features & FE_WIDE))
2080 opcode = (opcode | OPC_MOVE);
2081 relocs = 0;
2082 break;
2083 case 0x8:
2084 /*
2085 * JUMP / CALL
2086 * dont't relocate if relative :-)
2087 */
2088 if (opcode & 0x00800000)
2089 relocs = 0;
2090 else if ((opcode & 0xf8400000) == 0x80400000)/*JUMP64*/
2091 relocs = 2;
2092 else
2093 relocs = 1;
2094 break;
2095 case 0x4:
2096 case 0x5:
2097 case 0x6:
2098 case 0x7:
2099 relocs = 1;
2100 break;
2101 default:
2102 relocs = 0;
2103 break;
2104 }
2105
2106 /*
2107 * Scriptify:) the opcode.
2108 */
2109 *cur++ = cpu_to_scr(opcode);
2110
2111 /*
2112 * If no relocation, assume 1 argument
2113 * and just scriptize:) it.
2114 */
2115 if (!relocs) {
2116 *cur = cpu_to_scr(*cur);
2117 ++cur;
2118 continue;
2119 }
2120
2121 /*
2122 * Otherwise performs all needed relocations.
2123 */
2124 while (relocs--) {
2125 old = *cur;
2126
2127 switch (old & RELOC_MASK) {
2128 case RELOC_REGISTER:
2129 new = (old & ~RELOC_MASK) + np->mmio_ba;
2130 break;
2131 case RELOC_LABEL_A:
2132 new = (old & ~RELOC_MASK) + np->scripta_ba;
2133 break;
2134 case RELOC_LABEL_B:
2135 new = (old & ~RELOC_MASK) + np->scriptb_ba;
2136 break;
2137 case RELOC_SOFTC:
2138 new = (old & ~RELOC_MASK) + np->hcb_ba;
2139 break;
2140 case 0:
2141 /*
2142 * Don't relocate a 0 address.
2143 * They are mostly used for patched or
2144 * script self-modified areas.
2145 */
2146 if (old == 0) {
2147 new = old;
2148 break;
2149 }
2150 /* fall through */
2151 default:
2152 new = 0;
2153 panic("sym_fw_bind_script: "
2154 "weird relocation %x\n", old);
2155 break;
2156 }
2157
2158 *cur++ = cpu_to_scr(new);
2159 }
2160 }
2161 }
2162
2163 /*---------------------------------------------------------------------------*/
2164 /*--------------------------- END OF FIRMWARES -----------------------------*/
2165 /*---------------------------------------------------------------------------*/
2166
/*
 * Function prototypes.
 */
static void sym_save_initial_setting (hcb_p np);
static int sym_prepare_setting (hcb_p np, struct sym_nvram *nvram);
static int sym_prepare_nego (hcb_p np, ccb_p cp, int nego, u_char *msgptr);
static void sym_put_start_queue (hcb_p np, ccb_p cp);
static void sym_chip_reset (hcb_p np);
static void sym_soft_reset (hcb_p np);
static void sym_start_reset (hcb_p np);
static int sym_reset_scsi_bus (hcb_p np, int enab_int);
static int sym_wakeup_done (hcb_p np);
static void sym_flush_busy_queue (hcb_p np, int cam_status);
static void sym_flush_comp_queue (hcb_p np, int cam_status);
static void sym_init (hcb_p np, int reason);
static int sym_getsync(hcb_p np, u_char dt, u_char sfac, u_char *divp,
		u_char *fakp);
static void sym_setsync (hcb_p np, ccb_p cp, u_char ofs, u_char per,
			 u_char div, u_char fak);
static void sym_setwide (hcb_p np, ccb_p cp, u_char wide);
static void sym_setpprot(hcb_p np, ccb_p cp, u_char dt, u_char ofs,
			 u_char per, u_char wide, u_char div, u_char fak);
static void sym_settrans(hcb_p np, ccb_p cp, u_char dt, u_char ofs,
			 u_char per, u_char wide, u_char div, u_char fak);
static void sym_log_hard_error (hcb_p np, u_short sist, u_char dstat);
/* Interrupt handling and recovery. */
static void sym_intr (void *arg);
static void sym_poll (struct cam_sim *sim);
static void sym_recover_scsi_int (hcb_p np, u_char hsts);
static void sym_int_sto (hcb_p np);
static void sym_int_udc (hcb_p np);
static void sym_int_sbmc (hcb_p np);
static void sym_int_par (hcb_p np, u_short sist);
static void sym_int_ma (hcb_p np);
static int sym_dequeue_from_squeue(hcb_p np, int i, int target, int lun,
				   int task);
static void sym_sir_bad_scsi_status (hcb_p np, ccb_p cp);
static int sym_clear_tasks (hcb_p np, int status, int targ, int lun, int task);
static void sym_sir_task_recovery (hcb_p np, int num);
static int sym_evaluate_dp (hcb_p np, ccb_p cp, u32 scr, int *ofs);
static void sym_modify_dp(hcb_p np, ccb_p cp, int ofs);
static int sym_compute_residual (hcb_p np, ccb_p cp);
static int sym_show_msg (u_char * msg);
static void sym_print_msg (ccb_p cp, char *label, u_char *msg);
/* SYNC/PPR/WIDE negotiation answers. */
static void sym_sync_nego (hcb_p np, tcb_p tp, ccb_p cp);
static void sym_ppr_nego (hcb_p np, tcb_p tp, ccb_p cp);
static void sym_wide_nego (hcb_p np, tcb_p tp, ccb_p cp);
static void sym_nego_default (hcb_p np, tcb_p tp, ccb_p cp);
static void sym_nego_rejected (hcb_p np, tcb_p tp, ccb_p cp);
static void sym_int_sir (hcb_p np);
/* CCB/LCB resource management. */
static void sym_free_ccb (hcb_p np, ccb_p cp);
static ccb_p sym_get_ccb (hcb_p np, u_char tn, u_char ln, u_char tag_order);
static ccb_p sym_alloc_ccb (hcb_p np);
static ccb_p sym_ccb_from_dsa (hcb_p np, u32 dsa);
static lcb_p sym_alloc_lcb (hcb_p np, u_char tn, u_char ln);
static void sym_alloc_lcb_tags (hcb_p np, u_char tn, u_char ln);
static int sym_snooptest (hcb_p np);
static void sym_selectclock(hcb_p np, u_char scntl3);
static void sym_getclock (hcb_p np, int mult);
static int sym_getpciclock (hcb_p np);
static void sym_complete_ok (hcb_p np, ccb_p cp);
static void sym_complete_error (hcb_p np, ccb_p cp);
static void sym_callout (void *arg);
static int sym_abort_scsiio (hcb_p np, union ccb *ccb, int timed_out);
static void sym_reset_dev (hcb_p np, union ccb *ccb);
static void sym_action (struct cam_sim *sim, union ccb *ccb);
static int sym_setup_cdb (hcb_p np, struct ccb_scsiio *csio, ccb_p cp);
static void sym_setup_data_and_start (hcb_p np, struct ccb_scsiio *csio,
				      ccb_p cp);
static int sym_fast_scatter_sg_physical(hcb_p np, ccb_p cp,
					bus_dma_segment_t *psegs, int nsegs);
static int sym_scatter_sg_physical (hcb_p np, ccb_p cp,
				    bus_dma_segment_t *psegs, int nsegs);
static void sym_action2 (struct cam_sim *sim, union ccb *ccb);
static void sym_update_trans(hcb_p np, struct sym_trans *tip,
			     struct ccb_trans_settings *cts);
static void sym_update_dflags(hcb_p np, u_char *flags,
			      struct ccb_trans_settings *cts);

static const struct sym_pci_chip *sym_find_pci_chip (device_t dev);

/* Newbus (device driver) entry points. */
static device_probe_t sym_pci_probe;
static device_attach_t sym_pci_attach;
static device_detach_t sym_pci_detach;

static int sym_cam_attach (hcb_p np);
static void sym_cam_free (hcb_p np);

/* NVRAM access. */
static void sym_nvram_setup_host (hcb_p np, struct sym_nvram *nvram);
static void sym_nvram_setup_target (hcb_p np, int targ, struct sym_nvram *nvp);
static int sym_read_nvram (hcb_p np, struct sym_nvram *nvp);
2257
2258 /*
2259 * Print something which allows to retrieve the controller type,
2260 * unit, target, lun concerned by a kernel message.
2261 */
/*
 * Print a "controller:target" prefix for a kernel message.
 */
static void PRINT_TARGET (hcb_p np, int target)
{
	printf("%s:%d:", sym_name(np), target);
}
2266
/*
 * Print a "controller:target:lun" prefix for a kernel message.
 */
static void PRINT_LUN(hcb_p np, int target, int lun)
{
	printf("%s:%d:%d:", sym_name(np), target, lun);
}
2271
/*
 * Print the CAM path of the CCB attached to this internal CCB,
 * if any. Silently does nothing when no CAM CCB is attached.
 */
static void PRINT_ADDR (ccb_p cp)
{
	if (cp == NULL || cp->cam_ccb == NULL)
		return;
	xpt_print_path(cp->cam_ccb->ccb_h.path);
}
2277
2278 /*
2279 * Take into account this ccb in the freeze count.
2280 */
sym_freeze_cam_ccb(union ccb * ccb)2281 static void sym_freeze_cam_ccb(union ccb *ccb)
2282 {
2283 if (!(ccb->ccb_h.flags & CAM_DEV_QFRZDIS)) {
2284 if (!(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
2285 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2286 xpt_freeze_devq(ccb->ccb_h.path, 1);
2287 }
2288 }
2289 }
2290
2291 /*
2292 * Set the status field of a CAM CCB.
2293 */
/*
 * Set the CAM status field of a CCB, preserving the non-status bits.
 */
static __inline void sym_set_cam_status(union ccb *ccb, cam_status status)
{
	ccb->ccb_h.status = (ccb->ccb_h.status & ~CAM_STATUS_MASK) | status;
}
2299
2300 /*
2301 * Get the status field of a CAM CCB.
2302 */
sym_get_cam_status(union ccb * ccb)2303 static __inline int sym_get_cam_status(union ccb *ccb)
2304 {
2305 return ccb->ccb_h.status & CAM_STATUS_MASK;
2306 }
2307
2308 /*
2309 * Enqueue a CAM CCB.
2310 */
static void sym_enqueue_cam_ccb(ccb_p cp)
{
	hcb_p np;
	union ccb *ccb;

	ccb = cp->cam_ccb;
	np = (hcb_p) cp->arg;

	/* A CCB must never be queued to the SIM twice. */
	assert(!(ccb->ccb_h.status & CAM_SIM_QUEUED));
	ccb->ccb_h.status = CAM_REQ_INPROG;

	/* Arm the per-CCB timeout; ccb_h.timeout is in milliseconds. */
	callout_reset_sbt(&cp->ch, SBT_1MS * ccb->ccb_h.timeout, 0, sym_callout,
	    (caddr_t)ccb, 0);
	/* Mark queued and remember the owning controller for completion. */
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	ccb->ccb_h.sym_hcb_ptr = np;

	/* Link the CCB onto the controller's list of pending CAM CCBs. */
	sym_insque_tail(sym_qptr(&ccb->ccb_h.sim_links), &np->cam_ccbq);
}
2329
2330 /*
2331 * Complete a pending CAM CCB.
2332 */
2333
static void sym_xpt_done(hcb_p np, union ccb *ccb, ccb_p cp)
{

	SYM_LOCK_ASSERT(MA_OWNED);

	if (ccb->ccb_h.status & CAM_SIM_QUEUED) {
		/*
		 * Undo sym_enqueue_cam_ccb(): disarm the timeout,
		 * unlink from the pending list and clear bookkeeping,
		 * before handing the CCB back to CAM.
		 */
		callout_stop(&cp->ch);
		sym_remque(sym_qptr(&ccb->ccb_h.sim_links));
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.sym_hcb_ptr = NULL;
	}
	xpt_done(ccb);
}
2347
/*
 * Complete a CAM CCB that was never queued to the SIM:
 * just stamp the given CAM status and hand it back to CAM.
 */
static void sym_xpt_done2(hcb_p np, union ccb *ccb, int cam_status)
{
	SYM_LOCK_ASSERT(MA_OWNED);

	/* Replace the status bits, keeping the other header flags. */
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	ccb->ccb_h.status |= cam_status;
	xpt_done(ccb);
}
2356
2357 /*
2358 * SYMBIOS chip clock divisor table.
2359 *
2360 * Divisors are multiplied by 10,000,000 in order to make
2361 * calculations more simple.
2362 */
#define _5M 5000000
/* Divisors {1, 1.5, 2, 3, 4, 6, 8} scaled by 10M (see comment above). */
static const u32 div_10M[] =
	{2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M};
2366
2367 /*
2368 * SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64,
2369 * 128 transfers. All chips support at least 16 transfers
2370 * bursts. The 825A, 875 and 895 chips support bursts of up
2371 * to 128 transfers and the 895A and 896 support bursts of up
2372 * to 64 transfers. All other chips support up to 16
2373 * transfers bursts.
2374 *
2375 * For PCI 32 bit data transfers each transfer is a DWORD.
2376 * It is a QUADWORD (8 bytes) for PCI 64 bit data transfers.
2377 *
2378 * We use log base 2 (burst length) as internal code, with
2379 * value 0 meaning "burst disabled".
2380 */
2381
/*
 * Burst length (in transfers) from burst code: 0 means burst
 * disabled, otherwise 2^bc transfers.
 * Fully parenthesized so the macro expands safely inside larger
 * expressions (the original `1 << (bc)` would swallow a following
 * `+ n` because shift binds looser than addition).
 */
#define burst_length(bc) ((!(bc)) ? 0 : (1 << (bc)))
2386
/*
 * Burst code from io register bits: 0 when bursting is disabled
 * (CTEST4 bit 7 set), otherwise decoded from DMODE bits 7:6 and
 * CTEST5 bit 2, biased by 1.
 * Fully parenthesized (condition and whole expansion) so the macro
 * is safe inside larger expressions; the original unparenthesized
 * `(ctest4) & 0x80?` mis-associated with surrounding operators.
 */
#define burst_code(dmode, ctest4, ctest5) \
	(((ctest4) & 0x80) ? 0 : \
	 ((((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1))
2392
2393 /*
2394 * Set initial io register bits from burst code.
2395 */
/*
 * Set initial io register bits from burst code.
 * bc == 0 disables bursting (CTEST4 bit 7); otherwise bc-1 is
 * split between DMODE bits 7:6 and CTEST5 bit 2.
 */
static __inline void sym_init_burst(hcb_p np, u_char bc)
{
	/* Start from a clean slate for all burst-related bits. */
	np->rv_ctest4 &= ~0x80;
	np->rv_dmode &= ~(0x3 << 6);
	np->rv_ctest5 &= ~0x4;

	if (bc == 0) {
		np->rv_ctest4 |= 0x80;	/* burst disabled */
		return;
	}
	--bc;
	np->rv_dmode |= ((bc & 0x3) << 6);
	np->rv_ctest5 |= (bc & 0x4);
}
2411
2412 /*
2413 * Print out the list of targets that have some flag disabled by user.
2414 */
/*
 * Print out the list of targets that have some flag disabled by user.
 * The header line is printed once, before the first matching target.
 */
static void sym_print_targets_flag(hcb_p np, int mask, char *msg)
{
	int i, n = 0;

	for (i = 0; i < SYM_CONF_MAX_TARGET; i++) {
		/* Skip our own id and targets without the flag. */
		if (i == np->myaddr || !(np->target[i].usrflags & mask))
			continue;
		if (n++ == 0)
			device_printf(np->device,
			    "%s disabled for targets", msg);
		printf(" %d", i);
	}
	if (n != 0)
		printf(".\n");
}
2433
2434 /*
2435 * Save initial settings of some IO registers.
2436 * Assumed to have been set by BIOS.
2437 * We cannot reset the chip prior to reading the
2438 * IO registers, since informations will be lost.
2439 * Since the SCRIPTS processor may be running, this
2440 * is not safe on paper, but it seems to work quite
2441 * well. :)
2442 */
static void sym_save_initial_setting (hcb_p np)
{
	/* Each mask keeps only the bits this driver later re-applies. */
	np->sv_scntl0	= INB(nc_scntl0) & 0x0a;
	np->sv_scntl3	= INB(nc_scntl3) & 0x07;
	np->sv_dmode	= INB(nc_dmode)  & 0xce;
	np->sv_dcntl	= INB(nc_dcntl)  & 0xa8;
	np->sv_ctest3	= INB(nc_ctest3) & 0x01;
	np->sv_ctest4	= INB(nc_ctest4) & 0x80;
	np->sv_gpcntl	= INB(nc_gpcntl);
	np->sv_stest1	= INB(nc_stest1);
	np->sv_stest2	= INB(nc_stest2) & 0x20;
	np->sv_stest4	= INB(nc_stest4);
	if (np->features & FE_C10) {	/* Always large DMA fifo + ultra3 */
		np->sv_scntl4	= INB(nc_scntl4);
		np->sv_ctest5	= INB(nc_ctest5) & 0x04;
	}
	else
		np->sv_ctest5	= INB(nc_ctest5) & 0x24;
}
2462
2463 /*
2464 * Prepare io register values used by sym_init() according
2465 * to selected and supported features.
2466 */
static int sym_prepare_setting(hcb_p np, struct sym_nvram *nvram)
{
	u_char	burst_max;
	u32	period;
	int i;

	/*
	 * Wide ?
	 */
	np->maxwide = (np->features & FE_WIDE)? 1 : 0;

	/*
	 * Get the frequency of the chip's clock.
	 */
	if (np->features & FE_QUAD)
		np->multiplier = 4;
	else if (np->features & FE_DBLR)
		np->multiplier = 2;
	else
		np->multiplier = 1;

	np->clock_khz = (np->features & FE_CLK80)? 80000 : 40000;
	np->clock_khz *= np->multiplier;

	/* Measure the actual clock when it is not the plain 40 MHz. */
	if (np->clock_khz != 40000)
		sym_getclock(np, np->multiplier);

	/*
	 * Divisor to be used for async (timer pre-scaler).
	 */
	i = np->clock_divn - 1;
	while (--i >= 0) {
		if (10ul * SYM_CONF_MIN_ASYNC * np->clock_khz > div_10M[i]) {
			++i;
			break;
		}
	}
	np->rv_scntl3 = i+1;

	/*
	 * The C1010 uses hardwired divisors for async.
	 * So, we just throw away, the async. divisor.:-)
	 */
	if (np->features & FE_C10)
		np->rv_scntl3 = 0;

	/*
	 * Minimum synchronous period factor supported by the chip.
	 * Btw, 'period' is in tenths of nanoseconds.
	 */
	period = howmany(4 * div_10M[0], np->clock_khz);
	if (period <= 250) np->minsync = 10;
	else if (period <= 303) np->minsync = 11;
	else if (period <= 500) np->minsync = 12;
	else np->minsync = howmany(period, 40);

	/*
	 * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2).
	 */
	if (np->minsync < 25 &&
	    !(np->features & (FE_ULTRA|FE_ULTRA2|FE_ULTRA3)))
		np->minsync = 25;
	else if (np->minsync < 12 &&
		 !(np->features & (FE_ULTRA2|FE_ULTRA3)))
		np->minsync = 12;

	/*
	 * Maximum synchronous period factor supported by the chip.
	 */
	period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz);
	np->maxsync = period > 2540 ? 254 : period / 10;

	/*
	 * If chip is a C1010, guess the sync limits in DT mode.
	 */
	if ((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) {
		if (np->clock_khz == 160000) {
			np->minsync_dt = 9;
			np->maxsync_dt = 50;
			np->maxoffs_dt = 62;
		}
	}

	/*
	 * 64 bit addressing (895A/896/1010) ?
	 */
	if (np->features & FE_DAC)
#ifdef __LP64__
		np->rv_ccntl1	|= (XTIMOD | EXTIBMV);
#else
		np->rv_ccntl1	|= (DDAC);
#endif

	/*
	 * Phase mismatch handled by SCRIPTS (895A/896/1010) ?
	 */
	if (np->features & FE_NOPM)
		np->rv_ccntl0	|= (ENPMJ);

	/*
	 * C1010 Errata.
	 * In dual channel mode, contention occurs if internal cycles
	 * are used. Disable internal cycles.
	 */
	if (np->device_id == PCI_ID_LSI53C1010 &&
	    np->revision_id < 0x2)
		np->rv_ccntl0	|= DILS;

	/*
	 * Select burst length (dwords)
	 * 255 means "use whatever the BIOS programmed".
	 */
	burst_max	= SYM_SETUP_BURST_ORDER;
	if (burst_max == 255)
		burst_max = burst_code(np->sv_dmode, np->sv_ctest4,
				       np->sv_ctest5);
	if (burst_max > 7)
		burst_max = 7;
	if (burst_max > np->maxburst)
		burst_max = np->maxburst;

	/*
	 * DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2.
	 * This chip and the 860 Rev 1 may wrongly use PCI cache line
	 * based transactions on LOAD/STORE instructions. So we have
	 * to prevent these chips from using such PCI transactions in
	 * this driver. The generic ncr driver that does not use
	 * LOAD/STORE instructions does not need this work-around.
	 */
	if ((np->device_id == PCI_ID_SYM53C810 &&
	     np->revision_id >= 0x10 && np->revision_id <= 0x11) ||
	    (np->device_id == PCI_ID_SYM53C860 &&
	     np->revision_id <= 0x1))
		np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP);

	/*
	 * Select all supported special features.
	 * If we are using on-board RAM for scripts, prefetch (PFEN)
	 * does not help, but burst op fetch (BOF) does.
	 * Disabling PFEN makes sure BOF will be used.
	 */
	if (np->features & FE_ERL)
		np->rv_dmode	|= ERL;		/* Enable Read Line */
	if (np->features & FE_BOF)
		np->rv_dmode	|= BOF;		/* Burst Opcode Fetch */
	if (np->features & FE_ERMP)
		np->rv_dmode	|= ERMP;	/* Enable Read Multiple */
#if 1
	if ((np->features & FE_PFEN) && !np->ram_ba)
#else
	if (np->features & FE_PFEN)
#endif
		np->rv_dcntl	|= PFEN;	/* Prefetch Enable */
	if (np->features & FE_CLSE)
		np->rv_dcntl	|= CLSE;	/* Cache Line Size Enable */
	if (np->features & FE_WRIE)
		np->rv_ctest3	|= WRIE;	/* Write and Invalidate */
	if (np->features & FE_DFS)
		np->rv_ctest5	|= DFS;		/* Dma Fifo Size */

	/*
	 * Select some other
	 */
	if (SYM_SETUP_PCI_PARITY)
		np->rv_ctest4	|= MPEE; /* Master parity checking */
	if (SYM_SETUP_SCSI_PARITY)
		np->rv_scntl0	|= 0x0a; /* full arb., ena parity, par->ATN */

	/*
	 * Get parity checking, host ID and verbose mode from NVRAM
	 */
	np->myaddr = 255;	/* 255 = "not set by NVRAM" sentinel */
	sym_nvram_setup_host (np, nvram);

	/*
	 * Get SCSI addr of host adapter (set by bios?).
	 */
	if (np->myaddr == 255) {
		np->myaddr = INB(nc_scid) & 0x07;
		if (!np->myaddr)
			np->myaddr = SYM_SETUP_HOST_ID;
	}

	/*
	 * Prepare initial io register bits for burst length
	 */
	sym_init_burst(np, burst_max);

	/*
	 * Set SCSI BUS mode.
	 * - LVD capable chips (895/895A/896/1010) report the
	 *   current BUS mode through the STEST4 IO register.
	 * - For previous generation chips (825/825A/875),
	 *   user has to tell us how to check against HVD,
	 *   since a 100% safe algorithm is not possible.
	 */
	np->scsi_mode = SMODE_SE;
	if (np->features & (FE_ULTRA2|FE_ULTRA3))
		np->scsi_mode = (np->sv_stest4 & SMODE);
	else if (np->features & FE_DIFF) {
		if (SYM_SETUP_SCSI_DIFF == 1) {
			if (np->sv_scntl3) {
				if (np->sv_stest2 & 0x20)
					np->scsi_mode = SMODE_HVD;
			}
			else if (nvram->type == SYM_SYMBIOS_NVRAM) {
				if (!(INB(nc_gpreg) & 0x08))
					np->scsi_mode = SMODE_HVD;
			}
		}
		else if (SYM_SETUP_SCSI_DIFF == 2)
			np->scsi_mode = SMODE_HVD;
	}
	if (np->scsi_mode == SMODE_HVD)
		np->rv_stest2 |= 0x20;

	/*
	 * Set LED support from SCRIPTS.
	 * Ignore this feature for boards known to use a
	 * specific GPIO wiring and for the 895A, 896
	 * and 1010 that drive the LED directly.
	 */
	if ((SYM_SETUP_SCSI_LED ||
	     (nvram->type == SYM_SYMBIOS_NVRAM ||
	      (nvram->type == SYM_TEKRAM_NVRAM &&
	       np->device_id == PCI_ID_SYM53C895))) &&
	    !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01))
		np->features |= FE_LED0;

	/*
	 * Set irq mode.
	 */
	switch(SYM_SETUP_IRQ_MODE & 3) {
	case 2:
		np->rv_dcntl	|= IRQM;
		break;
	case 1:
		np->rv_dcntl	|= (np->sv_dcntl & IRQM);
		break;
	default:
		break;
	}

	/*
	 * Configure targets according to driver setup.
	 * If NVRAM present get targets setup from NVRAM.
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
		tcb_p tp = &np->target[i];

		tp->tinfo.user.scsi_version = tp->tinfo.current.scsi_version= 2;
		tp->tinfo.user.spi_version = tp->tinfo.current.spi_version = 2;
		tp->tinfo.user.period = np->minsync;
		if (np->features & FE_ULTRA3)
			tp->tinfo.user.period = np->minsync_dt;
		tp->tinfo.user.offset = np->maxoffs;
		tp->tinfo.user.width = np->maxwide ? BUS_16_BIT : BUS_8_BIT;
		tp->usrflags |= (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);
		tp->usrtags = SYM_SETUP_MAX_TAG;

		sym_nvram_setup_target (np, i, nvram);

		/*
		 * For now, guess PPR/DT support from the period
		 * and BUS width.
		 */
		if (np->features & FE_ULTRA3) {
			if (tp->tinfo.user.period <= 9 &&
			    tp->tinfo.user.width == BUS_16_BIT) {
				tp->tinfo.user.options |= PPR_OPT_DT;
				tp->tinfo.user.offset = np->maxoffs_dt;
				tp->tinfo.user.spi_version = 3;
			}
		}

		if (!tp->usrtags)
			tp->usrflags &= ~SYM_TAGS_ENABLED;
	}

	/*
	 * Let user know about the settings.
	 */
	i = nvram->type;
	device_printf(np->device, "%s NVRAM, ID %d, Fast-%d, %s, %s\n",
	    i == SYM_SYMBIOS_NVRAM ? "Symbios" :
	    (i == SYM_TEKRAM_NVRAM ? "Tekram" : "No"),
	    np->myaddr,
	    (np->features & FE_ULTRA3) ? 80 :
	    (np->features & FE_ULTRA2) ? 40 :
	    (np->features & FE_ULTRA) ? 20 : 10,
	    sym_scsi_bus_mode(np->scsi_mode),
	    (np->rv_scntl0 & 0xa) ? "parity checking" : "NO parity");
	/*
	 * Tell him more on demand.
	 */
	if (sym_verbose) {
		device_printf(np->device, "%s IRQ line driver%s\n",
		    np->rv_dcntl & IRQM ? "totem pole" : "open drain",
		    np->ram_ba ? ", using on-chip SRAM" : "");
		device_printf(np->device, "using %s firmware.\n", np->fw_name);
		if (np->features & FE_NOPM)
			device_printf(np->device,
			    "handling phase mismatch from SCRIPTS.\n");
	}
	/*
	 * And still more.
	 */
	if (sym_verbose > 1) {
		device_printf(np->device,
		    "initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
		    "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
		    np->sv_scntl3, np->sv_dmode, np->sv_dcntl, np->sv_ctest3,
		    np->sv_ctest4, np->sv_ctest5);

		device_printf(np->device,
		    "final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
		    "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
		    np->rv_scntl3, np->rv_dmode, np->rv_dcntl,
		    np->rv_ctest3, np->rv_ctest4, np->rv_ctest5);
	}
	/*
	 * Let user be aware of targets that have some disable flags set.
	 */
	sym_print_targets_flag(np, SYM_SCAN_BOOT_DISABLED, "SCAN AT BOOT");
	if (sym_verbose)
		sym_print_targets_flag(np, SYM_SCAN_LUNS_DISABLED,
		    "SCAN FOR LUNS");

	/* Always succeeds. */
	return 0;
}
2796
2797 /*
2798 * Prepare the next negotiation message if needed.
2799 *
2800 * Fill in the part of message buffer that contains the
2801 * negotiation and the nego_status field of the CCB.
2802 * Returns the size of the message in bytes.
2803 */
static int sym_prepare_nego(hcb_p np, ccb_p cp, int nego, u_char *msgptr)
{
	tcb_p tp = &np->target[cp->target];
	int n = 0;

	/*
	 * Early C1010 chips need a work-around for DT
	 * data transfer to work: wipe any transfer options.
	 */
	if (!(np->features & FE_U3EN))
		tp->tinfo.goal.options = 0;

	/*
	 * Pick the negotiation to perform; PPR takes precedence
	 * over wide, which takes precedence over sync.
	 */
	if (tp->tinfo.goal.options & PPR_OPT_MASK)
		nego = NS_PPR;
	else if (tp->tinfo.current.width != tp->tinfo.goal.width)
		nego = NS_WIDE;
	else if (tp->tinfo.current.period != tp->tinfo.goal.period ||
		 tp->tinfo.current.offset != tp->tinfo.goal.offset)
		nego = NS_SYNC;

	/*
	 * Build the corresponding extended message.
	 */
	switch (nego) {
	case NS_SYNC:
		msgptr[n++] = M_EXTENDED;
		msgptr[n++] = 3;
		msgptr[n++] = M_X_SYNC_REQ;
		msgptr[n++] = tp->tinfo.goal.period;
		msgptr[n++] = tp->tinfo.goal.offset;
		break;
	case NS_WIDE:
		msgptr[n++] = M_EXTENDED;
		msgptr[n++] = 2;
		msgptr[n++] = M_X_WIDE_REQ;
		msgptr[n++] = tp->tinfo.goal.width;
		break;
	case NS_PPR:
		msgptr[n++] = M_EXTENDED;
		msgptr[n++] = 6;
		msgptr[n++] = M_X_PPR_REQ;
		msgptr[n++] = tp->tinfo.goal.period;
		msgptr[n++] = 0;
		msgptr[n++] = tp->tinfo.goal.offset;
		msgptr[n++] = tp->tinfo.goal.width;
		msgptr[n++] = tp->tinfo.goal.options & PPR_OPT_DT;
		break;
	}

	cp->nego_status = nego;

	if (nego != 0) {
		/* Keep track that a nego will be performed. */
		tp->nego_cp = cp;
		if (DEBUG_FLAGS & DEBUG_NEGO) {
			sym_print_msg(cp, nego == NS_SYNC ? "sync msgout" :
			    nego == NS_WIDE ? "wide msgout" :
			    "ppr msgout", msgptr);
		}
	}

	return n;
}
2871
2872 /*
2873 * Insert a job into the start queue.
2874 */
static void sym_put_start_queue(hcb_p np, ccb_p cp)
{
	u_short	qidx;

#ifdef SYM_CONF_IARB_SUPPORT
	/*
	 * If the previously queued CCB is not yet done,
	 * set the IARB hint. The SCRIPTS will go with IARB
	 * for this job when starting the previous one.
	 * We leave devices a chance to win arbitration by
	 * not using more than 'iarb_max' consecutive
	 * immediate arbitrations.
	 */
	if (np->last_cp && np->iarb_count < np->iarb_max) {
		np->last_cp->host_flags |= HF_HINT_IARB;
		++np->iarb_count;
	}
	else
		np->iarb_count = 0;
	np->last_cp = cp;
#endif

	/*
	 * Insert first the idle task and then our job.
	 * The MB should ensure proper ordering.
	 */
	qidx = np->squeueput + 2;
	if (qidx >= MAX_QUEUE*2) qidx = 0;

	/* Terminate the queue with the idle task ... */
	np->squeue [qidx] = cpu_to_scr(np->idletask_ba);
	MEMORY_BARRIER();
	/* ... then publish our job at the previous put slot. */
	np->squeue [np->squeueput] = cpu_to_scr(cp->ccb_ba);

	np->squeueput = qidx;

	if (DEBUG_FLAGS & DEBUG_QUEUE)
		device_printf(np->device, "queuepos=%d.\n", np->squeueput);

	/*
	 * Script processor may be waiting for reselect.
	 * Wake it up.
	 */
	MEMORY_BARRIER();
	OUTB (nc_istat, SIGP|np->istat_sem);
}
2920
2921 /*
2922 * Soft reset the chip.
2923 *
2924 * Raising SRST when the chip is running may cause
2925 * problems on dual function chips (see below).
2926 * On the other hand, LVD devices need some delay
2927 * to settle and report actual BUS mode in STEST4.
2928 */
static void sym_chip_reset (hcb_p np)
{
	OUTB (nc_istat, SRST);	/* Raise software reset */
	UDELAY (10);
	OUTB (nc_istat, 0);	/* Release software reset */
	UDELAY(2000);	/* For BUS MODE to settle */
}
2936
2937 /*
2938 * Soft reset the chip.
2939 *
2940 * Some 896 and 876 chip revisions may hang-up if we set
2941 * the SRST (soft reset) bit at the wrong time when SCRIPTS
2942 * are running.
2943 * So, we need to abort the current operation prior to
2944 * soft resetting the chip.
2945 */
static void sym_soft_reset (hcb_p np)
{
	u_char istat;
	int i;

	/* Ask the chip to abort the current operation. */
	OUTB (nc_istat, CABRT);
	/* Poll ISTAT until the abort is acknowledged (bounded loop). */
	for (i = 1000000 ; i ; --i) {
		istat = INB (nc_istat);
		if (istat & SIP) {
			/* Drain pending SCSI interrupt status first. */
			INW (nc_sist);
			continue;
		}
		if (istat & DIP) {
			/* Abort done: clear CABRT and ack the DMA interrupt. */
			OUTB (nc_istat, 0);
			INB (nc_dstat);
			break;
		}
	}
	if (!i)
		device_printf(np->device,
		    "unable to abort current chip operation.\n");
	/* Now it is safe to raise SRST. */
	sym_chip_reset (np);
}
2969
2970 /*
2971 * Start reset process.
2972 *
2973 * The interrupt handler will reinitialize the chip.
2974 */
/*
 * Kick off a SCSI BUS reset with the RST interrupt enabled;
 * the interrupt handler completes chip reinitialization.
 */
static void sym_start_reset(hcb_p np)
{
	(void)sym_reset_scsi_bus(np, 1);
}
2979
static int sym_reset_scsi_bus(hcb_p np, int enab_int)
{
	u32 term;
	int retv = 0;

	sym_soft_reset(np);	/* Soft reset the chip */
	if (enab_int)
		OUTW (nc_sien, RST);	/* Let the RST interrupt in */
	/*
	 * Enable Tolerant, reset IRQD if present and
	 * properly set IRQ mode, prior to resetting the bus.
	 */
	OUTB (nc_stest3, TE);
	OUTB (nc_dcntl, (np->rv_dcntl & IRQM));
	/* Assert SCSI RST and give the bus time to see it. */
	OUTB (nc_scntl1, CRST);
	UDELAY (200);

	if (!SYM_SETUP_SCSI_BUS_CHECK)
		goto out;
	/*
	 * Check for no terminators or SCSI bus shorts to ground.
	 * Read SCSI data bus, data parity bits and control signals.
	 * We are expecting RESET to be TRUE and other signals to be
	 * FALSE.
	 */
	term = INB(nc_sstat0);
	term = ((term & 2) << 7) + ((term & 1) << 17);	/* rst sdp0 */
	term |= ((INB(nc_sstat2) & 0x01) << 26) |	/* sdp1 */
		((INW(nc_sbdl) & 0xff) << 9) |		/* d7-0 */
		((INW(nc_sbdl) & 0xff00) << 10) |	/* d15-8 */
		INB(nc_sbcl);	/* req ack bsy sel atn msg cd io */

	/* Narrow chips: ignore the upper data byte and sdp1. */
	if (!(np->features & FE_WIDE))
		term &= 0x3ffff;

	if (term != (2<<7)) {
		device_printf(np->device,
		    "suspicious SCSI data while resetting the BUS.\n");
		device_printf(np->device,
		    "%sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = "
		    "0x%lx, expecting 0x%lx\n", (np->features & FE_WIDE) ?
		    "dp1,d15-8," : "", (u_long)term, (u_long)(2 << 7));
		if (SYM_SETUP_SCSI_BUS_CHECK == 1)
			retv = 1;
	}
out:
	/* Deassert SCSI RST. */
	OUTB (nc_scntl1, 0);
	/* MDELAY(100); */
	return retv;
}
3030
3031 /*
3032 * The chip may have completed jobs. Look at the DONE QUEUE.
3033 *
3034 * On architectures that may reorder LOAD/STORE operations,
3035 * a memory barrier may be needed after the reading of the
3036 * so-called `flag' and prior to dealing with the data.
3037 */
/*
 * The chip may have completed jobs. Walk the DONE QUEUE,
 * complete each CCB found there and return how many were done.
 */
static int sym_wakeup_done (hcb_p np)
{
	ccb_p cp;
	int i, n;
	u32 dsa;

	SYM_LOCK_ASSERT(MA_OWNED);

	n = 0;
	i = np->dqueueget;
	for (;;) {
		/* A zero entry marks the end of the completed jobs. */
		dsa = scr_to_cpu(np->dqueue[i]);
		if (dsa == 0)
			break;
		np->dqueue[i] = 0;
		i += 2;
		if (i >= MAX_QUEUE*2)
			i = 0;

		cp = sym_ccb_from_dsa(np, dsa);
		if (cp == NULL) {
			device_printf(np->device,
			    "bad DSA (%x) in done queue.\n", (u_int)dsa);
			continue;
		}
		/* Barrier before touching data written by the chip. */
		MEMORY_BARRIER();
		sym_complete_ok(np, cp);
		++n;
	}
	np->dqueueget = i;

	return n;
}
3069
3070 /*
3071 * Complete all active CCBs with error.
3072 * Used on CHIP/SCSI RESET.
3073 */
static void sym_flush_busy_queue (hcb_p np, int cam_status)
{
	/*
	 * Move all active CCBs to the COMP queue
	 * and flush this queue.
	 */
	sym_que_splice(&np->busy_ccbq, &np->comp_ccbq);
	sym_que_init(&np->busy_ccbq);	/* busy queue is now empty */
	sym_flush_comp_queue(np, cam_status);
}
3084
3085 /*
3086 * Start chip.
3087 *
3088 * 'reason' means:
3089 * 0: initialisation.
3090 * 1: SCSI BUS RESET delivered or received.
3091 * 2: SCSI BUS MODE changed.
3092 */
sym_init(hcb_p np,int reason)3093 static void sym_init (hcb_p np, int reason)
3094 {
3095 int i;
3096 u32 phys;
3097
3098 SYM_LOCK_ASSERT(MA_OWNED);
3099
3100 /*
3101 * Reset chip if asked, otherwise just clear fifos.
3102 */
3103 if (reason == 1)
3104 sym_soft_reset(np);
3105 else {
3106 OUTB (nc_stest3, TE|CSF);
3107 OUTONB (nc_ctest3, CLF);
3108 }
3109
3110 /*
3111 * Clear Start Queue
3112 */
3113 phys = np->squeue_ba;
3114 for (i = 0; i < MAX_QUEUE*2; i += 2) {
3115 np->squeue[i] = cpu_to_scr(np->idletask_ba);
3116 np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4);
3117 }
3118 np->squeue[MAX_QUEUE*2-1] = cpu_to_scr(phys);
3119
3120 /*
3121 * Start at first entry.
3122 */
3123 np->squeueput = 0;
3124
3125 /*
3126 * Clear Done Queue
3127 */
3128 phys = np->dqueue_ba;
3129 for (i = 0; i < MAX_QUEUE*2; i += 2) {
3130 np->dqueue[i] = 0;
3131 np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4);
3132 }
3133 np->dqueue[MAX_QUEUE*2-1] = cpu_to_scr(phys);
3134
3135 /*
3136 * Start at first entry.
3137 */
3138 np->dqueueget = 0;
3139
3140 /*
3141 * Install patches in scripts.
3142 * This also let point to first position the start
3143 * and done queue pointers used from SCRIPTS.
3144 */
3145 np->fw_patch(np);
3146
3147 /*
3148 * Wakeup all pending jobs.
3149 */
3150 sym_flush_busy_queue(np, CAM_SCSI_BUS_RESET);
3151
3152 /*
3153 * Init chip.
3154 */
3155 OUTB (nc_istat, 0x00 ); /* Remove Reset, abort */
3156 UDELAY (2000); /* The 895 needs time for the bus mode to settle */
3157
3158 OUTB (nc_scntl0, np->rv_scntl0 | 0xc0);
3159 /* full arb., ena parity, par->ATN */
3160 OUTB (nc_scntl1, 0x00); /* odd parity, and remove CRST!! */
3161
3162 sym_selectclock(np, np->rv_scntl3); /* Select SCSI clock */
3163
3164 OUTB (nc_scid , RRE|np->myaddr); /* Adapter SCSI address */
3165 OUTW (nc_respid, 1ul<<np->myaddr); /* Id to respond to */
3166 OUTB (nc_istat , SIGP ); /* Signal Process */
3167 OUTB (nc_dmode , np->rv_dmode); /* Burst length, dma mode */
3168 OUTB (nc_ctest5, np->rv_ctest5); /* Large fifo + large burst */
3169
3170 OUTB (nc_dcntl , NOCOM|np->rv_dcntl); /* Protect SFBR */
3171 OUTB (nc_ctest3, np->rv_ctest3); /* Write and invalidate */
3172 OUTB (nc_ctest4, np->rv_ctest4); /* Master parity checking */
3173
3174 /* Extended Sreq/Sack filtering not supported on the C10 */
3175 if (np->features & FE_C10)
3176 OUTB (nc_stest2, np->rv_stest2);
3177 else
3178 OUTB (nc_stest2, EXT|np->rv_stest2);
3179
3180 OUTB (nc_stest3, TE); /* TolerANT enable */
3181 OUTB (nc_stime0, 0x0c); /* HTH disabled STO 0.25 sec */
3182
3183 /*
3184 * For now, disable AIP generation on C1010-66.
3185 */
3186 if (np->device_id == PCI_ID_LSI53C1010_2)
3187 OUTB (nc_aipcntl1, DISAIP);
3188
3189 /*
3190 * C10101 Errata.
3191 * Errant SGE's when in narrow. Write bits 4 & 5 of
3192 * STEST1 register to disable SGE. We probably should do
3193 * that from SCRIPTS for each selection/reselection, but
3194 * I just don't want. :)
3195 */
3196 if (np->device_id == PCI_ID_LSI53C1010 &&
3197 /* np->revision_id < 0xff */ 1)
3198 OUTB (nc_stest1, INB(nc_stest1) | 0x30);
3199
3200 /*
3201 * DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2.
3202 * Disable overlapped arbitration for some dual function devices,
3203 * regardless revision id (kind of post-chip-design feature. ;-))
3204 */
3205 if (np->device_id == PCI_ID_SYM53C875)
3206 OUTB (nc_ctest0, (1<<5));
3207 else if (np->device_id == PCI_ID_SYM53C896)
3208 np->rv_ccntl0 |= DPR;
3209
3210 /*
3211 * Write CCNTL0/CCNTL1 for chips capable of 64 bit addressing
3212 * and/or hardware phase mismatch, since only such chips
3213 * seem to support those IO registers.
3214 */
3215 if (np->features & (FE_DAC|FE_NOPM)) {
3216 OUTB (nc_ccntl0, np->rv_ccntl0);
3217 OUTB (nc_ccntl1, np->rv_ccntl1);
3218 }
3219
3220 /*
3221 * If phase mismatch handled by scripts (895A/896/1010),
3222 * set PM jump addresses.
3223 */
3224 if (np->features & FE_NOPM) {
3225 OUTL (nc_pmjad1, SCRIPTB_BA (np, pm_handle));
3226 OUTL (nc_pmjad2, SCRIPTB_BA (np, pm_handle));
3227 }
3228
3229 /*
3230 * Enable GPIO0 pin for writing if LED support from SCRIPTS.
3231 * Also set GPIO5 and clear GPIO6 if hardware LED control.
3232 */
3233 if (np->features & FE_LED0)
3234 OUTB(nc_gpcntl, INB(nc_gpcntl) & ~0x01);
3235 else if (np->features & FE_LEDC)
3236 OUTB(nc_gpcntl, (INB(nc_gpcntl) & ~0x41) | 0x20);
3237
3238 /*
3239 * enable ints
3240 */
3241 OUTW (nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR);
3242 OUTB (nc_dien , MDPE|BF|SSI|SIR|IID);
3243
3244 /*
3245 * For 895/6 enable SBMC interrupt and save current SCSI bus mode.
3246 * Try to eat the spurious SBMC interrupt that may occur when
3247 * we reset the chip but not the SCSI BUS (at initialization).
3248 */
3249 if (np->features & (FE_ULTRA2|FE_ULTRA3)) {
3250 OUTONW (nc_sien, SBMC);
3251 if (reason == 0) {
3252 MDELAY(100);
3253 INW (nc_sist);
3254 }
3255 np->scsi_mode = INB (nc_stest4) & SMODE;
3256 }
3257
3258 /*
3259 * Fill in target structure.
3260 * Reinitialize usrsync.
3261 * Reinitialize usrwide.
3262 * Prepare sync negotiation according to actual SCSI bus mode.
3263 */
3264 for (i = 0; i < SYM_CONF_MAX_TARGET; i++) {
3265 tcb_p tp = &np->target[i];
3266
3267 tp->to_reset = 0;
3268 tp->head.sval = 0;
3269 tp->head.wval = np->rv_scntl3;
3270 tp->head.uval = 0;
3271
3272 tp->tinfo.current.period = 0;
3273 tp->tinfo.current.offset = 0;
3274 tp->tinfo.current.width = BUS_8_BIT;
3275 tp->tinfo.current.options = 0;
3276 }
3277
3278 /*
3279 * Download SCSI SCRIPTS to on-chip RAM if present,
3280 * and start script processor.
3281 */
3282 if (np->ram_ba) {
3283 if (sym_verbose > 1)
3284 device_printf(np->device,
3285 "Downloading SCSI SCRIPTS.\n");
3286 if (np->ram_ws == 8192) {
3287 OUTRAM_OFF(4096, np->scriptb0, np->scriptb_sz);
3288 OUTL (nc_mmws, np->scr_ram_seg);
3289 OUTL (nc_mmrs, np->scr_ram_seg);
3290 OUTL (nc_sfs, np->scr_ram_seg);
3291 phys = SCRIPTB_BA (np, start64);
3292 }
3293 else
3294 phys = SCRIPTA_BA (np, init);
3295 OUTRAM_OFF(0, np->scripta0, np->scripta_sz);
3296 }
3297 else
3298 phys = SCRIPTA_BA (np, init);
3299
3300 np->istat_sem = 0;
3301
3302 OUTL (nc_dsa, np->hcb_ba);
3303 OUTL_DSP (phys);
3304
3305 /*
3306 * Notify the XPT about the RESET condition.
3307 */
3308 if (reason != 0)
3309 xpt_async(AC_BUS_RESET, np->path, NULL);
3310 }
3311
3312 /*
3313 * Get clock factor and sync divisor for a given
3314 * synchronous factor period.
3315 */
static int
sym_getsync(hcb_p np, u_char dt, u_char sfac, u_char *divp, u_char *fakp)
{
	u32	clk = np->clock_khz;	/* SCSI clock frequency in kHz	*/
	int	div = np->clock_divn;	/* Number of divisors supported	*/
	u32	fak;			/* Sync factor in sxfer		*/
	u32	per;			/* Period in tenths of ns	*/
	u32	kpc;			/* (per * clk)			*/
	int	ret;

	/*
	 * Compute the synchronous period in tenths of nano-seconds
	 * from the SCSI transfer period factor 'sfac'. The small
	 * factors map to fixed fast periods; larger factors are
	 * linear (period = 4 * sfac ns = 40 * sfac tenths of ns).
	 * The computed period is the value returned on success.
	 */
	if	(dt && sfac <= 9)	per = 125;
	else if	(sfac <= 10)	per = 250;
	else if	(sfac == 11)	per = 303;
	else if	(sfac == 12)	per = 500;
	else			per = 40 * sfac;
	ret = per;

	kpc = per * clk;
	if (dt)
		kpc <<= 1;	/* DT transfers clock data on both SCLK edges */

	/*
	 * For earliest C10 revision 0, we cannot use extra
	 * clocks for the setting of the SCSI clocking.
	 * Note that this limits the lowest sync data transfer
	 * to 5 Mega-transfers per second and may result in
	 * using higher clock divisors.
	 */
#if 1
	if ((np->features & (FE_C10|FE_U3EN)) == FE_C10) {
		/*
		 * Look for the lowest clock divisor that allows an
		 * output speed not faster than the period.
		 */
		while (div > 0) {
			--div;
			if (kpc > (div_10M[div] << 2)) {
				++div;
				break;
			}
		}
		fak = 0;			/* No extra clocks */
		if (div == np->clock_divn) {	/* Are we too fast ? */
			ret = -1;	/* requested period not achievable */
		}
		*divp = div;
		*fakp = fak;
		return ret;
	}
#endif

	/*
	 * Look for the greatest clock divisor that allows an
	 * input speed faster than the period.
	 */
	while (div-- > 0)
		if (kpc >= (div_10M[div] << 2)) break;

	/*
	 * Calculate the lowest clock factor that allows an output
	 * speed not faster than the period, and the max output speed.
	 * If fak >= 1 we will set both XCLKH_ST and XCLKH_DT.
	 * If fak >= 2 we will also set XCLKS_ST and XCLKS_DT.
	 */
	if (dt) {
		fak = (kpc - 1) / (div_10M[div] << 1) + 1 - 2;
		/* ret = ((2+fak)*div_10M[div])/np->clock_khz; */
	}
	else {
		fak = (kpc - 1) / div_10M[div] + 1 - 4;
		/* ret = ((4+fak)*div_10M[div])/np->clock_khz; */
	}

	/*
	 * Check against our hardware limits, or bugs :).
	 * A factor above 2 cannot be programmed; clamp it and
	 * report failure with -1.
	 */
	if (fak > 2) {fak = 2; ret = -1;}

	/*
	 * Compute and return sync parameters.
	 */
	*divp = div;
	*fakp = fak;

	return ret;
}
3405
3406 /*
3407 * Tell the SCSI layer about the new transfer parameters.
3408 */
static void
sym_xpt_async_transfer_neg(hcb_p np, int target, u_int spi_valid)
{
	struct ccb_trans_settings cts;
	struct cam_path *path;
	int sts;
	tcb_p tp = &np->target[target];

	/*
	 * Build a wildcard-LUN path for this target. If CAM cannot
	 * create the path, there is nobody to notify, so just return.
	 */
	sts = xpt_create_path(&path, NULL, cam_sim_path(np->sim), target,
	    CAM_LUN_WILDCARD);
	if (sts != CAM_REQ_CMP)
		return;

	bzero(&cts, sizeof(cts));

#define	cts__scsi (cts.proto_specific.scsi)
#define	cts__spi  (cts.xport_specific.spi)

	cts.type      = CTS_TYPE_CURRENT_SETTINGS;
	cts.protocol  = PROTO_SCSI;
	cts.transport = XPORT_SPI;
	cts.protocol_version  = tp->tinfo.current.scsi_version;
	cts.transport_version = tp->tinfo.current.spi_version;

	/*
	 * Only copy the fields flagged valid by the caller; the
	 * others are left zeroed by the bzero() above.
	 */
	cts__spi.valid = spi_valid;
	if (spi_valid & CTS_SPI_VALID_SYNC_RATE)
		cts__spi.sync_period = tp->tinfo.current.period;
	if (spi_valid & CTS_SPI_VALID_SYNC_OFFSET)
		cts__spi.sync_offset = tp->tinfo.current.offset;
	if (spi_valid & CTS_SPI_VALID_BUS_WIDTH)
		cts__spi.bus_width = tp->tinfo.current.width;
	if (spi_valid & CTS_SPI_VALID_PPR_OPTIONS)
		cts__spi.ppr_options = tp->tinfo.current.options;
#undef cts__spi
#undef cts__scsi
	/* Broadcast the AC_TRANSFER_NEG async event and free the path. */
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	xpt_async(AC_TRANSFER_NEG, path, &cts);
	xpt_free_path(path);
}
3448
/*
 * Valid-field masks passed to sym_xpt_async_transfer_neg() after a
 * WDTR, SDTR or PPR negotiation completes. The replacement lists are
 * fully parenthesized so the macros expand safely in any expression
 * context (e.g. when combined with '&' or '~' by a caller).
 */
#define SYM_SPI_VALID_WDTR		\
	(CTS_SPI_VALID_BUS_WIDTH |	\
	 CTS_SPI_VALID_SYNC_RATE |	\
	 CTS_SPI_VALID_SYNC_OFFSET)
#define SYM_SPI_VALID_SDTR		\
	(CTS_SPI_VALID_SYNC_RATE |	\
	 CTS_SPI_VALID_SYNC_OFFSET)
#define SYM_SPI_VALID_PPR		\
	(CTS_SPI_VALID_PPR_OPTIONS |	\
	 CTS_SPI_VALID_BUS_WIDTH |	\
	 CTS_SPI_VALID_SYNC_RATE |	\
	 CTS_SPI_VALID_SYNC_OFFSET)
3461
3462 /*
3463 * We received a WDTR.
3464 * Let everything be aware of the changes.
3465 */
sym_setwide(hcb_p np,ccb_p cp,u_char wide)3466 static void sym_setwide(hcb_p np, ccb_p cp, u_char wide)
3467 {
3468 tcb_p tp = &np->target[cp->target];
3469
3470 sym_settrans(np, cp, 0, 0, 0, wide, 0, 0);
3471
3472 /*
3473 * Tell the SCSI layer about the new transfer parameters.
3474 */
3475 tp->tinfo.goal.width = tp->tinfo.current.width = wide;
3476 tp->tinfo.current.offset = 0;
3477 tp->tinfo.current.period = 0;
3478 tp->tinfo.current.options = 0;
3479
3480 sym_xpt_async_transfer_neg(np, cp->target, SYM_SPI_VALID_WDTR);
3481 }
3482
3483 /*
3484 * We received a SDTR.
3485 * Let everything be aware of the changes.
3486 */
3487 static void
sym_setsync(hcb_p np,ccb_p cp,u_char ofs,u_char per,u_char div,u_char fak)3488 sym_setsync(hcb_p np, ccb_p cp, u_char ofs, u_char per, u_char div, u_char fak)
3489 {
3490 tcb_p tp = &np->target[cp->target];
3491 u_char wide = (cp->phys.select.sel_scntl3 & EWS) ? 1 : 0;
3492
3493 sym_settrans(np, cp, 0, ofs, per, wide, div, fak);
3494
3495 /*
3496 * Tell the SCSI layer about the new transfer parameters.
3497 */
3498 tp->tinfo.goal.period = tp->tinfo.current.period = per;
3499 tp->tinfo.goal.offset = tp->tinfo.current.offset = ofs;
3500 tp->tinfo.goal.options = tp->tinfo.current.options = 0;
3501
3502 sym_xpt_async_transfer_neg(np, cp->target, SYM_SPI_VALID_SDTR);
3503 }
3504
3505 /*
3506 * We received a PPR.
3507 * Let everything be aware of the changes.
3508 */
sym_setpprot(hcb_p np,ccb_p cp,u_char dt,u_char ofs,u_char per,u_char wide,u_char div,u_char fak)3509 static void sym_setpprot(hcb_p np, ccb_p cp, u_char dt, u_char ofs,
3510 u_char per, u_char wide, u_char div, u_char fak)
3511 {
3512 tcb_p tp = &np->target[cp->target];
3513
3514 sym_settrans(np, cp, dt, ofs, per, wide, div, fak);
3515
3516 /*
3517 * Tell the SCSI layer about the new transfer parameters.
3518 */
3519 tp->tinfo.goal.width = tp->tinfo.current.width = wide;
3520 tp->tinfo.goal.period = tp->tinfo.current.period = per;
3521 tp->tinfo.goal.offset = tp->tinfo.current.offset = ofs;
3522 tp->tinfo.goal.options = tp->tinfo.current.options = dt;
3523
3524 sym_xpt_async_transfer_neg(np, cp->target, SYM_SPI_VALID_PPR);
3525 }
3526
3527 /*
3528 * Switch trans mode for current job and it's target.
3529 */
sym_settrans(hcb_p np,ccb_p cp,u_char dt,u_char ofs,u_char per,u_char wide,u_char div,u_char fak)3530 static void sym_settrans(hcb_p np, ccb_p cp, u_char dt, u_char ofs,
3531 u_char per, u_char wide, u_char div, u_char fak)
3532 {
3533 SYM_QUEHEAD *qp;
3534 union ccb *ccb;
3535 tcb_p tp;
3536 u_char target = INB (nc_sdid) & 0x0f;
3537 u_char sval, wval, uval;
3538
3539 assert (cp);
3540 if (!cp) return;
3541 ccb = cp->cam_ccb;
3542 assert (ccb);
3543 if (!ccb) return;
3544 assert (target == (cp->target & 0xf));
3545 tp = &np->target[target];
3546
3547 sval = tp->head.sval;
3548 wval = tp->head.wval;
3549 uval = tp->head.uval;
3550
3551 #if 0
3552 printf("XXXX sval=%x wval=%x uval=%x (%x)\n",
3553 sval, wval, uval, np->rv_scntl3);
3554 #endif
3555 /*
3556 * Set the offset.
3557 */
3558 if (!(np->features & FE_C10))
3559 sval = (sval & ~0x1f) | ofs;
3560 else
3561 sval = (sval & ~0x3f) | ofs;
3562
3563 /*
3564 * Set the sync divisor and extra clock factor.
3565 */
3566 if (ofs != 0) {
3567 wval = (wval & ~0x70) | ((div+1) << 4);
3568 if (!(np->features & FE_C10))
3569 sval = (sval & ~0xe0) | (fak << 5);
3570 else {
3571 uval = uval & ~(XCLKH_ST|XCLKH_DT|XCLKS_ST|XCLKS_DT);
3572 if (fak >= 1) uval |= (XCLKH_ST|XCLKH_DT);
3573 if (fak >= 2) uval |= (XCLKS_ST|XCLKS_DT);
3574 }
3575 }
3576
3577 /*
3578 * Set the bus width.
3579 */
3580 wval = wval & ~EWS;
3581 if (wide != 0)
3582 wval |= EWS;
3583
3584 /*
3585 * Set misc. ultra enable bits.
3586 */
3587 if (np->features & FE_C10) {
3588 uval = uval & ~(U3EN|AIPCKEN);
3589 if (dt) {
3590 assert(np->features & FE_U3EN);
3591 uval |= U3EN;
3592 }
3593 }
3594 else {
3595 wval = wval & ~ULTRA;
3596 if (per <= 12) wval |= ULTRA;
3597 }
3598
3599 /*
3600 * Stop there if sync parameters are unchanged.
3601 */
3602 if (tp->head.sval == sval &&
3603 tp->head.wval == wval &&
3604 tp->head.uval == uval)
3605 return;
3606 tp->head.sval = sval;
3607 tp->head.wval = wval;
3608 tp->head.uval = uval;
3609
3610 /*
3611 * Disable extended Sreq/Sack filtering if per < 50.
3612 * Not supported on the C1010.
3613 */
3614 if (per < 50 && !(np->features & FE_C10))
3615 OUTOFFB (nc_stest2, EXT);
3616
3617 /*
3618 * set actual value and sync_status
3619 */
3620 OUTB (nc_sxfer, tp->head.sval);
3621 OUTB (nc_scntl3, tp->head.wval);
3622
3623 if (np->features & FE_C10) {
3624 OUTB (nc_scntl4, tp->head.uval);
3625 }
3626
3627 /*
3628 * patch ALL busy ccbs of this target.
3629 */
3630 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
3631 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
3632 if (cp->target != target)
3633 continue;
3634 cp->phys.select.sel_scntl3 = tp->head.wval;
3635 cp->phys.select.sel_sxfer = tp->head.sval;
3636 if (np->features & FE_C10) {
3637 cp->phys.select.sel_scntl4 = tp->head.uval;
3638 }
3639 }
3640 }
3641
3642 /*
3643 * log message for real hard errors
3644 *
3645 * sym0 targ 0?: ERROR (ds:si) (so-si-sd) (sxfer/scntl3) @ name (dsp:dbc).
3646 * reg: r0 r1 r2 r3 r4 r5 r6 ..... rf.
3647 *
3648 * exception register:
3649 * ds: dstat
3650 * si: sist
3651 *
3652 * SCSI bus lines:
3653 * so: control lines as driven by chip.
3654 * si: control lines as seen by chip.
3655 * sd: scsi data lines as seen by chip.
3656 *
3657 * wide/fastmode:
3658 * sxfer: (see the manual)
3659 * scntl3: (see the manual)
3660 *
3661 * current script command:
3662 * dsp: script address (relative to start of script).
3663 * dbc: first word of script command.
3664 *
3665 * First 24 register of the chip:
3666 * r0..rf
3667 */
sym_log_hard_error(hcb_p np,u_short sist,u_char dstat)3668 static void sym_log_hard_error(hcb_p np, u_short sist, u_char dstat)
3669 {
3670 u32 dsp;
3671 int script_ofs;
3672 int script_size;
3673 char *script_name;
3674 u_char *script_base;
3675 int i;
3676
3677 dsp = INL (nc_dsp);
3678
3679 if (dsp > np->scripta_ba &&
3680 dsp <= np->scripta_ba + np->scripta_sz) {
3681 script_ofs = dsp - np->scripta_ba;
3682 script_size = np->scripta_sz;
3683 script_base = (u_char *) np->scripta0;
3684 script_name = "scripta";
3685 }
3686 else if (np->scriptb_ba < dsp &&
3687 dsp <= np->scriptb_ba + np->scriptb_sz) {
3688 script_ofs = dsp - np->scriptb_ba;
3689 script_size = np->scriptb_sz;
3690 script_base = (u_char *) np->scriptb0;
3691 script_name = "scriptb";
3692 } else {
3693 script_ofs = dsp;
3694 script_size = 0;
3695 script_base = NULL;
3696 script_name = "mem";
3697 }
3698
3699 printf ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x) @ (%s %x:%08x).\n",
3700 sym_name (np), (unsigned)INB (nc_sdid)&0x0f, dstat, sist,
3701 (unsigned)INB (nc_socl), (unsigned)INB (nc_sbcl),
3702 (unsigned)INB (nc_sbdl), (unsigned)INB (nc_sxfer),
3703 (unsigned)INB (nc_scntl3), script_name, script_ofs,
3704 (unsigned)INL (nc_dbc));
3705
3706 if (((script_ofs & 3) == 0) &&
3707 (unsigned)script_ofs < script_size) {
3708 device_printf(np->device, "script cmd = %08x\n",
3709 scr_to_cpu((int) *(u32 *)(script_base + script_ofs)));
3710 }
3711
3712 device_printf(np->device, "regdump:");
3713 for (i = 0; i < 24; i++)
3714 printf (" %02x", (unsigned)INB_OFF(i));
3715 printf (".\n");
3716
3717 /*
3718 * PCI BUS error, read the PCI ststus register.
3719 */
3720 if (dstat & (MDPE|BF)) {
3721 u_short pci_sts;
3722 pci_sts = pci_read_config(np->device, PCIR_STATUS, 2);
3723 if (pci_sts & 0xf900) {
3724 pci_write_config(np->device, PCIR_STATUS, pci_sts, 2);
3725 device_printf(np->device, "PCI STATUS = 0x%04x\n",
3726 pci_sts & 0xf900);
3727 }
3728 }
3729 }
3730
3731 /*
3732 * chip interrupt handler
3733 *
3734 * In normal situations, interrupt conditions occur one at
3735 * a time. But when something bad happens on the SCSI BUS,
3736 * the chip may raise several interrupt flags before
 * stopping and interrupting the CPU. The additional
3738 * interrupt flags are stacked in some extra registers
3739 * after the SIP and/or DIP flag has been raised in the
3740 * ISTAT. After the CPU has read the interrupt condition
3741 * flag from SIST or DSTAT, the chip unstacks the other
3742 * interrupt flags and sets the corresponding bits in
3743 * SIST or DSTAT. Since the chip starts stacking once the
3744 * SIP or DIP flag is set, there is a small window of time
3745 * where the stacking does not occur.
3746 *
3747 * Typically, multiple interrupt conditions may happen in
3748 * the following situations:
3749 *
3750 * - SCSI parity error + Phase mismatch (PAR|MA)
3751 * When a parity error is detected in input phase
3752 * and the device switches to msg-in phase inside a
3753 * block MOV.
3754 * - SCSI parity error + Unexpected disconnect (PAR|UDC)
3755 * When a stupid device does not want to handle the
3756 * recovery of an SCSI parity error.
3757 * - Some combinations of STO, PAR, UDC, ...
3758 * When using non compliant SCSI stuff, when user is
3759 * doing non compliant hot tampering on the BUS, when
3760 * something really bad happens to a device, etc ...
3761 *
3762 * The heuristic suggested by SYMBIOS to handle
3763 * multiple interrupts is to try unstacking all
3764 * interrupts conditions and to handle them on some
3765 * priority based on error severity.
3766 * This will work when the unstacking has been
3767 * successful, but we cannot be 100 % sure of that,
3768 * since the CPU may have been faster to unstack than
3769 * the chip is able to stack. Hmmm ... But it seems that
3770 * such a situation is very unlikely to happen.
3771 *
 * If this happens, for example STO caught by the CPU
 * then UDC happening before the CPU has restarted
 * the SCRIPTS, the driver may wrongly complete the
3775 * same command on UDC, since the SCRIPTS didn't restart
3776 * and the DSA still points to the same command.
3777 * We avoid this situation by setting the DSA to an
3778 * invalid value when the CCB is completed and before
3779 * restarting the SCRIPTS.
3780 *
3781 * Another issue is that we need some section of our
3782 * recovery procedures to be somehow uninterruptible but
 * the SCRIPTS processor does not provide such a
 * feature. For this reason, we preferably handle recovery
3785 * from the C code and check against some SCRIPTS critical
3786 * sections from the C code.
3787 *
3788 * Hopefully, the interrupt handling of the driver is now
 * able to resist weird BUS error conditions, but do not
3790 * ask me for any guarantee that it will never fail. :-)
3791 * Use at your own decision and risk.
3792 */
sym_intr1(hcb_p np)3793 static void sym_intr1 (hcb_p np)
3794 {
3795 u_char istat, istatc;
3796 u_char dstat;
3797 u_short sist;
3798
3799 SYM_LOCK_ASSERT(MA_OWNED);
3800
3801 /*
3802 * interrupt on the fly ?
3803 *
3804 * A `dummy read' is needed to ensure that the
3805 * clear of the INTF flag reaches the device
3806 * before the scanning of the DONE queue.
3807 */
3808 istat = INB (nc_istat);
3809 if (istat & INTF) {
3810 OUTB (nc_istat, (istat & SIGP) | INTF | np->istat_sem);
3811 istat = INB (nc_istat); /* DUMMY READ */
3812 if (DEBUG_FLAGS & DEBUG_TINY) printf ("F ");
3813 (void)sym_wakeup_done (np);
3814 }
3815
3816 if (!(istat & (SIP|DIP)))
3817 return;
3818
3819 #if 0 /* We should never get this one */
3820 if (istat & CABRT)
3821 OUTB (nc_istat, CABRT);
3822 #endif
3823
3824 /*
3825 * PAR and MA interrupts may occur at the same time,
3826 * and we need to know of both in order to handle
3827 * this situation properly. We try to unstack SCSI
3828 * interrupts for that reason. BTW, I dislike a LOT
3829 * such a loop inside the interrupt routine.
3830 * Even if DMA interrupt stacking is very unlikely to
3831 * happen, we also try unstacking these ones, since
3832 * this has no performance impact.
3833 */
3834 sist = 0;
3835 dstat = 0;
3836 istatc = istat;
3837 do {
3838 if (istatc & SIP)
3839 sist |= INW (nc_sist);
3840 if (istatc & DIP)
3841 dstat |= INB (nc_dstat);
3842 istatc = INB (nc_istat);
3843 istat |= istatc;
3844 } while (istatc & (SIP|DIP));
3845
3846 if (DEBUG_FLAGS & DEBUG_TINY)
3847 printf ("<%d|%x:%x|%x:%x>",
3848 (int)INB(nc_scr0),
3849 dstat,sist,
3850 (unsigned)INL(nc_dsp),
3851 (unsigned)INL(nc_dbc));
3852 /*
3853 * On paper, a memory barrier may be needed here.
3854 * And since we are paranoid ... :)
3855 */
3856 MEMORY_BARRIER();
3857
3858 /*
3859 * First, interrupts we want to service cleanly.
3860 *
3861 * Phase mismatch (MA) is the most frequent interrupt
3862 * for chip earlier than the 896 and so we have to service
3863 * it as quickly as possible.
3864 * A SCSI parity error (PAR) may be combined with a phase
3865 * mismatch condition (MA).
3866 * Programmed interrupts (SIR) are used to call the C code
3867 * from SCRIPTS.
3868 * The single step interrupt (SSI) is not used in this
3869 * driver.
3870 */
3871 if (!(sist & (STO|GEN|HTH|SGE|UDC|SBMC|RST)) &&
3872 !(dstat & (MDPE|BF|ABRT|IID))) {
3873 if (sist & PAR) sym_int_par (np, sist);
3874 else if (sist & MA) sym_int_ma (np);
3875 else if (dstat & SIR) sym_int_sir (np);
3876 else if (dstat & SSI) OUTONB_STD ();
3877 else goto unknown_int;
3878 return;
3879 }
3880
3881 /*
3882 * Now, interrupts that donnot happen in normal
3883 * situations and that we may need to recover from.
3884 *
3885 * On SCSI RESET (RST), we reset everything.
3886 * On SCSI BUS MODE CHANGE (SBMC), we complete all
3887 * active CCBs with RESET status, prepare all devices
3888 * for negotiating again and restart the SCRIPTS.
3889 * On STO and UDC, we complete the CCB with the corres-
3890 * ponding status and restart the SCRIPTS.
3891 */
3892 if (sist & RST) {
3893 xpt_print_path(np->path);
3894 printf("SCSI BUS reset detected.\n");
3895 sym_init (np, 1);
3896 return;
3897 }
3898
3899 OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */
3900 OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */
3901
3902 if (!(sist & (GEN|HTH|SGE)) &&
3903 !(dstat & (MDPE|BF|ABRT|IID))) {
3904 if (sist & SBMC) sym_int_sbmc (np);
3905 else if (sist & STO) sym_int_sto (np);
3906 else if (sist & UDC) sym_int_udc (np);
3907 else goto unknown_int;
3908 return;
3909 }
3910
3911 /*
3912 * Now, interrupts we are not able to recover cleanly.
3913 *
3914 * Log message for hard errors.
3915 * Reset everything.
3916 */
3917
3918 sym_log_hard_error(np, sist, dstat);
3919
3920 if ((sist & (GEN|HTH|SGE)) ||
3921 (dstat & (MDPE|BF|ABRT|IID))) {
3922 sym_start_reset(np);
3923 return;
3924 }
3925
3926 unknown_int:
3927 /*
3928 * We just miss the cause of the interrupt. :(
3929 * Print a message. The timeout will do the real work.
3930 */
3931 device_printf(np->device,
3932 "unknown interrupt(s) ignored, ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n",
3933 istat, dstat, sist);
3934 }
3935
sym_intr(void * arg)3936 static void sym_intr(void *arg)
3937 {
3938 hcb_p np = arg;
3939
3940 SYM_LOCK();
3941
3942 if (DEBUG_FLAGS & DEBUG_TINY) printf ("[");
3943 sym_intr1((hcb_p) arg);
3944 if (DEBUG_FLAGS & DEBUG_TINY) printf ("]");
3945
3946 SYM_UNLOCK();
3947 }
3948
sym_poll(struct cam_sim * sim)3949 static void sym_poll(struct cam_sim *sim)
3950 {
3951 sym_intr1(cam_sim_softc(sim));
3952 }
3953
3954 /*
3955 * generic recovery from scsi interrupt
3956 *
3957 * The doc says that when the chip gets an SCSI interrupt,
3958 * it tries to stop in an orderly fashion, by completing
3959 * an instruction fetch that had started or by flushing
3960 * the DMA fifo for a write to memory that was executing.
3961 * Such a fashion is not enough to know if the instruction
3962 * that was just before the current DSP value has been
3963 * executed or not.
3964 *
3965 * There are some small SCRIPTS sections that deal with
 * the start queue and the done queue that may break any
 * assumption from the C code if we are interrupted
3968 * inside, so we reset if this happens. Btw, since these
3969 * SCRIPTS sections are executed while the SCRIPTS hasn't
3970 * started SCSI operations, it is very unlikely to happen.
3971 *
3972 * All the driver data structures are supposed to be
3973 * allocated from the same 4 GB memory window, so there
3974 * is a 1 to 1 relationship between DSA and driver data
3975 * structures. Since we are careful :) to invalidate the
3976 * DSA when we complete a command or when the SCRIPTS
3977 * pushes a DSA into a queue, we can trust it when it
3978 * points to a CCB.
3979 */
sym_recover_scsi_int(hcb_p np,u_char hsts)3980 static void sym_recover_scsi_int (hcb_p np, u_char hsts)
3981 {
3982 u32 dsp = INL (nc_dsp);
3983 u32 dsa = INL (nc_dsa);
3984 ccb_p cp = sym_ccb_from_dsa(np, dsa);
3985
3986 /*
3987 * If we haven't been interrupted inside the SCRIPTS
3988 * critical paths, we can safely restart the SCRIPTS
3989 * and trust the DSA value if it matches a CCB.
3990 */
3991 if ((!(dsp > SCRIPTA_BA (np, getjob_begin) &&
3992 dsp < SCRIPTA_BA (np, getjob_end) + 1)) &&
3993 (!(dsp > SCRIPTA_BA (np, ungetjob) &&
3994 dsp < SCRIPTA_BA (np, reselect) + 1)) &&
3995 (!(dsp > SCRIPTB_BA (np, sel_for_abort) &&
3996 dsp < SCRIPTB_BA (np, sel_for_abort_1) + 1)) &&
3997 (!(dsp > SCRIPTA_BA (np, done) &&
3998 dsp < SCRIPTA_BA (np, done_end) + 1))) {
3999 OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */
4000 OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */
4001 /*
4002 * If we have a CCB, let the SCRIPTS call us back for
4003 * the handling of the error with SCRATCHA filled with
4004 * STARTPOS. This way, we will be able to freeze the
4005 * device queue and requeue awaiting IOs.
4006 */
4007 if (cp) {
4008 cp->host_status = hsts;
4009 OUTL_DSP (SCRIPTA_BA (np, complete_error));
4010 }
4011 /*
4012 * Otherwise just restart the SCRIPTS.
4013 */
4014 else {
4015 OUTL (nc_dsa, 0xffffff);
4016 OUTL_DSP (SCRIPTA_BA (np, start));
4017 }
4018 }
4019 else
4020 goto reset_all;
4021
4022 return;
4023
4024 reset_all:
4025 sym_start_reset(np);
4026 }
4027
4028 /*
4029 * chip exception handler for selection timeout
4030 */
sym_int_sto(hcb_p np)4031 static void sym_int_sto (hcb_p np)
4032 {
4033 u32 dsp = INL (nc_dsp);
4034
4035 if (DEBUG_FLAGS & DEBUG_TINY) printf ("T");
4036
4037 if (dsp == SCRIPTA_BA (np, wf_sel_done) + 8)
4038 sym_recover_scsi_int(np, HS_SEL_TIMEOUT);
4039 else
4040 sym_start_reset(np);
4041 }
4042
4043 /*
4044 * chip exception handler for unexpected disconnect
4045 */
sym_int_udc(hcb_p np)4046 static void sym_int_udc (hcb_p np)
4047 {
4048 device_printf(np->device, "unexpected disconnect\n");
4049 sym_recover_scsi_int(np, HS_UNEXPECTED);
4050 }
4051
4052 /*
4053 * chip exception handler for SCSI bus mode change
4054 *
4055 * spi2-r12 11.2.3 says a transceiver mode change must
4056 * generate a reset event and a device that detects a reset
4057 * event shall initiate a hard reset. It says also that a
4058 * device that detects a mode change shall set data transfer
4059 * mode to eight bit asynchronous, etc...
4060 * So, just reinitializing all except chip should be enough.
4061 */
sym_int_sbmc(hcb_p np)4062 static void sym_int_sbmc (hcb_p np)
4063 {
4064 u_char scsi_mode = INB (nc_stest4) & SMODE;
4065
4066 /*
4067 * Notify user.
4068 */
4069 xpt_print_path(np->path);
4070 printf("SCSI BUS mode change from %s to %s.\n",
4071 sym_scsi_bus_mode(np->scsi_mode), sym_scsi_bus_mode(scsi_mode));
4072
4073 /*
4074 * Should suspend command processing for a few seconds and
4075 * reinitialize all except the chip.
4076 */
4077 sym_init (np, 2);
4078 }
4079
4080 /*
4081 * chip exception handler for SCSI parity error.
4082 *
4083 * When the chip detects a SCSI parity error and is
4084 * currently executing a (CH)MOV instruction, it does
4085 * not interrupt immediately, but tries to finish the
4086 * transfer of the current scatter entry before
4087 * interrupting. The following situations may occur:
4088 *
4089 * - The complete scatter entry has been transferred
4090 * without the device having changed phase.
4091 * The chip will then interrupt with the DSP pointing
4092 * to the instruction that follows the MOV.
4093 *
4094 * - A phase mismatch occurs before the MOV finished
4095 * and phase errors are to be handled by the C code.
4096 * The chip will then interrupt with both PAR and MA
4097 * conditions set.
4098 *
4099 * - A phase mismatch occurs before the MOV finished and
4100 * phase errors are to be handled by SCRIPTS.
4101 * The chip will load the DSP with the phase mismatch
4102 * JUMP address and interrupt the host processor.
4103 */
sym_int_par(hcb_p np,u_short sist)4104 static void sym_int_par (hcb_p np, u_short sist)
4105 {
4106 u_char hsts = INB (HS_PRT);
4107 u32 dsp = INL (nc_dsp);
4108 u32 dbc = INL (nc_dbc);
4109 u32 dsa = INL (nc_dsa);
4110 u_char sbcl = INB (nc_sbcl);
4111 u_char cmd = dbc >> 24;
4112 int phase = cmd & 7;
4113 ccb_p cp = sym_ccb_from_dsa(np, dsa);
4114
4115 device_printf(np->device,
4116 "SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n", hsts, dbc,
4117 sbcl);
4118
4119 /*
4120 * Check that the chip is connected to the SCSI BUS.
4121 */
4122 if (!(INB (nc_scntl1) & ISCON)) {
4123 sym_recover_scsi_int(np, HS_UNEXPECTED);
4124 return;
4125 }
4126
4127 /*
4128 * If the nexus is not clearly identified, reset the bus.
4129 * We will try to do better later.
4130 */
4131 if (!cp)
4132 goto reset_all;
4133
4134 /*
4135 * Check instruction was a MOV, direction was INPUT and
4136 * ATN is asserted.
4137 */
4138 if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8))
4139 goto reset_all;
4140
4141 /*
4142 * Keep track of the parity error.
4143 */
4144 OUTONB (HF_PRT, HF_EXT_ERR);
4145 cp->xerr_status |= XE_PARITY_ERR;
4146
4147 /*
4148 * Prepare the message to send to the device.
4149 */
4150 np->msgout[0] = (phase == 7) ? M_PARITY : M_ID_ERROR;
4151
4152 /*
4153 * If the old phase was DATA IN phase, we have to deal with
4154 * the 3 situations described above.
4155 * For other input phases (MSG IN and STATUS), the device
4156 * must resend the whole thing that failed parity checking
4157 * or signal error. So, jumping to dispatcher should be OK.
4158 */
4159 if (phase == 1 || phase == 5) {
4160 /* Phase mismatch handled by SCRIPTS */
4161 if (dsp == SCRIPTB_BA (np, pm_handle))
4162 OUTL_DSP (dsp);
4163 /* Phase mismatch handled by the C code */
4164 else if (sist & MA)
4165 sym_int_ma (np);
4166 /* No phase mismatch occurred */
4167 else {
4168 OUTL (nc_temp, dsp);
4169 OUTL_DSP (SCRIPTA_BA (np, dispatch));
4170 }
4171 }
4172 else
4173 OUTL_DSP (SCRIPTA_BA (np, clrack));
4174 return;
4175
4176 reset_all:
4177 sym_start_reset(np);
4178 }
4179
4180 /*
4181 * chip exception handler for phase errors.
4182 *
4183 * We have to construct a new transfer descriptor,
4184 * to transfer the rest of the current block.
4185 */
sym_int_ma(hcb_p np)4186 static void sym_int_ma (hcb_p np)
4187 {
4188 u32 dbc;
4189 u32 rest;
4190 u32 dsp;
4191 u32 dsa;
4192 u32 nxtdsp;
4193 u32 *vdsp;
4194 u32 oadr, olen;
4195 u32 *tblp;
4196 u32 newcmd;
4197 u_int delta;
4198 u_char cmd;
4199 u_char hflags, hflags0;
4200 struct sym_pmc *pm;
4201 ccb_p cp;
4202
4203 dsp = INL (nc_dsp);
4204 dbc = INL (nc_dbc);
4205 dsa = INL (nc_dsa);
4206
4207 cmd = dbc >> 24;
4208 rest = dbc & 0xffffff;
4209 delta = 0;
4210
4211 /*
4212 * locate matching cp if any.
4213 */
4214 cp = sym_ccb_from_dsa(np, dsa);
4215
4216 /*
4217 * Donnot take into account dma fifo and various buffers in
4218 * INPUT phase since the chip flushes everything before
4219 * raising the MA interrupt for interrupted INPUT phases.
4220 * For DATA IN phase, we will check for the SWIDE later.
4221 */
4222 if ((cmd & 7) != 1 && (cmd & 7) != 5) {
4223 u_char ss0, ss2;
4224
4225 if (np->features & FE_DFBC)
4226 delta = INW (nc_dfbc);
4227 else {
4228 u32 dfifo;
4229
4230 /*
4231 * Read DFIFO, CTEST[4-6] using 1 PCI bus ownership.
4232 */
4233 dfifo = INL(nc_dfifo);
4234
4235 /*
4236 * Calculate remaining bytes in DMA fifo.
4237 * (CTEST5 = dfifo >> 16)
4238 */
4239 if (dfifo & (DFS << 16))
4240 delta = ((((dfifo >> 8) & 0x300) |
4241 (dfifo & 0xff)) - rest) & 0x3ff;
4242 else
4243 delta = ((dfifo & 0xff) - rest) & 0x7f;
4244 }
4245
4246 /*
4247 * The data in the dma fifo has not been transferred to
4248 * the target -> add the amount to the rest
4249 * and clear the data.
4250 * Check the sstat2 register in case of wide transfer.
4251 */
4252 rest += delta;
4253 ss0 = INB (nc_sstat0);
4254 if (ss0 & OLF) rest++;
4255 if (!(np->features & FE_C10))
4256 if (ss0 & ORF) rest++;
4257 if (cp && (cp->phys.select.sel_scntl3 & EWS)) {
4258 ss2 = INB (nc_sstat2);
4259 if (ss2 & OLF1) rest++;
4260 if (!(np->features & FE_C10))
4261 if (ss2 & ORF1) rest++;
4262 }
4263
4264 /*
4265 * Clear fifos.
4266 */
4267 OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* dma fifo */
4268 OUTB (nc_stest3, TE|CSF); /* scsi fifo */
4269 }
4270
4271 /*
4272 * log the information
4273 */
4274 if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE))
4275 printf ("P%x%x RL=%d D=%d ", cmd&7, INB(nc_sbcl)&7,
4276 (unsigned) rest, (unsigned) delta);
4277
4278 /*
4279 * try to find the interrupted script command,
4280 * and the address at which to continue.
4281 */
4282 vdsp = NULL;
4283 nxtdsp = 0;
4284 if (dsp > np->scripta_ba &&
4285 dsp <= np->scripta_ba + np->scripta_sz) {
4286 vdsp = (u32 *)((char*)np->scripta0 + (dsp-np->scripta_ba-8));
4287 nxtdsp = dsp;
4288 }
4289 else if (dsp > np->scriptb_ba &&
4290 dsp <= np->scriptb_ba + np->scriptb_sz) {
4291 vdsp = (u32 *)((char*)np->scriptb0 + (dsp-np->scriptb_ba-8));
4292 nxtdsp = dsp;
4293 }
4294
4295 /*
4296 * log the information
4297 */
4298 if (DEBUG_FLAGS & DEBUG_PHASE) {
4299 printf ("\nCP=%p DSP=%x NXT=%x VDSP=%p CMD=%x ",
4300 cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd);
4301 }
4302
4303 if (!vdsp) {
4304 device_printf(np->device,
4305 "interrupted SCRIPT address not found.\n");
4306 goto reset_all;
4307 }
4308
4309 if (!cp) {
4310 device_printf(np->device,
4311 "SCSI phase error fixup: CCB already dequeued.\n");
4312 goto reset_all;
4313 }
4314
4315 /*
4316 * get old startaddress and old length.
4317 */
4318 oadr = scr_to_cpu(vdsp[1]);
4319
4320 if (cmd & 0x10) { /* Table indirect */
4321 tblp = (u32 *) ((char*) &cp->phys + oadr);
4322 olen = scr_to_cpu(tblp[0]);
4323 oadr = scr_to_cpu(tblp[1]);
4324 } else {
4325 tblp = (u32 *) 0;
4326 olen = scr_to_cpu(vdsp[0]) & 0xffffff;
4327 }
4328
4329 if (DEBUG_FLAGS & DEBUG_PHASE) {
4330 printf ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n",
4331 (unsigned) (scr_to_cpu(vdsp[0]) >> 24),
4332 tblp,
4333 (unsigned) olen,
4334 (unsigned) oadr);
4335 }
4336
4337 /*
4338 * check cmd against assumed interrupted script command.
4339 * If dt data phase, the MOVE instruction hasn't bit 4 of
4340 * the phase.
4341 */
4342 if (((cmd & 2) ? cmd : (cmd & ~4)) != (scr_to_cpu(vdsp[0]) >> 24)) {
4343 PRINT_ADDR(cp);
4344 printf ("internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n",
4345 (unsigned)cmd, (unsigned)scr_to_cpu(vdsp[0]) >> 24);
4346
4347 goto reset_all;
4348 }
4349
4350 /*
4351 * if old phase not dataphase, leave here.
4352 */
4353 if (cmd & 2) {
4354 PRINT_ADDR(cp);
4355 printf ("phase change %x-%x %d@%08x resid=%d.\n",
4356 cmd&7, INB(nc_sbcl)&7, (unsigned)olen,
4357 (unsigned)oadr, (unsigned)rest);
4358 goto unexpected_phase;
4359 }
4360
4361 /*
4362 * Choose the correct PM save area.
4363 *
4364 * Look at the PM_SAVE SCRIPT if you want to understand
4365 * this stuff. The equivalent code is implemented in
4366 * SCRIPTS for the 895A, 896 and 1010 that are able to
4367 * handle PM from the SCRIPTS processor.
4368 */
4369 hflags0 = INB (HF_PRT);
4370 hflags = hflags0;
4371
4372 if (hflags & (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)) {
4373 if (hflags & HF_IN_PM0)
4374 nxtdsp = scr_to_cpu(cp->phys.pm0.ret);
4375 else if (hflags & HF_IN_PM1)
4376 nxtdsp = scr_to_cpu(cp->phys.pm1.ret);
4377
4378 if (hflags & HF_DP_SAVED)
4379 hflags ^= HF_ACT_PM;
4380 }
4381
4382 if (!(hflags & HF_ACT_PM)) {
4383 pm = &cp->phys.pm0;
4384 newcmd = SCRIPTA_BA (np, pm0_data);
4385 }
4386 else {
4387 pm = &cp->phys.pm1;
4388 newcmd = SCRIPTA_BA (np, pm1_data);
4389 }
4390
4391 hflags &= ~(HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED);
4392 if (hflags != hflags0)
4393 OUTB (HF_PRT, hflags);
4394
4395 /*
4396 * fillin the phase mismatch context
4397 */
4398 pm->sg.addr = cpu_to_scr(oadr + olen - rest);
4399 pm->sg.size = cpu_to_scr(rest);
4400 pm->ret = cpu_to_scr(nxtdsp);
4401
4402 /*
4403 * If we have a SWIDE,
4404 * - prepare the address to write the SWIDE from SCRIPTS,
4405 * - compute the SCRIPTS address to restart from,
4406 * - move current data pointer context by one byte.
4407 */
4408 nxtdsp = SCRIPTA_BA (np, dispatch);
4409 if ((cmd & 7) == 1 && cp && (cp->phys.select.sel_scntl3 & EWS) &&
4410 (INB (nc_scntl2) & WSR)) {
4411 u32 tmp;
4412
4413 /*
4414 * Set up the table indirect for the MOVE
4415 * of the residual byte and adjust the data
4416 * pointer context.
4417 */
4418 tmp = scr_to_cpu(pm->sg.addr);
4419 cp->phys.wresid.addr = cpu_to_scr(tmp);
4420 pm->sg.addr = cpu_to_scr(tmp + 1);
4421 tmp = scr_to_cpu(pm->sg.size);
4422 cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1);
4423 pm->sg.size = cpu_to_scr(tmp - 1);
4424
4425 /*
4426 * If only the residual byte is to be moved,
4427 * no PM context is needed.
4428 */
4429 if ((tmp&0xffffff) == 1)
4430 newcmd = pm->ret;
4431
4432 /*
4433 * Prepare the address of SCRIPTS that will
4434 * move the residual byte to memory.
4435 */
4436 nxtdsp = SCRIPTB_BA (np, wsr_ma_helper);
4437 }
4438
4439 if (DEBUG_FLAGS & DEBUG_PHASE) {
4440 PRINT_ADDR(cp);
4441 printf ("PM %x %x %x / %x %x %x.\n",
4442 hflags0, hflags, newcmd,
4443 (unsigned)scr_to_cpu(pm->sg.addr),
4444 (unsigned)scr_to_cpu(pm->sg.size),
4445 (unsigned)scr_to_cpu(pm->ret));
4446 }
4447
4448 /*
4449 * Restart the SCRIPTS processor.
4450 */
4451 OUTL (nc_temp, newcmd);
4452 OUTL_DSP (nxtdsp);
4453 return;
4454
4455 /*
4456 * Unexpected phase changes that occurs when the current phase
4457 * is not a DATA IN or DATA OUT phase are due to error conditions.
4458 * Such event may only happen when the SCRIPTS is using a
4459 * multibyte SCSI MOVE.
4460 *
4461 * Phase change Some possible cause
4462 *
4463 * COMMAND --> MSG IN SCSI parity error detected by target.
4464 * COMMAND --> STATUS Bad command or refused by target.
4465 * MSG OUT --> MSG IN Message rejected by target.
4466 * MSG OUT --> COMMAND Bogus target that discards extended
4467 * negotiation messages.
4468 *
4469 * The code below does not care of the new phase and so
4470 * trusts the target. Why to annoy it ?
4471 * If the interrupted phase is COMMAND phase, we restart at
4472 * dispatcher.
4473 * If a target does not get all the messages after selection,
4474 * the code assumes blindly that the target discards extended
4475 * messages and clears the negotiation status.
4476 * If the target does not want all our response to negotiation,
4477 * we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids
4478 * bloat for such a should_not_happen situation).
4479 * In all other situation, we reset the BUS.
 * Are these assumptions reasonable? (Wait and see ...)
4481 */
4482 unexpected_phase:
4483 dsp -= 8;
4484 nxtdsp = 0;
4485
4486 switch (cmd & 7) {
4487 case 2: /* COMMAND phase */
4488 nxtdsp = SCRIPTA_BA (np, dispatch);
4489 break;
4490 #if 0
4491 case 3: /* STATUS phase */
4492 nxtdsp = SCRIPTA_BA (np, dispatch);
4493 break;
4494 #endif
4495 case 6: /* MSG OUT phase */
4496 /*
4497 * If the device may want to use untagged when we want
4498 * tagged, we prepare an IDENTIFY without disc. granted,
4499 * since we will not be able to handle reselect.
4500 * Otherwise, we just don't care.
4501 */
4502 if (dsp == SCRIPTA_BA (np, send_ident)) {
4503 if (cp->tag != NO_TAG && olen - rest <= 3) {
4504 cp->host_status = HS_BUSY;
4505 np->msgout[0] = M_IDENTIFY | cp->lun;
4506 nxtdsp = SCRIPTB_BA (np, ident_break_atn);
4507 }
4508 else
4509 nxtdsp = SCRIPTB_BA (np, ident_break);
4510 }
4511 else if (dsp == SCRIPTB_BA (np, send_wdtr) ||
4512 dsp == SCRIPTB_BA (np, send_sdtr) ||
4513 dsp == SCRIPTB_BA (np, send_ppr)) {
4514 nxtdsp = SCRIPTB_BA (np, nego_bad_phase);
4515 }
4516 break;
4517 #if 0
4518 case 7: /* MSG IN phase */
4519 nxtdsp = SCRIPTA_BA (np, clrack);
4520 break;
4521 #endif
4522 }
4523
4524 if (nxtdsp) {
4525 OUTL_DSP (nxtdsp);
4526 return;
4527 }
4528
4529 reset_all:
4530 sym_start_reset(np);
4531 }
4532
4533 /*
4534 * Dequeue from the START queue all CCBs that match
4535 * a given target/lun/task condition (-1 means all),
4536 * and move them from the BUSY queue to the COMP queue
4537 * with CAM_REQUEUE_REQ status condition.
4538 * This function is used during error handling/recovery.
4539 * It is called with SCRIPTS not running.
4540 */
4541 static int
sym_dequeue_from_squeue(hcb_p np,int i,int target,int lun,int task)4542 sym_dequeue_from_squeue(hcb_p np, int i, int target, int lun, int task)
4543 {
4544 int j;
4545 ccb_p cp;
4546
4547 /*
4548 * Make sure the starting index is within range.
4549 */
4550 assert((i >= 0) && (i < 2*MAX_QUEUE));
4551
4552 /*
4553 * Walk until end of START queue and dequeue every job
4554 * that matches the target/lun/task condition.
4555 */
4556 j = i;
4557 while (i != np->squeueput) {
4558 cp = sym_ccb_from_dsa(np, scr_to_cpu(np->squeue[i]));
4559 assert(cp);
4560 #ifdef SYM_CONF_IARB_SUPPORT
4561 /* Forget hints for IARB, they may be no longer relevant */
4562 cp->host_flags &= ~HF_HINT_IARB;
4563 #endif
4564 if ((target == -1 || cp->target == target) &&
4565 (lun == -1 || cp->lun == lun) &&
4566 (task == -1 || cp->tag == task)) {
4567 sym_set_cam_status(cp->cam_ccb, CAM_REQUEUE_REQ);
4568 sym_remque(&cp->link_ccbq);
4569 sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
4570 }
4571 else {
4572 if (i != j)
4573 np->squeue[j] = np->squeue[i];
4574 if ((j += 2) >= MAX_QUEUE*2) j = 0;
4575 }
4576 if ((i += 2) >= MAX_QUEUE*2) i = 0;
4577 }
4578 if (i != j) /* Copy back the idle task if needed */
4579 np->squeue[j] = np->squeue[i];
4580 np->squeueput = j; /* Update our current start queue pointer */
4581
4582 return (i - j) / 2;
4583 }
4584
4585 /*
4586 * Complete all CCBs queued to the COMP queue.
4587 *
4588 * These CCBs are assumed:
4589 * - Not to be referenced either by devices or
4590 * SCRIPTS-related queues and datas.
4591 * - To have to be completed with an error condition
4592 * or requeued.
4593 *
4594 * The device queue freeze count is incremented
4595 * for each CCB that does not prevent this.
4596 * This function is called when all CCBs involved
4597 * in error handling/recovery have been reaped.
4598 */
4599 static void
sym_flush_comp_queue(hcb_p np,int cam_status)4600 sym_flush_comp_queue(hcb_p np, int cam_status)
4601 {
4602 SYM_QUEHEAD *qp;
4603 ccb_p cp;
4604
4605 while ((qp = sym_remque_head(&np->comp_ccbq)) != NULL) {
4606 union ccb *ccb;
4607 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
4608 sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
4609 /* Leave quiet CCBs waiting for resources */
4610 if (cp->host_status == HS_WAIT)
4611 continue;
4612 ccb = cp->cam_ccb;
4613 if (cam_status)
4614 sym_set_cam_status(ccb, cam_status);
4615 sym_freeze_cam_ccb(ccb);
4616 sym_xpt_done(np, ccb, cp);
4617 sym_free_ccb(np, cp);
4618 }
4619 }
4620
4621 /*
4622 * chip handler for bad SCSI status condition
4623 *
4624 * In case of bad SCSI status, we unqueue all the tasks
4625 * currently queued to the controller but not yet started
4626 * and then restart the SCRIPTS processor immediately.
4627 *
4628 * QUEUE FULL and BUSY conditions are handled the same way.
4629 * Basically all the not yet started tasks are requeued in
4630 * device queue and the queue is frozen until a completion.
4631 *
4632 * For CHECK CONDITION and COMMAND TERMINATED status, we use
4633 * the CCB of the failed command to prepare a REQUEST SENSE
4634 * SCSI command and queue it to the controller queue.
4635 *
4636 * SCRATCHA is assumed to have been loaded with STARTPOS
4637 * before the SCRIPTS called the C code.
4638 */
static void sym_sir_bad_scsi_status(hcb_p np, ccb_p cp)
{
	tcb_p tp = &np->target[cp->target];
	u32 startp;
	u_char s_status = cp->ssss_status;
	u_char h_flags = cp->host_flags;
	int msglen;
	int nego;
	int i;

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 * Compute the index of the next job to start from SCRIPTS.
	 * (SCRATCHA was loaded with STARTPOS before we were called.)
	 */
	i = (INL (nc_scratcha) - np->squeue_ba) / 4;

	/*
	 * The last CCB queued used for IARB hint may be
	 * no longer relevant. Forget it.
	 */
#ifdef SYM_CONF_IARB_SUPPORT
	if (np->last_cp)
		np->last_cp = NULL;
#endif

	/*
	 * Now deal with the SCSI status.
	 */
	switch(s_status) {
	case S_BUSY:
	case S_QUEUE_FULL:
		if (sym_verbose >= 2) {
			PRINT_ADDR(cp);
			/* Terminate the BUSY message too (was missing '\n') */
			printf (s_status == S_BUSY ? "BUSY\n" : "QUEUE FULL\n");
		}
		/* FALLTHROUGH */
	default:	/* S_INT, S_INT_COND_MET, S_CONFLICT */
		sym_complete_error (np, cp);
		break;
	case S_TERMINATED:
	case S_CHECK_COND:
		/*
		 * If we get an SCSI error when requesting sense, give up.
		 */
		if (h_flags & HF_SENSE) {
			sym_complete_error (np, cp);
			break;
		}

		/*
		 * Dequeue all queued CCBs for that device not yet started,
		 * and restart the SCRIPTS processor immediately.
		 */
		(void) sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);
		OUTL_DSP (SCRIPTA_BA (np, start));

		/*
		 * Save some info of the actual IO.
		 * Compute the data residual.
		 */
		cp->sv_scsi_status = cp->ssss_status;
		cp->sv_xerr_status = cp->xerr_status;
		cp->sv_resid = sym_compute_residual(np, cp);

		/*
		 * Prepare all needed data structures for
		 * requesting sense data.
		 */

		/*
		 * identify message
		 */
		cp->scsi_smsg2[0] = M_IDENTIFY | cp->lun;
		msglen = 1;

		/*
		 * If we are currently using anything different from
		 * async. 8 bit data transfers with that target,
		 * start a negotiation, since the device may want
		 * to report us a UNIT ATTENTION condition due to
		 * a cause we currently ignore, and we do not want
		 * to be stuck with WIDE and/or SYNC data transfer.
		 *
		 * cp->nego_status is filled by sym_prepare_nego().
		 */
		cp->nego_status = 0;
		nego = 0;
		if (tp->tinfo.current.options & PPR_OPT_MASK)
			nego = NS_PPR;
		else if (tp->tinfo.current.width != BUS_8_BIT)
			nego = NS_WIDE;
		else if (tp->tinfo.current.offset != 0)
			nego = NS_SYNC;
		if (nego)
			msglen +=
			sym_prepare_nego (np,cp, nego, &cp->scsi_smsg2[msglen]);
		/*
		 * Message table indirect structure.
		 */
		cp->phys.smsg.addr = cpu_to_scr(CCB_BA (cp, scsi_smsg2));
		cp->phys.smsg.size = cpu_to_scr(msglen);

		/*
		 * sense command (6-byte CDB)
		 */
		cp->phys.cmd.addr = cpu_to_scr(CCB_BA (cp, sensecmd));
		cp->phys.cmd.size = cpu_to_scr(6);

		/*
		 * patch requested size into sense command
		 * (0x03 = REQUEST SENSE; the LUN is carried in CDB byte 1
		 * only for SCSI-2 devices with LUN <= 7)
		 */
		cp->sensecmd[0] = 0x03;
		cp->sensecmd[1] = cp->lun << 5;
		if (tp->tinfo.current.scsi_version > 2 || cp->lun > 7)
			cp->sensecmd[1] = 0;
		cp->sensecmd[4] = SYM_SNS_BBUF_LEN;
		cp->data_len = SYM_SNS_BBUF_LEN;

		/*
		 * sense data (bounce buffer, zeroed first)
		 */
		bzero(cp->sns_bbuf, SYM_SNS_BBUF_LEN);
		cp->phys.sense.addr = cpu_to_scr(vtobus(cp->sns_bbuf));
		cp->phys.sense.size = cpu_to_scr(SYM_SNS_BBUF_LEN);

		/*
		 * requeue the command.
		 */
		startp = SCRIPTB_BA (np, sdata_in);

		cp->phys.head.savep = cpu_to_scr(startp);
		cp->phys.head.goalp = cpu_to_scr(startp + 16);
		cp->phys.head.lastp = cpu_to_scr(startp);
		cp->startp = cpu_to_scr(startp);

		cp->actualquirks = SYM_QUIRK_AUTOSAVE;
		cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
		cp->ssss_status = S_ILLEGAL;
		cp->host_flags = (HF_SENSE|HF_DATA_IN);
		cp->xerr_status = 0;
		cp->extra_bytes = 0;

		cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA (np, select));

		/*
		 * Requeue the command.
		 */
		sym_put_start_queue(np, cp);

		/*
		 * Give back to upper layer everything we have dequeued.
		 */
		sym_flush_comp_queue(np, 0);
		break;
	}
}
4796
4797 /*
4798 * After a device has accepted some management message
4799 * as BUS DEVICE RESET, ABORT TASK, etc ..., or when
4800 * a device signals a UNIT ATTENTION condition, some
4801 * tasks are thrown away by the device. We are required
4802 * to reflect that on our tasks list since the device
4803 * will never complete these tasks.
4804 *
 * This function moves from the BUSY queue to the COMP
4806 * queue all disconnected CCBs for a given target that
4807 * match the following criteria:
4808 * - lun=-1 means any logical UNIT otherwise a given one.
4809 * - task=-1 means any task, otherwise a given one.
4810 */
4811 static int
sym_clear_tasks(hcb_p np,int cam_status,int target,int lun,int task)4812 sym_clear_tasks(hcb_p np, int cam_status, int target, int lun, int task)
4813 {
4814 SYM_QUEHEAD qtmp, *qp;
4815 int i = 0;
4816 ccb_p cp;
4817
4818 /*
4819 * Move the entire BUSY queue to our temporary queue.
4820 */
4821 sym_que_init(&qtmp);
4822 sym_que_splice(&np->busy_ccbq, &qtmp);
4823 sym_que_init(&np->busy_ccbq);
4824
4825 /*
4826 * Put all CCBs that matches our criteria into
4827 * the COMP queue and put back other ones into
4828 * the BUSY queue.
4829 */
4830 while ((qp = sym_remque_head(&qtmp)) != NULL) {
4831 union ccb *ccb;
4832 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
4833 ccb = cp->cam_ccb;
4834 if (cp->host_status != HS_DISCONNECT ||
4835 cp->target != target ||
4836 (lun != -1 && cp->lun != lun) ||
4837 (task != -1 &&
4838 (cp->tag != NO_TAG && cp->scsi_smsg[2] != task))) {
4839 sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
4840 continue;
4841 }
4842 sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
4843
4844 /* Preserve the software timeout condition */
4845 if (sym_get_cam_status(ccb) != CAM_CMD_TIMEOUT)
4846 sym_set_cam_status(ccb, cam_status);
4847 ++i;
4848 #if 0
4849 printf("XXXX TASK @%p CLEARED\n", cp);
4850 #endif
4851 }
4852 return i;
4853 }
4854
4855 /*
4856 * chip handler for TASKS recovery
4857 *
4858 * We cannot safely abort a command, while the SCRIPTS
4859 * processor is running, since we just would be in race
4860 * with it.
4861 *
4862 * As long as we have tasks to abort, we keep the SEM
4863 * bit set in the ISTAT. When this bit is set, the
4864 * SCRIPTS processor interrupts (SIR_SCRIPT_STOPPED)
4865 * each time it enters the scheduler.
4866 *
4867 * If we have to reset a target, clear tasks of a unit,
4868 * or to perform the abort of a disconnected job, we
4869 * restart the SCRIPTS for selecting the target. Once
4870 * selected, the SCRIPTS interrupts (SIR_TARGET_SELECTED).
4871 * If it loses arbitration, the SCRIPTS will interrupt again
4872 * the next time it will enter its scheduler, and so on ...
4873 *
4874 * On SIR_TARGET_SELECTED, we scan for the more
4875 * appropriate thing to do:
4876 *
4877 * - If nothing, we just sent a M_ABORT message to the
4878 * target to get rid of the useless SCSI bus ownership.
4879 * According to the specs, no tasks shall be affected.
4880 * - If the target is to be reset, we send it a M_RESET
4881 * message.
4882 * - If a logical UNIT is to be cleared , we send the
4883 * IDENTIFY(lun) + M_ABORT.
4884 * - If an untagged task is to be aborted, we send the
4885 * IDENTIFY(lun) + M_ABORT.
4886 * - If a tagged task is to be aborted, we send the
4887 * IDENTIFY(lun) + task attributes + M_ABORT_TAG.
4888 *
4889 * Once our 'kiss of death' :) message has been accepted
4890 * by the target, the SCRIPTS interrupts again
4891 * (SIR_ABORT_SENT). On this interrupt, we complete
4892 * all the CCBs that should have been aborted by the
4893 * target according to our message.
4894 */
static void sym_sir_task_recovery(hcb_p np, int num)
{
	SYM_QUEHEAD *qp;
	ccb_p cp;
	tcb_p tp;
	int target=-1, lun=-1, task;
	int i, k;

	/* 'num' is the SIR_* interrupt code raised by the SCRIPTS. */
	switch(num) {
	/*
	 * The SCRIPTS processor stopped before starting
	 * the next command in order to allow us to perform
	 * some task recovery.
	 */
	case SIR_SCRIPT_STOPPED:
		/*
		 * Do we have any target to reset or unit to clear ?
		 */
		for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
			tp = &np->target[i];
			if (tp->to_reset ||
			    (tp->lun0p && tp->lun0p->to_clear)) {
				target = i;
				break;
			}
			/* LUNs other than 0 live in the lunmp[] map. */
			if (!tp->lunmp)
				continue;
			for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
				if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
					target = i;
					break;
				}
			}
			if (target != -1)
				break;
		}

		/*
		 * If not, walk the busy queue for any
		 * disconnected CCB to be aborted.
		 */
		if (target == -1) {
			FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
				cp = sym_que_entry(qp,struct sym_ccb,link_ccbq);
				if (cp->host_status != HS_DISCONNECT)
					continue;
				if (cp->to_abort) {
					target = cp->target;
					break;
				}
			}
		}

		/*
		 * If some target is to be selected,
		 * prepare and start the selection.
		 */
		if (target != -1) {
			tp = &np->target[target];
			np->abrt_sel.sel_id = target;
			np->abrt_sel.sel_scntl3 = tp->head.wval;
			np->abrt_sel.sel_sxfer = tp->head.sval;
			OUTL(nc_dsa, np->hcb_ba);
			OUTL_DSP (SCRIPTB_BA (np, sel_for_abort));
			return;
		}

		/*
		 * Now look for a CCB to abort that hasn't started yet.
		 * Btw, the SCRIPTS processor is still stopped, so
		 * we are not in race.
		 */
		i = 0;
		cp = NULL;
		FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
			cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
			if (cp->host_status != HS_BUSY &&
			    cp->host_status != HS_NEGOTIATE)
				continue;
			if (!cp->to_abort)
				continue;
#ifdef SYM_CONF_IARB_SUPPORT
			/*
			 * If we are using IMMEDIATE ARBITRATION, we do not
			 * want to cancel the last queued CCB, since the
			 * SCRIPTS may have anticipated the selection.
			 */
			if (cp == np->last_cp) {
				cp->to_abort = 0;
				continue;
			}
#endif
			i = 1;	/* Means we have found some */
			break;
		}
		if (!i) {
			/*
			 * We are done, so we do not need
			 * to synchronize with the SCRIPTS any longer.
			 * Remove the SEM flag from the ISTAT.
			 */
			np->istat_sem = 0;
			OUTB (nc_istat, SIGP);
			break;
		}
		/*
		 * Compute index of next position in the start
		 * queue the SCRIPTS intends to start and dequeue
		 * all CCBs for that device that haven't been started.
		 */
		i = (INL (nc_scratcha) - np->squeue_ba) / 4;
		i = sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);

		/*
		 * Make sure at least our IO to abort has been dequeued.
		 */
		assert(i && sym_get_cam_status(cp->cam_ccb) == CAM_REQUEUE_REQ);

		/*
		 * Keep track in cam status of the reason of the abort
		 * (to_abort == 2 denotes a software timeout).
		 */
		if (cp->to_abort == 2)
			sym_set_cam_status(cp->cam_ccb, CAM_CMD_TIMEOUT);
		else
			sym_set_cam_status(cp->cam_ccb, CAM_REQ_ABORTED);

		/*
		 * Complete with error everything that we have dequeued.
		 */
		sym_flush_comp_queue(np, 0);
		break;
	/*
	 * The SCRIPTS processor has selected a target
	 * we may have some manual recovery to perform for.
	 */
	case SIR_TARGET_SELECTED:
		/* Selected target id = low 4 bits of the SDID register. */
		target = (INB (nc_sdid) & 0xf);
		tp = &np->target[target];

		np->abrt_tbl.addr = cpu_to_scr(vtobus(np->abrt_msg));

		/*
		 * If the target is to be reset, prepare a
		 * M_RESET message and clear the to_reset flag
		 * since we do not expect this operation to fail.
		 */
		if (tp->to_reset) {
			np->abrt_msg[0] = M_RESET;
			np->abrt_tbl.size = 1;
			tp->to_reset = 0;
			break;
		}

		/*
		 * Otherwise, look for some logical unit to be cleared.
		 */
		if (tp->lun0p && tp->lun0p->to_clear)
			lun = 0;
		else if (tp->lunmp) {
			for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
				if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
					lun = k;
					break;
				}
			}
		}

		/*
		 * If a logical unit is to be cleared, prepare
		 * an IDENTIFY(lun) + ABORT MESSAGE.
		 */
		if (lun != -1) {
			lcb_p lp = sym_lp(tp, lun);
			lp->to_clear = 0; /* We do not expect to fail here */
			np->abrt_msg[0] = M_IDENTIFY | lun;
			np->abrt_msg[1] = M_ABORT;
			np->abrt_tbl.size = 2;
			break;
		}

		/*
		 * Otherwise, look for some disconnected job to
		 * abort for this target.
		 */
		i = 0;
		cp = NULL;
		FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
			cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
			if (cp->host_status != HS_DISCONNECT)
				continue;
			if (cp->target != target)
				continue;
			if (!cp->to_abort)
				continue;
			i = 1;	/* Means we have some */
			break;
		}

		/*
		 * If we have none, probably since the device has
		 * completed the command before we won arbitration,
		 * send a M_ABORT message without IDENTIFY.
		 * According to the specs, the device must just
		 * disconnect the BUS and not abort any task.
		 */
		if (!i) {
			np->abrt_msg[0] = M_ABORT;
			np->abrt_tbl.size = 1;
			break;
		}

		/*
		 * We have some task to abort.
		 * Set the IDENTIFY(lun)
		 */
		np->abrt_msg[0] = M_IDENTIFY | cp->lun;

		/*
		 * If we want to abort an untagged command, we
		 * will send an IDENTIFY + M_ABORT.
		 * Otherwise (tagged command), we will send
		 * an IDENTIFY + task attributes + ABORT TAG.
		 */
		if (cp->tag == NO_TAG) {
			np->abrt_msg[1] = M_ABORT;
			np->abrt_tbl.size = 2;
		}
		else {
			/* scsi_smsg[1]/[2] hold the tag message bytes. */
			np->abrt_msg[1] = cp->scsi_smsg[1];
			np->abrt_msg[2] = cp->scsi_smsg[2];
			np->abrt_msg[3] = M_ABORT_TAG;
			np->abrt_tbl.size = 4;
		}
		/*
		 * Keep track of software timeout condition, since the
		 * peripheral driver may not count retries on abort
		 * conditions not due to timeout.
		 */
		if (cp->to_abort == 2)
			sym_set_cam_status(cp->cam_ccb, CAM_CMD_TIMEOUT);
		cp->to_abort = 0; /* We do not expect to fail here */
		break;

	/*
	 * The target has accepted our message and switched
	 * to BUS FREE phase as we expected.
	 */
	case SIR_ABORT_SENT:
		target = (INB (nc_sdid) & 0xf);
		tp = &np->target[target];

		/*
		 * If we didn't abort anything, leave here.
		 */
		if (np->abrt_msg[0] == M_ABORT)
			break;

		/*
		 * If we sent a M_RESET, then a hardware reset has
		 * been performed by the target.
		 * - Reset everything to async 8 bit
		 * - Tell ourself to negotiate next time :-)
		 * - Prepare to clear all disconnected CCBs for
		 *   this target from our task list (lun=task=-1)
		 */
		lun = -1;
		task = -1;
		if (np->abrt_msg[0] == M_RESET) {
			tp->head.sval = 0;
			tp->head.wval = np->rv_scntl3;
			tp->head.uval = 0;
			tp->tinfo.current.period = 0;
			tp->tinfo.current.offset = 0;
			tp->tinfo.current.width = BUS_8_BIT;
			tp->tinfo.current.options = 0;
		}

		/*
		 * Otherwise, check for the LUN and TASK(s)
		 * concerned by the cancellation.
		 * If it is not ABORT_TAG then it is CLEAR_QUEUE
		 * or an ABORT message :-)
		 */
		else {
			/* LUN is the low 6 bits of the IDENTIFY byte. */
			lun = np->abrt_msg[0] & 0x3f;
			if (np->abrt_msg[1] == M_ABORT_TAG)
				task = np->abrt_msg[2];
		}

		/*
		 * Complete all the CCBs the device should have
		 * aborted due to our 'kiss of death' message.
		 */
		i = (INL (nc_scratcha) - np->squeue_ba) / 4;
		(void) sym_dequeue_from_squeue(np, i, target, lun, -1);
		(void) sym_clear_tasks(np, CAM_REQ_ABORTED, target, lun, task);
		sym_flush_comp_queue(np, 0);

		/*
		 * If we sent a BDR, make upper layer aware of that.
		 */
		if (np->abrt_msg[0] == M_RESET)
			xpt_async(AC_SENT_BDR, np->path, NULL);
		break;
	}

	/*
	 * Print to the log the message we intend to send.
	 */
	if (num == SIR_TARGET_SELECTED) {
		PRINT_TARGET(np, target);
		sym_printl_hex("control msgout:", np->abrt_msg,
			      np->abrt_tbl.size);
		np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size);
	}

	/*
	 * Let the SCRIPTS processor continue.
	 */
	OUTONB_STD ();
}
5216
5217 /*
 * Gerard's alchemy:) that deals with the data
5219 * pointer for both MDP and the residual calculation.
5220 *
5221 * I didn't want to bloat the code by more than 200
 * lines for the handling of both MDP and the residual.
5223 * This has been achieved by using a data pointer
5224 * representation consisting in an index in the data
5225 * array (dp_sg) and a negative offset (dp_ofs) that
5226 * have the following meaning:
5227 *
5228 * - dp_sg = SYM_CONF_MAX_SG
5229 * we are at the end of the data script.
5230 * - dp_sg < SYM_CONF_MAX_SG
5231 * dp_sg points to the next entry of the scatter array
5232 * we want to transfer.
5233 * - dp_ofs < 0
5234 * dp_ofs represents the residual of bytes of the
5235 * previous entry scatter entry we will send first.
5236 * - dp_ofs = 0
5237 * no residual to send first.
5238 *
 * The function sym_evaluate_dp() accepts an arbitrary
5240 * offset (basically from the MDP message) and returns
5241 * the corresponding values of dp_sg and dp_ofs.
5242 */
/*
 * Translate an arbitrary data-pointer SCRIPT address 'scr' plus byte
 * offset '*ofs' into the (dp_sg, dp_ofs) representation described in
 * the comment block above. Returns dp_sg (updating *ofs to dp_ofs),
 * 0 while auto-sensing, or -1 when the pointer falls outside the
 * data area.
 */
static int sym_evaluate_dp(hcb_p np, ccb_p cp, u32 scr, int *ofs)
{
	u32	dp_scr;
	int	dp_ofs, dp_sg, dp_sgmin;
	int	tmp;
	struct	sym_pmc *pm;

	/*
	 * Compute the resulted data pointer in term of a script
	 * address within some DATA script and a signed byte offset.
	 */
	dp_scr = scr;
	dp_ofs = *ofs;
	/* If 'scr' is a phase-mismatch context, unwind to its return
	 * address and fold the not-yet-transferred PM bytes into the
	 * offset. */
	if (dp_scr == SCRIPTA_BA (np, pm0_data))
		pm = &cp->phys.pm0;
	else if (dp_scr == SCRIPTA_BA (np, pm1_data))
		pm = &cp->phys.pm1;
	else
		pm = NULL;

	if (pm) {
		dp_scr = scr_to_cpu(pm->ret);
		dp_ofs -= scr_to_cpu(pm->sg.size);
	}

	/*
	 * If we are auto-sensing, then we are done.
	 */
	if (cp->host_flags & HF_SENSE) {
		*ofs = dp_ofs;
		return 0;
	}

	/*
	 * Deduce the index of the sg entry.
	 * Keep track of the index of the first valid entry.
	 * If result is dp_sg = SYM_CONF_MAX_SG, then we are at the
	 * end of the data.
	 */
	tmp = scr_to_cpu(cp->phys.head.goalp);
	dp_sg = SYM_CONF_MAX_SG;
	/* Each scatter entry occupies 2*4 bytes of SCRIPTS; goalp-8 is
	 * the script address just past the last entry. */
	if (dp_scr != tmp)
		dp_sg -= (tmp - 8 - (int)dp_scr) / (2*4);
	dp_sgmin = SYM_CONF_MAX_SG - cp->segments;

	/*
	 * Move to the sg entry the data pointer belongs to.
	 *
	 * If we are inside the data area, we expect result to be:
	 *
	 * Either,
	 *     dp_ofs = 0 and dp_sg is the index of the sg entry
	 *     the data pointer belongs to (or the end of the data)
	 * Or,
	 *     dp_ofs < 0 and dp_sg is the index of the sg entry
	 *     the data pointer belongs to + 1.
	 */
	if (dp_ofs < 0) {
		/* Walk backwards, consuming entry sizes until the
		 * negative offset is absorbed. */
		int n;
		while (dp_sg > dp_sgmin) {
			--dp_sg;
			tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
			n = dp_ofs + (tmp & 0xffffff);
			if (n > 0) {
				++dp_sg;
				break;
			}
			dp_ofs = n;
		}
	}
	else if (dp_ofs > 0) {
		/* Walk forwards through entries until the positive
		 * offset is consumed. */
		while (dp_sg < SYM_CONF_MAX_SG) {
			tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
			dp_ofs -= (tmp & 0xffffff);
			++dp_sg;
			if (dp_ofs <= 0)
				break;
		}
	}

	/*
	 * Make sure the data pointer is inside the data area.
	 * If not, return some error.
	 */
	if (dp_sg < dp_sgmin || (dp_sg == dp_sgmin && dp_ofs < 0))
		goto out_err;
	else if (dp_sg > SYM_CONF_MAX_SG ||
		 (dp_sg == SYM_CONF_MAX_SG && dp_ofs > 0))
		goto out_err;

	/*
	 * Save the extreme pointer if needed.
	 */
	if (dp_sg > cp->ext_sg ||
	    (dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) {
		cp->ext_sg  = dp_sg;
		cp->ext_ofs = dp_ofs;
	}

	/*
	 * Return data.
	 */
	*ofs = dp_ofs;
	return dp_sg;

out_err:
	return -1;
}
5351
5352 /*
5353 * chip handler for MODIFY DATA POINTER MESSAGE
5354 *
5355 * We also call this function on IGNORE WIDE RESIDUE
5356 * messages that do not match a SWIDE full condition.
5357 * Btw, we assume in that situation that such a message
5358 * is equivalent to a MODIFY DATA POINTER (offset=-1).
5359 */
/*
 * Apply a MODIFY DATA POINTER of 'ofs' bytes to the CCB's current
 * data pointer (held in the chip TEMP register) and restart SCRIPTS
 * at 'clrack', or reject the message when the resulting pointer is
 * invalid or we are auto-sensing.
 */
static void sym_modify_dp(hcb_p np, ccb_p cp, int ofs)
{
	int dp_ofs	= ofs;
	u32	dp_scr	= INL (nc_temp);	/* current data pointer script address */
	u32	dp_ret;
	u32	tmp;
	u_char	hflags;
	int	dp_sg;
	struct	sym_pmc *pm;

	/*
	 * Not supported for auto-sense.
	 */
	if (cp->host_flags & HF_SENSE)
		goto out_reject;

	/*
	 * Apply our alchemy:) (see comments in sym_evaluate_dp()),
	 * to the resulted data pointer.
	 */
	dp_sg = sym_evaluate_dp(np, cp, dp_scr, &dp_ofs);
	if (dp_sg < 0)
		goto out_reject;

	/*
	 * And our alchemy:) allows to easily calculate the data
	 * script address we want to return for the next data phase.
	 * (goalp - 8 - remaining entries * 8 bytes per entry)
	 */
	dp_ret = cpu_to_scr(cp->phys.head.goalp);
	dp_ret = dp_ret - 8 - (SYM_CONF_MAX_SG - dp_sg) * (2*4);

	/*
	 * If offset / scatter entry is zero we do not need
	 * a context for the new current data pointer.
	 */
	if (dp_ofs == 0) {
		dp_scr = dp_ret;
		goto out_ok;
	}

	/*
	 * Get a context for the new current data pointer.
	 * (see the PM_SAVE SCRIPTS logic: HF_DP_SAVED toggles the
	 * active PM area)
	 */
	hflags = INB (HF_PRT);

	if (hflags & HF_DP_SAVED)
		hflags ^= HF_ACT_PM;

	if (!(hflags & HF_ACT_PM)) {
		pm = &cp->phys.pm0;
		dp_scr = SCRIPTA_BA (np, pm0_data);
	}
	else {
		pm = &cp->phys.pm1;
		dp_scr = SCRIPTA_BA (np, pm1_data);
	}

	hflags &= ~(HF_DP_SAVED);

	OUTB (HF_PRT, hflags);

	/*
	 * Set up the new current data pointer.
	 * ofs < 0 there, and for the next data phase, we
	 * want to transfer part of the data of the sg entry
	 * corresponding to index dp_sg-1 prior to returning
	 * to the main data script.
	 */
	pm->ret = cpu_to_scr(dp_ret);
	tmp  = scr_to_cpu(cp->phys.data[dp_sg-1].addr);
	tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs;
	pm->sg.addr = cpu_to_scr(tmp);
	pm->sg.size = cpu_to_scr(-dp_ofs);

out_ok:
	/* Publish the new data pointer and resume SCRIPTS. */
	OUTL (nc_temp, dp_scr);
	OUTL_DSP (SCRIPTA_BA (np, clrack));
	return;

out_reject:
	OUTL_DSP (SCRIPTB_BA (np, msg_bad));
}
5442
5443 /*
5444 * chip calculation of the data residual.
5445 *
5446 * As I used to say, the requirement of data residual
5447 * in SCSI is broken, useless and cannot be achieved
5448 * without huge complexity.
5449 * But most OSes and even the official CAM require it.
5450 * When stupidity happens to be so widely spread inside
5451 * a community, it gets hard to convince.
5452 *
5453 * Anyway, I don't care, since I am not going to use
5454 * any software that considers this data residual as
5455 * a relevant information. :)
5456 */
static int sym_compute_residual(hcb_p np, ccb_p cp)
{
	int dp_ofs = 0;
	int resid = 0;
	int sg;

	/*
	 * Account for bytes lost or deliberately thrown away
	 * (overrun / underrun conditions). A rough figure is
	 * acceptable here.
	 */
	if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) {
		if (cp->xerr_status & XE_EXTRA_DATA)
			resid -= cp->extra_bytes;
		if (cp->xerr_status & XE_SODL_UNRUN)
			++resid;
		if (cp->xerr_status & XE_SWIDE_OVRUN)
			--resid;
	}

	/*
	 * All data transferred: only the fixups above remain.
	 */
	if (cp->phys.head.lastp == cp->phys.head.goalp)
		return resid;

	/*
	 * No data phase occurred at all, or the data pointer is
	 * unusable: report the full transfer length.
	 */
	if (cp->startp == cp->phys.head.lastp ||
	    sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp),
			    &dp_ofs) < 0)
		return cp->data_len;

	/*
	 * Auto-sense: the (negative) offset alone is the residual.
	 */
	if (cp->host_flags & HF_SENSE)
		return -dp_ofs;

	/*
	 * Normal case: remainder of the current scatter entry plus
	 * the sizes of every entry not reached yet (2's complement).
	 */
	resid = -cp->ext_ofs;
	for (sg = cp->ext_sg; sg < SYM_CONF_MAX_SG; ++sg)
		resid += (scr_to_cpu(cp->phys.data[sg].size) & 0xffffff);

	return resid;
}
5517
5518 /*
5519 * Print out the content of a SCSI message.
5520 */
static int sym_show_msg (u_char * msg)
{
	u_char n;

	printf ("%x", msg[0]);
	if (msg[0] == M_EXTENDED) {
		/*
		 * Extended message: print up to 7 more bytes,
		 * bounded by the length byte in msg[1].
		 */
		for (n = 1; n < 8; n++) {
			if (n - 1 > msg[1])
				break;
			printf ("-%x", msg[n]);
		}
		return (n + 1);
	}
	if ((msg[0] & 0xf0) == 0x20) {
		/* Two-byte message (codes 0x20-0x2f): one argument byte. */
		printf ("-%x", msg[1]);
		return (2);
	}
	/* Plain one-byte message. */
	return (1);
}
5537
static void sym_print_msg (ccb_p cp, char *label, u_char *msg)
{
	/* Prefix with the device address, then an optional label. */
	PRINT_ADDR(cp);
	if (label != NULL)
		printf ("%s: ", label);
	(void) sym_show_msg (msg);
	printf (".\n");
}
5547
5548 /*
5549 * Negotiation for WIDE and SYNCHRONOUS DATA TRANSFER.
5550 *
5551 * When we try to negotiate, we append the negotiation message
5552 * to the identify and (maybe) simple tag message.
5553 * The host status field is set to HS_NEGOTIATE to mark this
5554 * situation.
5555 *
5556 * If the target doesn't answer this message immediately
5557 * (as required by the standard), the SIR_NEGO_FAILED interrupt
5558 * will be raised eventually.
5559 * The handler removes the HS_NEGOTIATE status, and sets the
5560 * negotiated value to the default (async / nowide).
5561 *
5562 * If we receive a matching answer immediately, we check it
5563 * for validity, and set the values.
5564 *
5565 * If we receive a Reject message immediately, we assume the
5566 * negotiation has failed, and fall back to standard values.
5567 *
5568 * If we receive a negotiation message while not in HS_NEGOTIATE
5569 * state, it's a target initiated negotiation. We prepare a
5570 * (hopefully) valid answer, set our parameters, and send back
5571 * this answer to the target.
5572 *
5573 * If the target doesn't fetch the answer (no message out phase),
5574 * we assume the negotiation has failed, and fall back to default
5575 * settings (SIR_NEGO_PROTO interrupt).
5576 *
5577 * When we set the values, we adjust them in all ccbs belonging
5578 * to this target, in the controller's register, and in the "phys"
5579 * field of the controller's struct sym_hcb.
5580 */
5581
/*
 * chip handler for SYNCHRONOUS DATA TRANSFER REQUEST (SDTR) message.
 *
 * Handles both a target-initiated request and the target's answer
 * to a negotiation we started. Unacceptable answers are rejected
 * and the transfer mode falls back to asynchronous.
 */
static void sym_sync_nego(hcb_p np, tcb_p tp, ccb_p cp)
{
	u_char	chg, ofs, per, fak, div;
	int	req = 1;

	/*
	 * Synchronous request message received.
	 */
	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_msg(cp, "sync msgin", np->msgin);
	}

	/*
	 * request or answer ?
	 * HS_NEGOTIATE means we initiated this negotiation,
	 * so this message is the target's answer.
	 */
	if (INB (HS_PRT) == HS_NEGOTIATE) {
		OUTB (HS_PRT, HS_BUSY);
		if (cp->nego_status && cp->nego_status != NS_SYNC)
			goto reject_it;
		req = 0;
	}

	/*
	 * get requested values.
	 */
	chg = 0;
	per = np->msgin[3];
	ofs = np->msgin[4];

	/*
	 * check values against our limits.
	 * User limits only constrain target-initiated requests.
	 */
	if (ofs) {
		if (ofs > np->maxoffs)
			{chg = 1; ofs = np->maxoffs;}
		if (req) {
			if (ofs > tp->tinfo.user.offset)
				{chg = 1; ofs = tp->tinfo.user.offset;}
		}
	}

	if (ofs) {
		if (per < np->minsync)
			{chg = 1; per = np->minsync;}
		if (req) {
			if (per < tp->tinfo.user.period)
				{chg = 1; per = tp->tinfo.user.period;}
		}
	}

	/*
	 * Compute divisor and sync factor for the chip; a failure
	 * means the period cannot be achieved.
	 */
	div = fak = 0;
	if (ofs && sym_getsync(np, 0, per, &div, &fak) < 0)
		goto reject_it;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		PRINT_ADDR(cp);
		printf ("sdtr: ofs=%d per=%d div=%d fak=%d chg=%d.\n",
			ofs, per, div, fak, chg);
	}

	/*
	 * This was an answer message
	 */
	if (req == 0) {
		if (chg) 	/* Answer wasn't acceptable. */
			goto reject_it;
		sym_setsync (np, cp, ofs, per, div, fak);
		OUTL_DSP (SCRIPTA_BA (np, clrack));
		return;
	}

	/*
	 * It was a request. Set value and
	 * prepare an answer message
	 */
	sym_setsync (np, cp, ofs, per, div, fak);

	np->msgout[0] = M_EXTENDED;
	np->msgout[1] = 3;
	np->msgout[2] = M_X_SYNC_REQ;
	np->msgout[3] = per;
	np->msgout[4] = ofs;

	cp->nego_status = NS_SYNC;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_msg(cp, "sync msgout", np->msgout);
	}

	np->msgin [0] = M_NOOP;

	OUTL_DSP (SCRIPTB_BA (np, sdtr_resp));
	return;
reject_it:
	/* Fall back to asynchronous transfers and reject the message. */
	sym_setsync (np, cp, 0, 0, 0, 0);
	OUTL_DSP (SCRIPTB_BA (np, msg_bad));
}
5682
/*
 * chip handler for PARALLEL PROTOCOL REQUEST (PPR) message.
 *
 * Like SDTR handling, but also negotiates the wide bit and the
 * DT-clocking protocol option. On rejection of a device answer
 * that implied ST clocking, a legacy (WDTR/SDTR) negotiation may
 * be retried later.
 */
static void sym_ppr_nego(hcb_p np, tcb_p tp, ccb_p cp)
{
	u_char	chg, ofs, per, fak, dt, div, wide;
	int	req = 1;

	/*
	 * Synchronous request message received.
	 */
	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_msg(cp, "ppr msgin", np->msgin);
	}

	/*
	 * get requested values.
	 */
	chg = 0;
	per = np->msgin[3];
	ofs = np->msgin[5];
	wide = np->msgin[6];
	dt = np->msgin[7] & PPR_OPT_DT;

	/*
	 * request or answer ?
	 * HS_NEGOTIATE means we initiated this negotiation,
	 * so this message is the target's answer.
	 */
	if (INB (HS_PRT) == HS_NEGOTIATE) {
		OUTB (HS_PRT, HS_BUSY);
		if (cp->nego_status && cp->nego_status != NS_PPR)
			goto reject_it;
		req = 0;
	}

	/*
	 * check values against our limits.
	 * DT requires a wide bus and Ultra-3 capable chip.
	 */
	if (wide > np->maxwide)
		{chg = 1; wide = np->maxwide;}
	if (!wide || !(np->features & FE_ULTRA3))
		dt &= ~PPR_OPT_DT;
	if (req) {
		if (wide > tp->tinfo.user.width)
			{chg = 1; wide = tp->tinfo.user.width;}
	}

	if (!(np->features & FE_U3EN))	/* Broken U3EN bit not supported */
		dt &= ~PPR_OPT_DT;

	if (dt != (np->msgin[7] & PPR_OPT_MASK)) chg = 1;

	if (ofs) {
		if (dt) {
			if (ofs > np->maxoffs_dt)
				{chg = 1; ofs = np->maxoffs_dt;}
		}
		else if (ofs > np->maxoffs)
			{chg = 1; ofs = np->maxoffs;}
		if (req) {
			if (ofs > tp->tinfo.user.offset)
				{chg = 1; ofs = tp->tinfo.user.offset;}
		}
	}

	if (ofs) {
		if (dt) {
			if (per < np->minsync_dt)
				{chg = 1; per = np->minsync_dt;}
		}
		else if (per < np->minsync)
			{chg = 1; per = np->minsync;}
		if (req) {
			if (per < tp->tinfo.user.period)
				{chg = 1; per = tp->tinfo.user.period;}
		}
	}

	/*
	 * Compute divisor and sync factor for the chip; a failure
	 * means the period cannot be achieved.
	 */
	div = fak = 0;
	if (ofs && sym_getsync(np, dt, per, &div, &fak) < 0)
		goto reject_it;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		PRINT_ADDR(cp);
		printf ("ppr: "
			"dt=%x ofs=%d per=%d wide=%d div=%d fak=%d chg=%d.\n",
			dt, ofs, per, wide, div, fak, chg);
	}

	/*
	 * It was an answer.
	 */
	if (req == 0) {
		if (chg) 	/* Answer wasn't acceptable */
			goto reject_it;
		sym_setpprot (np, cp, dt, ofs, per, wide, div, fak);
		OUTL_DSP (SCRIPTA_BA (np, clrack));
		return;
	}

	/*
	 * It was a request. Set value and
	 * prepare an answer message
	 */
	sym_setpprot (np, cp, dt, ofs, per, wide, div, fak);

	np->msgout[0] = M_EXTENDED;
	np->msgout[1] = 6;
	np->msgout[2] = M_X_PPR_REQ;
	np->msgout[3] = per;
	np->msgout[4] = 0;
	np->msgout[5] = ofs;
	np->msgout[6] = wide;
	np->msgout[7] = dt;

	cp->nego_status = NS_PPR;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_msg(cp, "ppr msgout", np->msgout);
	}

	np->msgin [0] = M_NOOP;

	OUTL_DSP (SCRIPTB_BA (np, ppr_resp));
	return;
reject_it:
	sym_setpprot (np, cp, 0, 0, 0, 0, 0, 0);
	OUTL_DSP (SCRIPTB_BA (np, msg_bad));
	/*
	 * If it was a device response that should result in
	 * ST, we may want to try a legacy negotiation later.
	 */
	if (!req && !dt) {
		tp->tinfo.goal.options = 0;
		tp->tinfo.goal.width = wide;
		tp->tinfo.goal.period = per;
		tp->tinfo.goal.offset = ofs;
	}
}
5821
/*
 * chip handler for WIDE DATA TRANSFER REQUEST (WDTR) message.
 *
 * After a successful WIDE answer from the target, a SYNC
 * negotiation is chained immediately if one is wanted, so both
 * can complete on a single SCSI command.
 */
static void sym_wide_nego(hcb_p np, tcb_p tp, ccb_p cp)
{
	u_char	chg, wide;
	int	req = 1;

	/*
	 * Wide request message received.
	 */
	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_msg(cp, "wide msgin", np->msgin);
	}

	/*
	 * Is it a request from the device?
	 * HS_NEGOTIATE means we initiated this negotiation,
	 * so this message is the target's answer.
	 */
	if (INB (HS_PRT) == HS_NEGOTIATE) {
		OUTB (HS_PRT, HS_BUSY);
		if (cp->nego_status && cp->nego_status != NS_WIDE)
			goto reject_it;
		req = 0;
	}

	/*
	 * get requested values.
	 */
	chg = 0;
	wide = np->msgin[3];

	/*
	 * check values against driver limits.
	 * User limits only constrain target-initiated requests.
	 */
	if (wide > np->maxwide)
		{chg = 1; wide = np->maxwide;}
	if (req) {
		if (wide > tp->tinfo.user.width)
			{chg = 1; wide = tp->tinfo.user.width;}
	}

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		PRINT_ADDR(cp);
		printf ("wdtr: wide=%d chg=%d.\n", wide, chg);
	}

	/*
	 * This was an answer message
	 */
	if (req == 0) {
		if (chg)	/* Answer wasn't acceptable. */
			goto reject_it;
		sym_setwide (np, cp, wide);

		/*
		 * Negotiate for SYNC immediately after WIDE response.
		 * This allows to negotiate for both WIDE and SYNC on
		 * a single SCSI command (Suggested by Justin Gibbs).
		 */
		if (tp->tinfo.goal.offset) {
			np->msgout[0] = M_EXTENDED;
			np->msgout[1] = 3;
			np->msgout[2] = M_X_SYNC_REQ;
			np->msgout[3] = tp->tinfo.goal.period;
			np->msgout[4] = tp->tinfo.goal.offset;

			if (DEBUG_FLAGS & DEBUG_NEGO) {
				sym_print_msg(cp, "sync msgout", np->msgout);
			}

			cp->nego_status = NS_SYNC;
			OUTB (HS_PRT, HS_NEGOTIATE);
			OUTL_DSP (SCRIPTB_BA (np, sdtr_resp));
			return;
		}

		OUTL_DSP (SCRIPTA_BA (np, clrack));
		return;
	}

	/*
	 * It was a request, set value and
	 * prepare an answer message
	 */
	sym_setwide (np, cp, wide);

	np->msgout[0] = M_EXTENDED;
	np->msgout[1] = 2;
	np->msgout[2] = M_X_WIDE_REQ;
	np->msgout[3] = wide;

	np->msgin [0] = M_NOOP;

	cp->nego_status = NS_WIDE;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_msg(cp, "wide msgout", np->msgout);
	}

	OUTL_DSP (SCRIPTB_BA (np, wdtr_resp));
	return;
reject_it:
	OUTL_DSP (SCRIPTB_BA (np, msg_bad));
}
5926
/*
 * Reset SYNC or WIDE to default settings.
 *
 * Called when a negotiation does not succeed either
 * on rejection or on protocol error.
 *
 * If it was a PPR that made problems, we may want to
 * try a legacy negotiation later.
 */
static void sym_nego_default(hcb_p np, tcb_p tp, ccb_p cp)
{
	/*
	 * any error in negotiation:
	 * fall back to default mode.
	 */
	switch (cp->nego_status) {
	case NS_PPR:
#if 0
		sym_setpprot (np, cp, 0, 0, 0, 0, 0, 0);
#else
		/*
		 * Keep ST-capable goal values so that a later
		 * WDTR/SDTR retry stays within chip limits.
		 */
		tp->tinfo.goal.options = 0;
		if (tp->tinfo.goal.period < np->minsync)
			tp->tinfo.goal.period = np->minsync;
		if (tp->tinfo.goal.offset > np->maxoffs)
			tp->tinfo.goal.offset = np->maxoffs;
#endif
		break;
	case NS_SYNC:
		sym_setsync (np, cp, 0, 0, 0, 0);
		break;
	case NS_WIDE:
		sym_setwide (np, cp, 0);
		break;
	}
	/* Clear any pending message and the negotiation state. */
	np->msgin [0] = M_NOOP;
	np->msgout[0] = M_NOOP;
	cp->nego_status = 0;
}
5965
/*
 * chip handler for MESSAGE REJECT received in response to
 * a WIDE or SYNCHRONOUS negotiation.
 *
 * Falls back to default settings and clears the negotiation
 * status in the host status register.
 */
static void sym_nego_rejected(hcb_p np, tcb_p tp, ccb_p cp)
{
	sym_nego_default(np, tp, cp);
	OUTB (HS_PRT, HS_BUSY);
}
5975
/*
 * chip exception handler for programmed interrupts.
 *
 * Dispatches on the SIR code left in the DSPS register by the
 * SCRIPTS processor. Most cases either recover and restart the
 * SCRIPTS (out/out_clrack), prepare a message to send, or leave
 * the SCRIPTS stopped (out_stuck) for further C-code recovery.
 */
static void sym_int_sir (hcb_p np)
{
	u_char	num	= INB (nc_dsps);	/* SIR interrupt code */
	u32	dsa	= INL (nc_dsa);
	ccb_p	cp	= sym_ccb_from_dsa(np, dsa);	/* may be NULL */
	u_char	target	= INB (nc_sdid) & 0x0f;
	tcb_p	tp	= &np->target[target];
	int	tmp;

	SYM_LOCK_ASSERT(MA_OWNED);

	if (DEBUG_FLAGS & DEBUG_TINY) printf ("I#%d", num);

	switch (num) {
	/*
	 * Command has been completed with error condition
	 * or has been auto-sensed.
	 */
	case SIR_COMPLETE_ERROR:
		if (!cp)
			goto out;
		sym_complete_error(np, cp);
		return;
	/*
	 * The C code is currently trying to recover from something.
	 * Typically, user want to abort some command.
	 */
	case SIR_SCRIPT_STOPPED:
	case SIR_TARGET_SELECTED:
	case SIR_ABORT_SENT:
		sym_sir_task_recovery(np, num);
		return;
	/*
	 * The device didn't go to MSG OUT phase after having
	 * been selected with ATN. We donnot want to handle
	 * that.
	 */
	case SIR_SEL_ATN_NO_MSG_OUT:
		printf ("%s:%d: No MSG OUT phase after selection with ATN.\n",
			sym_name (np), target);
		goto out_stuck;
	/*
	 * The device didn't switch to MSG IN phase after
	 * having reselected the initiator.
	 */
	case SIR_RESEL_NO_MSG_IN:
		printf ("%s:%d: No MSG IN phase after reselection.\n",
			sym_name (np), target);
		goto out_stuck;
	/*
	 * After reselection, the device sent a message that wasn't
	 * an IDENTIFY.
	 */
	case SIR_RESEL_NO_IDENTIFY:
		printf ("%s:%d: No IDENTIFY after reselection.\n",
			sym_name (np), target);
		goto out_stuck;
	/*
	 * The device reselected a LUN we donnot know about.
	 */
	case SIR_RESEL_BAD_LUN:
		np->msgout[0] = M_RESET;
		goto out;
	/*
	 * The device reselected for an untagged nexus and we
	 * haven't any.
	 */
	case SIR_RESEL_BAD_I_T_L:
		np->msgout[0] = M_ABORT;
		goto out;
	/*
	 * The device reselected for a tagged nexus that we donnot
	 * have.
	 */
	case SIR_RESEL_BAD_I_T_L_Q:
		np->msgout[0] = M_ABORT_TAG;
		goto out;
	/*
	 * The SCRIPTS let us know that the device has grabbed
	 * our message and will abort the job.
	 */
	case SIR_RESEL_ABORTED:
		np->lastmsg = np->msgout[0];
		np->msgout[0] = M_NOOP;
		printf ("%s:%d: message %x sent on bad reselection.\n",
			sym_name (np), target, np->lastmsg);
		goto out;
	/*
	 * The SCRIPTS let us know that a message has been
	 * successfully sent to the device.
	 */
	case SIR_MSG_OUT_DONE:
		np->lastmsg = np->msgout[0];
		np->msgout[0] = M_NOOP;
		/* Should we really care of that */
		if (np->lastmsg == M_PARITY || np->lastmsg == M_ID_ERROR) {
			if (cp) {
				cp->xerr_status &= ~XE_PARITY_ERR;
				if (!cp->xerr_status)
					OUTOFFB (HF_PRT, HF_EXT_ERR);
			}
		}
		goto out;
	/*
	 * The device didn't send a GOOD SCSI status.
	 * We may have some work to do prior to allow
	 * the SCRIPTS processor to continue.
	 */
	case SIR_BAD_SCSI_STATUS:
		if (!cp)
			goto out;
		sym_sir_bad_scsi_status(np, cp);
		return;
	/*
	 * We are asked by the SCRIPTS to prepare a
	 * REJECT message.
	 */
	case SIR_REJECT_TO_SEND:
		sym_print_msg(cp, "M_REJECT to send for ", np->msgin);
		np->msgout[0] = M_REJECT;
		goto out;
	/*
	 * We have been ODD at the end of a DATA IN
	 * transfer and the device didn't send a
	 * IGNORE WIDE RESIDUE message.
	 * It is a data overrun condition.
	 */
	case SIR_SWIDE_OVERRUN:
		if (cp) {
			OUTONB (HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_SWIDE_OVRUN;
		}
		goto out;
	/*
	 * We have been ODD at the end of a DATA OUT
	 * transfer.
	 * It is a data underrun condition.
	 */
	case SIR_SODL_UNDERRUN:
		if (cp) {
			OUTONB (HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_SODL_UNRUN;
		}
		goto out;
	/*
	 * The device wants us to transfer more data than
	 * expected or in the wrong direction.
	 * The number of extra bytes is in scratcha.
	 * It is a data overrun condition.
	 */
	case SIR_DATA_OVERRUN:
		if (cp) {
			OUTONB (HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_EXTRA_DATA;
			cp->extra_bytes += INL (nc_scratcha);
		}
		goto out;
	/*
	 * The device switched to an illegal phase (4/5).
	 */
	case SIR_BAD_PHASE:
		if (cp) {
			OUTONB (HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_BAD_PHASE;
		}
		goto out;
	/*
	 * We received a message.
	 */
	case SIR_MSG_RECEIVED:
		if (!cp)
			goto out_stuck;
		switch (np->msgin [0]) {
		/*
		 * We received an extended message.
		 * We handle MODIFY DATA POINTER, SDTR, WDTR
		 * and reject all other extended messages.
		 */
		case M_EXTENDED:
			switch (np->msgin [2]) {
			case M_X_MODIFY_DP:
				if (DEBUG_FLAGS & DEBUG_POINTER)
					sym_print_msg(cp,"modify DP",np->msgin);
				/* Rebuild the 32-bit big-endian offset argument. */
				tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) +
				      (np->msgin[5]<<8) + (np->msgin[6]);
				sym_modify_dp(np, cp, tmp);
				return;
			case M_X_SYNC_REQ:
				sym_sync_nego(np, tp, cp);
				return;
			case M_X_PPR_REQ:
				sym_ppr_nego(np, tp, cp);
				return;
			case M_X_WIDE_REQ:
				sym_wide_nego(np, tp, cp);
				return;
			default:
				goto out_reject;
			}
			break;
		/*
		 * We received a 1/2 byte message not handled from SCRIPTS.
		 * We are only expecting MESSAGE REJECT and IGNORE WIDE
		 * RESIDUE messages that haven't been anticipated by
		 * SCRIPTS on SWIDE full condition. Unanticipated IGNORE
		 * WIDE RESIDUE messages are aliased as MODIFY DP (-1).
		 */
		case M_IGN_RESIDUE:
			if (DEBUG_FLAGS & DEBUG_POINTER)
				sym_print_msg(cp,"ign wide residue", np->msgin);
			sym_modify_dp(np, cp, -1);
			return;
		case M_REJECT:
			if (INB (HS_PRT) == HS_NEGOTIATE)
				sym_nego_rejected(np, tp, cp);
			else {
				PRINT_ADDR(cp);
				printf ("M_REJECT received (%x:%x).\n",
					scr_to_cpu(np->lastmsg), np->msgout[0]);
			}
			goto out_clrack;
			break;
		default:
			goto out_reject;
		}
		break;
	/*
	 * We received an unknown message.
	 * Ignore all MSG IN phases and reject it.
	 */
	case SIR_MSG_WEIRD:
		sym_print_msg(cp, "WEIRD message received", np->msgin);
		OUTL_DSP (SCRIPTB_BA (np, msg_weird));
		return;
	/*
	 * Negotiation failed.
	 * Target does not send us the reply.
	 * Remove the HS_NEGOTIATE status.
	 */
	case SIR_NEGO_FAILED:
		OUTB (HS_PRT, HS_BUSY);
		/* FALLTHROUGH */
	/*
	 * Negotiation failed.
	 * Target does not want answer message.
	 */
	case SIR_NEGO_PROTO:
		if (!cp)
			goto out;
		sym_nego_default(np, tp, cp);
		goto out;
	}

out:
	OUTONB_STD ();
	return;
out_reject:
	OUTL_DSP (SCRIPTB_BA (np, msg_bad));
	return;
out_clrack:
	OUTL_DSP (SCRIPTA_BA (np, clrack));
	return;
out_stuck:
	return;
}
6243
/*
 * Acquire a control block
 *
 * Dequeues a free CCB and binds it to the (target, lun) nexus,
 * allocating an LCB and a tag if required. Returns NULL when no
 * CCB is available or the nexus would conflict with an existing
 * (tagged vs untagged) one.
 */
static ccb_p sym_get_ccb (hcb_p np, u_char tn, u_char ln, u_char tag_order)
{
	tcb_p tp = &np->target[tn];
	lcb_p lp = sym_lp(tp, ln);
	u_short tag = NO_TAG;
	SYM_QUEHEAD *qp;
	ccb_p cp = (ccb_p) NULL;

	/*
	 * Look for a free CCB
	 */
	if (sym_que_empty(&np->free_ccbq))
		goto out;
	qp = sym_remque_head(&np->free_ccbq);
	if (!qp)
		goto out;
	cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);

	/*
	 * If the LCB is not yet available and the LUN
	 * has been probed ok, try to allocate the LCB.
	 */
	if (!lp && sym_is_bit(tp->lun_map, ln)) {
		lp = sym_alloc_lcb(np, tn, ln);
		if (!lp)
			goto out_free;
	}

	/*
	 * If the LCB is not available here, then the
	 * logical unit is not yet discovered. For those
	 * ones only accept 1 SCSI IO per logical unit,
	 * since we cannot allow disconnections.
	 */
	if (!lp) {
		if (!sym_is_bit(tp->busy0_map, ln))
			sym_set_bit(tp->busy0_map, ln);
		else
			goto out_free;
	} else {
		/*
		 * If we have been asked for a tagged command, refuse
		 * to overlap with an existing untagged one.
		 */
		if (tag_order) {
			if (lp->busy_itl != 0)
				goto out_free;
			/*
			 * Allocate resources for tags if not yet.
			 */
			if (!lp->cb_tags) {
				sym_alloc_lcb_tags(np, tn, ln);
				if (!lp->cb_tags)
					goto out_free;
			}
			/*
			 * Get a tag for this SCSI IO and set up
			 * the CCB bus address for reselection,
			 * and count it for this LUN.
			 * Toggle reselect path to tagged.
			 */
			if (lp->busy_itlq < SYM_CONF_MAX_TASK) {
				tag = lp->cb_tags[lp->ia_tag];
				if (++lp->ia_tag == SYM_CONF_MAX_TASK)
					lp->ia_tag = 0;
				lp->itlq_tbl[tag] = cpu_to_scr(cp->ccb_ba);
				++lp->busy_itlq;
				lp->head.resel_sa =
					cpu_to_scr(SCRIPTA_BA (np, resel_tag));
			}
			else
				goto out_free;
		}
		/*
		 * This command will not be tagged.
		 * If we already have either a tagged or untagged
		 * one, refuse to overlap this untagged one.
		 */
		else {
			if (lp->busy_itlq != 0 || lp->busy_itl != 0)
				goto out_free;
			/*
			 * Count this nexus for this LUN.
			 * Set up the CCB bus address for reselection.
			 * Toggle reselect path to untagged.
			 */
			lp->busy_itl = 1;
			lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba);
			lp->head.resel_sa =
			      cpu_to_scr(SCRIPTA_BA (np, resel_no_tag));
		}
	}
	/*
	 * Put the CCB into the busy queue.
	 */
	sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);

	/*
	 * Remember all information needed to free this CCB.
	 */
	cp->to_abort = 0;
	cp->tag	   = tag;
	cp->target = tn;
	cp->lun    = ln;

	if (DEBUG_FLAGS & DEBUG_TAGS) {
		PRINT_LUN(np, tn, ln);
		printf ("ccb @%p using tag %d.\n", cp, tag);
	}

out:
	return cp;
out_free:
	/* Roll back: only reachable after cp was dequeued. */
	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
	return NULL;
}
6363
/*
 * Release one control block
 *
 * Undoes everything sym_get_ccb() set up: returns the tag to the
 * ring, invalidates the reselect paths, clears per-LUN busy
 * accounting, unloads the DMA map and requeues the CCB as free.
 */
static void sym_free_ccb(hcb_p np, ccb_p cp)
{
	tcb_p tp = &np->target[cp->target];
	lcb_p lp = sym_lp(tp, cp->lun);

	if (DEBUG_FLAGS & DEBUG_TAGS) {
		PRINT_LUN(np, cp->target, cp->lun);
		printf ("ccb @%p freeing tag %d.\n", cp, cp->tag);
	}

	/*
	 * If LCB available,
	 */
	if (lp) {
		/*
		 * If tagged, release the tag, set the reselect path.
		 */
		if (cp->tag != NO_TAG) {
			/*
			 * Free the tag value.
			 */
			lp->cb_tags[lp->if_tag] = cp->tag;
			if (++lp->if_tag == SYM_CONF_MAX_TASK)
				lp->if_tag = 0;
			/*
			 * Make the reselect path invalid,
			 * and uncount this CCB.
			 */
			lp->itlq_tbl[cp->tag] = cpu_to_scr(np->bad_itlq_ba);
			--lp->busy_itlq;
		} else {	/* Untagged */
			/*
			 * Make the reselect path invalid,
			 * and uncount this CCB.
			 */
			lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);
			lp->busy_itl = 0;
		}
		/*
		 * If no JOB active, make the LUN reselect path invalid.
		 */
		if (lp->busy_itlq == 0 && lp->busy_itl == 0)
			lp->head.resel_sa =
				cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun));
	}
	/*
	 * Otherwise, we only accept 1 IO per LUN.
	 * Clear the bit that keeps track of this IO.
	 */
	else
		sym_clr_bit(tp->busy0_map, cp->lun);

	/*
	 * We donnot queue more than 1 ccb per target
	 * with negotiation at any time. If this ccb was
	 * used for negotiation, clear this info in the tcb.
	 */
	if (cp == tp->nego_cp)
		tp->nego_cp = NULL;

#ifdef SYM_CONF_IARB_SUPPORT
	/*
	 * If we just complete the last queued CCB,
	 * clear this info that is no longer relevant.
	 */
	if (cp == np->last_cp)
		np->last_cp = NULL;
#endif

	/*
	 * Unmap user data from DMA map if needed.
	 */
	if (cp->dmamapped) {
		bus_dmamap_unload(np->data_dmat, cp->dmamap);
		cp->dmamapped = 0;
	}

	/*
	 * Make this CCB available.
	 */
	cp->cam_ccb = NULL;
	cp->host_status = HS_IDLE;
	sym_remque(&cp->link_ccbq);
	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
}
6452
/*
 * Allocate a CCB from memory and initialize its fixed part.
 *
 * Returns NULL on allocation failure or when the controller
 * already has as many CCBs as it can queue. The new CCB is
 * hashed by its bus address and chained into the free queue.
 */
static ccb_p sym_alloc_ccb(hcb_p np)
{
	ccb_p cp = NULL;
	int hcode;

	SYM_LOCK_ASSERT(MA_NOTOWNED);

	/*
	 * Prevent from allocating more CCBs than we can
	 * queue to the controller.
	 */
	if (np->actccbs >= SYM_CONF_MAX_START)
		return NULL;

	/*
	 * Allocate memory for this CCB.
	 */
	cp = sym_calloc_dma(sizeof(struct sym_ccb), "CCB");
	if (!cp)
		return NULL;

	/*
	 * Allocate a bounce buffer for sense data.
	 */
	cp->sns_bbuf = sym_calloc_dma(SYM_SNS_BBUF_LEN, "SNS_BBUF");
	if (!cp->sns_bbuf)
		goto out_free;

	/*
	 * Allocate a map for the DMA of user data.
	 */
	if (bus_dmamap_create(np->data_dmat, 0, &cp->dmamap))
		goto out_free;
	/*
	 * Count it.
	 */
	np->actccbs++;

	/*
	 * Initialize the callout.
	 */
	callout_init(&cp->ch, 1);

	/*
	 * Compute the bus address of this ccb.
	 */
	cp->ccb_ba = vtobus(cp);

	/*
	 * Insert this ccb into the hashed list.
	 */
	hcode = CCB_HASH_CODE(cp->ccb_ba);
	cp->link_ccbh = np->ccbh[hcode];
	np->ccbh[hcode] = cp;

	/*
	 * Initialize the start and restart actions.
	 */
	cp->phys.head.go.start   = cpu_to_scr(SCRIPTA_BA (np, idle));
	cp->phys.head.go.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l));

	/*
	 * Initialize some other fields.
	 */
	cp->phys.smsg_ext.addr = cpu_to_scr(HCB_BA(np, msgin[2]));

	/*
	 * Chain into free ccb queue.
	 */
	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);

	return cp;
out_free:
	/* Partial-allocation rollback; sns_bbuf may or may not exist. */
	if (cp->sns_bbuf)
		sym_mfree_dma(cp->sns_bbuf, SYM_SNS_BBUF_LEN, "SNS_BBUF");
	sym_mfree_dma(cp, sizeof(*cp), "CCB");
	return NULL;
}
6534
6535 /*
6536 * Look up a CCB from a DSA value.
6537 */
sym_ccb_from_dsa(hcb_p np,u32 dsa)6538 static ccb_p sym_ccb_from_dsa(hcb_p np, u32 dsa)
6539 {
6540 int hcode;
6541 ccb_p cp;
6542
6543 hcode = CCB_HASH_CODE(dsa);
6544 cp = np->ccbh[hcode];
6545 while (cp) {
6546 if (cp->ccb_ba == dsa)
6547 break;
6548 cp = cp->link_ccbh;
6549 }
6550
6551 return cp;
6552 }
6553
/*
 * Lun control block allocation and initialization.
 *
 * Idempotent: returns the existing LCB if one is already there.
 * For LUNs > 0 the per-target LUN table and pointer array are
 * allocated lazily. On any allocation failure, NULL is returned
 * (already-allocated tables are kept for a later retry).
 */
static lcb_p sym_alloc_lcb (hcb_p np, u_char tn, u_char ln)
{
	tcb_p tp = &np->target[tn];
	lcb_p lp = sym_lp(tp, ln);

	/*
	 * Already done, just return.
	 */
	if (lp)
		return lp;
	/*
	 * Check against some race.
	 */
	assert(!sym_is_bit(tp->busy0_map, ln));

	/*
	 * Allocate the LCB bus address array.
	 * Compute the bus address of this table.
	 * 256 bytes = 64 entries of 4 bytes, preset to the
	 * "bad LUN" SCRIPTS address.
	 */
	if (ln && !tp->luntbl) {
		int i;

		tp->luntbl = sym_calloc_dma(256, "LUNTBL");
		if (!tp->luntbl)
			goto fail;
		for (i = 0 ; i < 64 ; i++)
			tp->luntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa));
		tp->head.luntbl_sa = cpu_to_scr(vtobus(tp->luntbl));
	}

	/*
	 * Allocate the table of pointers for LUN(s) > 0, if needed.
	 */
	if (ln && !tp->lunmp) {
		tp->lunmp = sym_calloc(SYM_CONF_MAX_LUN * sizeof(lcb_p),
				   "LUNMP");
		if (!tp->lunmp)
			goto fail;
	}

	/*
	 * Allocate the lcb.
	 * Make it available to the chip.
	 */
	lp = sym_calloc_dma(sizeof(struct sym_lcb), "LCB");
	if (!lp)
		goto fail;
	if (ln) {
		tp->lunmp[ln] = lp;
		tp->luntbl[ln] = cpu_to_scr(vtobus(lp));
	}
	else {
		tp->lun0p = lp;
		tp->head.lun0_sa = cpu_to_scr(vtobus(lp));
	}

	/*
	 * Let the itl task point to error handling.
	 */
	lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);

	/*
	 * Set the reselect pattern to our default. :)
	 */
	lp->head.resel_sa = cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun));

	/*
	 * Set user capabilities.
	 */
	lp->user_flags = tp->usrflags & (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);

fail:
	return lp;
}
6631
6632 /*
6633 * Allocate LCB resources for tagged command queuing.
6634 */
static void sym_alloc_lcb_tags (hcb_p np, u_char tn, u_char ln)
{
	tcb_p tp = &np->target[tn];
	lcb_p lp = sym_lp(tp, ln);
	int i;

	/*
	 * If LCB not available, try to allocate it.
	 */
	if (!lp && !(lp = sym_alloc_lcb(np, tn, ln)))
		return;

	/*
	 * Allocate the task table and the tag allocation
	 * circular buffer. We want both or none.
	 */
	lp->itlq_tbl = sym_calloc_dma(SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
	if (!lp->itlq_tbl)
		return;
	lp->cb_tags = sym_calloc(SYM_CONF_MAX_TASK, "CB_TAGS");
	if (!lp->cb_tags) {
		/* Could not get both: release the task table as well. */
		sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
		lp->itlq_tbl = NULL;
		return;
	}

	/*
	 * Initialize the task table with invalid entries
	 * (bus address of the 'no task' SCRIPTS handler).
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
		lp->itlq_tbl[i] = cpu_to_scr(np->notask_ba);

	/*
	 * Fill up the tag buffer with tag numbers.
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
		lp->cb_tags[i] = i;

	/*
	 * Make the task table available to SCRIPTS,
	 * And accept tagged commands now.
	 */
	lp->head.itlq_tbl_sa = cpu_to_scr(vtobus(lp->itlq_tbl));
}
6679
6680 /*
6681 * Test the pci bus snoop logic :-(
6682 *
6683 * Has to be called with interrupts disabled.
6684 */
6685 #ifndef SYM_CONF_IOMAPPED
/*
 * Check that chip register accesses are not cached by the bus bridge.
 * Returns 0 on success, 0x10 on failure.
 */
static int sym_regtest (hcb_p np)
{
	register volatile u32 data;
	/*
	 * chip registers may NOT be cached.
	 * write 0xffffffff to a read only register area,
	 * and try to read it back.
	 */
	data = 0xffffffff;
	OUTL_OFF(offsetof(struct sym_reg, nc_dstat), data);
	data = INL_OFF(offsetof(struct sym_reg, nc_dstat));
#if 1
	if (data == 0xffffffff) {
#else
	if ((data & 0xe2f0fffd) != 0x02000080) {
#endif
		printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n",
			(unsigned) data);
		return (0x10);
	}
	return (0);
}
6708 #endif
6709
/*
 * Check that the chip and the host CPU see each other's memory
 * writes coherently, by running a small SCRIPTS program that
 * exchanges values between a host memory cell and a chip register.
 * Returns 0 on success, a non-zero error mask otherwise.
 */
static int sym_snooptest (hcb_p np)
{
	u32	sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc, dstat;
	int	i, err=0;
#ifndef SYM_CONF_IOMAPPED
	err |= sym_regtest (np);
	if (err) return (err);
#endif
restart_test:
	/*
	 * Enable Master Parity Checking as we intend
	 * to enable it for normal operations.
	 */
	OUTB (nc_ctest4, (np->rv_ctest4 & MPEE));
	/*
	 * init
	 */
	pc  = SCRIPTB0_BA (np, snooptest);
	host_wr = 1;
	sym_wr  = 2;
	/*
	 * Set memory and register.
	 */
	np->cache = cpu_to_scr(host_wr);
	OUTL (nc_temp, sym_wr);
	/*
	 * Start script (exchange values)
	 */
	OUTL (nc_dsa, np->hcb_ba);
	OUTL_DSP (pc);
	/*
	 * Wait 'til done (with timeout)
	 */
	for (i=0; i<SYM_SNOOP_TIMEOUT; i++)
		if (INB(nc_istat) & (INTF|SIP|DIP))
			break;
	if (i>=SYM_SNOOP_TIMEOUT) {
		printf ("CACHE TEST FAILED: timeout.\n");
		return (0x20);
	}
	/*
	 * Check for fatal DMA errors.
	 */
	dstat = INB (nc_dstat);
#if 1	/* Band aiding for broken hardwares that fail PCI parity */
	if ((dstat & MDPE) && (np->rv_ctest4 & MPEE)) {
		device_printf(np->device, "PCI DATA PARITY ERROR DETECTED - "
			"DISABLING MASTER DATA PARITY CHECKING.\n");
		np->rv_ctest4 &= ~MPEE;
		goto restart_test;
	}
#endif
	if (dstat & (MDPE|BF|IID)) {
		device_printf(np->device,
		    "CACHE TEST FAILED: DMA error (dstat=0x%02x).\n", dstat);
		return (0x80);
	}
	/*
	 * Save termination position.
	 */
	pc = INL (nc_dsp);
	/*
	 * Read memory and register.
	 */
	host_rd = scr_to_cpu(np->cache);
	sym_rd  = INL (nc_scratcha);
	sym_bk  = INL (nc_temp);

	/*
	 * Check termination position: the script should have
	 * stopped just past the 'snoopend' label.
	 */
	if (pc != SCRIPTB0_BA (np, snoopend)+8) {
		device_printf(np->device,
		    "CACHE TEST FAILED: script execution failed.\n");
		device_printf(np->device, "start=%08lx, pc=%08lx, end=%08lx\n",
		    (u_long)SCRIPTB0_BA(np, snooptest), (u_long)pc,
		    (u_long)SCRIPTB0_BA(np, snoopend) + 8);
		return (0x40);
	}
	/*
	 * Show results. Each failed direction sets its own bit
	 * in the returned error mask.
	 */
	if (host_wr != sym_rd) {
		device_printf(np->device,
		    "CACHE TEST FAILED: host wrote %d, chip read %d.\n",
		    (int)host_wr, (int)sym_rd);
		err |= 1;
	}
	if (host_rd != sym_wr) {
		device_printf(np->device,
		    "CACHE TEST FAILED: chip wrote %d, host read %d.\n",
		    (int)sym_wr, (int)host_rd);
		err |= 2;
	}
	if (sym_bk != sym_wr) {
		device_printf(np->device,
		    "CACHE TEST FAILED: chip wrote %d, read back %d.\n",
		    (int)sym_wr, (int)sym_bk);
		err |= 4;
	}

	return (err);
}
6813
6814 /*
6815 * Determine the chip's clock frequency.
6816 *
6817 * This is essential for the negotiation of the synchronous
6818 * transfer rate.
6819 *
6820 * Note: we have to return the correct value.
6821 * THERE IS NO SAFE DEFAULT VALUE.
6822 *
6823 * Most NCR/SYMBIOS boards are delivered with a 40 Mhz clock.
6824 * 53C860 and 53C875 rev. 1 support fast20 transfers but
6825 * do not have a clock doubler and so are provided with a
6826 * 80 MHz clock. All other fast20 boards incorporate a doubler
6827 * and so should be delivered with a 40 MHz clock.
6828 * The recent fast40 chips (895/896/895A/1010) use a 40 Mhz base
6829 * clock and provide a clock quadrupler (160 Mhz).
6830 */
6831
6832 /*
6833 * Select SCSI clock frequency
6834 */
static void sym_selectclock(hcb_p np, u_char scntl3)
{
	/*
	 * If multiplier not present or not selected, leave here.
	 */
	if (np->multiplier <= 1) {
		OUTB(nc_scntl3, scntl3);
		return;
	}

	if (sym_verbose >= 2)
		device_printf(np->device, "enabling clock multiplier\n");

	OUTB(nc_stest1, DBLEN);	   /* Enable clock multiplier */
	/*
	 * Wait for the LCKFRQ bit to be set if supported by the chip.
	 * Otherwise wait 20 micro-seconds.
	 */
	if (np->features & FE_LCKFRQ) {
		int i = 20;	/* up to 20 polls of 20us each (400us) */
		while (!(INB(nc_stest4) & LCKFRQ) && --i > 0)
			UDELAY (20);
		if (!i)
			device_printf(np->device,
			    "the chip cannot lock the frequency\n");
	} else
		UDELAY (20);
	/*
	 * The SCSI clock must be halted while switching the
	 * clock source; restart it once the multiplier is selected.
	 */
	OUTB(nc_stest3, HSC);		/* Halt the scsi clock		*/
	OUTB(nc_scntl3, scntl3);
	OUTB(nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier	*/
	OUTB(nc_stest3, 0x00);		/* Restart scsi clock 		*/
}
6867
6868 /*
6869 * calculate SCSI clock frequency (in KHz)
6870 */
/*
 * Measure the SCSI clock frequency in KHz using the chip's
 * general purpose timer. 'gen' selects the nominal timer delay
 * (1<<gen * 125us). Returns 0 if the timer never fired.
 */
static unsigned getfreq (hcb_p np, int gen)
{
	unsigned int ms = 0;
	unsigned int f;

	/*
	 * Measure GEN timer delay in order
	 * to calculate SCSI clock frequency
	 *
	 * This code will never execute too
	 * many loop iterations (if DELAY is
	 * reasonably correct). It could get
	 * too low a delay (too high a freq.)
	 * if the CPU is slow executing the
	 * loop for some reason (an NMI, for
	 * example). For this reason we will
	 * if multiple measurements are to be
	 * performed trust the higher delay
	 * (lower frequency returned).
	 */
	OUTW (nc_sien , 0);	/* mask all scsi interrupts */
	(void) INW (nc_sist);	/* clear pending scsi interrupt */
	OUTB (nc_dien , 0);	/* mask all dma interrupts */
	(void) INW (nc_sist);	/* another one, just to be sure :) */
	OUTB (nc_scntl3, 4);	/* set pre-scaler to divide by 3 */
	OUTB (nc_stime1, 0);	/* disable general purpose timer */
	OUTB (nc_stime1, gen);	/* set to nominal delay of 1<<gen * 125us */
	while (!(INW(nc_sist) & GEN) && ms++ < 100000)
		UDELAY (1000);	/* count ms */
	OUTB (nc_stime1, 0);	/* disable general purpose timer */
	/*
	 * set prescaler to divide by whatever 0 means
	 * 0 ought to choose divide by 2, but appears
	 * to set divide by 3.5 mode in my 53c810 ...
	 */
	OUTB (nc_scntl3, 0);

	/*
	 * adjust for prescaler, and convert into KHz
	 */
	f = ms ? ((1 << gen) * 4340) / ms : 0;

	if (sym_verbose >= 2)
		device_printf(np->device, "Delay (GEN=%d): %u msec, %u KHz\n",
		    gen, ms, f);

	return f;
}
6919
6920 static unsigned sym_getfreq (hcb_p np)
6921 {
6922 u_int f1, f2;
6923 int gen = 11;
6924
6925 (void) getfreq (np, gen); /* throw away first result */
6926 f1 = getfreq (np, gen);
6927 f2 = getfreq (np, gen);
6928 if (f1 > f2) f1 = f2; /* trust lower result */
6929 return f1;
6930 }
6931
6932 /*
6933 * Get/probe chip SCSI clock frequency
6934 */
static void sym_getclock (hcb_p np, int mult)
{
	unsigned char scntl3 = np->sv_scntl3;
	unsigned char stest1 = np->sv_stest1;
	unsigned f1;

	/*
	 * For the C10 core, assume 40 MHz.
	 */
	if (np->features & FE_C10) {
		np->multiplier = mult;
		np->clock_khz = 40000 * mult;
		return;
	}

	np->multiplier = 1;
	f1 = 40000;
	/*
	 * True with 875/895/896/895A with clock multiplier selected
	 */
	if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) {
		if (sym_verbose >= 2)
			device_printf(np->device, "clock multiplier found\n");
		np->multiplier	= mult;
	}

	/*
	 * If multiplier not found or scntl3 not 7,5,3,
	 * reset chip and get frequency from general purpose timer.
	 * Otherwise trust scntl3 BIOS setting.
	 */
	if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) {
		OUTB (nc_stest1, 0);		/* make sure doubler is OFF */
		f1 = sym_getfreq (np);

		if (sym_verbose)
			device_printf(np->device, "chip clock is %uKHz\n", f1);

		/* Round the measurement to the nearest standard value. */
		if	(f1 <	45000)		f1 =  40000;
		else if (f1 <	55000)		f1 =  50000;
		else				f1 =  80000;

		/*
		 * 80 MHz parts have no multiplier; anything slower
		 * that claims one must really have it.
		 */
		if (f1 < 80000 && mult > 1) {
			if (sym_verbose >= 2)
				device_printf(np->device,
				    "clock multiplier assumed\n");
			np->multiplier	= mult;
		}
	} else {
		if	((scntl3 & 7) == 3)	f1 =  40000;
		else if	((scntl3 & 7) == 5)	f1 =  80000;
		else 				f1 = 160000;

		/*
		 * The scntl3 setting reflects the multiplied clock;
		 * divide here since f1 gets re-multiplied below.
		 */
		f1 /= np->multiplier;
	}

	/*
	 * Compute controller synchronous parameters.
	 */
	f1		*= np->multiplier;
	np->clock_khz	= f1;
}
6997
6998 /*
6999 * Get/probe PCI clock frequency
7000 */
7001 static int sym_getpciclock (hcb_p np)
7002 {
7003 int f = 0;
7004
7005 /*
7006 * For the C1010-33, this doesn't work.
7007 * For the C1010-66, this will be tested when I'll have
7008 * such a beast to play with.
7009 */
7010 if (!(np->features & FE_C10)) {
7011 OUTB (nc_stest1, SCLK); /* Use the PCI clock as SCSI clock */
7012 f = (int) sym_getfreq (np);
7013 OUTB (nc_stest1, 0);
7014 }
7015 np->pciclk_khz = f;
7016
7017 return f;
7018 }
7019
7020 /*============= DRIVER ACTION/COMPLETION ====================*/
7021
7022 /*
7023 * Print something that tells about extended errors.
7024 */
7025 static void sym_print_xerr(ccb_p cp, int x_status)
7026 {
7027 if (x_status & XE_PARITY_ERR) {
7028 PRINT_ADDR(cp);
7029 printf ("unrecovered SCSI parity error.\n");
7030 }
7031 if (x_status & XE_EXTRA_DATA) {
7032 PRINT_ADDR(cp);
7033 printf ("extraneous data discarded.\n");
7034 }
7035 if (x_status & XE_BAD_PHASE) {
7036 PRINT_ADDR(cp);
7037 printf ("illegal scsi phase (4/5).\n");
7038 }
7039 if (x_status & XE_SODL_UNRUN) {
7040 PRINT_ADDR(cp);
7041 printf ("ODD transfer in DATA OUT phase.\n");
7042 }
7043 if (x_status & XE_SWIDE_OVRUN) {
7044 PRINT_ADDR(cp);
7045 printf ("ODD transfer in DATA IN phase.\n");
7046 }
7047 }
7048
7049 /*
7050 * Choose the more appropriate CAM status if
7051 * the IO encountered an extended error.
7052 */
7053 static int sym_xerr_cam_status(int cam_status, int x_status)
7054 {
7055 if (x_status) {
7056 if (x_status & XE_PARITY_ERR)
7057 cam_status = CAM_UNCOR_PARITY;
7058 else if (x_status &(XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN))
7059 cam_status = CAM_DATA_RUN_ERR;
7060 else if (x_status & XE_BAD_PHASE)
7061 cam_status = CAM_REQ_CMP_ERR;
7062 else
7063 cam_status = CAM_REQ_CMP_ERR;
7064 }
7065 return cam_status;
7066 }
7067
7068 /*
 * Complete execution of a SCSI command with extended
7070 * error, SCSI status error, or having been auto-sensed.
7071 *
7072 * The SCRIPTS processor is not running there, so we
7073 * can safely access IO registers and remove JOBs from
7074 * the START queue.
7075 * SCRATCHA is assumed to have been loaded with STARTPOS
7076 * before the SCRIPTS called the C code.
7077 */
static void sym_complete_error (hcb_p np, ccb_p cp)
{
	struct ccb_scsiio *csio;
	u_int cam_status;
	int i, sense_returned;

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 * Paranoid check. :)
	 */
	if (!cp || !cp->cam_ccb)
		return;

	if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_RESULT)) {
		printf ("CCB=%lx STAT=%x/%x/%x DEV=%d/%d\n", (unsigned long)cp,
			cp->host_status, cp->ssss_status, cp->host_flags,
			cp->target, cp->lun);
		MDELAY(100);
	}

	/*
	 * Get CAM command pointer.
	 */
	csio = &cp->cam_ccb->csio;

	/*
	 * Check for extended errors.
	 */
	if (cp->xerr_status) {
		if (sym_verbose)
			sym_print_xerr(cp, cp->xerr_status);
		if (cp->host_status == HS_COMPLETE)
			cp->host_status = HS_COMP_ERR;
	}

	/*
	 * Calculate the residual.
	 */
	csio->sense_resid = 0;
	csio->resid = sym_compute_residual(np, cp);

	if (!SYM_CONF_RESIDUAL_SUPPORT) {/* If user does not want residuals */
		csio->resid  = 0;	/* throw them away. :)		   */
		cp->sv_resid = 0;
	}

	if (cp->host_flags & HF_SENSE) {		/* Auto sense     */
		csio->scsi_status = cp->sv_scsi_status;	/* Restore status */
		csio->sense_resid = csio->resid;	/* Swap residuals */
		csio->resid = cp->sv_resid;
		cp->sv_resid = 0;
		if (sym_verbose && cp->sv_xerr_status)
			sym_print_xerr(cp, cp->sv_xerr_status);
		/*
		 * Sense data is valid only if the auto-sense request
		 * itself completed cleanly.
		 */
		if (cp->host_status == HS_COMPLETE &&
		    cp->ssss_status == S_GOOD &&
		    cp->xerr_status == 0) {
			cam_status = sym_xerr_cam_status(CAM_SCSI_STATUS_ERROR,
							 cp->sv_xerr_status);
			cam_status |= CAM_AUTOSNS_VALID;
			/*
			 * Bounce back the sense data to user and
			 * fix the residual.
			 */
			bzero(&csio->sense_data, sizeof(csio->sense_data));
			sense_returned = SYM_SNS_BBUF_LEN - csio->sense_resid;
			if (sense_returned < csio->sense_len)
				csio->sense_resid = csio->sense_len -
				    sense_returned;
			else
				csio->sense_resid = 0;
			memcpy(&csio->sense_data, cp->sns_bbuf,
			    MIN(csio->sense_len, sense_returned));
#if 0
			/*
			 * If the device reports a UNIT ATTENTION condition
			 * due to a RESET condition, we should consider all
			 * disconnect CCBs for this unit as aborted.
			 */
			if (1) {
				u_char *p;
				p  = (u_char *) csio->sense_data;
				if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29)
					sym_clear_tasks(np, CAM_REQ_ABORTED,
							cp->target,cp->lun, -1);
			}
#endif
		}
		else
			cam_status = CAM_AUTOSENSE_FAIL;
	}
	else if (cp->host_status == HS_COMPLETE) {	/* Bad SCSI status */
		csio->scsi_status = cp->ssss_status;
		cam_status = CAM_SCSI_STATUS_ERROR;
	}
	else if (cp->host_status == HS_SEL_TIMEOUT)	/* Selection timeout */
		cam_status = CAM_SEL_TIMEOUT;
	else if (cp->host_status == HS_UNEXPECTED)	/* Unexpected BUS FREE*/
		cam_status = CAM_UNEXP_BUSFREE;
	else {						/* Extended error */
		if (sym_verbose) {
			PRINT_ADDR(cp);
			printf ("COMMAND FAILED (%x %x %x).\n",
				cp->host_status, cp->ssss_status,
				cp->xerr_status);
		}
		csio->scsi_status = cp->ssss_status;
		/*
		 * Set the most appropriate value for CAM status.
		 */
		cam_status = sym_xerr_cam_status(CAM_REQ_CMP_ERR,
						 cp->xerr_status);
	}

	/*
	 * Dequeue all queued CCBs for that device
	 * not yet started by SCRIPTS.
	 * SCRATCHA holds STARTPOS (see function header comment);
	 * convert it to an index into the start queue.
	 */
	i = (INL (nc_scratcha) - np->squeue_ba) / 4;
	(void) sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);

	/*
	 * Restart the SCRIPTS processor.
	 */
	OUTL_DSP (SCRIPTA_BA (np, start));

	/*
	 * Synchronize DMA map if needed.
	 */
	if (cp->dmamapped) {
		bus_dmamap_sync(np->data_dmat, cp->dmamap,
			(cp->dmamapped == SYM_DMA_READ ?
				BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
	}
	/*
	 * Add this one to the COMP queue.
	 * Complete all those commands with either error
	 * or requeue condition.
	 */
	sym_set_cam_status((union ccb *) csio, cam_status);
	sym_remque(&cp->link_ccbq);
	sym_insque_head(&cp->link_ccbq, &np->comp_ccbq);
	sym_flush_comp_queue(np, 0);
}
7222
7223 /*
7224 * Complete execution of a successful SCSI command.
7225 *
7226 * Only successful commands go to the DONE queue,
7227 * since we need to have the SCRIPTS processor
7228 * stopped on any error condition.
7229 * The SCRIPTS processor is running while we are
7230 * completing successful commands.
7231 */
static void sym_complete_ok (hcb_p np, ccb_p cp)
{
	struct ccb_scsiio *csio;
	tcb_p tp;
	lcb_p lp;

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 * Paranoid check. :)
	 */
	if (!cp || !cp->cam_ccb)
		return;
	assert (cp->host_status == HS_COMPLETE);

	/*
	 * Get command, target and lun pointers.
	 */
	csio = &cp->cam_ccb->csio;
	tp = &np->target[cp->target];
	lp = sym_lp(tp, cp->lun);

	/*
	 * Assume device discovered on first success.
	 */
	if (!lp)
		sym_set_bit(tp->lun_map, cp->lun);

	/*
	 * If all data have been transferred, given than no
	 * extended error did occur, there is no residual.
	 * (lastp == goalp means the data script ran to its end)
	 */
	csio->resid = 0;
	if (cp->phys.head.lastp != cp->phys.head.goalp)
		csio->resid = sym_compute_residual(np, cp);

	/*
	 * Wrong transfer residuals may be worse than just always
	 * returning zero. User can disable this feature from
	 * sym_conf.h. Residual support is enabled by default.
	 */
	if (!SYM_CONF_RESIDUAL_SUPPORT)
		csio->resid  = 0;

	/*
	 * Synchronize DMA map if needed.
	 */
	if (cp->dmamapped) {
		bus_dmamap_sync(np->data_dmat, cp->dmamap,
			(cp->dmamapped == SYM_DMA_READ ?
				BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
	}
	/*
	 * Set status and complete the command.
	 */
	csio->scsi_status = cp->ssss_status;
	sym_set_cam_status((union ccb *) csio, CAM_REQ_CMP);
	sym_xpt_done(np, (union ccb *) csio, cp);
	sym_free_ccb(np, cp);
}
7292
7293 /*
7294 * Our callout handler
7295 */
7296 static void sym_callout(void *arg)
7297 {
7298 union ccb *ccb = (union ccb *) arg;
7299 hcb_p np = ccb->ccb_h.sym_hcb_ptr;
7300
7301 /*
7302 * Check that the CAM CCB is still queued.
7303 */
7304 if (!np)
7305 return;
7306
7307 SYM_LOCK();
7308
7309 switch(ccb->ccb_h.func_code) {
7310 case XPT_SCSI_IO:
7311 (void) sym_abort_scsiio(np, ccb, 1);
7312 break;
7313 default:
7314 break;
7315 }
7316
7317 SYM_UNLOCK();
7318 }
7319
7320 /*
7321 * Abort an SCSI IO.
7322 */
static int sym_abort_scsiio(hcb_p np, union ccb *ccb, int timed_out)
{
	ccb_p cp;
	SYM_QUEHEAD *qp;

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 * Look up our CCB control block among the busy ones.
	 */
	cp = NULL;
	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
		ccb_p cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		if (cp2->cam_ccb == ccb) {
			cp = cp2;
			break;
		}
	}
	/* Not found, or its data is still being DMA-mapped: give up. */
	if (!cp || cp->host_status == HS_WAIT)
		return -1;

	/*
	 * If a previous abort didn't succeed in time,
	 * perform a BUS reset.
	 */
	if (cp->to_abort) {
		sym_reset_scsi_bus(np, 1);
		return 0;
	}

	/*
	 * Mark the CCB for abort and allow time for it to complete
	 * (the callout below re-enters this function after 10s).
	 */
	cp->to_abort = timed_out ? 2 : 1;
	callout_reset(&cp->ch, 10 * hz, sym_callout, (caddr_t) ccb);

	/*
	 * Tell the SCRIPTS processor to stop and synchronize with us.
	 */
	np->istat_sem = SEM;
	OUTB (nc_istat, SIGP|SEM);
	return 0;
}
7366
7367 /*
7368 * Reset a SCSI device (all LUNs of a target).
7369 */
7370 static void sym_reset_dev(hcb_p np, union ccb *ccb)
7371 {
7372 tcb_p tp;
7373 struct ccb_hdr *ccb_h = &ccb->ccb_h;
7374
7375 SYM_LOCK_ASSERT(MA_OWNED);
7376
7377 if (ccb_h->target_id == np->myaddr ||
7378 ccb_h->target_id >= SYM_CONF_MAX_TARGET ||
7379 ccb_h->target_lun >= SYM_CONF_MAX_LUN) {
7380 sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE);
7381 return;
7382 }
7383
7384 tp = &np->target[ccb_h->target_id];
7385
7386 tp->to_reset = 1;
7387 sym_xpt_done2(np, ccb, CAM_REQ_CMP);
7388
7389 np->istat_sem = SEM;
7390 OUTB (nc_istat, SIGP|SEM);
7391 }
7392
7393 /*
7394 * SIM action entry point.
7395 */
static void sym_action(struct cam_sim *sim, union ccb *ccb)
{
	hcb_p	np;
	tcb_p	tp;
	lcb_p	lp;
	ccb_p	cp;
	int 	tmp;
	u_char	idmsg, *msgptr;
	u_int   msglen;
	struct	ccb_scsiio *csio;
	struct	ccb_hdr  *ccb_h;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("sym_action\n"));

	/*
	 * Retrieve our controller data structure.
	 */
	np = (hcb_p) cam_sim_softc(sim);

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 * The common case is SCSI IO.
	 * We deal with other ones elsewhere.
	 */
	if (ccb->ccb_h.func_code != XPT_SCSI_IO) {
		sym_action2(sim, ccb);
		return;
	}
	csio  = &ccb->csio;
	ccb_h = &csio->ccb_h;

	/*
	 * Work around races: drop requests that are no
	 * longer in progress.
	 */
	if ((ccb_h->status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		xpt_done(ccb);
		return;
	}

	/*
	 * Minimal checkings, so that we will not
	 * go outside our tables.
	 */
	if (ccb_h->target_id   == np->myaddr ||
	    ccb_h->target_id   >= SYM_CONF_MAX_TARGET ||
	    ccb_h->target_lun  >= SYM_CONF_MAX_LUN) {
		sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE);
		return;
	}

	/*
	 * Retrieve the target and lun descriptors.
	 */
	tp = &np->target[ccb_h->target_id];
	lp = sym_lp(tp, ccb_h->target_lun);

	/*
	 * Complete the 1st INQUIRY command with error
	 * condition if the device is flagged NOSCAN
	 * at BOOT in the NVRAM. This may speed up
	 * the boot and maintain coherency with BIOS
	 * device numbering. Clearing the flag allows
	 * user to rescan skipped devices later.
	 * We also return error for devices not flagged
	 * for SCAN LUNS in the NVRAM since some mono-lun
	 * devices behave badly when asked for some non
	 * zero LUN. Btw, this is an absolute hack.:-)
	 * (0x12 is the INQUIRY opcode.)
	 */
	if (!(ccb_h->flags & CAM_CDB_PHYS) &&
	    (0x12 == ((ccb_h->flags & CAM_CDB_POINTER) ?
		  csio->cdb_io.cdb_ptr[0] : csio->cdb_io.cdb_bytes[0]))) {
		if ((tp->usrflags & SYM_SCAN_BOOT_DISABLED) ||
		    ((tp->usrflags & SYM_SCAN_LUNS_DISABLED) &&
		     ccb_h->target_lun != 0)) {
			tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED;
			sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE);
			return;
		}
	}

	/*
	 * Get a control block for this IO.
	 */
	tmp = ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0);
	cp = sym_get_ccb(np, ccb_h->target_id, ccb_h->target_lun, tmp);
	if (!cp) {
		sym_xpt_done2(np, ccb, CAM_RESRC_UNAVAIL);
		return;
	}

	/*
	 * Keep track of the IO in our CCB.
	 */
	cp->cam_ccb = ccb;

	/*
	 * Build the IDENTIFY message.
	 * Bit 0x40 grants the disconnect privilege.
	 */
	idmsg = M_IDENTIFY | cp->lun;
	if (cp->tag != NO_TAG || (lp && (lp->current_flags & SYM_DISC_ENABLED)))
		idmsg |= 0x40;

	msgptr = cp->scsi_smsg;
	msglen = 0;
	msgptr[msglen++] = idmsg;

	/*
	 * Build the tag message if present.
	 */
	if (cp->tag != NO_TAG) {
		u_char order = csio->tag_action;

		switch(order) {
		case M_ORDERED_TAG:
			break;
		case M_HEAD_TAG:
			break;
		default:
			order = M_SIMPLE_TAG;
		}
		msgptr[msglen++] = order;

		/*
		 * For less than 128 tags, actual tags are numbered
		 * 1,3,5,..2*MAXTAGS+1, since we may have to deal
		 * with devices that have problems with #TAG 0 or too
		 * great #TAG numbers. For more tags (up to 256),
		 * we use directly our tag number.
		 */
#if SYM_CONF_MAX_TASK > (512/4)
		msgptr[msglen++] = cp->tag;
#else
		msgptr[msglen++] = (cp->tag << 1) + 1;
#endif
	}

	/*
	 * Build a negotiation message if needed.
	 * (nego_status is filled by sym_prepare_nego())
	 */
	cp->nego_status = 0;
	if (tp->tinfo.current.width   != tp->tinfo.goal.width  ||
	    tp->tinfo.current.period  != tp->tinfo.goal.period ||
	    tp->tinfo.current.offset  != tp->tinfo.goal.offset ||
	    tp->tinfo.current.options != tp->tinfo.goal.options) {
		if (!tp->nego_cp && lp)
			msglen += sym_prepare_nego(np, cp, 0, msgptr + msglen);
	}

	/*
	 * Fill in our ccb
	 */

	/*
	 * Startqueue
	 */
	cp->phys.head.go.start   = cpu_to_scr(SCRIPTA_BA (np, select));
	cp->phys.head.go.restart = cpu_to_scr(SCRIPTA_BA (np, resel_dsa));

	/*
	 * select
	 */
	cp->phys.select.sel_id		= cp->target;
	cp->phys.select.sel_scntl3	= tp->head.wval;
	cp->phys.select.sel_sxfer	= tp->head.sval;
	cp->phys.select.sel_scntl4	= tp->head.uval;

	/*
	 * message
	 */
	cp->phys.smsg.addr	= cpu_to_scr(CCB_BA (cp, scsi_smsg));
	cp->phys.smsg.size	= cpu_to_scr(msglen);

	/*
	 * command
	 */
	if (sym_setup_cdb(np, csio, cp) < 0) {
		sym_xpt_done(np, ccb, cp);
		sym_free_ccb(np, cp);
		return;
	}

	/*
	 * status
	 */
#if	0	/* Provision */
	cp->actualquirks	= tp->quirks;
#endif
	cp->actualquirks	= SYM_QUIRK_AUTOSAVE;
	cp->host_status		= cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
	cp->ssss_status		= S_ILLEGAL;
	cp->xerr_status		= 0;
	cp->host_flags		= 0;
	cp->extra_bytes		= 0;

	/*
	 * extreme data pointer.
	 * shall be positive, so -1 is lower than lowest.:)
	 */
	cp->ext_sg  = -1;
	cp->ext_ofs = 0;

	/*
	 * Build the data descriptor block
	 * and start the IO.
	 */
	sym_setup_data_and_start(np, csio, cp);
}
7605
7606 /*
7607 * Setup buffers and pointers that address the CDB.
7608 * I bet, physical CDBs will never be used on the planet,
7609 * since they can be bounced without significant overhead.
7610 */
static int sym_setup_cdb(hcb_p np, struct ccb_scsiio *csio, ccb_p cp)
{
	struct ccb_hdr *ccb_h;
	u32	cmd_ba;
	int	cmd_len;

	SYM_LOCK_ASSERT(MA_OWNED);

	ccb_h = &csio->ccb_h;

	/*
	 * CDB is 16 bytes max (the size of our bounce buffer).
	 */
	if (csio->cdb_len > sizeof(cp->cdb_buf)) {
		sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
		return -1;
	}
	cmd_len = csio->cdb_len;

	if (ccb_h->flags & CAM_CDB_POINTER) {
		/* CDB is a pointer */
		if (!(ccb_h->flags & CAM_CDB_PHYS)) {
			/* CDB pointer is virtual: bounce it into our buffer */
			memcpy(cp->cdb_buf, csio->cdb_io.cdb_ptr, cmd_len);
			cmd_ba = CCB_BA (cp, cdb_buf[0]);
		} else {
			/* CDB pointer is physical: not supported, rejected */
#if 0
			cmd_ba = ((u32)csio->cdb_io.cdb_ptr) & 0xffffffff;
#else
			sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
			return -1;
#endif
		}
	} else {
		/* CDB is in the CAM ccb (buffer) */
		memcpy(cp->cdb_buf, csio->cdb_io.cdb_bytes, cmd_len);
		cmd_ba = CCB_BA (cp, cdb_buf[0]);
	}

	/* Hand the CDB address and length to the SCRIPTS data block. */
	cp->phys.cmd.addr	= cpu_to_scr(cmd_ba);
	cp->phys.cmd.size	= cpu_to_scr(cmd_len);

	return 0;
}
7656
7657 /*
7658 * Set up data pointers used by SCRIPTS.
7659 */
static void __inline
sym_setup_data_pointers(hcb_p np, ccb_p cp, int dir)
{
	u32 lastp, goalp;

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 * No segments means no data.
	 */
	if (!cp->segments)
		dir = CAM_DIR_NONE;

	/*
	 * Set the data pointer.
	 * 'goalp' is the script address just past the data phase;
	 * 'lastp' points back over the scatter entries still to do
	 * (each SCRIPTS MOVE entry is 2 words of 4 bytes).
	 */
	switch(dir) {
	case CAM_DIR_OUT:
		goalp = SCRIPTA_BA (np, data_out2) + 8;
		lastp = goalp - 8 - (cp->segments * (2*4));
		break;
	case CAM_DIR_IN:
		cp->host_flags |= HF_DATA_IN;
		goalp = SCRIPTA_BA (np, data_in2) + 8;
		lastp = goalp - 8 - (cp->segments * (2*4));
		break;
	case CAM_DIR_NONE:
	default:
		lastp = goalp = SCRIPTB_BA (np, no_data);
		break;
	}

	cp->phys.head.lastp = cpu_to_scr(lastp);
	cp->phys.head.goalp = cpu_to_scr(goalp);
	cp->phys.head.savep = cpu_to_scr(lastp);
	cp->startp	    = cp->phys.head.savep;
}
7697
7698 /*
7699 * Call back routine for the DMA map service.
7700 * If bounce buffers are used (why ?), we may sleep and then
7701 * be called there in another context.
7702 */
static void
sym_execute_ccb(void *arg, bus_dma_segment_t *psegs, int nsegs, int error)
{
	ccb_p	cp;
	hcb_p	np;
	union	ccb *ccb;

	cp  = (ccb_p) arg;
	ccb = cp->cam_ccb;
	np  = (hcb_p) cp->arg;

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 * Deal with weird races: the CCB may already have been
	 * completed or aborted while the mapping was in progress.
	 */
	if (sym_get_cam_status(ccb) != CAM_REQ_INPROG)
		goto out_abort;

	/*
	 * Deal with weird errors from the DMA map service.
	 */
	if (error) {
		cp->dmamapped = 0;
		sym_set_cam_status(cp->cam_ccb, CAM_REQ_ABORTED);
		goto out_abort;
	}

	/*
	 * Build the data descriptor for the chip.
	 */
	if (nsegs) {
		int retv;
		/* 896 rev 1 requires to be careful about boundaries */
		if (np->device_id == PCI_ID_SYM53C896 && np->revision_id <= 1)
			retv = sym_scatter_sg_physical(np, cp, psegs, nsegs);
		else
			retv = sym_fast_scatter_sg_physical(np,cp, psegs,nsegs);
		if (retv < 0) {
			sym_set_cam_status(cp->cam_ccb, CAM_REQ_TOO_BIG);
			goto out_abort;
		}
	}

	/*
	 * Synchronize the DMA map only if we have
	 * actually mapped the data.
	 */
	if (cp->dmamapped) {
		bus_dmamap_sync(np->data_dmat, cp->dmamap,
			(cp->dmamapped == SYM_DMA_READ ?
				BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
	}

	/*
	 * Set host status to busy state.
	 * May have been set back to HS_WAIT to avoid a race.
	 */
	cp->host_status	= cp->nego_status ? HS_NEGOTIATE : HS_BUSY;

	/*
	 * Set data pointers.
	 */
	sym_setup_data_pointers(np, cp, (ccb->ccb_h.flags & CAM_DIR_MASK));

	/*
	 * Enqueue this IO in our pending queue.
	 */
	sym_enqueue_cam_ccb(cp);

	/*
	 * When `#ifed 1', the code below makes the driver
	 * panic on the first attempt to write to a SCSI device.
	 * It is the first test we want to do after a driver
	 * change that does not seem obviously safe. :)
	 */
#if 0
	switch (cp->cdb_buf[0]) {
	case 0x0A: case 0x2A: case 0xAA:
		panic("XXXXXXXXXXXXX WRITE NOT YET ALLOWED XXXXXXXXXXXXXX\n");
		MDELAY(10000);
		break;
	default:
		break;
	}
#endif
	/*
	 * Activate this job.
	 */
	sym_put_start_queue(np, cp);
	return;
out_abort:
	/* Complete the CCB with the error status set above. */
	sym_xpt_done(np, ccb, cp);
	sym_free_ccb(np, cp);
}
7798
7799 /*
7800 * How complex it gets to deal with the data in CAM.
7801 * The Bus Dma stuff makes things still more complex.
7802 */
static void
sym_setup_data_and_start(hcb_p np, struct ccb_scsiio *csio, ccb_p cp)
{
	struct ccb_hdr *ccb_h;
	int dir, retv;

	SYM_LOCK_ASSERT(MA_OWNED);

	ccb_h = &csio->ccb_h;

	/*
	 * Now deal with the data.
	 */
	cp->data_len = csio->dxfer_len;
	cp->arg      = np;

	/*
	 * No direction means no data: start the IO right away.
	 */
	dir = (ccb_h->flags & CAM_DIR_MASK);
	if (dir == CAM_DIR_NONE) {
		sym_execute_ccb(cp, NULL, 0, 0);
		return;
	}

	cp->dmamapped = (dir == CAM_DIR_IN) ?  SYM_DMA_READ : SYM_DMA_WRITE;
	retv = bus_dmamap_load_ccb(np->data_dmat, cp->dmamap,
			       (union ccb *)csio, sym_execute_ccb, cp, 0);
	/*
	 * EINPROGRESS means the callback will run later; mark the
	 * CCB as waiting and freeze the SIM queue meanwhile.
	 */
	if (retv == EINPROGRESS) {
		cp->host_status	= HS_WAIT;
		xpt_freeze_simq(np->sim, 1);
		csio->ccb_h.status |= CAM_RELEASE_SIMQ;
	}
}
7837
7838 /*
7839 * Move the scatter list to our data block.
7840 */
7841 static int
7842 sym_fast_scatter_sg_physical(hcb_p np, ccb_p cp,
7843 bus_dma_segment_t *psegs, int nsegs)
7844 {
7845 struct sym_tblmove *data;
7846 bus_dma_segment_t *psegs2;
7847
7848 SYM_LOCK_ASSERT(MA_OWNED);
7849
7850 if (nsegs > SYM_CONF_MAX_SG)
7851 return -1;
7852
7853 data = &cp->phys.data[SYM_CONF_MAX_SG-1];
7854 psegs2 = &psegs[nsegs-1];
7855 cp->segments = nsegs;
7856
7857 while (1) {
7858 data->addr = cpu_to_scr(psegs2->ds_addr);
7859 data->size = cpu_to_scr(psegs2->ds_len);
7860 if (DEBUG_FLAGS & DEBUG_SCATTER) {
7861 device_printf(np->device,
7862 "scatter: paddr=%lx len=%ld\n",
7863 (long)psegs2->ds_addr, (long)psegs2->ds_len);
7864 }
7865 if (psegs2 != psegs) {
7866 --data;
7867 --psegs2;
7868 continue;
7869 }
7870 break;
7871 }
7872 return 0;
7873 }
7874
7875 /*
7876 * Scatter a SG list with physical addresses into bus addressable chunks.
7877 */
static int
sym_scatter_sg_physical(hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs)
{
	u_long ps, pe, pn;
	u_long k;
	int s, t;

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 * Walk the input segments backwards, filling our table from its
	 * tail (slot SYM_CONF_MAX_SG-1) towards its head, and split any
	 * segment that crosses a SYM_CONF_DMA_BOUNDARY boundary into
	 * several table entries.
	 *
	 * s        : next free table slot, counting down.
	 * t        : current input segment, counting down.
	 * [ps, pe) : physical range of the input still to be consumed.
	 */
	s = SYM_CONF_MAX_SG - 1;
	t = nsegs - 1;
	ps = psegs[t].ds_addr;
	pe = ps + psegs[t].ds_len;

	while (s >= 0) {
		/*
		 * pn is the largest boundary multiple strictly below pe,
		 * so the chunk [pn, pe) never crosses a DMA boundary.
		 * Clamp to ps when the remainder fits in one chunk.
		 */
		pn = rounddown2(pe - 1, SYM_CONF_DMA_BOUNDARY);
		if (pn <= ps)
			pn = ps;
		k = pe - pn;
		if (DEBUG_FLAGS & DEBUG_SCATTER) {
			device_printf(np->device,
			    "scatter: paddr=%lx len=%ld\n", pn, k);
		}
		cp->phys.data[s].addr = cpu_to_scr(pn);
		cp->phys.data[s].size = cpu_to_scr(k);
		--s;
		if (pn == ps) {
			/* Segment fully consumed: step to the previous one. */
			if (--t < 0)
				break;
			ps = psegs[t].ds_addr;
			pe = ps + psegs[t].ds_len;
		}
		else
			pe = pn;	/* More chunks left in this segment. */
	}

	/* Number of table slots actually used. */
	cp->segments = SYM_CONF_MAX_SG - 1 - s;

	/* Fail if input segments remain after the table filled up. */
	return t >= 0 ? -1 : 0;
}
7918
7919 /*
7920 * SIM action for non performance critical stuff.
7921 */
static void sym_action2(struct cam_sim *sim, union ccb *ccb)
{
	union ccb *abort_ccb;
	struct ccb_hdr *ccb_h;
	struct ccb_pathinq *cpi;
	struct ccb_trans_settings *cts;
	struct sym_trans *tip;
	hcb_p np;
	tcb_p tp;
	lcb_p lp;
	u_char dflags;

	/*
	 * Retrieve our controller data structure.
	 */
	np = (hcb_p) cam_sim_softc(sim);

	SYM_LOCK_ASSERT(MA_OWNED);

	ccb_h = &ccb->ccb_h;

	switch (ccb_h->func_code) {
	case XPT_SET_TRAN_SETTINGS:
		cts = &ccb->cts;
		tp = &np->target[ccb_h->target_id];

		/*
		 * Update SPI transport settings in TARGET control block.
		 * Update SCSI device settings in LUN control block.
		 */
		lp = sym_lp(tp, ccb_h->target_lun);
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			sym_update_trans(np, &tp->tinfo.goal, cts);
			if (lp)
				sym_update_dflags(np, &lp->current_flags, cts);
		}
		if (cts->type == CTS_TYPE_USER_SETTINGS) {
			sym_update_trans(np, &tp->tinfo.user, cts);
			if (lp)
				sym_update_dflags(np, &lp->user_flags, cts);
		}

		sym_xpt_done2(np, ccb, CAM_REQ_CMP);
		break;
	case XPT_GET_TRAN_SETTINGS:
		cts = &ccb->cts;
		tp = &np->target[ccb_h->target_id];
		lp = sym_lp(tp, ccb_h->target_lun);

#define	cts__scsi (&cts->proto_specific.scsi)
#define	cts__spi  (&cts->xport_specific.spi)
		/* Pick the settings set (and device flags) being queried. */
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			tip = &tp->tinfo.current;
			dflags = lp ? lp->current_flags : 0;
		}
		else {
			tip = &tp->tinfo.user;
			dflags = lp ? lp->user_flags : tp->usrflags;
		}

		cts->protocol = PROTO_SCSI;
		cts->transport = XPORT_SPI;
		cts->protocol_version = tip->scsi_version;
		cts->transport_version = tip->spi_version;

		cts__spi->sync_period = tip->period;
		cts__spi->sync_offset = tip->offset;
		cts__spi->bus_width = tip->width;
		cts__spi->ppr_options = tip->options;

		cts__spi->valid = CTS_SPI_VALID_SYNC_RATE
		                | CTS_SPI_VALID_SYNC_OFFSET
		                | CTS_SPI_VALID_BUS_WIDTH
		                | CTS_SPI_VALID_PPR_OPTIONS;

		/* Report disconnect and tagged-queuing permission. */
		cts__spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
		if (dflags & SYM_DISC_ENABLED)
			cts__spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
		cts__spi->valid |= CTS_SPI_VALID_DISC;

		cts__scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
		if (dflags & SYM_TAGS_ENABLED)
			cts__scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
		cts__scsi->valid |= CTS_SCSI_VALID_TQ;
#undef	cts__spi
#undef	cts__scsi
		sym_xpt_done2(np, ccb, CAM_REQ_CMP);
		break;
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		sym_xpt_done2(np, ccb, CAM_REQ_CMP);
		break;
	case XPT_PATH_INQ:
		cpi = &ccb->cpi;
		cpi->version_num = 1;
		cpi->hba_inquiry = PI_MDP_ABLE|PI_SDTR_ABLE|PI_TAG_ABLE;
		if ((np->features & FE_WIDE) != 0)
			cpi->hba_inquiry |= PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_UNMAPPED;
		if (np->usrflags & SYM_SCAN_TARGETS_HILO)
			cpi->hba_misc |= PIM_SCANHILO;
		if (np->usrflags & SYM_AVOID_BUS_RESET)
			cpi->hba_misc |= PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = (np->features & FE_WIDE) ? 15 : 7;
		/* Semantic note: LUN number max = max number of LUNs - 1. */
		cpi->max_lun = SYM_CONF_MAX_LUN-1;
		if (SYM_SETUP_MAX_LUN < SYM_CONF_MAX_LUN)
			cpi->max_lun = SYM_SETUP_MAX_LUN-1;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->initiator_id = np->myaddr;
		cpi->base_transfer_speed = 3300;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Symbios", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);

		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST;
		if (np->features & FE_ULTRA3) {
			cpi->transport_version = 3;
			cpi->xport_specific.spi.ppr_options =
			    SID_SPI_CLOCK_DT_ST;
		}
		cpi->maxio = SYM_CONF_MAX_SG * PAGE_SIZE;
		sym_xpt_done2(np, ccb, CAM_REQ_CMP);
		break;
	case XPT_ABORT:
		abort_ccb = ccb->cab.abort_ccb;
		switch(abort_ccb->ccb_h.func_code) {
		case XPT_SCSI_IO:
			if (sym_abort_scsiio(np, abort_ccb, 0) == 0) {
				sym_xpt_done2(np, ccb, CAM_REQ_CMP);
				break;
			}
			/* FALLTHROUGH: the CCB to abort was not found. */
		default:
			sym_xpt_done2(np, ccb, CAM_UA_ABORT);
			break;
		}
		break;
	case XPT_RESET_DEV:
		sym_reset_dev(np, ccb);
		break;
	case XPT_RESET_BUS:
		sym_reset_scsi_bus(np, 0);
		if (sym_verbose) {
			xpt_print_path(np->path);
			printf("SCSI BUS reset delivered.\n");
		}
		/* Re-initialize the chip after the bus reset. */
		sym_init (np, 1);
		sym_xpt_done2(np, ccb, CAM_REQ_CMP);
		break;
	case XPT_TERM_IO:
	default:
		sym_xpt_done2(np, ccb, CAM_REQ_INVALID);
		break;
	}
}
8084
8085 /*
8086 * Asynchronous notification handler.
8087 */
8088 static void
8089 sym_async(void *cb_arg, u32 code, struct cam_path *path, void *args __unused)
8090 {
8091 hcb_p np;
8092 struct cam_sim *sim;
8093 u_int tn;
8094 tcb_p tp;
8095
8096 sim = (struct cam_sim *) cb_arg;
8097 np = (hcb_p) cam_sim_softc(sim);
8098
8099 SYM_LOCK_ASSERT(MA_OWNED);
8100
8101 switch (code) {
8102 case AC_LOST_DEVICE:
8103 tn = xpt_path_target_id(path);
8104 if (tn >= SYM_CONF_MAX_TARGET)
8105 break;
8106
8107 tp = &np->target[tn];
8108
8109 tp->to_reset = 0;
8110 tp->head.sval = 0;
8111 tp->head.wval = np->rv_scntl3;
8112 tp->head.uval = 0;
8113
8114 tp->tinfo.current.period = tp->tinfo.goal.period = 0;
8115 tp->tinfo.current.offset = tp->tinfo.goal.offset = 0;
8116 tp->tinfo.current.width = tp->tinfo.goal.width = BUS_8_BIT;
8117 tp->tinfo.current.options = tp->tinfo.goal.options = 0;
8118
8119 break;
8120 default:
8121 break;
8122 }
8123 }
8124
8125 /*
8126 * Update transfer settings of a target.
8127 */
static void sym_update_trans(hcb_p np, struct sym_trans *tip,
    struct ccb_trans_settings *cts)
{

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 * Update the infos from the fields CAM marked as valid.
	 */
#define cts__spi (&cts->xport_specific.spi)
	if ((cts__spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
		tip->width = cts__spi->bus_width;
	if ((cts__spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)
		tip->offset = cts__spi->sync_offset;
	if ((cts__spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
		tip->period = cts__spi->sync_period;
	/* Of the PPR options, only DT clocking is honoured here. */
	if ((cts__spi->valid & CTS_SPI_VALID_PPR_OPTIONS) != 0)
		tip->options = (cts__spi->ppr_options & PPR_OPT_DT);
	if (cts->protocol_version != PROTO_VERSION_UNSPECIFIED &&
	    cts->protocol_version != PROTO_VERSION_UNKNOWN)
		tip->scsi_version = cts->protocol_version;
	if (cts->transport_version != XPORT_VERSION_UNSPECIFIED &&
	    cts->transport_version != XPORT_VERSION_UNKNOWN)
		tip->spi_version = cts->transport_version;
#undef cts__spi
	/*
	 * Scale against driver configuration limits.
	 */
	if (tip->width > SYM_SETUP_MAX_WIDE) tip->width = SYM_SETUP_MAX_WIDE;
	if (tip->period && tip->offset) {
		if (tip->offset > SYM_SETUP_MAX_OFFS) tip->offset = SYM_SETUP_MAX_OFFS;
		if (tip->period < SYM_SETUP_MIN_SYNC) tip->period = SYM_SETUP_MIN_SYNC;
	} else {
		/* Synchronous transfer needs both a period and an offset. */
		tip->offset = 0;
		tip->period = 0;
	}

	/*
	 * Scale against actual controller BUS width.
	 */
	if (tip->width > np->maxwide)
		tip->width = np->maxwide;

	/*
	 * Only accept DT if controller supports and SYNC/WIDE asked.
	 * (Must follow the width clamp above, since DT requires WIDE.)
	 */
	if (!((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) ||
	    !(tip->width == BUS_16_BIT && tip->offset)) {
		tip->options &= ~PPR_OPT_DT;
	}

	/*
	 * Scale period factor and offset against controller limits.
	 * DT and ST clocking have distinct min/max period and offset.
	 */
	if (tip->offset && tip->period) {
		if (tip->options & PPR_OPT_DT) {
			if (tip->period < np->minsync_dt)
				tip->period = np->minsync_dt;
			if (tip->period > np->maxsync_dt)
				tip->period = np->maxsync_dt;
			if (tip->offset > np->maxoffs_dt)
				tip->offset = np->maxoffs_dt;
		}
		else {
			if (tip->period < np->minsync)
				tip->period = np->minsync;
			if (tip->period > np->maxsync)
				tip->period = np->maxsync;
			if (tip->offset > np->maxoffs)
				tip->offset = np->maxoffs;
		}
	}
}
8201
8202 /*
8203 * Update flags for a device (logical unit).
8204 */
8205 static void
8206 sym_update_dflags(hcb_p np, u_char *flags, struct ccb_trans_settings *cts)
8207 {
8208
8209 SYM_LOCK_ASSERT(MA_OWNED);
8210
8211 #define cts__scsi (&cts->proto_specific.scsi)
8212 #define cts__spi (&cts->xport_specific.spi)
8213 if ((cts__spi->valid & CTS_SPI_VALID_DISC) != 0) {
8214 if ((cts__spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
8215 *flags |= SYM_DISC_ENABLED;
8216 else
8217 *flags &= ~SYM_DISC_ENABLED;
8218 }
8219
8220 if ((cts__scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
8221 if ((cts__scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
8222 *flags |= SYM_TAGS_ENABLED;
8223 else
8224 *flags &= ~SYM_TAGS_ENABLED;
8225 }
8226 #undef cts__spi
8227 #undef cts__scsi
8228 }
8229
8230 /*============= DRIVER INITIALISATION ==================*/
8231
/* Newbus glue: probe/attach/detach entry points for the PCI bus. */
static device_method_t sym_pci_methods[] = {
	DEVMETHOD(device_probe,	 sym_pci_probe),
	DEVMETHOD(device_attach, sym_pci_attach),
	DEVMETHOD(device_detach, sym_pci_detach),
	DEVMETHOD_END
};

static driver_t sym_pci_driver = {
	"sym",
	sym_pci_methods,
	1	/* no softc */
};

/* Register with the PCI bus; the driver depends on cam(4) and pci(4). */
DRIVER_MODULE(sym, pci, sym_pci_driver, NULL, NULL);
MODULE_DEPEND(sym, cam, 1, 1, 1);
MODULE_DEPEND(sym, pci, 1, 1, 1);
8248
/*
 * Chip identification table.
 * Each entry gives the PCI device id, the highest revision id it
 * matches, the chip name, four numeric per-chip limits (field order
 * follows struct sym_pci_chip — verify against its declaration) and
 * the FE_* feature flags.  Entries for the same device id must be
 * sorted by ascending revision_id (sym_find_pci_chip relies on it).
 */
static const struct sym_pci_chip sym_pci_dev_table[] = {
 {PCI_ID_SYM53C810, 0x0f, "810", 4, 8, 4, 64,
 FE_ERL}
 ,
#ifdef SYM_DEBUG_GENERIC_SUPPORT
 {PCI_ID_SYM53C810, 0xff, "810a", 4, 8, 4, 1,
 FE_BOF}
 ,
#else
 {PCI_ID_SYM53C810, 0xff, "810a", 4, 8, 4, 1,
 FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF}
 ,
#endif
 {PCI_ID_SYM53C815, 0xff, "815", 4, 8, 4, 64,
 FE_BOF|FE_ERL}
 ,
 {PCI_ID_SYM53C825, 0x0f, "825", 6, 8, 4, 64,
 FE_WIDE|FE_BOF|FE_ERL|FE_DIFF}
 ,
 {PCI_ID_SYM53C825, 0xff, "825a", 6, 8, 4, 2,
 FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM|FE_DIFF}
 ,
 {PCI_ID_SYM53C860, 0xff, "860", 4, 8, 5, 1,
 FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN}
 ,
 {PCI_ID_SYM53C875, 0x01, "875", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_CLK80|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF}
 ,
 {PCI_ID_SYM53C875, 0xff, "875", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF}
 ,
 {PCI_ID_SYM53C875_2, 0xff, "875", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF}
 ,
 {PCI_ID_SYM53C885, 0xff, "885", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF}
 ,
#ifdef SYM_DEBUG_GENERIC_SUPPORT
 {PCI_ID_SYM53C895, 0xff, "895", 6, 31, 7, 2,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|
 FE_RAM|FE_LCKFRQ}
 ,
#else
 {PCI_ID_SYM53C895, 0xff, "895", 6, 31, 7, 2,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_LCKFRQ}
 ,
#endif
 {PCI_ID_SYM53C896, 0xff, "896", 6, 31, 7, 4,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
 ,
 {PCI_ID_SYM53C895A, 0xff, "895a", 6, 31, 7, 4,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
 ,
 {PCI_ID_LSI53C1010, 0x00, "1010-33", 6, 31, 7, 8,
 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC|
 FE_C10}
 ,
 {PCI_ID_LSI53C1010, 0xff, "1010-33", 6, 31, 7, 8,
 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC|
 FE_C10|FE_U3EN}
 ,
 {PCI_ID_LSI53C1010_2, 0xff, "1010-66", 6, 31, 7, 8,
 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_66MHZ|FE_CRC|
 FE_C10|FE_U3EN}
 ,
 {PCI_ID_LSI53C1510D, 0xff, "1510d", 6, 31, 7, 4,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_IO256|FE_LEDC}
};
8328
8329 /*
8330 * Look up the chip table.
8331 *
8332 * Return a pointer to the chip entry if found,
8333 * zero otherwise.
8334 */
8335 static const struct sym_pci_chip *
8336 sym_find_pci_chip(device_t dev)
8337 {
8338 const struct sym_pci_chip *chip;
8339 int i;
8340 u_short device_id;
8341 u_char revision;
8342
8343 if (pci_get_vendor(dev) != PCI_VENDOR_NCR)
8344 return NULL;
8345
8346 device_id = pci_get_device(dev);
8347 revision = pci_get_revid(dev);
8348
8349 for (i = 0; i < nitems(sym_pci_dev_table); i++) {
8350 chip = &sym_pci_dev_table[i];
8351 if (device_id != chip->device_id)
8352 continue;
8353 if (revision > chip->revision_id)
8354 continue;
8355 return chip;
8356 }
8357
8358 return NULL;
8359 }
8360
8361 /*
8362 * Tell upper layer if the chip is supported.
8363 */
8364 static int
8365 sym_pci_probe(device_t dev)
8366 {
8367 const struct sym_pci_chip *chip;
8368
8369 chip = sym_find_pci_chip(dev);
8370 if (chip && sym_find_firmware(chip)) {
8371 device_set_desc(dev, chip->name);
8372 return BUS_PROBE_DEFAULT;
8373 }
8374 return ENXIO;
8375 }
8376
8377 /*
8378 * Attach a sym53c8xx device.
8379 */
static int
sym_pci_attach(device_t dev)
{
	const struct sym_pci_chip *chip;
	u_short command;
	u_char cachelnsz;
	struct sym_hcb *np = NULL;
	struct sym_nvram nvram;
	const struct sym_fw *fw = NULL;
	int i;
	bus_dma_tag_t bus_dmat;

	bus_dmat = bus_get_dma_tag(dev);

	/*
	 * Only probed devices should be attached.
	 * We just enjoy being paranoid. :)
	 */
	chip = sym_find_pci_chip(dev);
	if (chip == NULL || (fw = sym_find_firmware(chip)) == NULL)
		return (ENXIO);

	/*
	 * Allocate immediately the host control block,
	 * since we are only expecting to succeed. :)
	 * We keep track in the HCB of all the resources that
	 * are to be released on error.
	 */
	np = __sym_calloc_dma(bus_dmat, sizeof(*np), "HCB");
	if (np)
		np->bus_dmat = bus_dmat;
	else
		return (ENXIO);
	/* From here on, attach_failed can rely on sym_pci_detach(). */
	device_set_softc(dev, np);

	SYM_LOCK_INIT();

	/*
	 * Copy some useful infos to the HCB.
	 */
	np->hcb_ba = vtobus(np);
	np->verbose = bootverbose;
	np->device = dev;
	np->device_id = pci_get_device(dev);
	np->revision_id = pci_get_revid(dev);
	np->features = chip->features;
	np->clock_divn = chip->nr_divisor;
	np->maxoffs = chip->offset_max;
	np->maxburst = chip->burst_max;
	np->scripta_sz = fw->a_size;
	np->scriptb_sz = fw->b_size;
	np->fw_setup = fw->setup;
	np->fw_patch = fw->patch;
	np->fw_name = fw->name;

#ifdef __amd64__
	/* On amd64 the target array lives in DMA-able memory. */
	np->target = sym_calloc_dma(SYM_CONF_MAX_TARGET * sizeof(*(np->target)),
	    "TARGET");
	if (!np->target)
		goto attach_failed;
#endif

	/*
	 * Initialize the CCB free and busy queues.
	 */
	sym_que_init(&np->free_ccbq);
	sym_que_init(&np->busy_ccbq);
	sym_que_init(&np->comp_ccbq);
	sym_que_init(&np->cam_ccbq);

	/*
	 * Allocate a tag for the DMA of user data.
	 */
	if (bus_dma_tag_create(np->bus_dmat, 1, SYM_CONF_DMA_BOUNDARY,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, SYM_CONF_MAX_SG, SYM_CONF_DMA_BOUNDARY,
	    0, busdma_lock_mutex, &np->mtx, &np->data_dmat)) {
		device_printf(dev, "failed to create DMA tag.\n");
		goto attach_failed;
	}

	/*
	 * Read and apply some fix-ups to the PCI COMMAND
	 * register. We want the chip to be enabled for:
	 * - BUS mastering
	 * - PCI parity checking (reporting would also be fine)
	 * - Write And Invalidate.
	 */
	command = pci_read_config(dev, PCIR_COMMAND, 2);
	command |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_PERRESPEN |
	    PCIM_CMD_MWRICEN;
	pci_write_config(dev, PCIR_COMMAND, command, 2);

	/*
	 * Let the device know about the cache line size,
	 * if it doesn't yet.
	 */
	cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (!cachelnsz) {
		/* Firmware left it unset: provide a sane default. */
		cachelnsz = 8;
		pci_write_config(dev, PCIR_CACHELNSZ, cachelnsz, 1);
	}

	/*
	 * Alloc/get/map/retrieve everything that deals with MMIO.
	 */
	i = SYM_PCI_MMIO;
	np->mmio_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i,
	    RF_ACTIVE);
	if (!np->mmio_res) {
		device_printf(dev, "failed to allocate MMIO resources\n");
		goto attach_failed;
	}
	np->mmio_ba = rman_get_start(np->mmio_res);

	/*
	 * Allocate the IRQ.
	 */
	i = 0;
	np->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i,
	    RF_ACTIVE | RF_SHAREABLE);
	if (!np->irq_res) {
		device_printf(dev, "failed to allocate IRQ resource\n");
		goto attach_failed;
	}

#ifdef	SYM_CONF_IOMAPPED
	/*
	 * User want us to use normal IO with PCI.
	 * Alloc/get/map/retrieve everything that deals with IO.
	 */
	i = SYM_PCI_IO;
	np->io_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &i, RF_ACTIVE);
	if (!np->io_res) {
		device_printf(dev, "failed to allocate IO resources\n");
		goto attach_failed;
	}

#endif /* SYM_CONF_IOMAPPED */

	/*
	 * If the chip has RAM.
	 * Alloc/get/map/retrieve the corresponding resources.
	 */
	if (np->features & (FE_RAM|FE_RAM8K)) {
		i = SYM_PCI_RAM;
		if (np->features & FE_64BIT)
			i = SYM_PCI_RAM64;
		np->ram_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i,
		    RF_ACTIVE);
		if (!np->ram_res) {
			device_printf(dev,"failed to allocate RAM resources\n");
			goto attach_failed;
		}
		np->ram_ba = rman_get_start(np->ram_res);
	}

	/*
	 * Save setting of some IO registers, so we will
	 * be able to probe specific implementations.
	 */
	sym_save_initial_setting (np);

	/*
	 * Reset the chip now, since it has been reported
	 * that SCSI clock calibration may not work properly
	 * if the chip is currently active.
	 */
	sym_chip_reset (np);

	/*
	 * Try to read the user set-up.
	 */
	(void) sym_read_nvram(np, &nvram);

	/*
	 * Prepare controller and devices settings, according
	 * to chip features, user set-up and driver set-up.
	 */
	(void) sym_prepare_setting(np, &nvram);

	/*
	 * Check the PCI clock frequency.
	 * Must be performed after prepare_setting since it destroys
	 * STEST1 that is used to probe for the clock doubler.
	 */
	i = sym_getpciclock(np);
	if (i > 37000)
		device_printf(dev, "PCI BUS clock seems too high: %u KHz.\n",i);

	/*
	 * Allocate the start queue.
	 */
	np->squeue = (u32 *) sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"SQUEUE");
	if (!np->squeue)
		goto attach_failed;
	np->squeue_ba = vtobus(np->squeue);

	/*
	 * Allocate the done queue.
	 */
	np->dqueue = (u32 *) sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"DQUEUE");
	if (!np->dqueue)
		goto attach_failed;
	np->dqueue_ba = vtobus(np->dqueue);

	/*
	 * Allocate the target bus address array.
	 */
	np->targtbl = (u32 *) sym_calloc_dma(256, "TARGTBL");
	if (!np->targtbl)
		goto attach_failed;
	np->targtbl_ba = vtobus(np->targtbl);

	/*
	 * Allocate SCRIPTS areas.
	 */
	np->scripta0 = sym_calloc_dma(np->scripta_sz, "SCRIPTA0");
	np->scriptb0 = sym_calloc_dma(np->scriptb_sz, "SCRIPTB0");
	if (!np->scripta0 || !np->scriptb0)
		goto attach_failed;

	/*
	 * Allocate the CCBs. We need at least ONE.
	 * Keep allocating until sym_alloc_ccb() fails; only a total
	 * failure (zero CCBs) aborts the attach.
	 */
	for (i = 0; sym_alloc_ccb(np) != NULL; i++)
		;
	if (i < 1)
		goto attach_failed;

	/*
	 * Calculate BUS addresses where we are going
	 * to load the SCRIPTS.
	 */
	np->scripta_ba = vtobus(np->scripta0);
	np->scriptb_ba = vtobus(np->scriptb0);
	/* Keep the host-memory copy's address; on-chip RAM may
	 * override scripta_ba/scriptb_ba just below. */
	np->scriptb0_ba = np->scriptb_ba;

	if (np->ram_ba) {
		np->scripta_ba = np->ram_ba;
		if (np->features & FE_RAM8K) {
			np->ram_ws = 8192;
			np->scriptb_ba = np->scripta_ba + 4096;
#ifdef __LP64__
			np->scr_ram_seg = cpu_to_scr(np->scripta_ba >> 32);
#endif
		}
		else
			np->ram_ws = 4096;
	}

	/*
	 * Copy scripts to controller instance.
	 */
	memcpy(np->scripta0, fw->a_base, np->scripta_sz);
	memcpy(np->scriptb0, fw->b_base, np->scriptb_sz);

	/*
	 * Setup variable parts in scripts and compute
	 * scripts bus addresses used from the C code.
	 */
	np->fw_setup(np, fw);

	/*
	 * Bind SCRIPTS with physical addresses usable by the
	 * SCRIPTS processor (as seen from the BUS = BUS addresses).
	 */
	sym_fw_bind_script(np, (u32 *) np->scripta0, np->scripta_sz);
	sym_fw_bind_script(np, (u32 *) np->scriptb0, np->scriptb_sz);

#ifdef SYM_CONF_IARB_SUPPORT
	/*
	 * If user wants IARB to be set when we win arbitration
	 * and have other jobs, compute the max number of consecutive
	 * settings of IARB hints before we leave devices a chance to
	 * arbitrate for reselection.
	 */
#ifdef SYM_SETUP_IARB_MAX
	np->iarb_max = SYM_SETUP_IARB_MAX;
#else
	np->iarb_max = 4;
#endif
#endif

	/*
	 * Prepare the idle and invalid task actions.
	 */
	np->idletask.start = cpu_to_scr(SCRIPTA_BA (np, idle));
	np->idletask.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l));
	np->idletask_ba = vtobus(&np->idletask);

	np->notask.start = cpu_to_scr(SCRIPTA_BA (np, idle));
	np->notask.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l));
	np->notask_ba = vtobus(&np->notask);

	np->bad_itl.start = cpu_to_scr(SCRIPTA_BA (np, idle));
	np->bad_itl.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l));
	np->bad_itl_ba = vtobus(&np->bad_itl);

	np->bad_itlq.start = cpu_to_scr(SCRIPTA_BA (np, idle));
	np->bad_itlq.restart = cpu_to_scr(SCRIPTB_BA (np,bad_i_t_l_q));
	np->bad_itlq_ba = vtobus(&np->bad_itlq);

	/*
	 * Allocate and prepare the lun JUMP table that is used
	 * for a target prior the probing of devices (bad lun table).
	 * A private table will be allocated for the target on the
	 * first INQUIRY response received.
	 */
	np->badluntbl = sym_calloc_dma(256, "BADLUNTBL");
	if (!np->badluntbl)
		goto attach_failed;

	np->badlun_sa = cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun));
	for (i = 0 ; i < 64 ; i++)	/* 64 luns/target, no less */
		np->badluntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa));

	/*
	 * Prepare the bus address array that contains the bus
	 * address of each target control block.
	 * For now, assume all logical units are wrong. :)
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
		np->targtbl[i] = cpu_to_scr(vtobus(&np->target[i]));
		np->target[i].head.luntbl_sa =
		    cpu_to_scr(vtobus(np->badluntbl));
		np->target[i].head.lun0_sa =
		    cpu_to_scr(vtobus(&np->badlun_sa));
	}

	/*
	 * Now check the cache handling of the pci chipset.
	 */
	if (sym_snooptest (np)) {
		device_printf(dev, "CACHE INCORRECTLY CONFIGURED.\n");
		goto attach_failed;
	}

	/*
	 * Now deal with CAM.
	 * Hopefully, we will succeed with that one.:)
	 */
	if (!sym_cam_attach(np))
		goto attach_failed;

	/*
	 * Sigh! we are done.
	 */
	return 0;

	/*
	 * We have failed.
	 * We will try to free all the resources we have
	 * allocated, but if we are a boot device, this
	 * will not help that much.;)
	 */
attach_failed:
	if (np)
		sym_pci_detach(dev);
	return ENXIO;
}
8741
8742 /*
8743 * Detach a device by freeing everything that has been allocated for it.
8744 */
8745 static int
8746 sym_pci_detach(device_t dev)
8747 {
8748 hcb_p np;
8749 SYM_QUEHEAD *qp;
8750 ccb_p cp;
8751 tcb_p tp;
8752 lcb_p lp;
8753 int target, lun;
8754
8755 np = device_get_softc(dev);
8756
8757 /*
8758 * First free CAM resources.
8759 */
8760 sym_cam_free(np);
8761
8762 /*
8763 * Now every should be quiet for us to
8764 * free other resources.
8765 */
8766 if (np->ram_res)
8767 bus_release_resource(np->device, SYS_RES_MEMORY,
8768 rman_get_rid(np->ram_res), np->ram_res);
8769 if (np->mmio_res)
8770 bus_release_resource(np->device, SYS_RES_MEMORY,
8771 rman_get_rid(np->mmio_res), np->mmio_res);
8772 if (np->io_res)
8773 bus_release_resource(np->device, SYS_RES_IOPORT,
8774 rman_get_rid(np->io_res), np->io_res);
8775 if (np->irq_res)
8776 bus_release_resource(np->device, SYS_RES_IRQ, 0, np->irq_res);
8777
8778 if (np->scriptb0)
8779 sym_mfree_dma(np->scriptb0, np->scriptb_sz, "SCRIPTB0");
8780 if (np->scripta0)
8781 sym_mfree_dma(np->scripta0, np->scripta_sz, "SCRIPTA0");
8782 if (np->squeue)
8783 sym_mfree_dma(np->squeue, sizeof(u32)*(MAX_QUEUE*2), "SQUEUE");
8784 if (np->dqueue)
8785 sym_mfree_dma(np->dqueue, sizeof(u32)*(MAX_QUEUE*2), "DQUEUE");
8786
8787 while ((qp = sym_remque_head(&np->free_ccbq)) != NULL) {
8788 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
8789 bus_dmamap_destroy(np->data_dmat, cp->dmamap);
8790 sym_mfree_dma(cp->sns_bbuf, SYM_SNS_BBUF_LEN, "SNS_BBUF");
8791 sym_mfree_dma(cp, sizeof(*cp), "CCB");
8792 }
8793
8794 if (np->badluntbl)
8795 sym_mfree_dma(np->badluntbl, 256,"BADLUNTBL");
8796
8797 for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) {
8798 tp = &np->target[target];
8799 for (lun = 0 ; lun < SYM_CONF_MAX_LUN ; lun++) {
8800 lp = sym_lp(tp, lun);
8801 if (!lp)
8802 continue;
8803 if (lp->itlq_tbl)
8804 sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4,
8805 "ITLQ_TBL");
8806 if (lp->cb_tags)
8807 sym_mfree(lp->cb_tags, SYM_CONF_MAX_TASK,
8808 "CB_TAGS");
8809 sym_mfree_dma(lp, sizeof(*lp), "LCB");
8810 }
8811 #if SYM_CONF_MAX_LUN > 1
8812 if (tp->lunmp)
8813 sym_mfree(tp->lunmp, SYM_CONF_MAX_LUN*sizeof(lcb_p),
8814 "LUNMP");
8815 #endif
8816 }
8817 #ifdef __amd64__
8818 if (np->target)
8819 sym_mfree_dma(np->target,
8820 SYM_CONF_MAX_TARGET * sizeof(*(np->target)), "TARGET");
8821 #endif
8822 if (np->targtbl)
8823 sym_mfree_dma(np->targtbl, 256, "TARGTBL");
8824 if (np->data_dmat)
8825 bus_dma_tag_destroy(np->data_dmat);
8826 if (SYM_LOCK_INITIALIZED() != 0)
8827 SYM_LOCK_DESTROY();
8828 device_set_softc(np->device, NULL);
8829 sym_mfree_dma(np, sizeof(*np), "HCB");
8830
8831 return (0);
8832 }
8833
8834 /*
8835 * Allocate CAM resources and register a bus to CAM.
8836 */
static int sym_cam_attach(hcb_p np)
{
	struct cam_devq *devq = NULL;
	struct cam_sim *sim = NULL;
	struct cam_path *path = NULL;
	int err;

	/*
	 * Establish our interrupt handler.
	 */
	err = bus_setup_intr(np->device, np->irq_res,
	    INTR_ENTROPY | INTR_MPSAFE | INTR_TYPE_CAM,
	    NULL, sym_intr, np, &np->intr);
	if (err) {
		device_printf(np->device, "bus_setup_intr() failed: %d\n",
		    err);
		goto fail;
	}

	/*
	 * Create the device queue for our sym SIM.
	 */
	devq = cam_simq_alloc(SYM_CONF_MAX_START);
	if (!devq)
		goto fail;

	/*
	 * Construct our SIM entry.
	 */
	sim = cam_sim_alloc(sym_action, sym_poll, "sym", np,
	    device_get_unit(np->device),
	    &np->mtx, 1, SYM_SETUP_MAX_TAG, devq);
	if (!sim)
		goto fail;

	SYM_LOCK();

	if (xpt_bus_register(sim, np->device, 0) != CAM_SUCCESS)
		goto fail;
	np->sim = sim;
	sim = NULL;

	if (xpt_create_path(&path, NULL,
	    cam_sim_path(np->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		goto fail;
	}
	np->path = path;

	/*
	 * Establish our async notification handler.
	 */
	if (xpt_register_async(AC_LOST_DEVICE, sym_async, np->sim, path) !=
	    CAM_REQ_CMP)
		goto fail;

	/*
	 * Start the chip now, without resetting the BUS, since
	 * it seems that this must stay under control of CAM.
	 * With LVD/SE capable chips and BUS in SE mode, we may
	 * get a spurious SMBC interrupt.
	 */
	sym_init (np, 0);

	SYM_UNLOCK();

	return 1;
fail:
	/*
	 * NOTE(review): failures taken before the SYM_LOCK() above reach
	 * this SYM_UNLOCK() without the lock held — confirm SYM_UNLOCK()
	 * tolerates that.  Also, a locally allocated `sim` (when
	 * xpt_bus_register() fails) or `devq` (when cam_sim_alloc() fails)
	 * is not freed here, since sym_cam_free() only releases np->sim —
	 * verify whether this leaks.
	 */
	SYM_UNLOCK();

	sym_cam_free(np);

	return 0;
}
8911
8912 /*
8913 * Free everything that deals with CAM.
8914 */
static void sym_cam_free(hcb_p np)
{

	SYM_LOCK_ASSERT(MA_NOTOWNED);

	/* Tear down the interrupt handler before touching CAM state. */
	if (np->intr) {
		bus_teardown_intr(np->device, np->irq_res, np->intr);
		np->intr = NULL;
	}

	SYM_LOCK();

	/* Drop our wildcard path, notifying peripherals first. */
	if (np->path) {
		xpt_async(AC_LOST_DEVICE, np->path, NULL);
		xpt_free_path(np->path);
		np->path = NULL;
	}
	/* Deregister the bus and free the SIM (and its devq with it). */
	if (np->sim) {
		xpt_bus_deregister(cam_sim_path(np->sim));
		cam_sim_free(np->sim, /*free_devq*/ TRUE);
		np->sim = NULL;
	}

	SYM_UNLOCK();
}
8940
/*============ OPTIONAL NVRAM SUPPORT =================*/
8942
8943 /*
8944 * Get host setup from NVRAM.
8945 */
8946 static void sym_nvram_setup_host (hcb_p np, struct sym_nvram *nvram)
8947 {
8948 #ifdef SYM_CONF_NVRAM_SUPPORT
8949 /*
8950 * Get parity checking, host ID, verbose mode
8951 * and miscellaneous host flags from NVRAM.
8952 */
8953 switch(nvram->type) {
8954 case SYM_SYMBIOS_NVRAM:
8955 if (!(nvram->data.Symbios.flags & SYMBIOS_PARITY_ENABLE))
8956 np->rv_scntl0 &= ~0x0a;
8957 np->myaddr = nvram->data.Symbios.host_id & 0x0f;
8958 if (nvram->data.Symbios.flags & SYMBIOS_VERBOSE_MSGS)
8959 np->verbose += 1;
8960 if (nvram->data.Symbios.flags1 & SYMBIOS_SCAN_HI_LO)
8961 np->usrflags |= SYM_SCAN_TARGETS_HILO;
8962 if (nvram->data.Symbios.flags2 & SYMBIOS_AVOID_BUS_RESET)
8963 np->usrflags |= SYM_AVOID_BUS_RESET;
8964 break;
8965 case SYM_TEKRAM_NVRAM:
8966 np->myaddr = nvram->data.Tekram.host_id & 0x0f;
8967 break;
8968 default:
8969 break;
8970 }
8971 #endif
8972 }
8973
8974 /*
8975 * Get target setup from NVRAM.
8976 */
8977 #ifdef SYM_CONF_NVRAM_SUPPORT
8978 static void sym_Symbios_setup_target(hcb_p np,int target, Symbios_nvram *nvram);
8979 static void sym_Tekram_setup_target(hcb_p np,int target, Tekram_nvram *nvram);
8980 #endif
8981
8982 static void
8983 sym_nvram_setup_target (hcb_p np, int target, struct sym_nvram *nvp)
8984 {
8985 #ifdef SYM_CONF_NVRAM_SUPPORT
8986 switch(nvp->type) {
8987 case SYM_SYMBIOS_NVRAM:
8988 sym_Symbios_setup_target (np, target, &nvp->data.Symbios);
8989 break;
8990 case SYM_TEKRAM_NVRAM:
8991 sym_Tekram_setup_target (np, target, &nvp->data.Tekram);
8992 break;
8993 default:
8994 break;
8995 }
8996 #endif
8997 }
8998
8999 #ifdef SYM_CONF_NVRAM_SUPPORT
9000 /*
9001 * Get target set-up from Symbios format NVRAM.
9002 */
9003 static void
9004 sym_Symbios_setup_target(hcb_p np, int target, Symbios_nvram *nvram)
9005 {
9006 tcb_p tp = &np->target[target];
9007 Symbios_target *tn = &nvram->target[target];
9008
9009 tp->tinfo.user.period = tn->sync_period ? (tn->sync_period + 3) / 4 : 0;
9010 tp->tinfo.user.width = tn->bus_width == 0x10 ? BUS_16_BIT : BUS_8_BIT;
9011 tp->usrtags =
9012 (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? SYM_SETUP_MAX_TAG : 0;
9013
9014 if (!(tn->flags & SYMBIOS_DISCONNECT_ENABLE))
9015 tp->usrflags &= ~SYM_DISC_ENABLED;
9016 if (!(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME))
9017 tp->usrflags |= SYM_SCAN_BOOT_DISABLED;
9018 if (!(tn->flags & SYMBIOS_SCAN_LUNS))
9019 tp->usrflags |= SYM_SCAN_LUNS_DISABLED;
9020 }
9021
9022 /*
9023 * Get target set-up from Tekram format NVRAM.
9024 */
9025 static void
9026 sym_Tekram_setup_target(hcb_p np, int target, Tekram_nvram *nvram)
9027 {
9028 tcb_p tp = &np->target[target];
9029 struct Tekram_target *tn = &nvram->target[target];
9030 int i;
9031
9032 if (tn->flags & TEKRAM_SYNC_NEGO) {
9033 i = tn->sync_index & 0xf;
9034 tp->tinfo.user.period = Tekram_sync[i];
9035 }
9036
9037 tp->tinfo.user.width =
9038 (tn->flags & TEKRAM_WIDE_NEGO) ? BUS_16_BIT : BUS_8_BIT;
9039
9040 if (tn->flags & TEKRAM_TAGGED_COMMANDS) {
9041 tp->usrtags = 2 << nvram->max_tags_index;
9042 }
9043
9044 if (tn->flags & TEKRAM_DISCONNECT_ENABLE)
9045 tp->usrflags |= SYM_DISC_ENABLED;
9046
9047 /* If any device does not support parity, we will not use this option */
9048 if (!(tn->flags & TEKRAM_PARITY_CHECK))
9049 np->rv_scntl0 &= ~0x0a; /* SCSI parity checking disabled */
9050 }
9051
9052 #ifdef SYM_CONF_DEBUG_NVRAM
9053 /*
9054 * Dump Symbios format NVRAM for debugging purpose.
9055 */
9056 static void sym_display_Symbios_nvram(hcb_p np, Symbios_nvram *nvram)
9057 {
9058 int i;
9059
9060 /* display Symbios nvram host data */
9061 device_printf(np->device, "HOST ID=%d%s%s%s%s%s%s\n",
9062 nvram->host_id & 0x0f,
9063 (nvram->flags & SYMBIOS_SCAM_ENABLE) ? " SCAM" : "",
9064 (nvram->flags & SYMBIOS_PARITY_ENABLE) ? " PARITY" : "",
9065 (nvram->flags & SYMBIOS_VERBOSE_MSGS) ? " VERBOSE" : "",
9066 (nvram->flags & SYMBIOS_CHS_MAPPING) ? " CHS_ALT" : "",
9067 (nvram->flags2 & SYMBIOS_AVOID_BUS_RESET) ? " NO_RESET" : "",
9068 (nvram->flags1 & SYMBIOS_SCAN_HI_LO) ? " HI_LO" : "");
9069
9070 /* display Symbios nvram drive data */
9071 for (i = 0 ; i < 15 ; i++) {
9072 struct Symbios_target *tn = &nvram->target[i];
9073 printf("%s-%d:%s%s%s%s WIDTH=%d SYNC=%d TMO=%d\n",
9074 sym_name(np), i,
9075 (tn->flags & SYMBIOS_DISCONNECT_ENABLE) ? " DISC" : "",
9076 (tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME) ? " SCAN_BOOT" : "",
9077 (tn->flags & SYMBIOS_SCAN_LUNS) ? " SCAN_LUNS" : "",
9078 (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? " TCQ" : "",
9079 tn->bus_width,
9080 tn->sync_period / 4,
9081 tn->timeout);
9082 }
9083 }
9084
9085 /*
9086 * Dump TEKRAM format NVRAM for debugging purpose.
9087 */
/* Boot delay values, indexed by the NVRAM boot_delay_index field. */
static const u_char Tekram_boot_delay[7] = {3, 5, 10, 20, 30, 60, 120};
9089 static void sym_display_Tekram_nvram(hcb_p np, Tekram_nvram *nvram)
9090 {
9091 int i, tags, boot_delay;
9092 char *rem;
9093
9094 /* display Tekram nvram host data */
9095 tags = 2 << nvram->max_tags_index;
9096 boot_delay = 0;
9097 if (nvram->boot_delay_index < 6)
9098 boot_delay = Tekram_boot_delay[nvram->boot_delay_index];
9099 switch((nvram->flags & TEKRAM_REMOVABLE_FLAGS) >> 6) {
9100 default:
9101 case 0: rem = ""; break;
9102 case 1: rem = " REMOVABLE=boot device"; break;
9103 case 2: rem = " REMOVABLE=all"; break;
9104 }
9105
9106 device_printf(np->device,
9107 "HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n",
9108 nvram->host_id & 0x0f,
9109 (nvram->flags1 & SYMBIOS_SCAM_ENABLE) ? " SCAM" : "",
9110 (nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES" : "",
9111 (nvram->flags & TEKRAM_DRIVES_SUP_1GB) ? " >1GB" : "",
9112 (nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET" : "",
9113 (nvram->flags & TEKRAM_ACTIVE_NEGATION) ? " ACT_NEG" : "",
9114 (nvram->flags & TEKRAM_IMMEDIATE_SEEK) ? " IMM_SEEK" : "",
9115 (nvram->flags & TEKRAM_SCAN_LUNS) ? " SCAN_LUNS" : "",
9116 (nvram->flags1 & TEKRAM_F2_F6_ENABLED) ? " F2_F6" : "",
9117 rem, boot_delay, tags);
9118
9119 /* display Tekram nvram drive data */
9120 for (i = 0; i <= 15; i++) {
9121 int sync, j;
9122 struct Tekram_target *tn = &nvram->target[i];
9123 j = tn->sync_index & 0xf;
9124 sync = Tekram_sync[j];
9125 printf("%s-%d:%s%s%s%s%s%s PERIOD=%d\n",
9126 sym_name(np), i,
9127 (tn->flags & TEKRAM_PARITY_CHECK) ? " PARITY" : "",
9128 (tn->flags & TEKRAM_SYNC_NEGO) ? " SYNC" : "",
9129 (tn->flags & TEKRAM_DISCONNECT_ENABLE) ? " DISC" : "",
9130 (tn->flags & TEKRAM_START_CMD) ? " START" : "",
9131 (tn->flags & TEKRAM_TAGGED_COMMANDS) ? " TCQ" : "",
9132 (tn->flags & TEKRAM_WIDE_NEGO) ? " WIDE" : "",
9133 sync);
9134 }
9135 }
9136 #endif /* SYM_CONF_DEBUG_NVRAM */
9137 #endif /* SYM_CONF_NVRAM_SUPPORT */
9138
9139 /*
9140 * Try reading Symbios or Tekram NVRAM
9141 */
9142 #ifdef SYM_CONF_NVRAM_SUPPORT
9143 static int sym_read_Symbios_nvram (hcb_p np, Symbios_nvram *nvram);
9144 static int sym_read_Tekram_nvram (hcb_p np, Tekram_nvram *nvram);
9145 #endif
9146
/* Returns the NVRAM type that was found, or 0 when none was detected. */
static int sym_read_nvram(hcb_p np, struct sym_nvram *nvp)
{
#ifdef SYM_CONF_NVRAM_SUPPORT
	/*
	 * Try to read SYMBIOS nvram.
	 * Try to read TEKRAM nvram if Symbios nvram not found.
	 */
	if (SYM_SETUP_SYMBIOS_NVRAM &&
		!sym_read_Symbios_nvram (np, &nvp->data.Symbios)) {
		nvp->type = SYM_SYMBIOS_NVRAM;
#ifdef SYM_CONF_DEBUG_NVRAM
		sym_display_Symbios_nvram(np, &nvp->data.Symbios);
#endif
	}
	else if (SYM_SETUP_TEKRAM_NVRAM &&
		!sym_read_Tekram_nvram (np, &nvp->data.Tekram)) {
		nvp->type = SYM_TEKRAM_NVRAM;
#ifdef SYM_CONF_DEBUG_NVRAM
		sym_display_Tekram_nvram(np, &nvp->data.Tekram);
#endif
	}
	else
		nvp->type = 0;
#else
	/* No NVRAM support compiled in: report "none found". */
	nvp->type = 0;
#endif
	return nvp->type;
}
9175
9176 #ifdef SYM_CONF_NVRAM_SUPPORT
9177 /*
9178 * 24C16 EEPROM reading.
9179 *
 * GPIO0 - data in/data out
9181 * GPIO1 - clock
9182 * Symbios NVRAM wiring now also used by Tekram.
9183 */
9184
9185 #define SET_BIT 0
9186 #define CLR_BIT 1
9187 #define SET_CLK 2
9188 #define CLR_CLK 3
9189
9190 /*
9191 * Set/clear data/clock bit in GPIO0
9192 */
9193 static void S24C16_set_bit(hcb_p np, u_char write_bit, u_char *gpreg,
9194 int bit_mode)
9195 {
9196 UDELAY (5);
9197 switch (bit_mode){
9198 case SET_BIT:
9199 *gpreg |= write_bit;
9200 break;
9201 case CLR_BIT:
9202 *gpreg &= 0xfe;
9203 break;
9204 case SET_CLK:
9205 *gpreg |= 0x02;
9206 break;
9207 case CLR_CLK:
9208 *gpreg &= 0xfd;
9209 break;
9210 }
9211 OUTB (nc_gpreg, *gpreg);
9212 UDELAY (5);
9213 }
9214
9215 /*
9216 * Send START condition to NVRAM to wake it up.
9217 */
static void S24C16_start(hcb_p np, u_char *gpreg)
{
	/*
	 * Raise data, raise clock, then drop data while the clock is
	 * still high — the serial START condition — and finish with
	 * the clock low.
	 */
	S24C16_set_bit(np, 1, gpreg, SET_BIT);
	S24C16_set_bit(np, 0, gpreg, SET_CLK);
	S24C16_set_bit(np, 0, gpreg, CLR_BIT);
	S24C16_set_bit(np, 0, gpreg, CLR_CLK);
}
9225
9226 /*
9227 * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZzzzz!!
9228 */
static void S24C16_stop(hcb_p np, u_char *gpreg)
{
	/*
	 * Raise the clock, then let data rise while the clock is high —
	 * the serial STOP condition.
	 */
	S24C16_set_bit(np, 0, gpreg, SET_CLK);
	S24C16_set_bit(np, 1, gpreg, SET_BIT);
}
9234
9235 /*
9236 * Read or write a bit to the NVRAM,
9237 * read if GPIO0 input else write if GPIO0 output
9238 */
static void S24C16_do_bit(hcb_p np, u_char *read_bit, u_char write_bit,
			 u_char *gpreg)
{
	/* Present the data bit, then raise the clock. */
	S24C16_set_bit(np, write_bit, gpreg, SET_BIT);
	S24C16_set_bit(np, 0, gpreg, SET_CLK);
	/* Sample GPREG while the clock is high, if the caller asked for it. */
	if (read_bit)
		*read_bit = INB (nc_gpreg);
	/* Drop clock, then data, completing the bit cell. */
	S24C16_set_bit(np, 0, gpreg, CLR_CLK);
	S24C16_set_bit(np, 0, gpreg, CLR_BIT);
}
9249
9250 /*
9251 * Output an ACK to the NVRAM after reading,
9252 * change GPIO0 to output and when done back to an input
9253 */
static void S24C16_write_ack(hcb_p np, u_char write_bit, u_char *gpreg,
			    u_char *gpcntl)
{
	/* Turn GPIO0 into an output (clear bit 0 of GPCNTL)... */
	OUTB (nc_gpcntl, *gpcntl & 0xfe);
	/* ...clock out the ACK bit... */
	S24C16_do_bit(np, 0, write_bit, gpreg);
	/* ...and restore the previous GPIO0 direction. */
	OUTB (nc_gpcntl, *gpcntl);
}
9261
9262 /*
9263 * Input an ACK from NVRAM after writing,
9264 * change GPIO0 to input and when done back to an output
9265 */
static void S24C16_read_ack(hcb_p np, u_char *read_bit, u_char *gpreg,
			   u_char *gpcntl)
{
	/* Turn GPIO0 into an input (set bit 0 of GPCNTL)... */
	OUTB (nc_gpcntl, *gpcntl | 0x01);
	/* ...clock in the device's ACK bit... */
	S24C16_do_bit(np, read_bit, 1, gpreg);
	/* ...and restore the previous GPIO0 direction. */
	OUTB (nc_gpcntl, *gpcntl);
}
9273
9274 /*
9275 * WRITE a byte to the NVRAM and then get an ACK to see it was accepted OK,
9276 * GPIO0 must already be set as an output
9277 */
9278 static void S24C16_write_byte(hcb_p np, u_char *ack_data, u_char write_data,
9279 u_char *gpreg, u_char *gpcntl)
9280 {
9281 int x;
9282
9283 for (x = 0; x < 8; x++)
9284 S24C16_do_bit(np, 0, (write_data >> (7 - x)) & 0x01, gpreg);
9285
9286 S24C16_read_ack(np, ack_data, gpreg, gpcntl);
9287 }
9288
9289 /*
9290 * READ a byte from the NVRAM and then send an ACK to say we have got it,
9291 * GPIO0 must already be set as an input
9292 */
9293 static void S24C16_read_byte(hcb_p np, u_char *read_data, u_char ack_data,
9294 u_char *gpreg, u_char *gpcntl)
9295 {
9296 int x;
9297 u_char read_bit;
9298
9299 *read_data = 0;
9300 for (x = 0; x < 8; x++) {
9301 S24C16_do_bit(np, &read_bit, 1, gpreg);
9302 *read_data |= ((read_bit & 0x01) << (7 - x));
9303 }
9304
9305 S24C16_write_ack(np, ack_data, gpreg, gpcntl);
9306 }
9307
9308 /*
9309 * Read 'len' bytes starting at 'offset'.
9310 */
/*
 * Read `len' bytes of EEPROM content into `data', starting at byte
 * `offset'.  Returns 0 on success, 1 when the device fails to ACK.
 */
static int sym_read_S24C16_nvram (hcb_p np, int offset, u_char *data, int len)
{
	u_char gpcntl, gpreg;
	u_char old_gpcntl, old_gpreg;
	u_char ack_data;
	int retv = 1;
	int x;

	/* save current state of GPCNTL and GPREG */
	old_gpreg = INB (nc_gpreg);
	old_gpcntl = INB (nc_gpcntl);
	gpcntl = old_gpcntl & 0x1c;

	/* set up GPREG & GPCNTL to set GPIO0 and GPIO1 in to known state */
	OUTB (nc_gpreg, old_gpreg);
	OUTB (nc_gpcntl, gpcntl);

	/* this is to set NVRAM into a known state with GPIO0/1 both low */
	gpreg = old_gpreg;
	S24C16_set_bit(np, 0, &gpreg, CLR_CLK);
	S24C16_set_bit(np, 0, &gpreg, CLR_BIT);

	/* now set NVRAM inactive with GPIO0/1 both high */
	S24C16_stop(np, &gpreg);

	/* activate NVRAM */
	S24C16_start(np, &gpreg);

	/* write device code and random address MSB */
	/* 0xa0 = device code; bits 1-3 carry offset bits 8-10 */
	S24C16_write_byte(np, &ack_data,
		0xa0 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
	if (ack_data & 0x01)
		goto out;

	/* write random address LSB */
	S24C16_write_byte(np, &ack_data,
		offset & 0xff, &gpreg, &gpcntl);
	if (ack_data & 0x01)
		goto out;

	/* regenerate START state to set up for reading */
	S24C16_start(np, &gpreg);

	/* rewrite device code and address MSB with read bit set (lsb = 0x01) */
	S24C16_write_byte(np, &ack_data,
		0xa1 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
	if (ack_data & 0x01)
		goto out;

	/* now set up GPIO0 for inputting data */
	gpcntl |= 0x01;
	OUTB (nc_gpcntl, gpcntl);

	/* input all requested data - only part of total NVRAM */
	/* the final byte is not ACKed, telling the device to stop */
	for (x = 0; x < len; x++)
		S24C16_read_byte(np, &data[x], (x == (len-1)), &gpreg, &gpcntl);

	/* finally put NVRAM back in inactive mode */
	gpcntl &= 0xfe;
	OUTB (nc_gpcntl, gpcntl);
	S24C16_stop(np, &gpreg);
	retv = 0;
out:
	/* return GPIO0/1 to original states after having accessed NVRAM */
	OUTB (nc_gpcntl, old_gpcntl);
	OUTB (nc_gpreg, old_gpreg);

	return retv;
}
9380
9381 #undef SET_BIT /* 0 */
9382 #undef CLR_BIT /* 1 */
9383 #undef SET_CLK /* 2 */
9384 #undef CLR_CLK /* 3 */
9385
9386 /*
9387 * Try reading Symbios NVRAM.
9388 * Return 0 if OK.
9389 */
9390 static int sym_read_Symbios_nvram (hcb_p np, Symbios_nvram *nvram)
9391 {
9392 static u_char Symbios_trailer[6] = {0xfe, 0xfe, 0, 0, 0, 0};
9393 u_char *data = (u_char *) nvram;
9394 int len = sizeof(*nvram);
9395 u_short csum;
9396 int x;
9397
9398 /* probe the 24c16 and read the SYMBIOS 24c16 area */
9399 if (sym_read_S24C16_nvram (np, SYMBIOS_NVRAM_ADDRESS, data, len))
9400 return 1;
9401
9402 /* check valid NVRAM signature, verify byte count and checksum */
9403 if (nvram->type != 0 ||
9404 bcmp(nvram->trailer, Symbios_trailer, 6) ||
9405 nvram->byte_count != len - 12)
9406 return 1;
9407
9408 /* verify checksum */
9409 for (x = 6, csum = 0; x < len - 6; x++)
9410 csum += data[x];
9411 if (csum != nvram->checksum)
9412 return 1;
9413
9414 return 0;
9415 }
9416
9417 /*
9418 * 93C46 EEPROM reading.
9419 *
 * GPIO0 - data in
9421 * GPIO1 - data out
9422 * GPIO2 - clock
9423 * GPIO4 - chip select
9424 *
9425 * Used by Tekram.
9426 */
9427
9428 /*
9429 * Pulse clock bit in GPIO0
9430 */
static void T93C46_Clk(hcb_p np, u_char *gpreg)
{
	/* Raise GPIO2 (clock), hold briefly, then restore the shadow value. */
	OUTB (nc_gpreg, *gpreg | 0x04);
	UDELAY (2);
	OUTB (nc_gpreg, *gpreg);
}
9437
9438 /*
9439 * Read bit from NVRAM
9440 */
static void T93C46_Read_Bit(hcb_p np, u_char *read_bit, u_char *gpreg)
{
	/* Pulse the clock, then sample GPREG (caller masks the GPIO0 bit). */
	UDELAY (2);
	T93C46_Clk(np, gpreg);
	*read_bit = INB (nc_gpreg);
}
9447
9448 /*
9449 * Write bit to GPIO0
9450 */
9451 static void T93C46_Write_Bit(hcb_p np, u_char write_bit, u_char *gpreg)
9452 {
9453 if (write_bit & 0x01)
9454 *gpreg |= 0x02;
9455 else
9456 *gpreg &= 0xfd;
9457
9458 *gpreg |= 0x10;
9459
9460 OUTB (nc_gpreg, *gpreg);
9461 UDELAY (2);
9462
9463 T93C46_Clk(np, gpreg);
9464 }
9465
9466 /*
9467 * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZZzzz!!
9468 */
static void T93C46_Stop(hcb_p np, u_char *gpreg)
{
	/* Deassert chip select (GPIO4), then issue one more clock pulse. */
	*gpreg &= 0xef;
	OUTB (nc_gpreg, *gpreg);
	UDELAY (2);

	T93C46_Clk(np, gpreg);
}
9477
9478 /*
9479 * Send read command and address to NVRAM
9480 */
9481 static void T93C46_Send_Command(hcb_p np, u_short write_data,
9482 u_char *read_bit, u_char *gpreg)
9483 {
9484 int x;
9485
9486 /* send 9 bits, start bit (1), command (2), address (6) */
9487 for (x = 0; x < 9; x++)
9488 T93C46_Write_Bit(np, (u_char) (write_data >> (8 - x)), gpreg);
9489
9490 *read_bit = INB (nc_gpreg);
9491 }
9492
9493 /*
9494 * READ 2 bytes from the NVRAM
9495 */
9496 static void T93C46_Read_Word(hcb_p np, u_short *nvram_data, u_char *gpreg)
9497 {
9498 int x;
9499 u_char read_bit;
9500
9501 *nvram_data = 0;
9502 for (x = 0; x < 16; x++) {
9503 T93C46_Read_Bit(np, &read_bit, gpreg);
9504
9505 if (read_bit & 0x01)
9506 *nvram_data |= (0x01 << (15 - x));
9507 else
9508 *nvram_data &= ~(0x01 << (15 - x));
9509 }
9510 }
9511
9512 /*
9513 * Read Tekram NvRAM data.
9514 */
9515 static int T93C46_Read_Data(hcb_p np, u_short *data,int len,u_char *gpreg)
9516 {
9517 u_char read_bit;
9518 int x;
9519
9520 for (x = 0; x < len; x++) {
9521 /* output read command and address */
9522 T93C46_Send_Command(np, 0x180 | x, &read_bit, gpreg);
9523 if (read_bit & 0x01)
9524 return 1; /* Bad */
9525 T93C46_Read_Word(np, &data[x], gpreg);
9526 T93C46_Stop(np, gpreg);
9527 }
9528
9529 return 0;
9530 }
9531
9532 /*
9533 * Try reading 93C46 Tekram NVRAM.
9534 */
9535 static int sym_read_T93C46_nvram (hcb_p np, Tekram_nvram *nvram)
9536 {
9537 u_char gpcntl, gpreg;
9538 u_char old_gpcntl, old_gpreg;
9539 int retv = 1;
9540
9541 /* save current state of GPCNTL and GPREG */
9542 old_gpreg = INB (nc_gpreg);
9543 old_gpcntl = INB (nc_gpcntl);
9544
9545 /* set up GPREG & GPCNTL to set GPIO0/1/2/4 in to known state, 0 in,
9546 1/2/4 out */
9547 gpreg = old_gpreg & 0xe9;
9548 OUTB (nc_gpreg, gpreg);
9549 gpcntl = (old_gpcntl & 0xe9) | 0x09;
9550 OUTB (nc_gpcntl, gpcntl);
9551
9552 /* input all of NVRAM, 64 words */
9553 retv = T93C46_Read_Data(np, (u_short *) nvram,
9554 sizeof(*nvram) / sizeof(short), &gpreg);
9555
9556 /* return GPIO0/1/2/4 to original states after having accessed NVRAM */
9557 OUTB (nc_gpcntl, old_gpcntl);
9558 OUTB (nc_gpreg, old_gpreg);
9559
9560 return retv;
9561 }
9562
9563 /*
9564 * Try reading Tekram NVRAM.
9565 * Return 0 if OK.
9566 */
static int sym_read_Tekram_nvram (hcb_p np, Tekram_nvram *nvram)
{
	u_char *data = (u_char *) nvram;
	int len = sizeof(*nvram);
	u_short csum;
	int x;

	/*
	 * Select the EEPROM type from the chip model: 885/895/896
	 * boards use a 24c16; the 875 is tried as a 24c16 first and
	 * falls back to a 93c46; everything else uses a 93c46.
	 */
	switch (np->device_id) {
	case PCI_ID_SYM53C885:
	case PCI_ID_SYM53C895:
	case PCI_ID_SYM53C896:
		x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
					  data, len);
		break;
	case PCI_ID_SYM53C875:
		x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
					  data, len);
		if (!x)
			break;
		/* FALLTHROUGH: try the 93c46 when the 24c16 read failed */
	default:
		x = sym_read_T93C46_nvram(np, nvram);
		break;
	}
	if (x)
		return 1;

	/* verify checksum: 16-bit little-endian words must sum to 0x1234 */
	for (x = 0, csum = 0; x < len - 1; x += 2)
		csum += data[x] + (data[x+1] << 8);
	if (csum != 0x1234)
		return 1;

	return 0;
}
9601
9602 #endif /* SYM_CONF_NVRAM_SUPPORT */
9603