1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Device driver optimized for the Symbios/LSI 53C896/53C895A/53C1010
5 * PCI-SCSI controllers.
6 *
7 * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
8 *
9 * This driver also supports the following Symbios/LSI PCI-SCSI chips:
10 * 53C810A, 53C825A, 53C860, 53C875, 53C876, 53C885, 53C895,
 *	 53C810,  53C815,  53C825 and the 53C1510D in 53C8XX mode.
12 *
13 *
14 * This driver for FreeBSD-CAM is derived from the Linux sym53c8xx driver.
15 * Copyright (C) 1998-1999 Gerard Roudier
16 *
17 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
18 * a port of the FreeBSD ncr driver to Linux-1.2.13.
19 *
20 * The original ncr driver has been written for 386bsd and FreeBSD by
21 * Wolfgang Stanglmeier <wolf@cologne.de>
22 * Stefan Esser <se@mi.Uni-Koeln.de>
23 * Copyright (C) 1994 Wolfgang Stanglmeier
24 *
25 * The initialisation code, and part of the code that addresses
26 * FreeBSD-CAM services is based on the aic7xxx driver for FreeBSD-CAM
27 * written by Justin T. Gibbs.
28 *
29 * Other major contributions:
30 *
31 * NVRAM detection and reading.
32 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
33 *
34 *-----------------------------------------------------------------------------
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. The name of the author may not be used to endorse or promote products
45 * derived from this software without specific prior written permission.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
48 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
51 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57 * SUCH DAMAGE.
58 */
59
60 #include <sys/cdefs.h>
61
62 /* #define SYM_DEBUG_GENERIC_SUPPORT */
63
64 #include <sys/param.h>
65
66 /*
67 * Driver configuration options.
68 */
69 #include "opt_sym.h"
70 #include <dev/sym/sym_conf.h>
71
72 #include <sys/systm.h>
73 #include <sys/malloc.h>
74 #include <sys/endian.h>
75 #include <sys/kernel.h>
76 #include <sys/lock.h>
77 #include <sys/mutex.h>
78 #include <sys/module.h>
79 #include <sys/bus.h>
80
81 #include <sys/proc.h>
82
83 #include <dev/pci/pcireg.h>
84 #include <dev/pci/pcivar.h>
85
86 #include <machine/bus.h>
87 #include <machine/resource.h>
88 #include <machine/atomic.h>
89
90 #include <sys/rman.h>
91
92 #include <cam/cam.h>
93 #include <cam/cam_ccb.h>
94 #include <cam/cam_sim.h>
95 #include <cam/cam_xpt_sim.h>
96 #include <cam/cam_debug.h>
97
98 #include <cam/scsi/scsi_all.h>
99 #include <cam/scsi/scsi_message.h>
100
/* Short and quite clear integer types */
/* Signed and unsigned fixed-width aliases used throughout the driver. */
typedef int8_t    s8;
typedef int16_t   s16;
typedef int32_t   s32;
typedef u_int8_t  u8;
typedef u_int16_t u16;
typedef u_int32_t u32;
108
109 /*
110 * Driver definitions.
111 */
112 #include <dev/sym/sym_defs.h>
113 #include <dev/sym/sym_fw.h>
114
115 /*
116 * Architectures may implement weak ordering that requires memory barriers
117 * to be used for LOADS and STORES to become globally visible (and also IO
118 * barriers when they make sense).
119 */
120 #ifdef __powerpc__
121 #define MEMORY_READ_BARRIER() __asm__ volatile("eieio; sync" : : : "memory")
122 #define MEMORY_WRITE_BARRIER() MEMORY_READ_BARRIER()
123 #else
124 #define MEMORY_READ_BARRIER() rmb()
125 #define MEMORY_WRITE_BARRIER() wmb()
126 #endif
127
128 /*
129 * A la VMS/CAM-3 queue management.
130 */
/*
 * A la VMS/CAM-3 queue management.
 *
 * A queue head is a doubly linked circular list; an empty queue
 * links back to itself in both directions.
 */
typedef struct sym_quehead {
	struct sym_quehead *flink;	/* Forward  pointer */
	struct sym_quehead *blink;	/* Backward pointer */
} SYM_QUEHEAD;

/* Make `ptr' an empty queue. */
#define sym_que_init(ptr) do { \
	(ptr)->flink = (ptr); (ptr)->blink = (ptr); \
} while (0)

/* Link element `new' between `blink' and `flink'. */
static __inline void __sym_que_add(struct sym_quehead * new,
	struct sym_quehead * blink,
	struct sym_quehead * flink)
{
	flink->blink	= new;
	new->flink	= flink;
	new->blink	= blink;
	blink->flink	= new;
}

/* Unlink whatever lies between `blink' and `flink'. */
static __inline void __sym_que_del(struct sym_quehead * blink,
	struct sym_quehead * flink)
{
	flink->blink = blink;
	blink->flink = flink;
}

/* An empty queue is one whose forward link points back to the head. */
static __inline int sym_que_empty(struct sym_quehead *head)
{
	return head->flink == head;
}

/*
 * Move all elements of `list' to the front of `head'.
 * `list' is left dangling; the caller is expected to re-init it.
 */
static __inline void sym_que_splice(struct sym_quehead *list,
	struct sym_quehead *head)
{
	struct sym_quehead *first = list->flink;

	if (first != list) {
		struct sym_quehead *last = list->blink;
		struct sym_quehead *at   = head->flink;

		first->blink = head;
		head->flink  = first;

		last->flink = at;
		at->blink   = last;
	}
}

/*
 * Return a pointer to the structure of type `type' that embeds the
 * queue element `ptr' as its member `member'.
 * Use offsetof() (already used elsewhere in this file) rather than
 * the traditional null-pointer-dereference hack, which is undefined
 * behavior in C.
 */
#define sym_que_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define sym_insque(new, pos)		__sym_que_add(new, pos, (pos)->flink)

#define sym_remque(el)			__sym_que_del((el)->blink, (el)->flink)

#define sym_insque_head(new, head)	__sym_que_add(new, head, (head)->flink)

/* Remove and return the first element of `head', or NULL if empty. */
static __inline struct sym_quehead *sym_remque_head(struct sym_quehead *head)
{
	struct sym_quehead *elem = head->flink;

	if (elem != head)
		__sym_que_del(head, elem->flink);
	else
		elem = NULL;
	return elem;
}

#define sym_insque_tail(new, head)	__sym_que_add(new, (head)->blink, head)

/*
 * This one may be useful.
 */
#define FOR_EACH_QUEUED_ELEMENT(head, qp) \
	for (qp = (head)->flink; qp != (head); qp = qp->flink)
206 /*
207 * FreeBSD does not offer our kind of queue in the CAM CCB.
208 * So, we have to cast.
209 */
210 #define sym_qptr(p) ((struct sym_quehead *) (p))
211
212 /*
213 * Simple bitmap operations.
214 */
215 #define sym_set_bit(p, n) (((u32 *)(p))[(n)>>5] |= (1<<((n)&0x1f)))
216 #define sym_clr_bit(p, n) (((u32 *)(p))[(n)>>5] &= ~(1<<((n)&0x1f)))
217 #define sym_is_bit(p, n) (((u32 *)(p))[(n)>>5] & (1<<((n)&0x1f)))
218
219 /*
220 * Number of tasks per device we want to handle.
221 */
222 #if SYM_CONF_MAX_TAG_ORDER > 8
223 #error "more than 256 tags per logical unit not allowed."
224 #endif
225 #define SYM_CONF_MAX_TASK (1<<SYM_CONF_MAX_TAG_ORDER)
226
227 /*
228 * Donnot use more tasks that we can handle.
229 */
230 #ifndef SYM_CONF_MAX_TAG
231 #define SYM_CONF_MAX_TAG SYM_CONF_MAX_TASK
232 #endif
233 #if SYM_CONF_MAX_TAG > SYM_CONF_MAX_TASK
234 #undef SYM_CONF_MAX_TAG
235 #define SYM_CONF_MAX_TAG SYM_CONF_MAX_TASK
236 #endif
237
238 /*
239 * This one means 'NO TAG for this job'
240 */
241 #define NO_TAG (256)
242
243 /*
244 * Number of SCSI targets.
245 */
246 #if SYM_CONF_MAX_TARGET > 16
247 #error "more than 16 targets not allowed."
248 #endif
249
250 /*
251 * Number of logical units per target.
252 */
253 #if SYM_CONF_MAX_LUN > 64
254 #error "more than 64 logical units per target not allowed."
255 #endif
256
257 /*
258 * Asynchronous pre-scaler (ns). Shall be 40 for
259 * the SCSI timings to be compliant.
260 */
261 #define SYM_CONF_MIN_ASYNC (40)
262
263 /*
264 * Number of entries in the START and DONE queues.
265 *
266 * We limit to 1 PAGE in order to succeed allocation of
267 * these queues. Each entry is 8 bytes long (2 DWORDS).
268 */
269 #ifdef SYM_CONF_MAX_START
270 #define SYM_CONF_MAX_QUEUE (SYM_CONF_MAX_START+2)
271 #else
272 #define SYM_CONF_MAX_QUEUE (7*SYM_CONF_MAX_TASK+2)
273 #define SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
274 #endif
275
276 #if SYM_CONF_MAX_QUEUE > PAGE_SIZE/8
277 #undef SYM_CONF_MAX_QUEUE
278 #define SYM_CONF_MAX_QUEUE PAGE_SIZE/8
279 #undef SYM_CONF_MAX_START
280 #define SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
281 #endif
282
283 /*
284 * For this one, we want a short name :-)
285 */
286 #define MAX_QUEUE SYM_CONF_MAX_QUEUE
287
288 /*
289 * Active debugging tags and verbosity.
290 */
291 #define DEBUG_ALLOC (0x0001)
292 #define DEBUG_PHASE (0x0002)
293 #define DEBUG_POLL (0x0004)
294 #define DEBUG_QUEUE (0x0008)
295 #define DEBUG_RESULT (0x0010)
296 #define DEBUG_SCATTER (0x0020)
297 #define DEBUG_SCRIPT (0x0040)
298 #define DEBUG_TINY (0x0080)
299 #define DEBUG_TIMING (0x0100)
300 #define DEBUG_NEGO (0x0200)
301 #define DEBUG_TAGS (0x0400)
302 #define DEBUG_POINTER (0x0800)
303
304 #if 0
305 static int sym_debug = 0;
306 #define DEBUG_FLAGS sym_debug
307 #else
308 /* #define DEBUG_FLAGS (0x0631) */
309 #define DEBUG_FLAGS (0x0000)
310
311 #endif
312 #define sym_verbose (np->verbose)
313
314 /*
315 * Insert a delay in micro-seconds and milli-seconds.
316 */
UDELAY(int us)317 static void UDELAY(int us) { DELAY(us); }
MDELAY(int ms)318 static void MDELAY(int ms) { while (ms--) UDELAY(1000); }
319
320 /*
321 * Simple power of two buddy-like allocator.
322 *
323 * This simple code is not intended to be fast, but to
324 * provide power of 2 aligned memory allocations.
325 * Since the SCRIPTS processor only supplies 8 bit arithmetic,
326 * this allocator allows simple and fast address calculations
327 * from the SCRIPTS code. In addition, cache line alignment
328 * is guaranteed for power of 2 cache line size.
329 *
330 * This allocator has been developed for the Linux sym53c8xx
331 * driver, since this O/S does not provide naturally aligned
332 * allocations.
333 * It has the advantage of allowing the driver to use private
334 * pages of memory that will be useful if we ever need to deal
335 * with IO MMUs for PCI.
336 */
337 #define MEMO_SHIFT 4 /* 16 bytes minimum memory chunk */
338 #define MEMO_PAGE_ORDER 0 /* 1 PAGE maximum */
339 #if 0
340 #define MEMO_FREE_UNUSED /* Free unused pages immediately */
341 #endif
342 #define MEMO_WARN 1
343 #define MEMO_CLUSTER_SHIFT (PAGE_SHIFT+MEMO_PAGE_ORDER)
344 #define MEMO_CLUSTER_SIZE (1UL << MEMO_CLUSTER_SHIFT)
345 #define MEMO_CLUSTER_MASK (MEMO_CLUSTER_SIZE-1)
346
347 #define get_pages() malloc(MEMO_CLUSTER_SIZE, M_DEVBUF, M_NOWAIT)
348 #define free_pages(p) free((p), M_DEVBUF)
349
typedef u_long m_addr_t;	/* Enough bits to bit-hack addresses */

typedef struct m_link {		/* Link between free memory chunks */
	struct m_link *next;
} m_link_s;

typedef struct m_vtob {		/* Virtual to Bus address translation */
	struct m_vtob	*next;
	bus_dmamap_t	dmamap;	/* Map for this chunk */
	m_addr_t	vaddr;	/* Virtual address */
	m_addr_t	baddr;	/* Bus physical address */
} m_vtob_s;
/* Hash this stuff a bit to speed up translations */
#define VTOB_HASH_SHIFT		5
#define VTOB_HASH_SIZE		(1UL << VTOB_HASH_SHIFT)
#define VTOB_HASH_MASK		(VTOB_HASH_SIZE-1)
#define VTOB_HASH_CODE(m)	\
	((((m_addr_t) (m)) >> MEMO_CLUSTER_SHIFT) & VTOB_HASH_MASK)

typedef struct m_pool {		/* Memory pool of a given kind */
	bus_dma_tag_t	dev_dmat;	/* Identifies the pool */
	bus_dma_tag_t	dmat;		/* Tag for our fixed allocations */
	m_addr_t (*getp)(struct m_pool *);	/* Obtain one more cluster */
#ifdef	MEMO_FREE_UNUSED
	void (*freep)(struct m_pool *, m_addr_t); /* Release one cluster */
#endif
#define M_GETP()		mp->getp(mp)
#define M_FREEP(p)		mp->freep(mp, p)
	int nump;		/* Number of clusters currently held */
	m_vtob_s *(vtob[VTOB_HASH_SIZE]);	/* Virtual->bus hash table */
	struct m_pool *next;	/* Next pool in the global chain */
	struct m_link h[MEMO_CLUSTER_SHIFT - MEMO_SHIFT + 1]; /* Free lists */
} m_pool_s;
383
/*
 * Allocate a chunk of `size' bytes (rounded up to a power of 2,
 * minimum 16 bytes) from pool `mp'.  Walk the free lists from the
 * smallest suitable order upward; when only a larger chunk is
 * available, split it back down, threading the unused halves onto
 * the lower-order free lists.
 * Returns NULL if `size' exceeds a cluster or memory is exhausted.
 */
static void *___sym_malloc(m_pool_s *mp, int size)
{
	int i = 0;
	int s = (1 << MEMO_SHIFT);	/* Chunk size for order `i' */
	int j;
	m_addr_t a;
	m_link_s *h = mp->h;		/* Free lists, one per order */

	if (size > MEMO_CLUSTER_SIZE)
		return NULL;

	/* Find the smallest order `i' whose chunk size fits `size'. */
	while (size > s) {
		s <<= 1;
		++i;
	}

	/*
	 * Find the first order >= i with a free chunk; if we reach the
	 * top order empty, pull a fresh cluster from the pool.
	 */
	j = i;
	while (!h[j].next) {
		if (s == MEMO_CLUSTER_SIZE) {
			h[j].next = (m_link_s *) M_GETP();
			if (h[j].next)
				h[j].next->next = NULL;
			break;
		}
		++j;
		s <<= 1;
	}
	a = (m_addr_t) h[j].next;
	if (a) {
		h[j].next = h[j].next->next;
		/*
		 * Split the chunk down to order `i', putting the upper
		 * (buddy) half on the free list at each step.
		 */
		while (j > i) {
			j -= 1;
			s >>= 1;
			h[j].next = (m_link_s *) (a+s);
			h[j].next->next = NULL;
		}
	}
#ifdef DEBUG
	printf("___sym_malloc(%d) = %p\n", size, (void *) a);
#endif
	return (void *) a;
}
426
/*
 * Return the chunk of `size' bytes at `ptr' to pool `mp'.
 * Try to coalesce the chunk with its buddy (address ^ size); while
 * the buddy is also free, merge the pair and retry one order higher,
 * optionally handing whole clusters back to the page provider.
 */
static void ___sym_mfree(m_pool_s *mp, void *ptr, int size)
{
	int i = 0;
	int s = (1 << MEMO_SHIFT);	/* Chunk size for order `i' */
	m_link_s *q;
	m_addr_t a, b;
	m_link_s *h = mp->h;		/* Free lists, one per order */

#ifdef DEBUG
	printf("___sym_mfree(%p, %d)\n", ptr, size);
#endif

	if (size > MEMO_CLUSTER_SIZE)
		return;

	/* Find the order `i' that matches `size'. */
	while (size > s) {
		s <<= 1;
		++i;
	}

	a = (m_addr_t) ptr;

	while (1) {
#ifdef MEMO_FREE_UNUSED
		/* A whole cluster coalesced: give it back immediately. */
		if (s == MEMO_CLUSTER_SIZE) {
			M_FREEP(a);
			break;
		}
#endif
		b = a ^ s;		/* Address of the buddy chunk */
		q = &h[i];
		/* Look for the buddy on this order's free list. */
		while (q->next && q->next != (m_link_s *) b) {
			q = q->next;
		}
		if (!q->next) {
			/* Buddy is busy: just free the chunk here. */
			((m_link_s *) a)->next = h[i].next;
			h[i].next = (m_link_s *) a;
			break;
		}
		/* Buddy is free: unlink it, merge, retry one order up. */
		q->next = q->next->next;
		a = a & b;
		s <<= 1;
		++i;
	}
}
472
/*
 * Allocate a zeroed chunk of `size' bytes from pool `mp'.
 * `name' is used for debug/warning messages only; when `uflags'
 * contains MEMO_WARN, a failed allocation is reported on the console.
 * Returns NULL on failure.
 */
static void *__sym_calloc2(m_pool_s *mp, int size, char *name, int uflags)
{
	void *p = ___sym_malloc(mp, size);

	if (DEBUG_FLAGS & DEBUG_ALLOC)
		printf ("new %-10s[%4d] @%p.\n", name, size, p);

	if (!p) {
		if (uflags & MEMO_WARN)
			printf ("__sym_calloc2: failed to allocate %s[%d]\n",
			    name, size);
		return NULL;
	}
	bzero(p, size);
	return p;
}
489
/* Common case: zeroed allocation with warnings enabled. */
#define __sym_calloc(mp, s, n)	__sym_calloc2(mp, s, n, MEMO_WARN)

/*
 * Release a chunk previously obtained with __sym_calloc()/__sym_calloc2().
 * `name' is only used for the optional debug trace.
 */
static void __sym_mfree(m_pool_s *mp, void *ptr, int size, char *name)
{
	if (DEBUG_FLAGS & DEBUG_ALLOC)
		printf ("freeing %-10s[%4d] @%p.\n", name, size, ptr);

	___sym_mfree(mp, ptr, size);

}
500
501 /*
502 * Default memory pool we donnot need to involve in DMA.
503 */
504 /*
505 * With the `bus dma abstraction', we use a separate pool for
506 * memory we donnot need to involve in DMA.
507 */
___mp0_getp(m_pool_s * mp)508 static m_addr_t ___mp0_getp(m_pool_s *mp)
509 {
510 m_addr_t m = (m_addr_t) get_pages();
511 if (m)
512 ++mp->nump;
513 return m;
514 }
515
#ifdef	MEMO_FREE_UNUSED
/* Give one cluster back to the kernel and drop the pool's page count. */
static void ___mp0_freep(m_pool_s *mp, m_addr_t m)
{
	free_pages(m);
	mp->nump--;
}
#endif
523
524 #ifdef MEMO_FREE_UNUSED
525 static m_pool_s mp0 = {0, 0, ___mp0_getp, ___mp0_freep};
526 #else
527 static m_pool_s mp0 = {0, 0, ___mp0_getp};
528 #endif
529
530 /*
531 * Actual memory allocation routine for non-DMAed memory.
532 */
sym_calloc(int size,char * name)533 static void *sym_calloc(int size, char *name)
534 {
535 void *m;
536 /* Lock */
537 m = __sym_calloc(&mp0, size, name);
538 /* Unlock */
539 return m;
540 }
541
542 /*
543 * Actual memory allocation routine for non-DMAed memory.
544 */
sym_mfree(void * ptr,int size,char * name)545 static void sym_mfree(void *ptr, int size, char *name)
546 {
547 /* Lock */
548 __sym_mfree(&mp0, ptr, size, name);
549 /* Unlock */
550 }
551
552 /*
553 * DMAable pools.
554 */
555 /*
556 * With `bus dma abstraction', we use a separate pool per parent
557 * BUS handle. A reverse table (hashed) is maintained for virtual
558 * to BUS address translation.
559 */
/*
 * bus_dmamap_load() callback: record the single segment's bus address
 * into *arg, or 0 when the load failed.
 */
static void getbaddrcb(void *arg, bus_dma_segment_t *segs, int nseg __diagused,
	int error)
{
	bus_addr_t *baddr = arg;

	KASSERT(nseg == 1, ("%s: too many DMA segments (%d)", __func__, nseg));

	*baddr = error ? 0 : segs->ds_addr;
}
573
/*
 * Page provider for a DMA pool: allocate and bus-map one cluster.
 * The virtual/bus address pair is recorded in the pool's hash table
 * so that __vtobus() can translate addresses later.
 * Returns the cluster's virtual address, or 0 on failure.
 */
static m_addr_t ___dma_getp(m_pool_s *mp)
{
	m_vtob_s *vbp;
	void *vaddr = NULL;
	bus_addr_t baddr = 0;

	vbp = __sym_calloc(&mp0, sizeof(*vbp), "VTOB");
	if (!vbp)
		goto out_err;

	if (bus_dmamem_alloc(mp->dmat, &vaddr,
			BUS_DMA_COHERENT | BUS_DMA_WAITOK, &vbp->dmamap))
		goto out_err;
	/* getbaddrcb() stores the bus address in `baddr' (0 on error). */
	bus_dmamap_load(mp->dmat, vbp->dmamap, vaddr,
			MEMO_CLUSTER_SIZE, getbaddrcb, &baddr, BUS_DMA_NOWAIT);
	if (baddr) {
		int hc = VTOB_HASH_CODE(vaddr);
		vbp->vaddr = (m_addr_t) vaddr;
		vbp->baddr = (m_addr_t) baddr;
		vbp->next = mp->vtob[hc];
		mp->vtob[hc] = vbp;
		++mp->nump;
		return (m_addr_t) vaddr;
	}
out_err:
	/* `vaddr' can only be non-NULL when `vbp' was allocated. */
	if (vaddr)
		bus_dmamem_free(mp->dmat, vaddr, vbp->dmamap);
	if (vbp)
		__sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB");
	return 0;
}
605
#ifdef MEMO_FREE_UNUSED
/*
 * Unmap and release the DMA cluster whose virtual address is `m',
 * removing its translation entry from the pool's hash table.
 */
static void ___dma_freep(m_pool_s *mp, m_addr_t m)
{
	m_vtob_s **vbpp, *vbp;
	int hc = VTOB_HASH_CODE(m);

	vbpp = &mp->vtob[hc];
	while (*vbpp && (*vbpp)->vaddr != m)
		vbpp = &(*vbpp)->next;
	if (*vbpp) {
		vbp = *vbpp;
		*vbpp = (*vbpp)->next;
		bus_dmamap_unload(mp->dmat, vbp->dmamap);
		bus_dmamem_free(mp->dmat, (void *) vbp->vaddr, vbp->dmamap);
		__sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB");
		--mp->nump;
	}
}
#endif
625
___get_dma_pool(bus_dma_tag_t dev_dmat)626 static __inline m_pool_s *___get_dma_pool(bus_dma_tag_t dev_dmat)
627 {
628 m_pool_s *mp;
629 for (mp = mp0.next; mp && mp->dev_dmat != dev_dmat; mp = mp->next);
630 return mp;
631 }
632
___cre_dma_pool(bus_dma_tag_t dev_dmat)633 static m_pool_s *___cre_dma_pool(bus_dma_tag_t dev_dmat)
634 {
635 m_pool_s *mp = NULL;
636
637 mp = __sym_calloc(&mp0, sizeof(*mp), "MPOOL");
638 if (mp) {
639 mp->dev_dmat = dev_dmat;
640 if (!bus_dma_tag_create(dev_dmat, 1, MEMO_CLUSTER_SIZE,
641 BUS_SPACE_MAXADDR_32BIT,
642 BUS_SPACE_MAXADDR,
643 NULL, NULL, MEMO_CLUSTER_SIZE, 1,
644 MEMO_CLUSTER_SIZE, 0,
645 NULL, NULL, &mp->dmat)) {
646 mp->getp = ___dma_getp;
647 #ifdef MEMO_FREE_UNUSED
648 mp->freep = ___dma_freep;
649 #endif
650 mp->next = mp0.next;
651 mp0.next = mp;
652 return mp;
653 }
654 }
655 if (mp)
656 __sym_mfree(&mp0, mp, sizeof(*mp), "MPOOL");
657 return NULL;
658 }
659
#ifdef	MEMO_FREE_UNUSED
/*
 * Unlink pool `p' from the global chain, destroy its DMA tag and
 * release the pool descriptor itself.
 */
static void ___del_dma_pool(m_pool_s *p)
{
	struct m_pool **pp;

	for (pp = &mp0.next; *pp != NULL && *pp != p; pp = &(*pp)->next)
		;
	if (*pp == NULL)
		return;
	*pp = (*pp)->next;
	bus_dma_tag_destroy(p->dmat);
	__sym_mfree(&mp0, p, sizeof(*p), "MPOOL");
}
#endif
674
/*
 * Allocate zeroed DMA-safe memory from the pool attached to
 * `dev_dmat', creating the pool on first use.  Returns NULL on
 * failure.  The Lock/Unlock placeholders mark where pool locking
 * would go.
 */
static void *__sym_calloc_dma(bus_dma_tag_t dev_dmat, int size, char *name)
{
	struct m_pool *mp;
	void *m = NULL;

	/* Lock */
	mp = ___get_dma_pool(dev_dmat);
	if (mp == NULL)
		mp = ___cre_dma_pool(dev_dmat);
	if (mp != NULL)
		m = __sym_calloc(mp, size, name);
#ifdef	MEMO_FREE_UNUSED
	/* Drop the pool again if it ended up holding no clusters. */
	if (mp != NULL && mp->nump == 0)
		___del_dma_pool(mp);
#endif
	/* Unlock */

	return m;
}
694
695 static void
__sym_mfree_dma(bus_dma_tag_t dev_dmat,void * m,int size,char * name)696 __sym_mfree_dma(bus_dma_tag_t dev_dmat, void *m, int size, char *name)
697 {
698 struct m_pool *mp;
699
700 /* Lock */
701 mp = ___get_dma_pool(dev_dmat);
702 if (mp)
703 __sym_mfree(mp, m, size, name);
704 #ifdef MEMO_FREE_UNUSED
705 if (mp && !mp->nump)
706 ___del_dma_pool(mp);
707 #endif
708 /* Unlock */
709 }
710
__vtobus(bus_dma_tag_t dev_dmat,void * m)711 static m_addr_t __vtobus(bus_dma_tag_t dev_dmat, void *m)
712 {
713 m_pool_s *mp;
714 int hc = VTOB_HASH_CODE(m);
715 m_vtob_s *vp = NULL;
716 m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK;
717
718 /* Lock */
719 mp = ___get_dma_pool(dev_dmat);
720 if (mp) {
721 vp = mp->vtob[hc];
722 while (vp && (m_addr_t) vp->vaddr != a)
723 vp = vp->next;
724 }
725 /* Unlock */
726 if (!vp)
727 panic("sym: VTOBUS FAILED!\n");
728 return vp ? vp->baddr + (((m_addr_t) m) - a) : 0;
729 }
730
731 /*
732 * Verbs for DMAable memory handling.
733 * The _uvptv_ macro avoids a nasty warning about pointer to volatile
734 * being discarded.
735 */
736 #define _uvptv_(p) ((void *)((vm_offset_t)(p)))
737 #define _sym_calloc_dma(np, s, n) __sym_calloc_dma(np->bus_dmat, s, n)
738 #define _sym_mfree_dma(np, p, s, n) \
739 __sym_mfree_dma(np->bus_dmat, _uvptv_(p), s, n)
740 #define sym_calloc_dma(s, n) _sym_calloc_dma(np, s, n)
741 #define sym_mfree_dma(p, s, n) _sym_mfree_dma(np, p, s, n)
742 #define _vtobus(np, p) __vtobus(np->bus_dmat, _uvptv_(p))
743 #define vtobus(p) _vtobus(np, p)
744
745 /*
746 * Print a buffer in hexadecimal format.
747 */
sym_printb_hex(u_char * p,int n)748 static void sym_printb_hex (u_char *p, int n)
749 {
750 while (n-- > 0)
751 printf (" %x", *p++);
752 }
753
754 /*
755 * Same with a label at beginning and .\n at end.
756 */
sym_printl_hex(char * label,u_char * p,int n)757 static void sym_printl_hex (char *label, u_char *p, int n)
758 {
759 printf ("%s", label);
760 sym_printb_hex (p, n);
761 printf (".\n");
762 }
763
764 /*
765 * Return a string for SCSI BUS mode.
766 */
sym_scsi_bus_mode(int mode)767 static const char *sym_scsi_bus_mode(int mode)
768 {
769 switch(mode) {
770 case SMODE_HVD: return "HVD";
771 case SMODE_SE: return "SE";
772 case SMODE_LVD: return "LVD";
773 }
774 return "??";
775 }
776
777 /*
778 * Some poor and bogus sync table that refers to Tekram NVRAM layout.
779 */
780 #ifdef SYM_CONF_NVRAM_SUPPORT
781 static const u_char Tekram_sync[16] =
782 {25,31,37,43, 50,62,75,125, 12,15,18,21, 6,7,9,10};
783 #endif
784
785 /*
786 * Union of supported NVRAM formats.
787 */
788 struct sym_nvram {
789 int type;
790 #define SYM_SYMBIOS_NVRAM (1)
791 #define SYM_TEKRAM_NVRAM (2)
792 #ifdef SYM_CONF_NVRAM_SUPPORT
793 union {
794 Symbios_nvram Symbios;
795 Tekram_nvram Tekram;
796 } data;
797 #endif
798 };
799
800 /*
801 * This one is hopefully useless, but actually useful. :-)
802 */
803 #ifndef assert
804 #define assert(expression) { \
805 if (!(expression)) { \
806 (void)panic( \
807 "assertion \"%s\" failed: file \"%s\", line %d\n", \
808 #expression, \
809 __FILE__, __LINE__); \
810 } \
811 }
812 #endif
813
814 /*
815 * Some provision for a possible big endian mode supported by
816 * Symbios chips (never seen, by the way).
817 * For now, this stuff does not deserve any comments. :)
818 */
819 #define sym_offb(o) (o)
820 #define sym_offw(o) (o)
821
822 /*
823 * Some provision for support for BIG ENDIAN CPU.
824 */
825 #define cpu_to_scr(dw) htole32(dw)
826 #define scr_to_cpu(dw) le32toh(dw)
827
828 /*
829 * Access to the chip IO registers and on-chip RAM.
830 * We use the `bus space' interface under FreeBSD-4 and
831 * later kernel versions.
832 */
833 #if defined(SYM_CONF_IOMAPPED)
834
835 #define INB_OFF(o) bus_read_1(np->io_res, (o))
836 #define INW_OFF(o) bus_read_2(np->io_res, (o))
837 #define INL_OFF(o) bus_read_4(np->io_res, (o))
838
839 #define OUTB_OFF(o, v) bus_write_1(np->io_res, (o), (v))
840 #define OUTW_OFF(o, v) bus_write_2(np->io_res, (o), (v))
841 #define OUTL_OFF(o, v) bus_write_4(np->io_res, (o), (v))
842
843 #else /* Memory mapped IO */
844
845 #define INB_OFF(o) bus_read_1(np->mmio_res, (o))
846 #define INW_OFF(o) bus_read_2(np->mmio_res, (o))
847 #define INL_OFF(o) bus_read_4(np->mmio_res, (o))
848
849 #define OUTB_OFF(o, v) bus_write_1(np->mmio_res, (o), (v))
850 #define OUTW_OFF(o, v) bus_write_2(np->mmio_res, (o), (v))
851 #define OUTL_OFF(o, v) bus_write_4(np->mmio_res, (o), (v))
852
853 #endif /* SYM_CONF_IOMAPPED */
854
855 #define OUTRAM_OFF(o, a, l) \
856 bus_write_region_1(np->ram_res, (o), (a), (l))
857
858 /*
859 * Common definitions for both bus space and legacy IO methods.
860 */
861 #define INB(r) INB_OFF(offsetof(struct sym_reg,r))
862 #define INW(r) INW_OFF(offsetof(struct sym_reg,r))
863 #define INL(r) INL_OFF(offsetof(struct sym_reg,r))
864
865 #define OUTB(r, v) OUTB_OFF(offsetof(struct sym_reg,r), (v))
866 #define OUTW(r, v) OUTW_OFF(offsetof(struct sym_reg,r), (v))
867 #define OUTL(r, v) OUTL_OFF(offsetof(struct sym_reg,r), (v))
868
869 #define OUTONB(r, m) OUTB(r, INB(r) | (m))
870 #define OUTOFFB(r, m) OUTB(r, INB(r) & ~(m))
871 #define OUTONW(r, m) OUTW(r, INW(r) | (m))
872 #define OUTOFFW(r, m) OUTW(r, INW(r) & ~(m))
873 #define OUTONL(r, m) OUTL(r, INL(r) | (m))
874 #define OUTOFFL(r, m) OUTL(r, INL(r) & ~(m))
875
876 /*
877 * We normally want the chip to have a consistent view
878 * of driver internal data structures when we restart it.
879 * Thus these macros.
880 */
881 #define OUTL_DSP(v) \
882 do { \
883 MEMORY_WRITE_BARRIER(); \
884 OUTL (nc_dsp, (v)); \
885 } while (0)
886
887 #define OUTONB_STD() \
888 do { \
889 MEMORY_WRITE_BARRIER(); \
890 OUTONB (nc_dcntl, (STD|NOCOM)); \
891 } while (0)
892
893 /*
894 * Command control block states.
895 */
896 #define HS_IDLE (0)
897 #define HS_BUSY (1)
898 #define HS_NEGOTIATE (2) /* sync/wide data transfer*/
899 #define HS_DISCONNECT (3) /* Disconnected by target */
900 #define HS_WAIT (4) /* waiting for resource */
901
902 #define HS_DONEMASK (0x80)
903 #define HS_COMPLETE (4|HS_DONEMASK)
904 #define HS_SEL_TIMEOUT (5|HS_DONEMASK) /* Selection timeout */
905 #define HS_UNEXPECTED (6|HS_DONEMASK) /* Unexpected disconnect */
906 #define HS_COMP_ERR (7|HS_DONEMASK) /* Completed with error */
907
908 /*
909 * Software Interrupt Codes
910 */
911 #define SIR_BAD_SCSI_STATUS (1)
912 #define SIR_SEL_ATN_NO_MSG_OUT (2)
913 #define SIR_MSG_RECEIVED (3)
914 #define SIR_MSG_WEIRD (4)
915 #define SIR_NEGO_FAILED (5)
916 #define SIR_NEGO_PROTO (6)
917 #define SIR_SCRIPT_STOPPED (7)
918 #define SIR_REJECT_TO_SEND (8)
919 #define SIR_SWIDE_OVERRUN (9)
920 #define SIR_SODL_UNDERRUN (10)
921 #define SIR_RESEL_NO_MSG_IN (11)
922 #define SIR_RESEL_NO_IDENTIFY (12)
923 #define SIR_RESEL_BAD_LUN (13)
924 #define SIR_TARGET_SELECTED (14)
925 #define SIR_RESEL_BAD_I_T_L (15)
926 #define SIR_RESEL_BAD_I_T_L_Q (16)
927 #define SIR_ABORT_SENT (17)
928 #define SIR_RESEL_ABORTED (18)
929 #define SIR_MSG_OUT_DONE (19)
930 #define SIR_COMPLETE_ERROR (20)
931 #define SIR_DATA_OVERRUN (21)
932 #define SIR_BAD_PHASE (22)
933 #define SIR_MAX (22)
934
935 /*
936 * Extended error bit codes.
937 * xerr_status field of struct sym_ccb.
938 */
939 #define XE_EXTRA_DATA (1) /* unexpected data phase */
940 #define XE_BAD_PHASE (1<<1) /* illegal phase (4/5) */
941 #define XE_PARITY_ERR (1<<2) /* unrecovered SCSI parity error */
942 #define XE_SODL_UNRUN (1<<3) /* ODD transfer in DATA OUT phase */
943 #define XE_SWIDE_OVRUN (1<<4) /* ODD transfer in DATA IN phase */
944
945 /*
946 * Negotiation status.
947 * nego_status field of struct sym_ccb.
948 */
949 #define NS_SYNC (1)
950 #define NS_WIDE (2)
951 #define NS_PPR (3)
952
953 /*
954 * A CCB hashed table is used to retrieve CCB address
955 * from DSA value.
956 */
957 #define CCB_HASH_SHIFT 8
958 #define CCB_HASH_SIZE (1UL << CCB_HASH_SHIFT)
959 #define CCB_HASH_MASK (CCB_HASH_SIZE-1)
960 #define CCB_HASH_CODE(dsa) (((dsa) >> 9) & CCB_HASH_MASK)
961
962 /*
963 * Device flags.
964 */
965 #define SYM_DISC_ENABLED (1)
966 #define SYM_TAGS_ENABLED (1<<1)
967 #define SYM_SCAN_BOOT_DISABLED (1<<2)
968 #define SYM_SCAN_LUNS_DISABLED (1<<3)
969
970 /*
971 * Host adapter miscellaneous flags.
972 */
973 #define SYM_AVOID_BUS_RESET (1)
974 #define SYM_SCAN_TARGETS_HILO (1<<1)
975
976 /*
977 * Device quirks.
978 * Some devices, for example the CHEETAH 2 LVD, disconnects without
979 * saving the DATA POINTER then reselects and terminates the IO.
980 * On reselection, the automatic RESTORE DATA POINTER makes the
981 * CURRENT DATA POINTER not point at the end of the IO.
982 * This behaviour just breaks our calculation of the residual.
983 * For now, we just force an AUTO SAVE on disconnection and will
984 * fix that in a further driver version.
985 */
986 #define SYM_QUIRK_AUTOSAVE 1
987
988 /*
989 * Misc.
990 */
991 #define SYM_LOCK() mtx_lock(&np->mtx)
992 #define SYM_LOCK_ASSERT(_what) mtx_assert(&np->mtx, (_what))
993 #define SYM_LOCK_DESTROY() mtx_destroy(&np->mtx)
994 #define SYM_LOCK_INIT() mtx_init(&np->mtx, "sym_lock", NULL, MTX_DEF)
995 #define SYM_LOCK_INITIALIZED() mtx_initialized(&np->mtx)
996 #define SYM_UNLOCK() mtx_unlock(&np->mtx)
997
998 #define SYM_SNOOP_TIMEOUT (10000000)
999 #define SYM_PCI_IO PCIR_BAR(0)
1000 #define SYM_PCI_MMIO PCIR_BAR(1)
1001 #define SYM_PCI_RAM PCIR_BAR(2)
1002 #define SYM_PCI_RAM64 PCIR_BAR(3)
1003
1004 /*
1005 * Back-pointer from the CAM CCB to our data structures.
1006 */
1007 #define sym_hcb_ptr spriv_ptr0
1008 /* #define sym_ccb_ptr spriv_ptr1 */
1009
1010 /*
1011 * We mostly have to deal with pointers.
1012 * Thus these typedef's.
1013 */
1014 typedef struct sym_tcb *tcb_p;
1015 typedef struct sym_lcb *lcb_p;
1016 typedef struct sym_ccb *ccb_p;
1017 typedef struct sym_hcb *hcb_p;
1018
1019 /*
1020 * Gather negotiable parameters value
1021 */
1022 struct sym_trans {
1023 u8 scsi_version;
1024 u8 spi_version;
1025 u8 period;
1026 u8 offset;
1027 u8 width;
1028 u8 options; /* PPR options */
1029 };
1030
1031 struct sym_tinfo {
1032 struct sym_trans current;
1033 struct sym_trans goal;
1034 struct sym_trans user;
1035 };
1036
1037 #define BUS_8_BIT MSG_EXT_WDTR_BUS_8_BIT
1038 #define BUS_16_BIT MSG_EXT_WDTR_BUS_16_BIT
1039
1040 /*
1041 * Global TCB HEADER.
1042 *
1043 * Due to lack of indirect addressing on earlier NCR chips,
1044 * this substructure is copied from the TCB to a global
1045 * address after selection.
1046 * For SYMBIOS chips that support LOAD/STORE this copy is
1047 * not needed and thus not performed.
1048 */
1049 struct sym_tcbh {
1050 /*
1051 * Scripts bus addresses of LUN table accessed from scripts.
1052 * LUN #0 is a special case, since multi-lun devices are rare,
1053 * and we we want to speed-up the general case and not waste
1054 * resources.
1055 */
1056 u32 luntbl_sa; /* bus address of this table */
1057 u32 lun0_sa; /* bus address of LCB #0 */
1058 /*
1059 * Actual SYNC/WIDE IO registers value for this target.
1060 * 'sval', 'wval' and 'uval' are read from SCRIPTS and
1061 * so have alignment constraints.
1062 */
1063 /*0*/ u_char uval; /* -> SCNTL4 register */
1064 /*1*/ u_char sval; /* -> SXFER io register */
1065 /*2*/ u_char filler1;
1066 /*3*/ u_char wval; /* -> SCNTL3 io register */
1067 };
1068
1069 /*
1070 * Target Control Block
1071 */
1072 struct sym_tcb {
1073 /*
1074 * TCB header.
1075 * Assumed at offset 0.
1076 */
1077 /*0*/ struct sym_tcbh head;
1078
1079 /*
1080 * LUN table used by the SCRIPTS processor.
1081 * An array of bus addresses is used on reselection.
1082 */
1083 u32 *luntbl; /* LCBs bus address table */
1084
1085 /*
1086 * LUN table used by the C code.
1087 */
1088 lcb_p lun0p; /* LCB of LUN #0 (usual case) */
1089 #if SYM_CONF_MAX_LUN > 1
1090 lcb_p *lunmp; /* Other LCBs [1..MAX_LUN] */
1091 #endif
1092
1093 /*
1094 * Bitmap that tells about LUNs that succeeded at least
1095 * 1 IO and therefore assumed to be a real device.
1096 * Avoid useless allocation of the LCB structure.
1097 */
1098 u32 lun_map[(SYM_CONF_MAX_LUN+31)/32];
1099
1100 /*
1101 * Bitmap that tells about LUNs that haven't yet an LCB
1102 * allocated (not discovered or LCB allocation failed).
1103 */
1104 u32 busy0_map[(SYM_CONF_MAX_LUN+31)/32];
1105
1106 /*
1107 * Transfer capabilities (SIP)
1108 */
1109 struct sym_tinfo tinfo;
1110
1111 /*
1112 * Keep track of the CCB used for the negotiation in order
1113 * to ensure that only 1 negotiation is queued at a time.
1114 */
1115 ccb_p nego_cp; /* CCB used for the nego */
1116
1117 /*
1118 * Set when we want to reset the device.
1119 */
1120 u_char to_reset;
1121
1122 /*
1123 * Other user settable limits and options.
1124 * These limits are read from the NVRAM if present.
1125 */
1126 u_char usrflags;
1127 u_short usrtags;
1128 };
1129
/*
 * Assert some alignments required by the chip.
 * The sval/wval fields must sit at the same offset modulo 4 as
 * the SXFER/SCNTL3 chip registers, so that the DWORD-granular
 * SCRIPTS copies deliver each byte to the intended register.
 */
CTASSERT(((offsetof(struct sym_reg, nc_sxfer) ^
	offsetof(struct sym_tcb, head.sval)) &3) == 0);
CTASSERT(((offsetof(struct sym_reg, nc_scntl3) ^
	offsetof(struct sym_tcb, head.wval)) &3) == 0);
1137
/*
 * Global LCB HEADER.
 *
 * Due to lack of indirect addressing on earlier NCR chips,
 * this substructure is copied from the LCB to a global
 * address after selection.
 * For SYMBIOS chips that support LOAD/STORE this copy is
 * not needed and thus not performed.
 */
struct sym_lcbh {
	/*
	 * SCRIPTS address jumped by SCRIPTS on reselection.
	 * For not probed logical units, this address points to
	 * SCRIPTS that deal with bad LU handling (must be at
	 * offset zero of the LCB for that reason).
	 */
/*0*/	u32	resel_sa;

	/*
	 * Task (bus address of a CCB) read from SCRIPTS that points
	 * to the unique ITL nexus allowed to be disconnected.
	 */
	u32	itl_task_sa;

	/*
	 * Task table bus address (read from SCRIPTS).
	 */
	u32	itlq_tbl_sa;
};
1167
/*
 * Logical Unit Control Block.
 * One instance per (target, lun); tracks busy CCB counts and
 * tag allocation for that logical unit.
 */
struct sym_lcb {
	/*
	 * LCB header.
	 * Assumed at offset 0 (SCRIPTS copy it by bus address).
	 */
/*0*/	struct sym_lcbh head;

	/*
	 * Task table read from SCRIPTS that contains pointers to
	 * ITLQ nexuses. The bus address read from SCRIPTS is
	 * inside the header.
	 */
	u32	*itlq_tbl;	/* Kernel virtual address	*/

	/*
	 * Busy CCBs management.
	 */
	u_short	busy_itlq;	/* Number of busy tagged CCBs	*/
	u_short	busy_itl;	/* Number of busy untagged CCBs	*/

	/*
	 * Circular tag allocation buffer.
	 */
	u_short	ia_tag;		/* Tag allocation index		*/
	u_short	if_tag;		/* Tag release index		*/
	u_char	*cb_tags;	/* Circular tags buffer		*/

	/*
	 * Set when we want to clear all tasks.
	 */
	u_char to_clear;

	/*
	 * Capabilities.
	 */
	u_char	user_flags;
	u_char	current_flags;
};
1209
/*
 * Action from SCRIPTS on a task.
 * Is part of the CCB, but is also used separately to plug
 * error handling action to perform from SCRIPTS.
 */
struct sym_actscr {
	u32	start;		/* Jumped by SCRIPTS after selection	*/
	u32	restart;	/* Jumped by SCRIPTS on reselection	*/
};
1219
/*
 * Phase mismatch context.
 *
 * It is part of the CCB and is used as parameters for the
 * DATA pointer. We need two contexts to handle correctly the
 * SAVED DATA POINTER.
 */
struct sym_pmc {
	struct	sym_tblmove sg;	/* Updated interrupted SG block	*/
	u32	ret;		/* SCRIPT return address	*/
};
1231
/*
 * LUN control block lookup.
 * We use a direct pointer for LUN #0, and a table of
 * pointers which is only allocated for devices that support
 * LUN(s) > 0.
 *
 * Returns the lcb_p for (tp, lun), or 0 when no LCB exists.
 * The 'lun' argument and the whole expansion are parenthesized
 * so the macro is safe with expression arguments and inside
 * larger expressions.  NB: 'lun' may be evaluated twice.
 */
#if SYM_CONF_MAX_LUN <= 1
#define sym_lp(tp, lun) (!(lun) ? (tp)->lun0p : 0)
#else
#define sym_lp(tp, lun) \
	(!(lun) ? (tp)->lun0p : ((tp)->lunmp ? (tp)->lunmp[(lun)] : 0))
#endif
1244
/*
 * Status bytes used by the host and the script processor.
 *
 * The last four bytes (status[4]) are copied to the
 * scratchb register (declared as scr0..scr3) just after the
 * select/reselect, and copied back just after disconnecting.
 * Inside the script the XX_REG are used.
 */

/*
 * Last four bytes (script)
 */
#define  QU_REG	scr0
#define  HS_REG	scr1
#define  HS_PRT	nc_scr1
#define  SS_REG	scr2
#define  SS_PRT	nc_scr2
#define  HF_REG	scr3
#define  HF_PRT	nc_scr3

/*
 * Last four bytes (host)
 */
#define  actualquirks  phys.head.status[0]
#define  host_status   phys.head.status[1]
#define  ssss_status   phys.head.status[2]
#define  host_flags    phys.head.status[3]

/*
 * Host flags (bits of status[3])
 */
#define HF_IN_PM0	1u
#define HF_IN_PM1	(1u<<1)
#define HF_ACT_PM	(1u<<2)
#define HF_DP_SAVED	(1u<<3)
#define HF_SENSE	(1u<<4)
#define HF_EXT_ERR	(1u<<5)
#define HF_DATA_IN	(1u<<6)
#ifdef SYM_CONF_IARB_SUPPORT
#define HF_HINT_IARB	(1u<<7)
#endif
1286
/*
 * Global CCB HEADER.
 *
 * Due to lack of indirect addressing on earlier NCR chips,
 * this substructure is copied from the ccb to a global
 * address after selection (or reselection) and copied back
 * before disconnect.
 * For SYMBIOS chips that support LOAD/STORE this copy is
 * not needed and thus not performed.
 */
struct sym_ccbh {
	/*
	 * Start and restart SCRIPTS addresses (must be at 0).
	 */
/*0*/	struct sym_actscr go;

	/*
	 * SCRIPTS jump address that deal with data pointers.
	 * 'savep' points to the position in the script responsible
	 * for the actual transfer of data.
	 * It's written on reception of a SAVE_DATA_POINTER message.
	 */
	u32	savep;		/* Jump address to saved data pointer	*/
	u32	lastp;		/* SCRIPTS address at end of data	*/
	u32	goalp;		/* Not accessed for now from SCRIPTS	*/

	/*
	 * Status fields (see the host_status/... aliases above).
	 */
	u8	status[4];
};
1318
/*
 * Data Structure Block
 *
 * During execution of a ccb by the script processor, the
 * DSA (data structure address) register points to this
 * substructure of the ccb.
 */
struct sym_dsb {
	/*
	 * CCB header.
	 * Also assumed at offset 0 of the sym_ccb structure.
	 */
/*0*/	struct sym_ccbh head;

	/*
	 * Phase mismatch contexts.
	 * We need two to handle correctly the SAVED DATA POINTER.
	 * MUST BOTH BE AT OFFSET < 256, due to using 8 bit arithmetic
	 * for address calculation from SCRIPTS.
	 */
	struct sym_pmc pm0;
	struct sym_pmc pm1;

	/*
	 * Table data for Script
	 */
	struct sym_tblsel  select;
	struct sym_tblmove smsg;
	struct sym_tblmove smsg_ext;
	struct sym_tblmove cmd;
	struct sym_tblmove sense;
	struct sym_tblmove wresid;
	struct sym_tblmove data [SYM_CONF_MAX_SG];
};
1353
/*
 * Our Command Control Block.
 * One per outstanding (or pooled) SCSI command.
 */
struct sym_ccb {
	/*
	 * This is the data structure which is pointed by the DSA
	 * register when it is executed by the script processor.
	 * It must be the first entry.
	 */
	struct sym_dsb phys;

	/*
	 * Pointer to CAM ccb and related stuff.
	 */
	struct callout ch;	/* callout handle		*/
	union ccb *cam_ccb;	/* CAM scsiio ccb		*/
	u8	cdb_buf[16];	/* Copy of CDB			*/
	u8	*sns_bbuf;	/* Bounce buffer for sense data	*/
#define SYM_SNS_BBUF_LEN sizeof(struct scsi_sense_data)
	int	data_len;	/* Total data length		*/
	int	segments;	/* Number of SG segments	*/

	/*
	 * Miscellaneous status fields.
	 */
	u_char	nego_status;	/* Negotiation status		*/
	u_char	xerr_status;	/* Extended error flags		*/
	u32	extra_bytes;	/* Extraneous bytes transferred	*/

	/*
	 * Message areas.
	 * We prepare a message to be sent after selection.
	 * We may use a second one if the command is rescheduled
	 * due to CHECK_CONDITION or COMMAND TERMINATED.
	 * Contents are IDENTIFY and SIMPLE_TAG.
	 * While negotiating sync or wide transfer,
	 * a SDTR or WDTR message is appended.
	 */
	u_char	scsi_smsg [12];
	u_char	scsi_smsg2[12];

	/*
	 * Auto request sense related fields.
	 */
	u_char	sensecmd[6];	/* Request Sense command	*/
	u_char	sv_scsi_status;	/* Saved SCSI status 		*/
	u_char	sv_xerr_status;	/* Saved extended status	*/
	int	sv_resid;	/* Saved residual		*/

	/*
	 * Map for the DMA of user data.
	 */
	void		*arg;	/* Argument for some callback	*/
	bus_dmamap_t	dmamap;	/* DMA map for user data	*/
	u_char		dmamapped;
#define SYM_DMA_NONE	0
#define SYM_DMA_READ	1
#define SYM_DMA_WRITE	2
	/*
	 * Other fields.
	 */
	u32	ccb_ba;		/* BUS address of this CCB	*/
	u_short	tag;		/* Tag for this transfer	*/
				/*  NO_TAG means no tag		*/
	u_char	target;
	u_char	lun;
	ccb_p	link_ccbh;	/* Host adapter CCB hash chain	*/
	SYM_QUEHEAD
		link_ccbq;	/* Link to free/busy CCB queue	*/
	u32	startp;		/* Initial data pointer		*/
	int	ext_sg;		/* Extreme data pointer, used	*/
	int	ext_ofs;	/*  to calculate the residual.	*/
	u_char	to_abort;	/* Want this IO to be aborted	*/
};
1428
/* Bus address of field 'lbl' inside CCB 'cp' ('cp' parenthesized for macro safety). */
#define CCB_BA(cp,lbl)	((cp)->ccb_ba + offsetof(struct sym_ccb, lbl))
1430
/*
 * Host Control Block.
 * One instance per controller ('np' throughout the driver).
 */
struct sym_hcb {
	struct mtx	mtx;

	/*
	 * Global headers.
	 * Due to poorness of addressing capabilities, earlier
	 * chips (810, 815, 825) copy part of the data structures
	 * (CCB, TCB and LCB) in fixed areas.
	 */
#ifdef	SYM_CONF_GENERIC_SUPPORT
	struct sym_ccbh	ccb_head;
	struct sym_tcbh	tcb_head;
	struct sym_lcbh	lcb_head;
#endif
	/*
	 * Idle task and invalid task actions and
	 * their bus addresses.
	 */
	struct sym_actscr idletask, notask, bad_itl, bad_itlq;
	vm_offset_t idletask_ba, notask_ba, bad_itl_ba, bad_itlq_ba;

	/*
	 * Dummy lun table to protect us against target
	 * returning bad lun number on reselection.
	 */
	u32	*badluntbl;	/* Table physical address	*/
	u32	badlun_sa;	/* SCRIPT handler BUS address	*/

	/*
	 * Bus address of this host control block.
	 */
	u32	hcb_ba;

	/*
	 * Bit 32-63 of the on-chip RAM bus address in LE format.
	 * The START_RAM64 script loads the MMRS and MMWS from this
	 * field.
	 */
	u32	scr_ram_seg;

	/*
	 * Chip and controller identification.
	 */
	device_t device;

	/*
	 * Initial value of some IO register bits.
	 * These values are assumed to have been set by BIOS, and may
	 * be used to probe adapter implementation differences.
	 */
	u_char	sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest3, sv_ctest4,
		sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4, sv_scntl4,
		sv_stest1;

	/*
	 * Actual initial value of IO register bits used by the
	 * driver. They are loaded at initialisation according to
	 * features that are to be enabled/disabled.
	 */
	u_char	rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest3, rv_ctest4,
		rv_ctest5, rv_stest2, rv_ccntl0, rv_ccntl1, rv_scntl4;

	/*
	 * Target data.
	 */
#ifdef __amd64__
	struct sym_tcb	*target;
#else
	struct sym_tcb	target[SYM_CONF_MAX_TARGET];
#endif

	/*
	 * Target control block bus address array used by the SCRIPT
	 * on reselection.
	 */
	u32		*targtbl;
	u32		targtbl_ba;

	/*
	 * CAM SIM information for this instance.
	 */
	struct		cam_sim  *sim;
	struct		cam_path *path;

	/*
	 * Allocated hardware resources.
	 */
	struct resource	*irq_res;
	struct resource	*io_res;
	struct resource	*mmio_res;
	struct resource	*ram_res;
	int		ram_id;
	void *intr;

	/*
	 * Bus stuff.
	 *
	 * My understanding of PCI is that all agents must share the
	 * same addressing range and model.
	 * But some hardware architecture guys provide complex and
	 * brain-deaded stuff that makes shit.
	 * This driver only supports PCI compliant implementations and
	 * deals with part of the BUS stuff complexity only to fit O/S
	 * requirements.
	 */

	/*
	 * DMA stuff.
	 */
	bus_dma_tag_t	bus_dmat;	/* DMA tag from parent BUS	*/
	bus_dma_tag_t	data_dmat;	/* DMA tag for user data	*/
	/*
	 * BUS addresses of the chip
	 */
	vm_offset_t	mmio_ba;	/* MMIO BUS address		*/
	int		mmio_ws;	/* MMIO Window size		*/

	vm_offset_t	ram_ba;		/* RAM BUS address		*/
	int		ram_ws;		/* RAM window size		*/

	/*
	 * SCRIPTS virtual and physical bus addresses.
	 * 'script'  is loaded in the on-chip RAM if present.
	 * 'scripth' stays in main memory for all chips except the
	 * 53C895A, 53C896 and 53C1010 that provide 8K on-chip RAM.
	 */
	u_char		*scripta0;	/* Copies of script and scripth	*/
	u_char		*scriptb0;	/* Copies of script and scripth	*/
	vm_offset_t	scripta_ba;	/* Actual script and scripth	*/
	vm_offset_t	scriptb_ba;	/*  bus addresses.		*/
	vm_offset_t	scriptb0_ba;
	u_short		scripta_sz;	/* Actual size of script A	*/
	u_short		scriptb_sz;	/* Actual size of script B	*/

	/*
	 * Bus addresses, setup and patch methods for
	 * the selected firmware.
	 */
	struct sym_fwa_ba fwa_bas;	/* Useful SCRIPTA bus addresses	*/
	struct sym_fwb_ba fwb_bas;	/* Useful SCRIPTB bus addresses	*/
	void		(*fw_setup)(hcb_p np, const struct sym_fw *fw);
	void		(*fw_patch)(hcb_p np);
	const char	*fw_name;

	/*
	 * General controller parameters and configuration.
	 */
	u_short	device_id;	/* PCI device id		*/
	u_char	revision_id;	/* PCI device revision id	*/
	u_int	features;	/* Chip features map		*/
	u_char	myaddr;		/* SCSI id of the adapter	*/
	u_char	maxburst;	/* log base 2 of dwords burst	*/
	u_char	maxwide;	/* Maximum transfer width	*/
	u_char	minsync;	/* Min sync period factor (ST)	*/
	u_char	maxsync;	/* Max sync period factor (ST)	*/
	u_char	maxoffs;	/* Max scsi offset        (ST)	*/
	u_char	minsync_dt;	/* Min sync period factor (DT)	*/
	u_char	maxsync_dt;	/* Max sync period factor (DT)	*/
	u_char	maxoffs_dt;	/* Max scsi offset        (DT)	*/
	u_char	multiplier;	/* Clock multiplier (1,2,4)	*/
	u_char	clock_divn;	/* Number of clock divisors	*/
	u32	clock_khz;	/* SCSI clock frequency in KHz	*/
	u32	pciclk_khz;	/* Estimated PCI clock  in KHz	*/
	/*
	 * Start queue management.
	 * It is filled up by the host processor and accessed by the
	 * SCRIPTS processor in order to start SCSI commands.
	 */
	volatile		/* Prevent code optimizations	*/
	u32	*squeue;	/* Start queue virtual address	*/
	u32	squeue_ba;	/* Start queue BUS address	*/
	u_short	squeueput;	/* Next free slot of the queue	*/
	u_short	actccbs;	/* Number of allocated CCBs	*/

	/*
	 * Command completion queue.
	 * It is the same size as the start queue to avoid overflow.
	 */
	u_short	dqueueget;	/* Next position to scan	*/
	volatile		/* Prevent code optimizations	*/
	u32	*dqueue;	/* Completion (done) queue	*/
	u32	dqueue_ba;	/* Done queue BUS address	*/

	/*
	 * Miscellaneous buffers accessed by the scripts-processor.
	 * They shall be DWORD aligned, because they may be read or
	 * written with a script command.
	 */
	u_char	msgout[8];	/* Buffer for MESSAGE OUT 	*/
	u_char	msgin [8];	/* Buffer for MESSAGE IN	*/
	u32	lastmsg;	/* Last SCSI message sent	*/
	u_char	scratch;	/* Scratch for SCSI receive	*/

	/*
	 * Miscellaneous configuration and status parameters.
	 */
	u_char	usrflags;	/* Miscellaneous user flags	*/
	u_char	scsi_mode;	/* Current SCSI BUS mode	*/
	u_char	verbose;	/* Verbosity for this controller*/
	u32	cache;		/* Used for cache test at init.	*/

	/*
	 * CCB lists and queue.
	 */
	ccb_p ccbh[CCB_HASH_SIZE];	/* CCB hashed by DSA value	*/
	SYM_QUEHEAD	free_ccbq;	/* Queue of available CCBs	*/
	SYM_QUEHEAD	busy_ccbq;	/* Queue of busy CCBs		*/

	/*
	 * During error handling and/or recovery,
	 * active CCBs that are to be completed with
	 * error or requeued are moved from the busy_ccbq
	 * to the comp_ccbq prior to completion.
	 */
	SYM_QUEHEAD	comp_ccbq;

	/*
	 * CAM CCB pending queue.
	 */
	SYM_QUEHEAD	cam_ccbq;

	/*
	 * IMMEDIATE ARBITRATION (IARB) control.
	 *
	 * We keep track in 'last_cp' of the last CCB that has been
	 * queued to the SCRIPTS processor and clear 'last_cp' when
	 * this CCB completes. If last_cp is not zero at the moment
	 * we queue a new CCB, we set a flag in 'last_cp' that is
	 * used by the SCRIPTS as a hint for setting IARB.
	 * We do not set more than 'iarb_max' consecutive hints for
	 * IARB in order to leave devices a chance to reselect.
	 * By the way, any non zero value of 'iarb_max' is unfair. :)
	 */
#ifdef SYM_CONF_IARB_SUPPORT
	u_short		iarb_max;	/* Max. # consecutive IARB hints*/
	u_short		iarb_count;	/* Actual # of these hints	*/
	ccb_p		last_cp;
#endif

	/*
	 * Command abort handling.
	 * We need to synchronize tightly with the SCRIPTS
	 * processor in order to handle things correctly.
	 */
	u_char		abrt_msg[4];	/* Message to send buffer	*/
	struct sym_tblmove abrt_tbl;	/* Table for the MOV of it 	*/
	struct sym_tblsel  abrt_sel;	/* Sync params for selection	*/
	u_char		istat_sem;	/* Tells the chip to stop (SEM)	*/
};
1683
/* Bus address of field 'lbl' inside HCB 'np' ('np' parenthesized for macro safety). */
#define HCB_BA(np, lbl)	((np)->hcb_ba + offsetof(struct sym_hcb, lbl))
1685
1686 /*
1687 * Return the name of the controller.
1688 */
sym_name(hcb_p np)1689 static __inline const char *sym_name(hcb_p np)
1690 {
1691 return device_get_nameunit(np->device);
1692 }
1693
1694 /*--------------------------------------------------------------------------*/
1695 /*------------------------------ FIRMWARES ---------------------------------*/
1696 /*--------------------------------------------------------------------------*/
1697
1698 /*
1699 * This stuff will be moved to a separate source file when
1700 * the driver will be broken into several source modules.
1701 */
1702
/*
 * Macros used for all firmwares.
 * SYM_GEN_A/B emit the (short) offset of a script label; they are
 * expanded by the SYM_GEN_FW_A/B lists to build the offset tables.
 * PADDR_A/B resolve a label to its script bus address.
 */
#define	SYM_GEN_A(s, label)	((short) offsetof(s, label)),
#define	SYM_GEN_B(s, label)	((short) offsetof(s, label)),
#define	PADDR_A(label)		SYM_GEN_PADDR_A(struct SYM_FWA_SCR, label)
#define	PADDR_B(label)		SYM_GEN_PADDR_B(struct SYM_FWB_SCR, label)
1710
#ifdef	SYM_CONF_GENERIC_SUPPORT
/*
 * Allocate firmware #1 script area (generic, for chips without
 * LOAD/STORE) and build its label offset tables.
 */
#define	SYM_FWA_SCR		sym_fw1a_scr
#define	SYM_FWB_SCR		sym_fw1b_scr
#include <dev/sym/sym_fw1.h>
static const struct sym_fwa_ofs sym_fw1a_ofs = {
	SYM_GEN_FW_A(struct SYM_FWA_SCR)
};
static const struct sym_fwb_ofs sym_fw1b_ofs = {
	SYM_GEN_FW_B(struct SYM_FWB_SCR)
};
#undef	SYM_FWA_SCR
#undef	SYM_FWB_SCR
#endif	/* SYM_CONF_GENERIC_SUPPORT */
1727
/*
 * Allocate firmware #2 script area (LOAD/STORE-based) and build
 * its label offset tables; script B additionally exports the
 * start64 and pm_handle labels.
 */
#define	SYM_FWA_SCR		sym_fw2a_scr
#define	SYM_FWB_SCR		sym_fw2b_scr
#include <dev/sym/sym_fw2.h>
static const struct sym_fwa_ofs sym_fw2a_ofs = {
	SYM_GEN_FW_A(struct SYM_FWA_SCR)
};
static const struct sym_fwb_ofs sym_fw2b_ofs = {
	SYM_GEN_FW_B(struct SYM_FWB_SCR)
	SYM_GEN_B(struct SYM_FWB_SCR, start64)
	SYM_GEN_B(struct SYM_FWB_SCR, pm_handle)
};
#undef	SYM_FWA_SCR
#undef	SYM_FWB_SCR

#undef	SYM_GEN_A
#undef	SYM_GEN_B
#undef	PADDR_A
#undef	PADDR_B
1749
1750 #ifdef SYM_CONF_GENERIC_SUPPORT
1751 /*
1752 * Patch routine for firmware #1.
1753 */
1754 static void
sym_fw1_patch(hcb_p np)1755 sym_fw1_patch(hcb_p np)
1756 {
1757 struct sym_fw1a_scr *scripta0;
1758 struct sym_fw1b_scr *scriptb0;
1759
1760 scripta0 = (struct sym_fw1a_scr *) np->scripta0;
1761 scriptb0 = (struct sym_fw1b_scr *) np->scriptb0;
1762
1763 /*
1764 * Remove LED support if not needed.
1765 */
1766 if (!(np->features & FE_LED0)) {
1767 scripta0->idle[0] = cpu_to_scr(SCR_NO_OP);
1768 scripta0->reselected[0] = cpu_to_scr(SCR_NO_OP);
1769 scripta0->start[0] = cpu_to_scr(SCR_NO_OP);
1770 }
1771
1772 #ifdef SYM_CONF_IARB_SUPPORT
1773 /*
1774 * If user does not want to use IMMEDIATE ARBITRATION
1775 * when we are reselected while attempting to arbitrate,
1776 * patch the SCRIPTS accordingly with a SCRIPT NO_OP.
1777 */
1778 if (!SYM_CONF_SET_IARB_ON_ARB_LOST)
1779 scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP);
1780 #endif
1781 /*
1782 * Patch some data in SCRIPTS.
1783 * - start and done queue initial bus address.
1784 * - target bus address table bus address.
1785 */
1786 scriptb0->startpos[0] = cpu_to_scr(np->squeue_ba);
1787 scriptb0->done_pos[0] = cpu_to_scr(np->dqueue_ba);
1788 scriptb0->targtbl[0] = cpu_to_scr(np->targtbl_ba);
1789 }
1790 #endif /* SYM_CONF_GENERIC_SUPPORT */
1791
1792 /*
1793 * Patch routine for firmware #2.
1794 */
1795 static void
sym_fw2_patch(hcb_p np)1796 sym_fw2_patch(hcb_p np)
1797 {
1798 struct sym_fw2a_scr *scripta0;
1799 struct sym_fw2b_scr *scriptb0;
1800
1801 scripta0 = (struct sym_fw2a_scr *) np->scripta0;
1802 scriptb0 = (struct sym_fw2b_scr *) np->scriptb0;
1803
1804 /*
1805 * Remove LED support if not needed.
1806 */
1807 if (!(np->features & FE_LED0)) {
1808 scripta0->idle[0] = cpu_to_scr(SCR_NO_OP);
1809 scripta0->reselected[0] = cpu_to_scr(SCR_NO_OP);
1810 scripta0->start[0] = cpu_to_scr(SCR_NO_OP);
1811 }
1812
1813 #ifdef SYM_CONF_IARB_SUPPORT
1814 /*
1815 * If user does not want to use IMMEDIATE ARBITRATION
1816 * when we are reselected while attempting to arbitrate,
1817 * patch the SCRIPTS accordingly with a SCRIPT NO_OP.
1818 */
1819 if (!SYM_CONF_SET_IARB_ON_ARB_LOST)
1820 scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP);
1821 #endif
1822 /*
1823 * Patch some variable in SCRIPTS.
1824 * - start and done queue initial bus address.
1825 * - target bus address table bus address.
1826 */
1827 scriptb0->startpos[0] = cpu_to_scr(np->squeue_ba);
1828 scriptb0->done_pos[0] = cpu_to_scr(np->dqueue_ba);
1829 scriptb0->targtbl[0] = cpu_to_scr(np->targtbl_ba);
1830
1831 /*
1832 * Remove the load of SCNTL4 on reselection if not a C10.
1833 */
1834 if (!(np->features & FE_C10)) {
1835 scripta0->resel_scntl4[0] = cpu_to_scr(SCR_NO_OP);
1836 scripta0->resel_scntl4[1] = cpu_to_scr(0);
1837 }
1838
1839 /*
1840 * Remove a couple of work-arounds specific to C1010 if
1841 * they are not desirable. See `sym_fw2.h' for more details.
1842 */
1843 if (!(np->device_id == PCI_ID_LSI53C1010_2 &&
1844 np->revision_id < 0x1 &&
1845 np->pciclk_khz < 60000)) {
1846 scripta0->datao_phase[0] = cpu_to_scr(SCR_NO_OP);
1847 scripta0->datao_phase[1] = cpu_to_scr(0);
1848 }
1849 if (!(np->device_id == PCI_ID_LSI53C1010 &&
1850 /* np->revision_id < 0xff */ 1)) {
1851 scripta0->sel_done[0] = cpu_to_scr(SCR_NO_OP);
1852 scripta0->sel_done[1] = cpu_to_scr(0);
1853 }
1854
1855 /*
1856 * Patch some other variables in SCRIPTS.
1857 * These ones are loaded by the SCRIPTS processor.
1858 */
1859 scriptb0->pm0_data_addr[0] =
1860 cpu_to_scr(np->scripta_ba +
1861 offsetof(struct sym_fw2a_scr, pm0_data));
1862 scriptb0->pm1_data_addr[0] =
1863 cpu_to_scr(np->scripta_ba +
1864 offsetof(struct sym_fw2a_scr, pm1_data));
1865 }
1866
1867 /*
1868 * Fill the data area in scripts.
1869 * To be done for all firmwares.
1870 */
1871 static void
sym_fw_fill_data(u32 * in,u32 * out)1872 sym_fw_fill_data (u32 *in, u32 *out)
1873 {
1874 int i;
1875
1876 for (i = 0; i < SYM_CONF_MAX_SG; i++) {
1877 *in++ = SCR_CHMOV_TBL ^ SCR_DATA_IN;
1878 *in++ = offsetof (struct sym_dsb, data[i]);
1879 *out++ = SCR_CHMOV_TBL ^ SCR_DATA_OUT;
1880 *out++ = offsetof (struct sym_dsb, data[i]);
1881 }
1882 }
1883
1884 /*
1885 * Setup useful script bus addresses.
1886 * To be done for all firmwares.
1887 */
1888 static void
sym_fw_setup_bus_addresses(hcb_p np,const struct sym_fw * fw)1889 sym_fw_setup_bus_addresses(hcb_p np, const struct sym_fw *fw)
1890 {
1891 u32 *pa;
1892 const u_short *po;
1893 int i;
1894
1895 /*
1896 * Build the bus address table for script A
1897 * from the script A offset table.
1898 */
1899 po = (const u_short *) fw->a_ofs;
1900 pa = (u32 *) &np->fwa_bas;
1901 for (i = 0 ; i < sizeof(np->fwa_bas)/sizeof(u32) ; i++)
1902 pa[i] = np->scripta_ba + po[i];
1903
1904 /*
1905 * Same for script B.
1906 */
1907 po = (const u_short *) fw->b_ofs;
1908 pa = (u32 *) &np->fwb_bas;
1909 for (i = 0 ; i < sizeof(np->fwb_bas)/sizeof(u32) ; i++)
1910 pa[i] = np->scriptb_ba + po[i];
1911 }
1912
1913 #ifdef SYM_CONF_GENERIC_SUPPORT
1914 /*
1915 * Setup routine for firmware #1.
1916 */
1917 static void
sym_fw1_setup(hcb_p np,const struct sym_fw * fw)1918 sym_fw1_setup(hcb_p np, const struct sym_fw *fw)
1919 {
1920 struct sym_fw1a_scr *scripta0;
1921
1922 scripta0 = (struct sym_fw1a_scr *) np->scripta0;
1923
1924 /*
1925 * Fill variable parts in scripts.
1926 */
1927 sym_fw_fill_data(scripta0->data_in, scripta0->data_out);
1928
1929 /*
1930 * Setup bus addresses used from the C code..
1931 */
1932 sym_fw_setup_bus_addresses(np, fw);
1933 }
1934 #endif /* SYM_CONF_GENERIC_SUPPORT */
1935
1936 /*
1937 * Setup routine for firmware #2.
1938 */
1939 static void
sym_fw2_setup(hcb_p np,const struct sym_fw * fw)1940 sym_fw2_setup(hcb_p np, const struct sym_fw *fw)
1941 {
1942 struct sym_fw2a_scr *scripta0;
1943
1944 scripta0 = (struct sym_fw2a_scr *) np->scripta0;
1945
1946 /*
1947 * Fill variable parts in scripts.
1948 */
1949 sym_fw_fill_data(scripta0->data_in, scripta0->data_out);
1950
1951 /*
1952 * Setup bus addresses used from the C code..
1953 */
1954 sym_fw_setup_bus_addresses(np, fw);
1955 }
1956
/*
 * Allocate firmware descriptors.
 * #1 is the generic firmware for pre-LOAD/STORE chips,
 * #2 the LOAD/STORE-based one.
 */
#ifdef	SYM_CONF_GENERIC_SUPPORT
static const struct sym_fw sym_fw1 = SYM_FW_ENTRY(sym_fw1, "NCR-generic");
#endif	/* SYM_CONF_GENERIC_SUPPORT */
static const struct sym_fw sym_fw2 = SYM_FW_ENTRY(sym_fw2, "LOAD/STORE-based");
1964
1965 /*
1966 * Find the most appropriate firmware for a chip.
1967 */
1968 static const struct sym_fw *
sym_find_firmware(const struct sym_pci_chip * chip)1969 sym_find_firmware(const struct sym_pci_chip *chip)
1970 {
1971 if (chip->features & FE_LDSTR)
1972 return &sym_fw2;
1973 #ifdef SYM_CONF_GENERIC_SUPPORT
1974 else if (!(chip->features & (FE_PFEN|FE_NOPM|FE_DAC)))
1975 return &sym_fw1;
1976 #endif
1977 else
1978 return NULL;
1979 }
1980
/*
 * Bind a script to physical addresses.
 *
 * Walks the script image DWORD by DWORD: converts every opcode to
 * chip byte order with cpu_to_scr(), and rewrites each RELOC_*
 * tagged argument into the corresponding actual bus address
 * (chip registers, script A/B, or the HCB). 'len' is in bytes.
 */
static void sym_fw_bind_script (hcb_p np, u32 *start, int len)
{
	u32 opcode, new, old, tmp1, tmp2;
	u32 *end, *cur;
	int relocs;

	cur = start;
	end = start + len/4;

	while (cur < end) {
		opcode = *cur;

		/*
		 * If we forget to change the length
		 * in scripts, a field will be
		 * padded with 0. This is an illegal
		 * command.
		 */
		if (opcode == 0) {
			device_printf(np->device, "ERROR0 IN SCRIPT at %d.\n",
			    (int)(cur-start));
			MDELAY (10000);
			++cur;
			continue;
		}

		/*
		 * We use the bogus value 0xf00ff00f ;-)
		 * to reserve data area in SCRIPTS.
		 */
		if (opcode == SCR_DATA_ZERO) {
			*cur++ = 0;
			continue;
		}

		if (DEBUG_FLAGS & DEBUG_SCRIPT)
			printf ("%d:  <%x>\n", (int) (cur-start),
				(unsigned)opcode);

		/*
		 * We don't have to decode ALL commands; dispatch on
		 * the instruction type (top 4 bits) to learn how many
		 * of the following DWORDs need relocation.
		 */
		switch (opcode >> 28) {
		case 0xf:
			/*
			 * LOAD / STORE DSA relative, don't relocate.
			 */
			relocs = 0;
			break;
		case 0xe:
			/*
			 * LOAD / STORE absolute.
			 */
			relocs = 1;
			break;
		case 0xc:
			/*
			 * COPY has TWO arguments.
			 */
			relocs = 2;
			tmp1 = cur[1];
			tmp2 = cur[2];
			/* Source/destination must agree in DWORD byte lane. */
			if ((tmp1 ^ tmp2) & 3) {
				device_printf(np->device,
				    "ERROR1 IN SCRIPT at %d.\n",
				    (int)(cur-start));
				MDELAY (10000);
			}
			/*
			 * If PREFETCH feature not enabled, remove
			 * the NO FLUSH bit if present.
			 */
			if ((opcode & SCR_NO_FLUSH) &&
			    !(np->features & FE_PFEN)) {
				opcode = (opcode & ~SCR_NO_FLUSH);
			}
			break;
		case 0x0:
			/*
			 * MOVE/CHMOV (absolute address).
			 * Narrow chips get plain MOVE instead of CHMOV.
			 */
			if (!(np->features & FE_WIDE))
				opcode = (opcode | OPC_MOVE);
			relocs = 1;
			break;
		case 0x1:
			/*
			 * MOVE/CHMOV (table indirect)
			 */
			if (!(np->features & FE_WIDE))
				opcode = (opcode | OPC_MOVE);
			relocs = 0;
			break;
		case 0x8:
			/*
			 * JUMP / CALL
			 * don't relocate if relative :-)
			 */
			if (opcode & 0x00800000)
				relocs = 0;
			else if ((opcode & 0xf8400000) == 0x80400000)/*JUMP64*/
				relocs = 2;
			else
				relocs = 1;
			break;
		case 0x4:
		case 0x5:
		case 0x6:
		case 0x7:
			relocs = 1;
			break;
		default:
			relocs = 0;
			break;
		}

		/*
		 * Scriptify:) the opcode.
		 */
		*cur++ = cpu_to_scr(opcode);

		/*
		 * If no relocation, assume 1 argument
		 * and just scriptize:) it.
		 */
		if (!relocs) {
			*cur = cpu_to_scr(*cur);
			++cur;
			continue;
		}

		/*
		 * Otherwise performs all needed relocations.
		 * The RELOC_* tag lives in the high bits of the
		 * argument and selects which base address to add.
		 */
		while (relocs--) {
			old = *cur;

			switch (old & RELOC_MASK) {
			case RELOC_REGISTER:
				new = (old & ~RELOC_MASK) + np->mmio_ba;
				break;
			case RELOC_LABEL_A:
				new = (old & ~RELOC_MASK) + np->scripta_ba;
				break;
			case RELOC_LABEL_B:
				new = (old & ~RELOC_MASK) + np->scriptb_ba;
				break;
			case RELOC_SOFTC:
				new = (old & ~RELOC_MASK) + np->hcb_ba;
				break;
			case 0:
				/*
				 * Don't relocate a 0 address.
				 * They are mostly used for patched or
				 * script self-modified areas.
				 */
				if (old == 0) {
					new = old;
					break;
				}
				/* fall through */
			default:
				new = 0;
				panic("sym_fw_bind_script: "
				      "weird relocation %x\n", old);
				break;
			}

			*cur++ = cpu_to_scr(new);
		}
	}
}
2156
2157 /*---------------------------------------------------------------------------*/
2158 /*--------------------------- END OF FIRMWARES -----------------------------*/
2159 /*---------------------------------------------------------------------------*/
2160
2161 /*
2162 * Function prototypes.
2163 */
2164 static void sym_save_initial_setting (hcb_p np);
2165 static int sym_prepare_setting (hcb_p np, struct sym_nvram *nvram);
2166 static int sym_prepare_nego (hcb_p np, ccb_p cp, int nego, u_char *msgptr);
2167 static void sym_put_start_queue (hcb_p np, ccb_p cp);
2168 static void sym_chip_reset (hcb_p np);
2169 static void sym_soft_reset (hcb_p np);
2170 static void sym_start_reset (hcb_p np);
2171 static int sym_reset_scsi_bus (hcb_p np, int enab_int);
2172 static int sym_wakeup_done (hcb_p np);
2173 static void sym_flush_busy_queue (hcb_p np, int cam_status);
2174 static void sym_flush_comp_queue (hcb_p np, int cam_status);
2175 static void sym_init (hcb_p np, int reason);
2176 static int sym_getsync(hcb_p np, u_char dt, u_char sfac, u_char *divp,
2177 u_char *fakp);
2178 static void sym_setsync (hcb_p np, ccb_p cp, u_char ofs, u_char per,
2179 u_char div, u_char fak);
2180 static void sym_setwide (hcb_p np, ccb_p cp, u_char wide);
2181 static void sym_setpprot(hcb_p np, ccb_p cp, u_char dt, u_char ofs,
2182 u_char per, u_char wide, u_char div, u_char fak);
2183 static void sym_settrans(hcb_p np, ccb_p cp, u_char dt, u_char ofs,
2184 u_char per, u_char wide, u_char div, u_char fak);
2185 static void sym_log_hard_error (hcb_p np, u_short sist, u_char dstat);
2186 static void sym_intr (void *arg);
2187 static void sym_poll (struct cam_sim *sim);
2188 static void sym_recover_scsi_int (hcb_p np, u_char hsts);
2189 static void sym_int_sto (hcb_p np);
2190 static void sym_int_udc (hcb_p np);
2191 static void sym_int_sbmc (hcb_p np);
2192 static void sym_int_par (hcb_p np, u_short sist);
2193 static void sym_int_ma (hcb_p np);
2194 static int sym_dequeue_from_squeue(hcb_p np, int i, int target, int lun,
2195 int task);
2196 static void sym_sir_bad_scsi_status (hcb_p np, ccb_p cp);
2197 static int sym_clear_tasks (hcb_p np, int status, int targ, int lun, int task);
2198 static void sym_sir_task_recovery (hcb_p np, int num);
2199 static int sym_evaluate_dp (hcb_p np, ccb_p cp, u32 scr, int *ofs);
2200 static void sym_modify_dp(hcb_p np, ccb_p cp, int ofs);
2201 static int sym_compute_residual (hcb_p np, ccb_p cp);
2202 static int sym_show_msg (u_char * msg);
2203 static void sym_print_msg (ccb_p cp, char *label, u_char *msg);
2204 static void sym_sync_nego (hcb_p np, tcb_p tp, ccb_p cp);
2205 static void sym_ppr_nego (hcb_p np, tcb_p tp, ccb_p cp);
2206 static void sym_wide_nego (hcb_p np, tcb_p tp, ccb_p cp);
2207 static void sym_nego_default (hcb_p np, tcb_p tp, ccb_p cp);
2208 static void sym_nego_rejected (hcb_p np, tcb_p tp, ccb_p cp);
2209 static void sym_int_sir (hcb_p np);
2210 static void sym_free_ccb (hcb_p np, ccb_p cp);
2211 static ccb_p sym_get_ccb (hcb_p np, u_char tn, u_char ln, u_char tag_order);
2212 static ccb_p sym_alloc_ccb (hcb_p np);
2213 static ccb_p sym_ccb_from_dsa (hcb_p np, u32 dsa);
2214 static lcb_p sym_alloc_lcb (hcb_p np, u_char tn, u_char ln);
2215 static void sym_alloc_lcb_tags (hcb_p np, u_char tn, u_char ln);
2216 static int sym_snooptest (hcb_p np);
2217 static void sym_selectclock(hcb_p np, u_char scntl3);
2218 static void sym_getclock (hcb_p np, int mult);
2219 static int sym_getpciclock (hcb_p np);
2220 static void sym_complete_ok (hcb_p np, ccb_p cp);
2221 static void sym_complete_error (hcb_p np, ccb_p cp);
2222 static void sym_callout (void *arg);
2223 static int sym_abort_scsiio (hcb_p np, union ccb *ccb, int timed_out);
2224 static void sym_reset_dev (hcb_p np, union ccb *ccb);
2225 static void sym_action (struct cam_sim *sim, union ccb *ccb);
2226 static int sym_setup_cdb (hcb_p np, struct ccb_scsiio *csio, ccb_p cp);
2227 static void sym_setup_data_and_start (hcb_p np, struct ccb_scsiio *csio,
2228 ccb_p cp);
2229 static int sym_fast_scatter_sg_physical(hcb_p np, ccb_p cp,
2230 bus_dma_segment_t *psegs, int nsegs);
2231 static int sym_scatter_sg_physical (hcb_p np, ccb_p cp,
2232 bus_dma_segment_t *psegs, int nsegs);
2233 static void sym_action2 (struct cam_sim *sim, union ccb *ccb);
2234 static void sym_update_trans(hcb_p np, struct sym_trans *tip,
2235 struct ccb_trans_settings *cts);
2236 static void sym_update_dflags(hcb_p np, u_char *flags,
2237 struct ccb_trans_settings *cts);
2238
2239 static const struct sym_pci_chip *sym_find_pci_chip (device_t dev);
2240
2241 static device_probe_t sym_pci_probe;
2242 static device_attach_t sym_pci_attach;
2243 static device_detach_t sym_pci_detach;
2244
2245 static int sym_cam_attach (hcb_p np);
2246 static void sym_cam_free (hcb_p np);
2247
2248 static void sym_nvram_setup_host (hcb_p np, struct sym_nvram *nvram);
2249 static void sym_nvram_setup_target (hcb_p np, int targ, struct sym_nvram *nvp);
2250 static int sym_read_nvram (hcb_p np, struct sym_nvram *nvp);
2251
2252 /*
2253 * Print something which allows to retrieve the controller type,
2254 * unit, target, lun concerned by a kernel message.
2255 */
/* Print the "controller:target:" prefix for a kernel message. */
static void PRINT_TARGET (hcb_p np, int target)
{
	const char *name = sym_name(np);

	printf ("%s:%d:", name, target);
}
2260
/* Print the "controller:target:lun:" prefix for a kernel message. */
static void PRINT_LUN(hcb_p np, int target, int lun)
{
	const char *name = sym_name(np);

	printf ("%s:%d:%d:", name, target, lun);
}
2265
/* Print the CAM path of the CCB, if any, associated with this job. */
static void PRINT_ADDR (ccb_p cp)
{
	if (cp == NULL || cp->cam_ccb == NULL)
		return;
	xpt_print_path(cp->cam_ccb->ccb_h.path);
}
2271
2272 /*
2273 * Take into account this ccb in the freeze count.
2274 */
sym_freeze_cam_ccb(union ccb * ccb)2275 static void sym_freeze_cam_ccb(union ccb *ccb)
2276 {
2277 if (!(ccb->ccb_h.flags & CAM_DEV_QFRZDIS)) {
2278 if (!(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
2279 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2280 xpt_freeze_devq(ccb->ccb_h.path, 1);
2281 }
2282 }
2283 }
2284
2285 /*
2286 * Set the status field of a CAM CCB.
2287 */
/* Replace the CAM status bits of a CCB, leaving the other flags intact. */
static __inline void sym_set_cam_status(union ccb *ccb, cam_status status)
{
	ccb->ccb_h.status = (ccb->ccb_h.status & ~CAM_STATUS_MASK) | status;
}
2293
2294 /*
2295 * Get the status field of a CAM CCB.
2296 */
sym_get_cam_status(union ccb * ccb)2297 static __inline int sym_get_cam_status(union ccb *ccb)
2298 {
2299 return ccb->ccb_h.status & CAM_STATUS_MASK;
2300 }
2301
2302 /*
2303 * Enqueue a CAM CCB.
2304 */
static void sym_enqueue_cam_ccb(ccb_p cp)
{
	hcb_p np;
	union ccb *ccb;

	ccb = cp->cam_ccb;
	np = (hcb_p) cp->arg;

	/* A CCB must not be queued twice. */
	assert(!(ccb->ccb_h.status & CAM_SIM_QUEUED));
	ccb->ccb_h.status = CAM_REQ_INPROG;

	/* ccb_h.timeout is in milliseconds (scaled by SBT_1MS). */
	callout_reset_sbt(&cp->ch, SBT_1MS * ccb->ccb_h.timeout, 0, sym_callout,
	    (caddr_t)ccb, 0);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	ccb->ccb_h.sym_hcb_ptr = np;

	/* Link the CCB onto the controller's list of queued CAM CCBs. */
	sym_insque_tail(sym_qptr(&ccb->ccb_h.sim_links), &np->cam_ccbq);
}
2323
2324 /*
2325 * Complete a pending CAM CCB.
2326 */
2327
static void sym_xpt_done(hcb_p np, union ccb *ccb, ccb_p cp)
{

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 * If the CCB is still accounted as queued, stop its timeout
	 * callout and unlink it before handing it back to CAM.
	 */
	if (ccb->ccb_h.status & CAM_SIM_QUEUED) {
		callout_stop(&cp->ch);
		sym_remque(sym_qptr(&ccb->ccb_h.sim_links));
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.sym_hcb_ptr = NULL;
	}
	xpt_done(ccb);
}
2341
/* Set the CAM status of a CCB and complete it towards CAM. */
static void sym_xpt_done2(hcb_p np, union ccb *ccb, int cam_status)
{

	SYM_LOCK_ASSERT(MA_OWNED);

	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	ccb->ccb_h.status |= cam_status;
	xpt_done(ccb);
}
2350
2351 /*
2352 * SYMBIOS chip clock divisor table.
2353 *
2354 * Divisors are multiplied by 10,000,000 in order to make
2355 * calculations more simple.
2356 */
#define _5M 5000000
/* Divisors 2, 3, 4, 6, 8, 12 and 16, each scaled by 10,000,000. */
static const u32 div_10M[] =
	{2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M};
2360
2361 /*
2362 * SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64,
2363 * 128 transfers. All chips support at least 16 transfers
2364 * bursts. The 825A, 875 and 895 chips support bursts of up
2365 * to 128 transfers and the 895A and 896 support bursts of up
2366 * to 64 transfers. All other chips support up to 16
2367 * transfers bursts.
2368 *
2369 * For PCI 32 bit data transfers each transfer is a DWORD.
2370 * It is a QUADWORD (8 bytes) for PCI 64 bit data transfers.
2371 *
2372 * We use log base 2 (burst length) as internal code, with
2373 * value 0 meaning "burst disabled".
2374 */
2375
2376 /*
2377 * Burst length from burst code.
2378 */
2379 #define burst_length(bc) (!(bc))? 0 : 1 << (bc)
2380
2381 /*
2382 * Burst code from io register bits.
2383 */
2384 #define burst_code(dmode, ctest4, ctest5) \
2385 (ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1
2386
2387 /*
2388 * Set initial io register bits from burst code.
2389 */
/*
 * Set initial io register bits from burst code.
 * Code 0 disables bursts (CTEST4 bit 7); otherwise code-1 is
 * split across DMODE bits 7:6 and CTEST5 bit 2.
 */
static __inline void sym_init_burst(hcb_p np, u_char bc)
{
	/* Clear all burst-related register bits first. */
	np->rv_ctest4 &= ~0x80;
	np->rv_dmode &= ~(0x3 << 6);
	np->rv_ctest5 &= ~0x4;

	if (bc == 0) {
		np->rv_ctest4 |= 0x80;
		return;
	}
	bc--;
	np->rv_dmode |= ((bc & 0x3) << 6);
	np->rv_ctest5 |= (bc & 0x4);
}
2405
2406 /*
2407 * Print out the list of targets that have some flag disabled by user.
2408 */
/*
 * Print out the list of targets that have some flag disabled by user.
 * Nothing at all is printed when no target matches.
 */
static void sym_print_targets_flag(hcb_p np, int mask, char *msg)
{
	int n = 0;
	int t;

	for (t = 0; t < SYM_CONF_MAX_TARGET; t++) {
		if (t == np->myaddr)
			continue;
		if (!(np->target[t].usrflags & mask))
			continue;
		if (n == 0)
			device_printf(np->device,
			    "%s disabled for targets", msg);
		n++;
		printf(" %d", t);
	}
	if (n != 0)
		printf(".\n");
}
2427
2428 /*
2429 * Save initial settings of some IO registers.
2430 * Assumed to have been set by BIOS.
2431 * We cannot reset the chip prior to reading the
2432 * IO registers, since informations will be lost.
2433 * Since the SCRIPTS processor may be running, this
2434 * is not safe on paper, but it seems to work quite
2435 * well. :)
2436 */
static void sym_save_initial_setting (hcb_p np)
{
	/* Only the register bits of interest to the driver are kept. */
	np->sv_scntl0 = INB(nc_scntl0) & 0x0a;
	np->sv_scntl3 = INB(nc_scntl3) & 0x07;
	np->sv_dmode = INB(nc_dmode) & 0xce;
	np->sv_dcntl = INB(nc_dcntl) & 0xa8;
	np->sv_ctest3 = INB(nc_ctest3) & 0x01;
	np->sv_ctest4 = INB(nc_ctest4) & 0x80;
	np->sv_gpcntl = INB(nc_gpcntl);
	np->sv_stest1 = INB(nc_stest1);
	np->sv_stest2 = INB(nc_stest2) & 0x20;
	np->sv_stest4 = INB(nc_stest4);
	if (np->features & FE_C10) {	/* Always large DMA fifo + ultra3 */
		np->sv_scntl4 = INB(nc_scntl4);
		np->sv_ctest5 = INB(nc_ctest5) & 0x04;
	}
	else
		np->sv_ctest5 = INB(nc_ctest5) & 0x24;
}
2456
2457 /*
2458 * Prepare io register values used by sym_init() according
2459 * to selected and supported features.
2460 */
static int sym_prepare_setting(hcb_p np, struct sym_nvram *nvram)
{
	u_char burst_max;
	u32 period;
	int i;

	/*
	 * Wide ?
	 */
	np->maxwide = (np->features & FE_WIDE)? 1 : 0;

	/*
	 * Get the frequency of the chip's clock.
	 */
	if (np->features & FE_QUAD)
		np->multiplier = 4;
	else if (np->features & FE_DBLR)
		np->multiplier = 2;
	else
		np->multiplier = 1;

	np->clock_khz = (np->features & FE_CLK80)? 80000 : 40000;
	np->clock_khz *= np->multiplier;

	/* Measure the actual clock if it is not the plain 40 MHz case. */
	if (np->clock_khz != 40000)
		sym_getclock(np, np->multiplier);

	/*
	 * Divisor to be used for async (timer pre-scaler).
	 * Scan divisors downward and stop just past the first one
	 * that is too small for the minimum async period.
	 */
	i = np->clock_divn - 1;
	while (--i >= 0) {
		if (10ul * SYM_CONF_MIN_ASYNC * np->clock_khz > div_10M[i]) {
			++i;
			break;
		}
	}
	np->rv_scntl3 = i+1;

	/*
	 * The C1010 uses hardwired divisors for async.
	 * So, we just throw away, the async. divisor.:-)
	 */
	if (np->features & FE_C10)
		np->rv_scntl3 = 0;

	/*
	 * Minimum synchronous period factor supported by the chip.
	 * Btw, 'period' is in tenths of nanoseconds.
	 */
	period = howmany(4 * div_10M[0], np->clock_khz);
	if (period <= 250) np->minsync = 10;
	else if (period <= 303) np->minsync = 11;
	else if (period <= 500) np->minsync = 12;
	else np->minsync = howmany(period, 40);

	/*
	 * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2).
	 */
	if (np->minsync < 25 &&
	    !(np->features & (FE_ULTRA|FE_ULTRA2|FE_ULTRA3)))
		np->minsync = 25;
	else if (np->minsync < 12 &&
		 !(np->features & (FE_ULTRA2|FE_ULTRA3)))
		np->minsync = 12;

	/*
	 * Maximum synchronous period factor supported by the chip.
	 */
	period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz);
	np->maxsync = period > 2540 ? 254 : period / 10;

	/*
	 * If chip is a C1010, guess the sync limits in DT mode.
	 */
	if ((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) {
		if (np->clock_khz == 160000) {
			np->minsync_dt = 9;
			np->maxsync_dt = 50;
			np->maxoffs_dt = 62;
		}
	}

	/*
	 * 64 bit addressing (895A/896/1010) ?
	 */
	if (np->features & FE_DAC)
#ifdef __LP64__
		np->rv_ccntl1 |= (XTIMOD | EXTIBMV);
#else
		np->rv_ccntl1 |= (DDAC);
#endif

	/*
	 * Phase mismatch handled by SCRIPTS (895A/896/1010) ?
	 */
	if (np->features & FE_NOPM)
		np->rv_ccntl0 |= (ENPMJ);

	/*
	 * C1010 Errata.
	 * In dual channel mode, contention occurs if internal cycles
	 * are used. Disable internal cycles.
	 */
	if (np->device_id == PCI_ID_LSI53C1010 &&
	    np->revision_id < 0x2)
		np->rv_ccntl0 |= DILS;

	/*
	 * Select burst length (dwords).
	 * A setup value of 255 means "derive the burst code from the
	 * initial (BIOS) register settings saved earlier".
	 */
	burst_max = SYM_SETUP_BURST_ORDER;
	if (burst_max == 255)
		burst_max = burst_code(np->sv_dmode, np->sv_ctest4,
				       np->sv_ctest5);
	if (burst_max > 7)
		burst_max = 7;
	if (burst_max > np->maxburst)
		burst_max = np->maxburst;

	/*
	 * DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2.
	 * This chip and the 860 Rev 1 may wrongly use PCI cache line
	 * based transactions on LOAD/STORE instructions. So we have
	 * to prevent these chips from using such PCI transactions in
	 * this driver. The generic ncr driver that does not use
	 * LOAD/STORE instructions does not need this work-around.
	 */
	if ((np->device_id == PCI_ID_SYM53C810 &&
	     np->revision_id >= 0x10 && np->revision_id <= 0x11) ||
	    (np->device_id == PCI_ID_SYM53C860 &&
	     np->revision_id <= 0x1))
		np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP);

	/*
	 * Select all supported special features.
	 * If we are using on-board RAM for scripts, prefetch (PFEN)
	 * does not help, but burst op fetch (BOF) does.
	 * Disabling PFEN makes sure BOF will be used.
	 */
	if (np->features & FE_ERL)
		np->rv_dmode |= ERL;	/* Enable Read Line */
	if (np->features & FE_BOF)
		np->rv_dmode |= BOF;	/* Burst Opcode Fetch */
	if (np->features & FE_ERMP)
		np->rv_dmode |= ERMP;	/* Enable Read Multiple */
#if 1
	if ((np->features & FE_PFEN) && !np->ram_ba)
#else
	if (np->features & FE_PFEN)
#endif
		np->rv_dcntl |= PFEN;	/* Prefetch Enable */
	if (np->features & FE_CLSE)
		np->rv_dcntl |= CLSE;	/* Cache Line Size Enable */
	if (np->features & FE_WRIE)
		np->rv_ctest3 |= WRIE;	/* Write and Invalidate */
	if (np->features & FE_DFS)
		np->rv_ctest5 |= DFS;	/* Dma Fifo Size */

	/*
	 * Select some other
	 */
	if (SYM_SETUP_PCI_PARITY)
		np->rv_ctest4 |= MPEE;	/* Master parity checking */
	if (SYM_SETUP_SCSI_PARITY)
		np->rv_scntl0 |= 0x0a;	/* full arb., ena parity, par->ATN */

	/*
	 * Get parity checking, host ID and verbose mode from NVRAM
	 */
	np->myaddr = 255;
	sym_nvram_setup_host (np, nvram);

	/*
	 * Get SCSI addr of host adapter (set by bios?).
	 * 255 means the NVRAM did not provide an address.
	 */
	if (np->myaddr == 255) {
		np->myaddr = INB(nc_scid) & 0x07;
		if (!np->myaddr)
			np->myaddr = SYM_SETUP_HOST_ID;
	}

	/*
	 * Prepare initial io register bits for burst length
	 */
	sym_init_burst(np, burst_max);

	/*
	 * Set SCSI BUS mode.
	 * - LVD capable chips (895/895A/896/1010) report the
	 *   current BUS mode through the STEST4 IO register.
	 * - For previous generation chips (825/825A/875),
	 *   user has to tell us how to check against HVD,
	 *   since a 100% safe algorithm is not possible.
	 */
	np->scsi_mode = SMODE_SE;
	if (np->features & (FE_ULTRA2|FE_ULTRA3))
		np->scsi_mode = (np->sv_stest4 & SMODE);
	else if	(np->features & FE_DIFF) {
		if (SYM_SETUP_SCSI_DIFF == 1) {
			if (np->sv_scntl3) {
				if (np->sv_stest2 & 0x20)
					np->scsi_mode = SMODE_HVD;
			}
			else if (nvram->type == SYM_SYMBIOS_NVRAM) {
				if (!(INB(nc_gpreg) & 0x08))
					np->scsi_mode = SMODE_HVD;
			}
		}
		else if	(SYM_SETUP_SCSI_DIFF == 2)
			np->scsi_mode = SMODE_HVD;
	}
	if (np->scsi_mode == SMODE_HVD)
		np->rv_stest2 |= 0x20;

	/*
	 * Set LED support from SCRIPTS.
	 * Ignore this feature for boards known to use a
	 * specific GPIO wiring and for the 895A, 896
	 * and 1010 that drive the LED directly.
	 */
	if ((SYM_SETUP_SCSI_LED ||
	     (nvram->type == SYM_SYMBIOS_NVRAM ||
	      (nvram->type == SYM_TEKRAM_NVRAM &&
	       np->device_id == PCI_ID_SYM53C895))) &&
	    !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01))
		np->features |= FE_LED0;

	/*
	 * Set irq mode.
	 */
	switch(SYM_SETUP_IRQ_MODE & 3) {
	case 2:
		np->rv_dcntl |= IRQM;
		break;
	case 1:
		np->rv_dcntl |= (np->sv_dcntl & IRQM);
		break;
	default:
		break;
	}

	/*
	 * Configure targets according to driver setup.
	 * If NVRAM present get targets setup from NVRAM.
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
		tcb_p tp = &np->target[i];

		tp->tinfo.user.scsi_version = tp->tinfo.current.scsi_version= 2;
		tp->tinfo.user.spi_version = tp->tinfo.current.spi_version = 2;
		tp->tinfo.user.period = np->minsync;
		if (np->features & FE_ULTRA3)
			tp->tinfo.user.period = np->minsync_dt;
		tp->tinfo.user.offset = np->maxoffs;
		tp->tinfo.user.width = np->maxwide ? BUS_16_BIT : BUS_8_BIT;
		tp->usrflags |= (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);
		tp->usrtags = SYM_SETUP_MAX_TAG;

		/* NVRAM settings override the defaults chosen above. */
		sym_nvram_setup_target (np, i, nvram);

		/*
		 * For now, guess PPR/DT support from the period
		 * and BUS width.
		 */
		if (np->features & FE_ULTRA3) {
			if (tp->tinfo.user.period <= 9	&&
			    tp->tinfo.user.width == BUS_16_BIT) {
				tp->tinfo.user.options |= PPR_OPT_DT;
				tp->tinfo.user.offset = np->maxoffs_dt;
				tp->tinfo.user.spi_version = 3;
			}
		}

		if (!tp->usrtags)
			tp->usrflags &= ~SYM_TAGS_ENABLED;
	}

	/*
	 * Let user know about the settings.
	 */
	i = nvram->type;
	device_printf(np->device, "%s NVRAM, ID %d, Fast-%d, %s, %s\n",
	    i == SYM_SYMBIOS_NVRAM ? "Symbios" :
	    (i == SYM_TEKRAM_NVRAM ? "Tekram" : "No"),
	    np->myaddr,
	    (np->features & FE_ULTRA3) ? 80 :
	    (np->features & FE_ULTRA2) ? 40 :
	    (np->features & FE_ULTRA) ? 20 : 10,
	    sym_scsi_bus_mode(np->scsi_mode),
	    (np->rv_scntl0 & 0xa) ? "parity checking" : "NO parity");
	/*
	 * Tell him more on demand.
	 */
	if (sym_verbose) {
		device_printf(np->device, "%s IRQ line driver%s\n",
		    np->rv_dcntl & IRQM ? "totem pole" : "open drain",
		    np->ram_ba ? ", using on-chip SRAM" : "");
		device_printf(np->device, "using %s firmware.\n", np->fw_name);
		if (np->features & FE_NOPM)
			device_printf(np->device,
			    "handling phase mismatch from SCRIPTS.\n");
	}
	/*
	 * And still more.
	 */
	if (sym_verbose > 1) {
		device_printf(np->device,
		    "initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
		    "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
		    np->sv_scntl3, np->sv_dmode, np->sv_dcntl, np->sv_ctest3,
		    np->sv_ctest4, np->sv_ctest5);

		device_printf(np->device,
		    "final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
		    "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
		    np->rv_scntl3, np->rv_dmode, np->rv_dcntl,
		    np->rv_ctest3, np->rv_ctest4, np->rv_ctest5);
	}
	/*
	 * Let user be aware of targets that have some disable flags set.
	 */
	sym_print_targets_flag(np, SYM_SCAN_BOOT_DISABLED, "SCAN AT BOOT");
	if (sym_verbose)
		sym_print_targets_flag(np, SYM_SCAN_LUNS_DISABLED,
		    "SCAN FOR LUNS");

	return 0;
}
2790
2791 /*
2792 * Prepare the next negotiation message if needed.
2793 *
2794 * Fill in the part of message buffer that contains the
2795 * negotiation and the nego_status field of the CCB.
2796 * Returns the size of the message in bytes.
2797 */
static int sym_prepare_nego(hcb_p np, ccb_p cp, int nego, u_char *msgptr)
{
	tcb_p tp = &np->target[cp->target];
	int msglen = 0;

	/*
	 * Early C1010 chips need a work-around for DT
	 * data transfer to work.
	 */
	if (!(np->features & FE_U3EN))
		tp->tinfo.goal.options = 0;
	/*
	 * negotiate using PPR ?
	 */
	if (tp->tinfo.goal.options & PPR_OPT_MASK)
		nego = NS_PPR;
	/*
	 * negotiate wide transfers ?
	 */
	else if (tp->tinfo.current.width != tp->tinfo.goal.width)
		nego = NS_WIDE;
	/*
	 * negotiate synchronous transfers?
	 */
	else if (tp->tinfo.current.period != tp->tinfo.goal.period ||
		 tp->tinfo.current.offset != tp->tinfo.goal.offset)
		nego = NS_SYNC;

	/*
	 * Build the extended message bytes:
	 * M_EXTENDED, length, message code, then the payload.
	 */
	switch (nego) {
	case NS_SYNC:
		msgptr[msglen++] = M_EXTENDED;
		msgptr[msglen++] = 3;
		msgptr[msglen++] = M_X_SYNC_REQ;
		msgptr[msglen++] = tp->tinfo.goal.period;
		msgptr[msglen++] = tp->tinfo.goal.offset;
		break;
	case NS_WIDE:
		msgptr[msglen++] = M_EXTENDED;
		msgptr[msglen++] = 2;
		msgptr[msglen++] = M_X_WIDE_REQ;
		msgptr[msglen++] = tp->tinfo.goal.width;
		break;
	case NS_PPR:
		msgptr[msglen++] = M_EXTENDED;
		msgptr[msglen++] = 6;
		msgptr[msglen++] = M_X_PPR_REQ;
		msgptr[msglen++] = tp->tinfo.goal.period;
		msgptr[msglen++] = 0;
		msgptr[msglen++] = tp->tinfo.goal.offset;
		msgptr[msglen++] = tp->tinfo.goal.width;
		msgptr[msglen++] = tp->tinfo.goal.options & PPR_OPT_DT;
		break;
	}

	cp->nego_status = nego;

	if (nego) {
		tp->nego_cp = cp; /* Keep track a nego will be performed */
		if (DEBUG_FLAGS & DEBUG_NEGO) {
			sym_print_msg(cp, nego == NS_SYNC ? "sync msgout" :
					  nego == NS_WIDE ? "wide msgout" :
					  "ppr msgout", msgptr);
		}
	}

	return msglen;
}
2865
2866 /*
2867 * Insert a job into the start queue.
2868 */
static void sym_put_start_queue(hcb_p np, ccb_p cp)
{
	u_short	qidx;

#ifdef SYM_CONF_IARB_SUPPORT
	/*
	 * If the previously queued CCB is not yet done,
	 * set the IARB hint. The SCRIPTS will go with IARB
	 * for this job when starting the previous one.
	 * We leave devices a chance to win arbitration by
	 * not using more than 'iarb_max' consecutive
	 * immediate arbitrations.
	 */
	if (np->last_cp && np->iarb_count < np->iarb_max) {
		np->last_cp->host_flags |= HF_HINT_IARB;
		++np->iarb_count;
	}
	else
		np->iarb_count = 0;
	np->last_cp = cp;
#endif

	/*
	 * Insert first the idle task and then our job.
	 * The MB should ensure proper ordering.
	 * Slots advance two at a time; the queue wraps at
	 * MAX_QUEUE*2 entries.
	 */
	qidx = np->squeueput + 2;
	if (qidx >= MAX_QUEUE*2) qidx = 0;

	/* The idle task at the new tail marks the end of the queue. */
	np->squeue [qidx] = cpu_to_scr(np->idletask_ba);
	MEMORY_WRITE_BARRIER();
	/* Only then overwrite the old tail with the real job. */
	np->squeue [np->squeueput] = cpu_to_scr(cp->ccb_ba);

	np->squeueput = qidx;

	if (DEBUG_FLAGS & DEBUG_QUEUE)
		device_printf(np->device, "queuepos=%d.\n", np->squeueput);

	/*
	 * Script processor may be waiting for reselect.
	 * Wake it up.
	 */
	MEMORY_WRITE_BARRIER();
	OUTB (nc_istat, SIGP|np->istat_sem);
}
2914
2915 /*
2916 * Soft reset the chip.
2917 *
2918 * Raising SRST when the chip is running may cause
2919 * problems on dual function chips (see below).
2920 * On the other hand, LVD devices need some delay
2921 * to settle and report actual BUS mode in STEST4.
2922 */
static void sym_chip_reset (hcb_p np)
{
	OUTB (nc_istat, SRST);	/* Assert software reset */
	UDELAY (10);
	OUTB (nc_istat, 0);	/* Release it */
	UDELAY(2000);	/* For BUS MODE to settle */
}
2930
2931 /*
2932 * Soft reset the chip.
2933 *
2934 * Some 896 and 876 chip revisions may hang-up if we set
2935 * the SRST (soft reset) bit at the wrong time when SCRIPTS
2936 * are running.
2937 * So, we need to abort the current operation prior to
2938 * soft resetting the chip.
2939 */
static void sym_soft_reset (hcb_p np)
{
	u_char istat;
	int i;

	/* Abort the current chip operation first. */
	OUTB (nc_istat, CABRT);
	/* Poll ISTAT until the abort completion interrupt shows up. */
	for (i = 1000000 ; i ; --i) {
		istat = INB (nc_istat);
		if (istat & SIP) {
			/* Drain a pending SCSI interrupt and keep polling. */
			INW (nc_sist);
			continue;
		}
		if (istat & DIP) {
			/* DMA interrupt: acknowledge and stop waiting. */
			OUTB (nc_istat, 0);
			INB (nc_dstat);
			break;
		}
	}
	if (!i)
		device_printf(np->device,
		    "unable to abort current chip operation.\n");
	sym_chip_reset (np);
}
2963
2964 /*
2965 * Start reset process.
2966 *
2967 * The interrupt handler will reinitialize the chip.
2968 */
/*
 * Start reset process.
 * The interrupt handler will reinitialize the chip.
 */
static void sym_start_reset(hcb_p np)
{
	/* Reset the BUS with interrupts enabled; status is ignored. */
	(void)sym_reset_scsi_bus(np, /*enab_int*/ 1);
}
2973
static int sym_reset_scsi_bus(hcb_p np, int enab_int)
{
	u32 term;
	int retv = 0;

	sym_soft_reset(np);	/* Soft reset the chip */
	if (enab_int)
		OUTW (nc_sien, RST);
	/*
	 * Enable Tolerant, reset IRQD if present and
	 * properly set IRQ mode, prior to resetting the bus.
	 */
	OUTB (nc_stest3, TE);
	OUTB (nc_dcntl, (np->rv_dcntl & IRQM));
	OUTB (nc_scntl1, CRST);
	UDELAY (200);

	if (!SYM_SETUP_SCSI_BUS_CHECK)
		goto out;
	/*
	 * Check for no terminators or SCSI bus shorts to ground.
	 * Read SCSI data bus, data parity bits and control signals.
	 * We are expecting RESET to be TRUE and other signals to be
	 * FALSE.
	 */
	term = INB(nc_sstat0);
	term = ((term & 2) << 7) + ((term & 1) << 17);	/* rst sdp0 */
	term |= ((INB(nc_sstat2) & 0x01) << 26) |	/* sdp1     */
		((INW(nc_sbdl) & 0xff)   << 9)  |	/* d7-0     */
		((INW(nc_sbdl) & 0xff00) << 10) |	/* d15-8    */
		INB(nc_sbcl);	/* req ack bsy sel atn msg cd io    */

	if (!(np->features & FE_WIDE))
		term &= 0x3ffff;	/* Ignore the wide-bus bits. */

	/* Only the RST bit should be asserted while we drive reset. */
	if (term != (2<<7)) {
		device_printf(np->device,
		    "suspicious SCSI data while resetting the BUS.\n");
		device_printf(np->device,
		    "%sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = "
		    "0x%lx, expecting 0x%lx\n", (np->features & FE_WIDE) ?
		    "dp1,d15-8," : "", (u_long)term, (u_long)(2 << 7));
		if (SYM_SETUP_SCSI_BUS_CHECK == 1)
			retv = 1;
	}
out:
	OUTB (nc_scntl1, 0);
	/* MDELAY(100); */
	return retv;
}
3024
3025 /*
3026 * The chip may have completed jobs. Look at the DONE QUEUE.
3027 *
3028 * On architectures that may reorder LOAD/STORE operations,
3029 * a memory barrier may be needed after the reading of the
3030 * so-called `flag' and prior to dealing with the data.
3031 */
/*
 * The chip may have completed jobs. Look at the DONE QUEUE.
 * Returns the number of CCBs completed; a zero DSA entry marks
 * the end of the done queue.
 */
static int sym_wakeup_done (hcb_p np)
{
	ccb_p cp;
	int idx, count;
	u32 dsa;

	SYM_LOCK_ASSERT(MA_OWNED);

	count = 0;
	idx = np->dqueueget;
	for (;;) {
		dsa = scr_to_cpu(np->dqueue[idx]);
		if (dsa == 0)
			break;
		np->dqueue[idx] = 0;
		idx += 2;
		if (idx >= MAX_QUEUE*2)
			idx = 0;

		cp = sym_ccb_from_dsa(np, dsa);
		if (cp == NULL) {
			device_printf(np->device,
			    "bad DSA (%x) in done queue.\n", (u_int)dsa);
			continue;
		}
		/* Barrier before reading data the chip just wrote. */
		MEMORY_READ_BARRIER();
		sym_complete_ok (np, cp);
		count++;
	}
	np->dqueueget = idx;

	return count;
}
3063
3064 /*
3065 * Complete all active CCBs with error.
3066 * Used on CHIP/SCSI RESET.
3067 */
static void sym_flush_busy_queue (hcb_p np, int cam_status)
{
	/*
	 * Move all active CCBs to the COMP queue
	 * and flush this queue.
	 * Every flushed CCB completes with 'cam_status'.
	 */
	sym_que_splice(&np->busy_ccbq, &np->comp_ccbq);
	sym_que_init(&np->busy_ccbq);
	sym_flush_comp_queue(np, cam_status);
}
3078
3079 /*
3080 * Start chip.
3081 *
3082 * 'reason' means:
3083 * 0: initialisation.
3084 * 1: SCSI BUS RESET delivered or received.
3085 * 2: SCSI BUS MODE changed.
3086 */
static void sym_init (hcb_p np, int reason)
{
	int	i;
	u32	phys;

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 * Reset chip if asked, otherwise just clear fifos.
	 */
	if (reason == 1)
		sym_soft_reset(np);
	else {
		OUTB (nc_stest3, TE|CSF);
		OUTONB (nc_ctest3, CLF);
	}

	/*
	 * Clear Start Queue.
	 * Even slots get the idle task, odd slots the SCRIPTS
	 * link to the next slot pair (the last one wraps around).
	 */
	phys = np->squeue_ba;
	for (i = 0; i < MAX_QUEUE*2; i += 2) {
		np->squeue[i] = cpu_to_scr(np->idletask_ba);
		np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4);
	}
	np->squeue[MAX_QUEUE*2-1] = cpu_to_scr(phys);

	/*
	 * Start at first entry.
	 */
	np->squeueput = 0;

	/*
	 * Clear Done Queue (same even/odd layout as the start queue).
	 */
	phys = np->dqueue_ba;
	for (i = 0; i < MAX_QUEUE*2; i += 2) {
		np->dqueue[i] = 0;
		np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4);
	}
	np->dqueue[MAX_QUEUE*2-1] = cpu_to_scr(phys);

	/*
	 * Start at first entry.
	 */
	np->dqueueget = 0;

	/*
	 * Install patches in scripts.
	 * This also let point to first position the start
	 * and done queue pointers used from SCRIPTS.
	 */
	np->fw_patch(np);

	/*
	 * Wakeup all pending jobs.
	 */
	sym_flush_busy_queue(np, CAM_SCSI_BUS_RESET);

	/*
	 * Init chip.
	 */
	OUTB (nc_istat, 0x00 );	/* Remove Reset, abort */
	UDELAY (2000);	/* The 895 needs time for the bus mode to settle */

	OUTB (nc_scntl0, np->rv_scntl0 | 0xc0);
				/* full arb., ena parity, par->ATN */
	OUTB (nc_scntl1, 0x00);	/* odd parity, and remove CRST!! */

	sym_selectclock(np, np->rv_scntl3);	/* Select SCSI clock */

	OUTB (nc_scid , RRE|np->myaddr);	/* Adapter SCSI address */
	OUTW (nc_respid, 1ul<<np->myaddr);	/* Id to respond to */
	OUTB (nc_istat , SIGP );	/* Signal Process */
	OUTB (nc_dmode , np->rv_dmode);	/* Burst length, dma mode */
	OUTB (nc_ctest5, np->rv_ctest5);	/* Large fifo + large burst */

	OUTB (nc_dcntl , NOCOM|np->rv_dcntl);	/* Protect SFBR */
	OUTB (nc_ctest3, np->rv_ctest3);	/* Write and invalidate */
	OUTB (nc_ctest4, np->rv_ctest4);	/* Master parity checking */

	/* Extended Sreq/Sack filtering not supported on the C10 */
	if (np->features & FE_C10)
		OUTB (nc_stest2, np->rv_stest2);
	else
		OUTB (nc_stest2, EXT|np->rv_stest2);

	OUTB (nc_stest3, TE);			/* TolerANT enable */
	OUTB (nc_stime0, 0x0c);			/* HTH disabled  STO 0.25 sec */

	/*
	 * For now, disable AIP generation on C1010-66.
	 */
	if (np->device_id == PCI_ID_LSI53C1010_2)
		OUTB (nc_aipcntl1, DISAIP);

	/*
	 * C10101 Errata.
	 * Errant SGE's when in narrow. Write bits 4 & 5 of
	 * STEST1 register to disable SGE. We probably should do
	 * that from SCRIPTS for each selection/reselection, but
	 * I just don't want. :)
	 */
	if (np->device_id == PCI_ID_LSI53C1010 &&
	    /* np->revision_id < 0xff */ 1)
		OUTB (nc_stest1, INB(nc_stest1) | 0x30);

	/*
	 * DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2.
	 * Disable overlapped arbitration for some dual function devices,
	 * regardless revision id (kind of post-chip-design feature. ;-))
	 */
	if (np->device_id == PCI_ID_SYM53C875)
		OUTB (nc_ctest0, (1<<5));
	else if (np->device_id == PCI_ID_SYM53C896)
		np->rv_ccntl0 |= DPR;

	/*
	 * Write CCNTL0/CCNTL1 for chips capable of 64 bit addressing
	 * and/or hardware phase mismatch, since only such chips
	 * seem to support those IO registers.
	 */
	if (np->features & (FE_DAC|FE_NOPM)) {
		OUTB (nc_ccntl0, np->rv_ccntl0);
		OUTB (nc_ccntl1, np->rv_ccntl1);
	}

	/*
	 * If phase mismatch handled by scripts (895A/896/1010),
	 * set PM jump addresses.
	 */
	if (np->features & FE_NOPM) {
		OUTL (nc_pmjad1, SCRIPTB_BA (np, pm_handle));
		OUTL (nc_pmjad2, SCRIPTB_BA (np, pm_handle));
	}

	/*
	 * Enable GPIO0 pin for writing if LED support from SCRIPTS.
	 * Also set GPIO5 and clear GPIO6 if hardware LED control.
	 */
	if (np->features & FE_LED0)
		OUTB(nc_gpcntl, INB(nc_gpcntl) & ~0x01);
	else if (np->features & FE_LEDC)
		OUTB(nc_gpcntl, (INB(nc_gpcntl) & ~0x41) | 0x20);

	/*
	 * enable ints
	 */
	OUTW (nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR);
	OUTB (nc_dien , MDPE|BF|SSI|SIR|IID);

	/*
	 * For 895/6 enable SBMC interrupt and save current SCSI bus mode.
	 * Try to eat the spurious SBMC interrupt that may occur when
	 * we reset the chip but not the SCSI BUS (at initialization).
	 */
	if (np->features & (FE_ULTRA2|FE_ULTRA3)) {
		OUTONW (nc_sien, SBMC);
		if (reason == 0) {
			MDELAY(100);
			INW (nc_sist);
		}
		np->scsi_mode = INB (nc_stest4) & SMODE;
	}

	/*
	 * Fill in target structure.
	 * Reinitialize usrsync.
	 * Reinitialize usrwide.
	 * Prepare sync negotiation according to actual SCSI bus mode.
	 */
	for (i=0;i<SYM_CONF_MAX_TARGET;i++) {
		tcb_p tp = &np->target[i];

		tp->to_reset  = 0;
		tp->head.sval = 0;
		tp->head.wval = np->rv_scntl3;
		tp->head.uval = 0;

		tp->tinfo.current.period = 0;
		tp->tinfo.current.offset = 0;
		tp->tinfo.current.width  = BUS_8_BIT;
		tp->tinfo.current.options = 0;
	}

	/*
	 * Download SCSI SCRIPTS to on-chip RAM if present,
	 * and start script processor.
	 */
	if (np->ram_ba) {
		if (sym_verbose > 1)
			device_printf(np->device,
			    "Downloading SCSI SCRIPTS.\n");
		if (np->ram_ws == 8192) {
			/* 8K RAM: SCRIPTB goes in the upper half. */
			OUTRAM_OFF(4096, np->scriptb0, np->scriptb_sz);
			OUTL (nc_mmws, np->scr_ram_seg);
			OUTL (nc_mmrs, np->scr_ram_seg);
			OUTL (nc_sfs,  np->scr_ram_seg);
			phys = SCRIPTB_BA (np, start64);
		}
		else
			phys = SCRIPTA_BA (np, init);
		OUTRAM_OFF(0, np->scripta0, np->scripta_sz);
	}
	else
		phys = SCRIPTA_BA (np, init);

	np->istat_sem = 0;

	OUTL (nc_dsa, np->hcb_ba);
	OUTL_DSP (phys);

	/*
	 * Notify the XPT about the RESET condition.
	 */
	if (reason != 0)
		xpt_async(AC_BUS_RESET, np->path, NULL);
}
3305
3306 /*
3307 * Get clock factor and sync divisor for a given
3308 * synchronous factor period.
3309 */
3310 static int
sym_getsync(hcb_p np,u_char dt,u_char sfac,u_char * divp,u_char * fakp)3311 sym_getsync(hcb_p np, u_char dt, u_char sfac, u_char *divp, u_char *fakp)
3312 {
3313 u32 clk = np->clock_khz; /* SCSI clock frequency in kHz */
3314 int div = np->clock_divn; /* Number of divisors supported */
3315 u32 fak; /* Sync factor in sxfer */
3316 u32 per; /* Period in tenths of ns */
3317 u32 kpc; /* (per * clk) */
3318 int ret;
3319
3320 /*
3321 * Compute the synchronous period in tenths of nano-seconds
3322 */
3323 if (dt && sfac <= 9) per = 125;
3324 else if (sfac <= 10) per = 250;
3325 else if (sfac == 11) per = 303;
3326 else if (sfac == 12) per = 500;
3327 else per = 40 * sfac;
3328 ret = per;
3329
3330 kpc = per * clk;
3331 if (dt)
3332 kpc <<= 1;
3333
3334 /*
3335 * For earliest C10 revision 0, we cannot use extra
3336 * clocks for the setting of the SCSI clocking.
3337 * Note that this limits the lowest sync data transfer
3338 * to 5 Mega-transfers per second and may result in
3339 * using higher clock divisors.
3340 */
3341 #if 1
3342 if ((np->features & (FE_C10|FE_U3EN)) == FE_C10) {
3343 /*
3344 * Look for the lowest clock divisor that allows an
3345 * output speed not faster than the period.
3346 */
3347 while (div > 0) {
3348 --div;
3349 if (kpc > (div_10M[div] << 2)) {
3350 ++div;
3351 break;
3352 }
3353 }
3354 fak = 0; /* No extra clocks */
3355 if (div == np->clock_divn) { /* Are we too fast ? */
3356 ret = -1;
3357 }
3358 *divp = div;
3359 *fakp = fak;
3360 return ret;
3361 }
3362 #endif
3363
3364 /*
3365 * Look for the greatest clock divisor that allows an
3366 * input speed faster than the period.
3367 */
3368 while (div-- > 0)
3369 if (kpc >= (div_10M[div] << 2)) break;
3370
3371 /*
3372 * Calculate the lowest clock factor that allows an output
3373 * speed not faster than the period, and the max output speed.
3374 * If fak >= 1 we will set both XCLKH_ST and XCLKH_DT.
3375 * If fak >= 2 we will also set XCLKS_ST and XCLKS_DT.
3376 */
3377 if (dt) {
3378 fak = (kpc - 1) / (div_10M[div] << 1) + 1 - 2;
3379 /* ret = ((2+fak)*div_10M[div])/np->clock_khz; */
3380 }
3381 else {
3382 fak = (kpc - 1) / div_10M[div] + 1 - 4;
3383 /* ret = ((4+fak)*div_10M[div])/np->clock_khz; */
3384 }
3385
3386 /*
3387 * Check against our hardware limits, or bugs :).
3388 */
3389 if (fak > 2) {fak = 2; ret = -1;}
3390
3391 /*
3392 * Compute and return sync parameters.
3393 */
3394 *divp = div;
3395 *fakp = fak;
3396
3397 return ret;
3398 }
3399
3400 /*
3401 * Tell the SCSI layer about the new transfer parameters.
3402 */
3403 static void
sym_xpt_async_transfer_neg(hcb_p np,int target,u_int spi_valid)3404 sym_xpt_async_transfer_neg(hcb_p np, int target, u_int spi_valid)
3405 {
3406 struct ccb_trans_settings cts;
3407 struct cam_path *path;
3408 int sts;
3409 tcb_p tp = &np->target[target];
3410
3411 sts = xpt_create_path(&path, NULL, cam_sim_path(np->sim), target,
3412 CAM_LUN_WILDCARD);
3413 if (sts != CAM_REQ_CMP)
3414 return;
3415
3416 bzero(&cts, sizeof(cts));
3417
3418 #define cts__scsi (cts.proto_specific.scsi)
3419 #define cts__spi (cts.xport_specific.spi)
3420
3421 cts.type = CTS_TYPE_CURRENT_SETTINGS;
3422 cts.protocol = PROTO_SCSI;
3423 cts.transport = XPORT_SPI;
3424 cts.protocol_version = tp->tinfo.current.scsi_version;
3425 cts.transport_version = tp->tinfo.current.spi_version;
3426
3427 cts__spi.valid = spi_valid;
3428 if (spi_valid & CTS_SPI_VALID_SYNC_RATE)
3429 cts__spi.sync_period = tp->tinfo.current.period;
3430 if (spi_valid & CTS_SPI_VALID_SYNC_OFFSET)
3431 cts__spi.sync_offset = tp->tinfo.current.offset;
3432 if (spi_valid & CTS_SPI_VALID_BUS_WIDTH)
3433 cts__spi.bus_width = tp->tinfo.current.width;
3434 if (spi_valid & CTS_SPI_VALID_PPR_OPTIONS)
3435 cts__spi.ppr_options = tp->tinfo.current.options;
3436 #undef cts__spi
3437 #undef cts__scsi
3438 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
3439 xpt_async(AC_TRANSFER_NEG, path, &cts);
3440 xpt_free_path(path);
3441 }
3442
/*
 * Masks of CTS_SPI_VALID_* bits reported to CAM for each kind of
 * negotiation message (WDTR, SDTR, PPR).
 *
 * Each expansion is fully parenthesized so that the macro stays a
 * single expression when combined with other operators (previously
 * the bare `|' chain would misparse in contexts such as
 * `x & SYM_SPI_VALID_WDTR', since `&' binds tighter than `|').
 */
#define SYM_SPI_VALID_WDTR		\
	(CTS_SPI_VALID_BUS_WIDTH |	\
	 CTS_SPI_VALID_SYNC_RATE |	\
	 CTS_SPI_VALID_SYNC_OFFSET)
#define SYM_SPI_VALID_SDTR		\
	(CTS_SPI_VALID_SYNC_RATE |	\
	 CTS_SPI_VALID_SYNC_OFFSET)
#define SYM_SPI_VALID_PPR		\
	(CTS_SPI_VALID_PPR_OPTIONS |	\
	 CTS_SPI_VALID_BUS_WIDTH |	\
	 CTS_SPI_VALID_SYNC_RATE |	\
	 CTS_SPI_VALID_SYNC_OFFSET)
3455
3456 /*
3457 * We received a WDTR.
3458 * Let everything be aware of the changes.
3459 */
static void sym_setwide(hcb_p np, ccb_p cp, u_char wide)
{
	tcb_p tgt = &np->target[cp->target];

	/* Program the chip: the new width, with sync forced to async. */
	sym_settrans(np, cp, 0, 0, 0, wide, 0, 0);

	/*
	 * Record the new width; a width change invalidates any sync
	 * agreement, so current sync parameters drop back to async.
	 */
	tgt->tinfo.goal.width = wide;
	tgt->tinfo.current.width = wide;
	tgt->tinfo.current.offset = 0;
	tgt->tinfo.current.period = 0;
	tgt->tinfo.current.options = 0;

	/* Tell the SCSI layer about the new transfer parameters. */
	sym_xpt_async_transfer_neg(np, cp->target, SYM_SPI_VALID_WDTR);
}
3476
3477 /*
3478 * We received a SDTR.
3479 * Let everything be aware of the changes.
3480 */
3481 static void
sym_setsync(hcb_p np,ccb_p cp,u_char ofs,u_char per,u_char div,u_char fak)3482 sym_setsync(hcb_p np, ccb_p cp, u_char ofs, u_char per, u_char div, u_char fak)
3483 {
3484 tcb_p tp = &np->target[cp->target];
3485 u_char wide = (cp->phys.select.sel_scntl3 & EWS) ? 1 : 0;
3486
3487 sym_settrans(np, cp, 0, ofs, per, wide, div, fak);
3488
3489 /*
3490 * Tell the SCSI layer about the new transfer parameters.
3491 */
3492 tp->tinfo.goal.period = tp->tinfo.current.period = per;
3493 tp->tinfo.goal.offset = tp->tinfo.current.offset = ofs;
3494 tp->tinfo.goal.options = tp->tinfo.current.options = 0;
3495
3496 sym_xpt_async_transfer_neg(np, cp->target, SYM_SPI_VALID_SDTR);
3497 }
3498
3499 /*
3500 * We received a PPR.
3501 * Let everything be aware of the changes.
3502 */
static void sym_setpprot(hcb_p np, ccb_p cp, u_char dt, u_char ofs,
			 u_char per, u_char wide, u_char div, u_char fak)
{
	tcb_p tgt = &np->target[cp->target];

	/* Program the chip with all negotiated PPR parameters at once. */
	sym_settrans(np, cp, dt, ofs, per, wide, div, fak);

	/*
	 * Record the full set of negotiated parameters, including
	 * the DT option bits carried by the PPR message.
	 */
	tgt->tinfo.goal.width = wide;
	tgt->tinfo.current.width = wide;
	tgt->tinfo.goal.period = per;
	tgt->tinfo.current.period = per;
	tgt->tinfo.goal.offset = ofs;
	tgt->tinfo.current.offset = ofs;
	tgt->tinfo.goal.options = dt;
	tgt->tinfo.current.options = dt;

	/* Tell the SCSI layer about the new transfer parameters. */
	sym_xpt_async_transfer_neg(np, cp->target, SYM_SPI_VALID_PPR);
}
3520
3521 /*
 * Switch trans mode for current job and its target.
3523 */
static void sym_settrans(hcb_p np, ccb_p cp, u_char dt, u_char ofs,
			 u_char per, u_char wide, u_char div, u_char fak)
{
	SYM_QUEHEAD *qp;
	union ccb *ccb;
	tcb_p tp;
	u_char target = INB (nc_sdid) & 0x0f;	/* currently selected target */
	u_char sval, wval, uval;	/* new SXFER/SCNTL3/SCNTL4 images */

	/*
	 * Sanity checks: we need a valid CCB with a CAM CCB attached,
	 * and the chip's notion of the connected target must match it.
	 */
	assert (cp);
	if (!cp) return;
	ccb = cp->cam_ccb;
	assert (ccb);
	if (!ccb) return;
	assert (target == (cp->target & 0xf));
	tp = &np->target[target];

	/* Start from the register images currently cached in the TCB. */
	sval = tp->head.sval;
	wval = tp->head.wval;
	uval = tp->head.uval;

#if 0
	printf("XXXX sval=%x wval=%x uval=%x (%x)\n",
		sval, wval, uval, np->rv_scntl3);
#endif
	/*
	 * Set the offset.
	 * C10 chips have a 6-bit offset field, older ones only 5 bits.
	 */
	if (!(np->features & FE_C10))
		sval = (sval & ~0x1f) | ofs;
	else
		sval = (sval & ~0x3f) | ofs;

	/*
	 * Set the sync divisor and extra clock factor.
	 * Only meaningful when a non-zero offset (i.e. sync) is wanted.
	 */
	if (ofs != 0) {
		wval = (wval & ~0x70) | ((div+1) << 4);
		if (!(np->features & FE_C10))
			/* Pre-C10: extra clocks live in SXFER bits 5-7. */
			sval = (sval & ~0xe0) | (fak << 5);
		else {
			/* C10: extra clocks live in the SCNTL4 image. */
			uval = uval & ~(XCLKH_ST|XCLKH_DT|XCLKS_ST|XCLKS_DT);
			if (fak >= 1) uval |= (XCLKH_ST|XCLKH_DT);
			if (fak >= 2) uval |= (XCLKS_ST|XCLKS_DT);
		}
	}

	/*
	 * Set the bus width (EWS bit in the SCNTL3 image).
	 */
	wval = wval & ~EWS;
	if (wide != 0)
		wval |= EWS;

	/*
	 * Set misc. ultra enable bits.
	 */
	if (np->features & FE_C10) {
		uval = uval & ~(U3EN|AIPCKEN);
		if (dt)	{
			/* DT transfers require the U3EN feature. */
			assert(np->features & FE_U3EN);
			uval |= U3EN;
		}
	}
	else {
		wval = wval & ~ULTRA;
		if (per <= 12)	wval |= ULTRA;
	}

	/*
	 * Stop there if sync parameters are unchanged.
	 */
	if (tp->head.sval == sval &&
	    tp->head.wval == wval &&
	    tp->head.uval == uval)
		return;
	tp->head.sval = sval;
	tp->head.wval = wval;
	tp->head.uval = uval;

	/*
	 * Disable extended Sreq/Sack filtering if per < 50.
	 * Not supported on the C1010.
	 */
	if (per < 50 && !(np->features & FE_C10))
		OUTOFFB (nc_stest2, EXT);

	/*
	 * set actual value and sync_status
	 */
	OUTB (nc_sxfer, tp->head.sval);
	OUTB (nc_scntl3, tp->head.wval);

	if (np->features & FE_C10) {
		OUTB (nc_scntl4, tp->head.uval);
	}

	/*
	 * patch ALL busy ccbs of this target, so that pending and
	 * future selections of this target use the new parameters.
	 */
	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
		cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		if (cp->target != target)
			continue;
		cp->phys.select.sel_scntl3 = tp->head.wval;
		cp->phys.select.sel_sxfer = tp->head.sval;
		if (np->features & FE_C10) {
			cp->phys.select.sel_scntl4 = tp->head.uval;
		}
	}
}
3635
3636 /*
3637 * log message for real hard errors
3638 *
3639 * sym0 targ 0?: ERROR (ds:si) (so-si-sd) (sxfer/scntl3) @ name (dsp:dbc).
3640 * reg: r0 r1 r2 r3 r4 r5 r6 ..... rf.
3641 *
3642 * exception register:
3643 * ds: dstat
3644 * si: sist
3645 *
3646 * SCSI bus lines:
3647 * so: control lines as driven by chip.
3648 * si: control lines as seen by chip.
3649 * sd: scsi data lines as seen by chip.
3650 *
3651 * wide/fastmode:
3652 * sxfer: (see the manual)
3653 * scntl3: (see the manual)
3654 *
3655 * current script command:
3656 * dsp: script address (relative to start of script).
3657 * dbc: first word of script command.
3658 *
3659 * First 24 register of the chip:
3660 * r0..rf
3661 */
static void sym_log_hard_error(hcb_p np, u_short sist, u_char dstat)
{
	u32	dsp;
	int	script_ofs;
	int	script_size;
	char	*script_name;
	u_char	*script_base;
	int	i;

	dsp = INL (nc_dsp);

	/*
	 * Figure out which SCRIPTS array (if any) the DSP points
	 * into, so we can print a symbolic location below.
	 */
	if (dsp > np->scripta_ba &&
	    dsp <= np->scripta_ba + np->scripta_sz) {
		script_ofs = dsp - np->scripta_ba;
		script_size = np->scripta_sz;
		script_base = (u_char *) np->scripta0;
		script_name = "scripta";
	}
	else if (np->scriptb_ba < dsp &&
		 dsp <= np->scriptb_ba + np->scriptb_sz) {
		script_ofs = dsp - np->scriptb_ba;
		script_size = np->scriptb_sz;
		script_base = (u_char *) np->scriptb0;
		script_name = "scriptb";
	} else {
		/* DSP points outside both scripts: print it raw. */
		script_ofs = dsp;
		script_size = 0;
		script_base = NULL;
		script_name = "mem";
	}

	printf ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x) @ (%s %x:%08x).\n",
		sym_name (np), (unsigned)INB (nc_sdid)&0x0f, dstat, sist,
		(unsigned)INB (nc_socl), (unsigned)INB (nc_sbcl),
		(unsigned)INB (nc_sbdl), (unsigned)INB (nc_sxfer),
		(unsigned)INB (nc_scntl3), script_name, script_ofs,
		(unsigned)INL (nc_dbc));

	/*
	 * Dump the interrupted SCRIPTS instruction word when the
	 * offset is word-aligned and falls inside the script.
	 */
	if (((script_ofs & 3) == 0) &&
	    (unsigned)script_ofs < script_size) {
		device_printf(np->device, "script cmd = %08x\n",
		    scr_to_cpu((int) *(u32 *)(script_base + script_ofs)));
	}

	/* Dump the first 24 chip registers. */
	device_printf(np->device, "regdump:");
	for (i = 0; i < 24; i++)
		printf (" %02x", (unsigned)INB_OFF(i));
	printf (".\n");

	/*
	 * PCI BUS error: read the PCI status register and, if any
	 * error bit is set, clear it (write-1-to-clear) and log it.
	 */
	if (dstat & (MDPE|BF)) {
		u_short pci_sts;
		pci_sts = pci_read_config(np->device, PCIR_STATUS, 2);
		if (pci_sts & 0xf900) {
			pci_write_config(np->device, PCIR_STATUS, pci_sts, 2);
			device_printf(np->device, "PCI STATUS = 0x%04x\n",
			    pci_sts & 0xf900);
		}
	}
}
3724
3725 /*
3726 * chip interrupt handler
3727 *
3728 * In normal situations, interrupt conditions occur one at
3729 * a time. But when something bad happens on the SCSI BUS,
3730 * the chip may raise several interrupt flags before
 * stopping and interrupting the CPU. The additional
3732 * interrupt flags are stacked in some extra registers
3733 * after the SIP and/or DIP flag has been raised in the
3734 * ISTAT. After the CPU has read the interrupt condition
3735 * flag from SIST or DSTAT, the chip unstacks the other
3736 * interrupt flags and sets the corresponding bits in
3737 * SIST or DSTAT. Since the chip starts stacking once the
3738 * SIP or DIP flag is set, there is a small window of time
3739 * where the stacking does not occur.
3740 *
3741 * Typically, multiple interrupt conditions may happen in
3742 * the following situations:
3743 *
3744 * - SCSI parity error + Phase mismatch (PAR|MA)
3745 * When a parity error is detected in input phase
3746 * and the device switches to msg-in phase inside a
3747 * block MOV.
3748 * - SCSI parity error + Unexpected disconnect (PAR|UDC)
3749 * When a stupid device does not want to handle the
3750 * recovery of an SCSI parity error.
3751 * - Some combinations of STO, PAR, UDC, ...
3752 * When using non compliant SCSI stuff, when user is
3753 * doing non compliant hot tampering on the BUS, when
3754 * something really bad happens to a device, etc ...
3755 *
3756 * The heuristic suggested by SYMBIOS to handle
3757 * multiple interrupts is to try unstacking all
3758 * interrupts conditions and to handle them on some
3759 * priority based on error severity.
3760 * This will work when the unstacking has been
3761 * successful, but we cannot be 100 % sure of that,
3762 * since the CPU may have been faster to unstack than
3763 * the chip is able to stack. Hmmm ... But it seems that
3764 * such a situation is very unlikely to happen.
3765 *
 * If this happens, for example an STO caught by the CPU,
 * then a UDC occurring before the CPU has restarted
 * the SCRIPTS, the driver may wrongly complete the
3769 * same command on UDC, since the SCRIPTS didn't restart
3770 * and the DSA still points to the same command.
3771 * We avoid this situation by setting the DSA to an
3772 * invalid value when the CCB is completed and before
3773 * restarting the SCRIPTS.
3774 *
3775 * Another issue is that we need some section of our
3776 * recovery procedures to be somehow uninterruptible but
 * the SCRIPTS processor does not provide such a
 * feature. For this reason, we handle recovery preferentially
3779 * from the C code and check against some SCRIPTS critical
3780 * sections from the C code.
3781 *
3782 * Hopefully, the interrupt handling of the driver is now
 * able to resist weird BUS error conditions, but do not
3784 * ask me for any guarantee that it will never fail. :-)
3785 * Use at your own decision and risk.
3786 */
static void sym_intr1 (hcb_p np)
{
	u_char	istat, istatc;
	u_char	dstat;
	u_short	sist;

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 * interrupt on the fly ?
	 *
	 * A `dummy read' is needed to ensure that the
	 * clear of the INTF flag reaches the device
	 * before the scanning of the DONE queue.
	 */
	istat = INB (nc_istat);
	if (istat & INTF) {
		OUTB (nc_istat, (istat & SIGP) | INTF | np->istat_sem);
		istat = INB (nc_istat);		/* DUMMY READ */
		if (DEBUG_FLAGS & DEBUG_TINY) printf ("F ");
		(void)sym_wakeup_done (np);
	}

	/* No SCSI (SIP) or DMA (DIP) interrupt pending: we are done. */
	if (!(istat & (SIP|DIP)))
		return;

#if 0	/* We should never get this one */
	if (istat & CABRT)
		OUTB (nc_istat, CABRT);
#endif

	/*
	 * PAR and MA interrupts may occur at the same time,
	 * and we need to know of both in order to handle
	 * this situation properly. We try to unstack SCSI
	 * interrupts for that reason. BTW, I dislike a LOT
	 * such a loop inside the interrupt routine.
	 * Even if DMA interrupt stacking is very unlikely to
	 * happen, we also try unstacking these ones, since
	 * this has no performance impact.
	 */
	sist	= 0;
	dstat	= 0;
	istatc	= istat;
	do {
		/* Reading SIST/DSTAT makes the chip unstack the next
		 * pending condition into the same registers; loop until
		 * ISTAT reports no more stacked interrupts. */
		if (istatc & SIP)
			sist  |= INW (nc_sist);
		if (istatc & DIP)
			dstat |= INB (nc_dstat);
		istatc = INB (nc_istat);
		istat |= istatc;
	} while (istatc & (SIP|DIP));

	if (DEBUG_FLAGS & DEBUG_TINY)
		printf ("<%d|%x:%x|%x:%x>",
			(int)INB(nc_scr0),
			dstat,sist,
			(unsigned)INL(nc_dsp),
			(unsigned)INL(nc_dbc));
	/*
	 * On paper, a memory barrier may be needed here.
	 * And since we are paranoid ... :)
	 */
	MEMORY_READ_BARRIER();

	/*
	 * First, interrupts we want to service cleanly.
	 *
	 * Phase mismatch (MA) is the most frequent interrupt
	 * for chip earlier than the 896 and so we have to service
	 * it as quickly as possible.
	 * A SCSI parity error (PAR) may be combined with a phase
	 * mismatch condition (MA).
	 * Programmed interrupts (SIR) are used to call the C code
	 * from SCRIPTS.
	 * The single step interrupt (SSI) is not used in this
	 * driver.
	 */
	if (!(sist  & (STO|GEN|HTH|SGE|UDC|SBMC|RST)) &&
	    !(dstat & (MDPE|BF|ABRT|IID))) {
		if	(sist & PAR)	sym_int_par (np, sist);
		else if (sist & MA)	sym_int_ma (np);
		else if (dstat & SIR)	sym_int_sir (np);
		else if (dstat & SSI)	OUTONB_STD ();
		else			goto unknown_int;
		return;
	}

	/*
	 * Now, interrupts that donnot happen in normal
	 * situations and that we may need to recover from.
	 *
	 * On SCSI RESET (RST), we reset everything.
	 * On SCSI BUS MODE CHANGE (SBMC), we complete all
	 * active CCBs with RESET status, prepare all devices
	 * for negotiating again and restart the SCRIPTS.
	 * On STO and UDC, we complete the CCB with the corres-
	 * ponding status and restart the SCRIPTS.
	 */
	if (sist & RST) {
		xpt_print_path(np->path);
		printf("SCSI BUS reset detected.\n");
		sym_init (np, 1);
		return;
	}

	OUTB (nc_ctest3, np->rv_ctest3 | CLF);	/* clear dma fifo  */
	OUTB (nc_stest3, TE|CSF);		/* clear scsi fifo */

	if (!(sist  & (GEN|HTH|SGE)) &&
	    !(dstat & (MDPE|BF|ABRT|IID))) {
		if	(sist & SBMC)	sym_int_sbmc (np);
		else if (sist & STO)	sym_int_sto (np);
		else if (sist & UDC)	sym_int_udc (np);
		else			goto unknown_int;
		return;
	}

	/*
	 * Now, interrupts we are not able to recover cleanly.
	 *
	 * Log message for hard errors.
	 * Reset everything.
	 */

	sym_log_hard_error(np, sist, dstat);

	if ((sist & (GEN|HTH|SGE)) ||
	    (dstat & (MDPE|BF|ABRT|IID))) {
		sym_start_reset(np);
		return;
	}

unknown_int:
	/*
	 * We just miss the cause of the interrupt. :(
	 * Print a message. The timeout will do the real work.
	 */
	device_printf(np->device,
	    "unknown interrupt(s) ignored, ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n",
	    istat, dstat, sist);
}
3929
sym_intr(void * arg)3930 static void sym_intr(void *arg)
3931 {
3932 hcb_p np = arg;
3933
3934 SYM_LOCK();
3935
3936 if (DEBUG_FLAGS & DEBUG_TINY) printf ("[");
3937 sym_intr1((hcb_p) arg);
3938 if (DEBUG_FLAGS & DEBUG_TINY) printf ("]");
3939
3940 SYM_UNLOCK();
3941 }
3942
sym_poll(struct cam_sim * sim)3943 static void sym_poll(struct cam_sim *sim)
3944 {
3945 sym_intr1(cam_sim_softc(sim));
3946 }
3947
3948 /*
3949 * generic recovery from scsi interrupt
3950 *
3951 * The doc says that when the chip gets an SCSI interrupt,
3952 * it tries to stop in an orderly fashion, by completing
3953 * an instruction fetch that had started or by flushing
3954 * the DMA fifo for a write to memory that was executing.
3955 * Such a fashion is not enough to know if the instruction
3956 * that was just before the current DSP value has been
3957 * executed or not.
3958 *
3959 * There are some small SCRIPTS sections that deal with
3960 * the start queue and the done queue that may break any
 * assumption from the C code if we are interrupted
3962 * inside, so we reset if this happens. Btw, since these
3963 * SCRIPTS sections are executed while the SCRIPTS hasn't
3964 * started SCSI operations, it is very unlikely to happen.
3965 *
3966 * All the driver data structures are supposed to be
3967 * allocated from the same 4 GB memory window, so there
3968 * is a 1 to 1 relationship between DSA and driver data
3969 * structures. Since we are careful :) to invalidate the
3970 * DSA when we complete a command or when the SCRIPTS
3971 * pushes a DSA into a queue, we can trust it when it
3972 * points to a CCB.
3973 */
static void sym_recover_scsi_int (hcb_p np, u_char hsts)
{
	u32	dsp	= INL (nc_dsp);
	u32	dsa	= INL (nc_dsa);
	ccb_p cp	= sym_ccb_from_dsa(np, dsa);

	/*
	 * If we have been interrupted inside one of the SCRIPTS
	 * critical paths (start/done queue handling), the C code's
	 * assumptions no longer hold and we must reset everything.
	 * Only outside those windows can we trust the DSA value.
	 */
	if ((dsp > SCRIPTA_BA (np, getjob_begin) &&
	     dsp < SCRIPTA_BA (np, getjob_end) + 1) ||
	    (dsp > SCRIPTA_BA (np, ungetjob) &&
	     dsp < SCRIPTA_BA (np, reselect) + 1) ||
	    (dsp > SCRIPTB_BA (np, sel_for_abort) &&
	     dsp < SCRIPTB_BA (np, sel_for_abort_1) + 1) ||
	    (dsp > SCRIPTA_BA (np, done) &&
	     dsp < SCRIPTA_BA (np, done_end) + 1)) {
		sym_start_reset(np);
		return;
	}

	OUTB (nc_ctest3, np->rv_ctest3 | CLF);	/* clear dma fifo  */
	OUTB (nc_stest3, TE|CSF);		/* clear scsi fifo */

	if (cp) {
		/*
		 * We have a CCB: let the SCRIPTS call us back for
		 * the handling of the error with SCRATCHA filled with
		 * STARTPOS. This way, we will be able to freeze the
		 * device queue and requeue awaiting IOs.
		 */
		cp->host_status = hsts;
		OUTL_DSP (SCRIPTA_BA (np, complete_error));
	} else {
		/*
		 * No CCB: invalidate the DSA and just restart
		 * the SCRIPTS.
		 */
		OUTL (nc_dsa, 0xffffff);
		OUTL_DSP (SCRIPTA_BA (np, start));
	}
}
4021
4022 /*
4023 * chip exception handler for selection timeout
4024 */
static void sym_int_sto (hcb_p np)
{
	u32 dsp = INL (nc_dsp);

	if (DEBUG_FLAGS & DEBUG_TINY)
		printf ("T");

	/*
	 * Recover cleanly only when the timeout hit right after the
	 * wait-for-selection-done instruction in SCRIPTS; anywhere
	 * else we cannot trust the chip state and reset everything.
	 */
	if (dsp != SCRIPTA_BA (np, wf_sel_done) + 8)
		sym_start_reset(np);
	else
		sym_recover_scsi_int(np, HS_SEL_TIMEOUT);
}
4036
4037 /*
4038 * chip exception handler for unexpected disconnect
4039 */
static void sym_int_udc (hcb_p np)
{
	/* Log the event, then complete the current CCB (if any) with
	 * HS_UNEXPECTED status and restart the SCRIPTS. */
	device_printf(np->device, "unexpected disconnect\n");
	sym_recover_scsi_int(np, HS_UNEXPECTED);
}
4045
4046 /*
4047 * chip exception handler for SCSI bus mode change
4048 *
4049 * spi2-r12 11.2.3 says a transceiver mode change must
4050 * generate a reset event and a device that detects a reset
4051 * event shall initiate a hard reset. It says also that a
4052 * device that detects a mode change shall set data transfer
4053 * mode to eight bit asynchronous, etc...
4054 * So, just reinitializing all except chip should be enough.
4055 */
static void sym_int_sbmc (hcb_p np)
{
	u_char new_mode;

	/* Read the new transceiver mode from STEST4. */
	new_mode = INB (nc_stest4) & SMODE;

	/*
	 * Notify user of the mode transition.
	 */
	xpt_print_path(np->path);
	printf("SCSI BUS mode change from %s to %s.\n",
		sym_scsi_bus_mode(np->scsi_mode), sym_scsi_bus_mode(new_mode));

	/*
	 * Should suspend command processing for a few seconds and
	 * reinitialize all except the chip.
	 */
	sym_init (np, 2);
}
4073
4074 /*
4075 * chip exception handler for SCSI parity error.
4076 *
4077 * When the chip detects a SCSI parity error and is
4078 * currently executing a (CH)MOV instruction, it does
4079 * not interrupt immediately, but tries to finish the
4080 * transfer of the current scatter entry before
4081 * interrupting. The following situations may occur:
4082 *
4083 * - The complete scatter entry has been transferred
4084 * without the device having changed phase.
4085 * The chip will then interrupt with the DSP pointing
4086 * to the instruction that follows the MOV.
4087 *
4088 * - A phase mismatch occurs before the MOV finished
4089 * and phase errors are to be handled by the C code.
4090 * The chip will then interrupt with both PAR and MA
4091 * conditions set.
4092 *
4093 * - A phase mismatch occurs before the MOV finished and
4094 * phase errors are to be handled by SCRIPTS.
4095 * The chip will load the DSP with the phase mismatch
4096 * JUMP address and interrupt the host processor.
4097 */
static void sym_int_par (hcb_p np, u_short sist)
{
	u_char	hsts	= INB (HS_PRT);		/* host status byte	*/
	u32	dsp	= INL (nc_dsp);
	u32	dbc	= INL (nc_dbc);
	u32	dsa	= INL (nc_dsa);
	u_char	sbcl	= INB (nc_sbcl);	/* SCSI control lines	*/
	u_char	cmd	= dbc >> 24;		/* SCRIPTS opcode byte	*/
	int phase	= cmd & 7;		/* SCSI phase of the MOV */
	ccb_p cp	= sym_ccb_from_dsa(np, dsa);

	device_printf(np->device,
	    "SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n", hsts, dbc,
	    sbcl);

	/*
	 * Check that the chip is connected to the SCSI BUS.
	 */
	if (!(INB (nc_scntl1) & ISCON)) {
		sym_recover_scsi_int(np, HS_UNEXPECTED);
		return;
	}

	/*
	 * If the nexus is not clearly identified, reset the bus.
	 * We will try to do better later.
	 */
	if (!cp)
		goto reset_all;

	/*
	 * Check instruction was a MOV, direction was INPUT and
	 * ATN is asserted.
	 */
	if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8))
		goto reset_all;

	/*
	 * Keep track of the parity error.
	 */
	OUTONB (HF_PRT, HF_EXT_ERR);
	cp->xerr_status |= XE_PARITY_ERR;

	/*
	 * Prepare the message to send to the device
	 * (MSG IN phase uses M_PARITY, others M_ID_ERROR).
	 */
	np->msgout[0] = (phase == 7) ? M_PARITY : M_ID_ERROR;

	/*
	 * If the old phase was DATA IN phase, we have to deal with
	 * the 3 situations described above.
	 * For other input phases (MSG IN and STATUS), the device
	 * must resend the whole thing that failed parity checking
	 * or signal error. So, jumping to dispatcher should be OK.
	 */
	if (phase == 1 || phase == 5) {
		/* Phase mismatch handled by SCRIPTS */
		if (dsp == SCRIPTB_BA (np, pm_handle))
			OUTL_DSP (dsp);
		/* Phase mismatch handled by the C code */
		else if (sist & MA)
			sym_int_ma (np);
		/* No phase mismatch occurred */
		else {
			OUTL (nc_temp, dsp);
			OUTL_DSP (SCRIPTA_BA (np, dispatch));
		}
	}
	else
		OUTL_DSP (SCRIPTA_BA (np, clrack));
	return;

reset_all:
	sym_start_reset(np);
}
4173
4174 /*
4175 * chip exception handler for phase errors.
4176 *
4177 * We have to construct a new transfer descriptor,
4178 * to transfer the rest of the current block.
4179 */
sym_int_ma(hcb_p np)4180 static void sym_int_ma (hcb_p np)
4181 {
4182 u32 dbc;
4183 u32 rest;
4184 u32 dsp;
4185 u32 dsa;
4186 u32 nxtdsp;
4187 u32 *vdsp;
4188 u32 oadr, olen;
4189 u32 *tblp;
4190 u32 newcmd;
4191 u_int delta;
4192 u_char cmd;
4193 u_char hflags, hflags0;
4194 struct sym_pmc *pm;
4195 ccb_p cp;
4196
4197 dsp = INL (nc_dsp);
4198 dbc = INL (nc_dbc);
4199 dsa = INL (nc_dsa);
4200
4201 cmd = dbc >> 24;
4202 rest = dbc & 0xffffff;
4203 delta = 0;
4204
4205 /*
4206 * locate matching cp if any.
4207 */
4208 cp = sym_ccb_from_dsa(np, dsa);
4209
4210 /*
4211 * Donnot take into account dma fifo and various buffers in
4212 * INPUT phase since the chip flushes everything before
4213 * raising the MA interrupt for interrupted INPUT phases.
4214 * For DATA IN phase, we will check for the SWIDE later.
4215 */
4216 if ((cmd & 7) != 1 && (cmd & 7) != 5) {
4217 u_char ss0, ss2;
4218
4219 if (np->features & FE_DFBC)
4220 delta = INW (nc_dfbc);
4221 else {
4222 u32 dfifo;
4223
4224 /*
4225 * Read DFIFO, CTEST[4-6] using 1 PCI bus ownership.
4226 */
4227 dfifo = INL(nc_dfifo);
4228
4229 /*
4230 * Calculate remaining bytes in DMA fifo.
4231 * (CTEST5 = dfifo >> 16)
4232 */
4233 if (dfifo & (DFS << 16))
4234 delta = ((((dfifo >> 8) & 0x300) |
4235 (dfifo & 0xff)) - rest) & 0x3ff;
4236 else
4237 delta = ((dfifo & 0xff) - rest) & 0x7f;
4238 }
4239
4240 /*
4241 * The data in the dma fifo has not been transferred to
4242 * the target -> add the amount to the rest
4243 * and clear the data.
4244 * Check the sstat2 register in case of wide transfer.
4245 */
4246 rest += delta;
4247 ss0 = INB (nc_sstat0);
4248 if (ss0 & OLF) rest++;
4249 if (!(np->features & FE_C10))
4250 if (ss0 & ORF) rest++;
4251 if (cp && (cp->phys.select.sel_scntl3 & EWS)) {
4252 ss2 = INB (nc_sstat2);
4253 if (ss2 & OLF1) rest++;
4254 if (!(np->features & FE_C10))
4255 if (ss2 & ORF1) rest++;
4256 }
4257
4258 /*
4259 * Clear fifos.
4260 */
4261 OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* dma fifo */
4262 OUTB (nc_stest3, TE|CSF); /* scsi fifo */
4263 }
4264
4265 /*
4266 * log the information
4267 */
4268 if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE))
4269 printf ("P%x%x RL=%d D=%d ", cmd&7, INB(nc_sbcl)&7,
4270 (unsigned) rest, (unsigned) delta);
4271
4272 /*
4273 * try to find the interrupted script command,
4274 * and the address at which to continue.
4275 */
4276 vdsp = NULL;
4277 nxtdsp = 0;
4278 if (dsp > np->scripta_ba &&
4279 dsp <= np->scripta_ba + np->scripta_sz) {
4280 vdsp = (u32 *)((char*)np->scripta0 + (dsp-np->scripta_ba-8));
4281 nxtdsp = dsp;
4282 }
4283 else if (dsp > np->scriptb_ba &&
4284 dsp <= np->scriptb_ba + np->scriptb_sz) {
4285 vdsp = (u32 *)((char*)np->scriptb0 + (dsp-np->scriptb_ba-8));
4286 nxtdsp = dsp;
4287 }
4288
4289 /*
4290 * log the information
4291 */
4292 if (DEBUG_FLAGS & DEBUG_PHASE) {
4293 printf ("\nCP=%p DSP=%x NXT=%x VDSP=%p CMD=%x ",
4294 cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd);
4295 }
4296
4297 if (!vdsp) {
4298 device_printf(np->device,
4299 "interrupted SCRIPT address not found.\n");
4300 goto reset_all;
4301 }
4302
4303 if (!cp) {
4304 device_printf(np->device,
4305 "SCSI phase error fixup: CCB already dequeued.\n");
4306 goto reset_all;
4307 }
4308
4309 /*
4310 * get old startaddress and old length.
4311 */
4312 oadr = scr_to_cpu(vdsp[1]);
4313
4314 if (cmd & 0x10) { /* Table indirect */
4315 tblp = (u32 *) ((char*) &cp->phys + oadr);
4316 olen = scr_to_cpu(tblp[0]);
4317 oadr = scr_to_cpu(tblp[1]);
4318 } else {
4319 tblp = (u32 *) 0;
4320 olen = scr_to_cpu(vdsp[0]) & 0xffffff;
4321 }
4322
4323 if (DEBUG_FLAGS & DEBUG_PHASE) {
4324 printf ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n",
4325 (unsigned) (scr_to_cpu(vdsp[0]) >> 24),
4326 tblp,
4327 (unsigned) olen,
4328 (unsigned) oadr);
4329 }
4330
4331 /*
4332 * check cmd against assumed interrupted script command.
4333 * If dt data phase, the MOVE instruction hasn't bit 4 of
4334 * the phase.
4335 */
4336 if (((cmd & 2) ? cmd : (cmd & ~4)) != (scr_to_cpu(vdsp[0]) >> 24)) {
4337 PRINT_ADDR(cp);
4338 printf ("internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n",
4339 (unsigned)cmd, (unsigned)scr_to_cpu(vdsp[0]) >> 24);
4340
4341 goto reset_all;
4342 }
4343
4344 /*
4345 * if old phase not dataphase, leave here.
4346 */
4347 if (cmd & 2) {
4348 PRINT_ADDR(cp);
4349 printf ("phase change %x-%x %d@%08x resid=%d.\n",
4350 cmd&7, INB(nc_sbcl)&7, (unsigned)olen,
4351 (unsigned)oadr, (unsigned)rest);
4352 goto unexpected_phase;
4353 }
4354
4355 /*
4356 * Choose the correct PM save area.
4357 *
4358 * Look at the PM_SAVE SCRIPT if you want to understand
4359 * this stuff. The equivalent code is implemented in
4360 * SCRIPTS for the 895A, 896 and 1010 that are able to
4361 * handle PM from the SCRIPTS processor.
4362 */
4363 hflags0 = INB (HF_PRT);
4364 hflags = hflags0;
4365
4366 if (hflags & (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)) {
4367 if (hflags & HF_IN_PM0)
4368 nxtdsp = scr_to_cpu(cp->phys.pm0.ret);
4369 else if (hflags & HF_IN_PM1)
4370 nxtdsp = scr_to_cpu(cp->phys.pm1.ret);
4371
4372 if (hflags & HF_DP_SAVED)
4373 hflags ^= HF_ACT_PM;
4374 }
4375
4376 if (!(hflags & HF_ACT_PM)) {
4377 pm = &cp->phys.pm0;
4378 newcmd = SCRIPTA_BA (np, pm0_data);
4379 }
4380 else {
4381 pm = &cp->phys.pm1;
4382 newcmd = SCRIPTA_BA (np, pm1_data);
4383 }
4384
4385 hflags &= ~(HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED);
4386 if (hflags != hflags0)
4387 OUTB (HF_PRT, hflags);
4388
4389 /*
4390 * fillin the phase mismatch context
4391 */
4392 pm->sg.addr = cpu_to_scr(oadr + olen - rest);
4393 pm->sg.size = cpu_to_scr(rest);
4394 pm->ret = cpu_to_scr(nxtdsp);
4395
4396 /*
4397 * If we have a SWIDE,
4398 * - prepare the address to write the SWIDE from SCRIPTS,
4399 * - compute the SCRIPTS address to restart from,
4400 * - move current data pointer context by one byte.
4401 */
4402 nxtdsp = SCRIPTA_BA (np, dispatch);
4403 if ((cmd & 7) == 1 && cp && (cp->phys.select.sel_scntl3 & EWS) &&
4404 (INB (nc_scntl2) & WSR)) {
4405 u32 tmp;
4406
4407 /*
4408 * Set up the table indirect for the MOVE
4409 * of the residual byte and adjust the data
4410 * pointer context.
4411 */
4412 tmp = scr_to_cpu(pm->sg.addr);
4413 cp->phys.wresid.addr = cpu_to_scr(tmp);
4414 pm->sg.addr = cpu_to_scr(tmp + 1);
4415 tmp = scr_to_cpu(pm->sg.size);
4416 cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1);
4417 pm->sg.size = cpu_to_scr(tmp - 1);
4418
4419 /*
4420 * If only the residual byte is to be moved,
4421 * no PM context is needed.
4422 */
4423 if ((tmp&0xffffff) == 1)
4424 newcmd = pm->ret;
4425
4426 /*
4427 * Prepare the address of SCRIPTS that will
4428 * move the residual byte to memory.
4429 */
4430 nxtdsp = SCRIPTB_BA (np, wsr_ma_helper);
4431 }
4432
4433 if (DEBUG_FLAGS & DEBUG_PHASE) {
4434 PRINT_ADDR(cp);
4435 printf ("PM %x %x %x / %x %x %x.\n",
4436 hflags0, hflags, newcmd,
4437 (unsigned)scr_to_cpu(pm->sg.addr),
4438 (unsigned)scr_to_cpu(pm->sg.size),
4439 (unsigned)scr_to_cpu(pm->ret));
4440 }
4441
4442 /*
4443 * Restart the SCRIPTS processor.
4444 */
4445 OUTL (nc_temp, newcmd);
4446 OUTL_DSP (nxtdsp);
4447 return;
4448
4449 /*
4450 * Unexpected phase changes that occurs when the current phase
4451 * is not a DATA IN or DATA OUT phase are due to error conditions.
4452 * Such event may only happen when the SCRIPTS is using a
4453 * multibyte SCSI MOVE.
4454 *
4455 * Phase change Some possible cause
4456 *
4457 * COMMAND --> MSG IN SCSI parity error detected by target.
4458 * COMMAND --> STATUS Bad command or refused by target.
4459 * MSG OUT --> MSG IN Message rejected by target.
4460 * MSG OUT --> COMMAND Bogus target that discards extended
4461 * negotiation messages.
4462 *
4463 * The code below does not care of the new phase and so
4464 * trusts the target. Why to annoy it ?
4465 * If the interrupted phase is COMMAND phase, we restart at
4466 * dispatcher.
4467 * If a target does not get all the messages after selection,
4468 * the code assumes blindly that the target discards extended
4469 * messages and clears the negotiation status.
4470 * If the target does not want all our response to negotiation,
4471 * we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids
4472 * bloat for such a should_not_happen situation).
4473 * In all other situation, we reset the BUS.
4474 * Are these assumptions reasonnable ? (Wait and see ...)
4475 */
4476 unexpected_phase:
4477 dsp -= 8;
4478 nxtdsp = 0;
4479
4480 switch (cmd & 7) {
4481 case 2: /* COMMAND phase */
4482 nxtdsp = SCRIPTA_BA (np, dispatch);
4483 break;
4484 #if 0
4485 case 3: /* STATUS phase */
4486 nxtdsp = SCRIPTA_BA (np, dispatch);
4487 break;
4488 #endif
4489 case 6: /* MSG OUT phase */
4490 /*
4491 * If the device may want to use untagged when we want
4492 * tagged, we prepare an IDENTIFY without disc. granted,
4493 * since we will not be able to handle reselect.
4494 * Otherwise, we just don't care.
4495 */
4496 if (dsp == SCRIPTA_BA (np, send_ident)) {
4497 if (cp->tag != NO_TAG && olen - rest <= 3) {
4498 cp->host_status = HS_BUSY;
4499 np->msgout[0] = M_IDENTIFY | cp->lun;
4500 nxtdsp = SCRIPTB_BA (np, ident_break_atn);
4501 }
4502 else
4503 nxtdsp = SCRIPTB_BA (np, ident_break);
4504 }
4505 else if (dsp == SCRIPTB_BA (np, send_wdtr) ||
4506 dsp == SCRIPTB_BA (np, send_sdtr) ||
4507 dsp == SCRIPTB_BA (np, send_ppr)) {
4508 nxtdsp = SCRIPTB_BA (np, nego_bad_phase);
4509 }
4510 break;
4511 #if 0
4512 case 7: /* MSG IN phase */
4513 nxtdsp = SCRIPTA_BA (np, clrack);
4514 break;
4515 #endif
4516 }
4517
4518 if (nxtdsp) {
4519 OUTL_DSP (nxtdsp);
4520 return;
4521 }
4522
4523 reset_all:
4524 sym_start_reset(np);
4525 }
4526
4527 /*
4528 * Dequeue from the START queue all CCBs that match
4529 * a given target/lun/task condition (-1 means all),
4530 * and move them from the BUSY queue to the COMP queue
4531 * with CAM_REQUEUE_REQ status condition.
4532 * This function is used during error handling/recovery.
4533 * It is called with SCRIPTS not running.
4534 */
4535 static int
sym_dequeue_from_squeue(hcb_p np,int i,int target,int lun,int task)4536 sym_dequeue_from_squeue(hcb_p np, int i, int target, int lun, int task)
4537 {
4538 int j;
4539 ccb_p cp;
4540
4541 /*
4542 * Make sure the starting index is within range.
4543 */
4544 assert((i >= 0) && (i < 2*MAX_QUEUE));
4545
4546 /*
4547 * Walk until end of START queue and dequeue every job
4548 * that matches the target/lun/task condition.
4549 */
4550 j = i;
4551 while (i != np->squeueput) {
4552 cp = sym_ccb_from_dsa(np, scr_to_cpu(np->squeue[i]));
4553 assert(cp);
4554 #ifdef SYM_CONF_IARB_SUPPORT
4555 /* Forget hints for IARB, they may be no longer relevant */
4556 cp->host_flags &= ~HF_HINT_IARB;
4557 #endif
4558 if ((target == -1 || cp->target == target) &&
4559 (lun == -1 || cp->lun == lun) &&
4560 (task == -1 || cp->tag == task)) {
4561 sym_set_cam_status(cp->cam_ccb, CAM_REQUEUE_REQ);
4562 sym_remque(&cp->link_ccbq);
4563 sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
4564 }
4565 else {
4566 if (i != j)
4567 np->squeue[j] = np->squeue[i];
4568 if ((j += 2) >= MAX_QUEUE*2) j = 0;
4569 }
4570 if ((i += 2) >= MAX_QUEUE*2) i = 0;
4571 }
4572 if (i != j) /* Copy back the idle task if needed */
4573 np->squeue[j] = np->squeue[i];
4574 np->squeueput = j; /* Update our current start queue pointer */
4575
4576 return (i - j) / 2;
4577 }
4578
4579 /*
4580 * Complete all CCBs queued to the COMP queue.
4581 *
4582 * These CCBs are assumed:
4583 * - Not to be referenced either by devices or
4584 * SCRIPTS-related queues and datas.
4585 * - To have to be completed with an error condition
4586 * or requeued.
4587 *
4588 * The device queue freeze count is incremented
4589 * for each CCB that does not prevent this.
4590 * This function is called when all CCBs involved
4591 * in error handling/recovery have been reaped.
4592 */
4593 static void
sym_flush_comp_queue(hcb_p np,int cam_status)4594 sym_flush_comp_queue(hcb_p np, int cam_status)
4595 {
4596 SYM_QUEHEAD *qp;
4597 ccb_p cp;
4598
4599 while ((qp = sym_remque_head(&np->comp_ccbq)) != NULL) {
4600 union ccb *ccb;
4601 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
4602 sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
4603 /* Leave quiet CCBs waiting for resources */
4604 if (cp->host_status == HS_WAIT)
4605 continue;
4606 ccb = cp->cam_ccb;
4607 if (cam_status)
4608 sym_set_cam_status(ccb, cam_status);
4609 sym_freeze_cam_ccb(ccb);
4610 sym_xpt_done(np, ccb, cp);
4611 sym_free_ccb(np, cp);
4612 }
4613 }
4614
4615 /*
4616 * chip handler for bad SCSI status condition
4617 *
4618 * In case of bad SCSI status, we unqueue all the tasks
4619 * currently queued to the controller but not yet started
4620 * and then restart the SCRIPTS processor immediately.
4621 *
4622 * QUEUE FULL and BUSY conditions are handled the same way.
4623 * Basically all the not yet started tasks are requeued in
4624 * device queue and the queue is frozen until a completion.
4625 *
4626 * For CHECK CONDITION and COMMAND TERMINATED status, we use
4627 * the CCB of the failed command to prepare a REQUEST SENSE
4628 * SCSI command and queue it to the controller queue.
4629 *
4630 * SCRATCHA is assumed to have been loaded with STARTPOS
4631 * before the SCRIPTS called the C code.
4632 */
static void sym_sir_bad_scsi_status(hcb_p np, ccb_p cp)
{
	tcb_p tp = &np->target[cp->target];
	u32 startp;
	u_char s_status = cp->ssss_status;
	u_char h_flags = cp->host_flags;
	int msglen;
	int nego;
	int i;

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 * Compute the index of the next job to start from SCRIPTS.
	 * (SCRATCHA holds STARTPOS when SCRIPTS called us.)
	 */
	i = (INL (nc_scratcha) - np->squeue_ba) / 4;

	/*
	 * The last CCB queued used for IARB hint may be
	 * no longer relevant. Forget it.
	 */
#ifdef SYM_CONF_IARB_SUPPORT
	if (np->last_cp)
		np->last_cp = NULL;
#endif

	/*
	 * Now deal with the SCSI status.
	 */
	switch(s_status) {
	case S_BUSY:
	case S_QUEUE_FULL:
		if (sym_verbose >= 2) {
			PRINT_ADDR(cp);
			/*
			 * Fixed: the "BUSY" message previously lacked its
			 * terminating newline, producing a run-on log line.
			 */
			printf (s_status == S_BUSY ? "BUSY\n" : "QUEUE FULL\n");
		}
		/* FALLTHROUGH */
	default:	/* S_INT, S_INT_COND_MET, S_CONFLICT */
		sym_complete_error (np, cp);
		break;
	case S_TERMINATED:
	case S_CHECK_COND:
		/*
		 * If we get an SCSI error when requesting sense, give up.
		 */
		if (h_flags & HF_SENSE) {
			sym_complete_error (np, cp);
			break;
		}

		/*
		 * Dequeue all queued CCBs for that device not yet started,
		 * and restart the SCRIPTS processor immediately.
		 */
		(void) sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);
		OUTL_DSP (SCRIPTA_BA (np, start));

		/*
		 * Save some info of the actual IO.
		 * Compute the data residual.
		 */
		cp->sv_scsi_status = cp->ssss_status;
		cp->sv_xerr_status = cp->xerr_status;
		cp->sv_resid = sym_compute_residual(np, cp);

		/*
		 * Prepare all needed data structures for
		 * requesting sense data.
		 */

		/*
		 * identify message
		 */
		cp->scsi_smsg2[0] = M_IDENTIFY | cp->lun;
		msglen = 1;

		/*
		 * If we are currently using anything different from
		 * async. 8 bit data transfers with that target,
		 * start a negotiation, since the device may want
		 * to report us a UNIT ATTENTION condition due to
		 * a cause we currently ignore, and we donnot want
		 * to be stuck with WIDE and/or SYNC data transfer.
		 *
		 * cp->nego_status is filled by sym_prepare_nego().
		 */
		cp->nego_status = 0;
		nego = 0;
		if (tp->tinfo.current.options & PPR_OPT_MASK)
			nego = NS_PPR;
		else if (tp->tinfo.current.width != BUS_8_BIT)
			nego = NS_WIDE;
		else if (tp->tinfo.current.offset != 0)
			nego = NS_SYNC;
		if (nego)
			msglen +=
			sym_prepare_nego (np,cp, nego, &cp->scsi_smsg2[msglen]);
		/*
		 * Message table indirect structure.
		 */
		cp->phys.smsg.addr = cpu_to_scr(CCB_BA (cp, scsi_smsg2));
		cp->phys.smsg.size = cpu_to_scr(msglen);

		/*
		 * sense command (6-byte REQUEST SENSE CDB)
		 */
		cp->phys.cmd.addr = cpu_to_scr(CCB_BA (cp, sensecmd));
		cp->phys.cmd.size = cpu_to_scr(6);

		/*
		 * patch requested size into sense command
		 */
		cp->sensecmd[0] = 0x03;
		cp->sensecmd[1] = cp->lun << 5;
		/* LUN in CDB byte 1 is only meaningful for SCSI <= 2, LUN <= 7 */
		if (tp->tinfo.current.scsi_version > 2 || cp->lun > 7)
			cp->sensecmd[1] = 0;
		cp->sensecmd[4] = SYM_SNS_BBUF_LEN;
		cp->data_len = SYM_SNS_BBUF_LEN;

		/*
		 * sense data (bounce buffer, zeroed before the transfer)
		 */
		bzero(cp->sns_bbuf, SYM_SNS_BBUF_LEN);
		cp->phys.sense.addr = cpu_to_scr(vtobus(cp->sns_bbuf));
		cp->phys.sense.size = cpu_to_scr(SYM_SNS_BBUF_LEN);

		/*
		 * requeue the command.
		 */
		startp = SCRIPTB_BA (np, sdata_in);

		cp->phys.head.savep = cpu_to_scr(startp);
		cp->phys.head.goalp = cpu_to_scr(startp + 16);
		cp->phys.head.lastp = cpu_to_scr(startp);
		cp->startp = cpu_to_scr(startp);

		cp->actualquirks = SYM_QUIRK_AUTOSAVE;
		cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
		cp->ssss_status = S_ILLEGAL;
		cp->host_flags = (HF_SENSE|HF_DATA_IN);
		cp->xerr_status = 0;
		cp->extra_bytes = 0;

		cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA (np, select));

		/*
		 * Requeue the command.
		 */
		sym_put_start_queue(np, cp);

		/*
		 * Give back to upper layer everything we have dequeued.
		 */
		sym_flush_comp_queue(np, 0);
		break;
	}
}
4790
4791 /*
4792 * After a device has accepted some management message
4793 * as BUS DEVICE RESET, ABORT TASK, etc ..., or when
4794 * a device signals a UNIT ATTENTION condition, some
4795 * tasks are thrown away by the device. We are required
4796 * to reflect that on our tasks list since the device
4797 * will never complete these tasks.
4798 *
4799 * This function move from the BUSY queue to the COMP
4800 * queue all disconnected CCBs for a given target that
4801 * match the following criteria:
4802 * - lun=-1 means any logical UNIT otherwise a given one.
4803 * - task=-1 means any task, otherwise a given one.
4804 */
4805 static int
sym_clear_tasks(hcb_p np,int cam_status,int target,int lun,int task)4806 sym_clear_tasks(hcb_p np, int cam_status, int target, int lun, int task)
4807 {
4808 SYM_QUEHEAD qtmp, *qp;
4809 int i = 0;
4810 ccb_p cp;
4811
4812 /*
4813 * Move the entire BUSY queue to our temporary queue.
4814 */
4815 sym_que_init(&qtmp);
4816 sym_que_splice(&np->busy_ccbq, &qtmp);
4817 sym_que_init(&np->busy_ccbq);
4818
4819 /*
4820 * Put all CCBs that matches our criteria into
4821 * the COMP queue and put back other ones into
4822 * the BUSY queue.
4823 */
4824 while ((qp = sym_remque_head(&qtmp)) != NULL) {
4825 union ccb *ccb;
4826 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
4827 ccb = cp->cam_ccb;
4828 if (cp->host_status != HS_DISCONNECT ||
4829 cp->target != target ||
4830 (lun != -1 && cp->lun != lun) ||
4831 (task != -1 &&
4832 (cp->tag != NO_TAG && cp->scsi_smsg[2] != task))) {
4833 sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
4834 continue;
4835 }
4836 sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
4837
4838 /* Preserve the software timeout condition */
4839 if (sym_get_cam_status(ccb) != CAM_CMD_TIMEOUT)
4840 sym_set_cam_status(ccb, cam_status);
4841 ++i;
4842 #if 0
4843 printf("XXXX TASK @%p CLEARED\n", cp);
4844 #endif
4845 }
4846 return i;
4847 }
4848
4849 /*
4850 * chip handler for TASKS recovery
4851 *
4852 * We cannot safely abort a command, while the SCRIPTS
4853 * processor is running, since we just would be in race
4854 * with it.
4855 *
4856 * As long as we have tasks to abort, we keep the SEM
4857 * bit set in the ISTAT. When this bit is set, the
4858 * SCRIPTS processor interrupts (SIR_SCRIPT_STOPPED)
4859 * each time it enters the scheduler.
4860 *
4861 * If we have to reset a target, clear tasks of a unit,
4862 * or to perform the abort of a disconnected job, we
4863 * restart the SCRIPTS for selecting the target. Once
4864 * selected, the SCRIPTS interrupts (SIR_TARGET_SELECTED).
4865 * If it loses arbitration, the SCRIPTS will interrupt again
4866 * the next time it will enter its scheduler, and so on ...
4867 *
4868 * On SIR_TARGET_SELECTED, we scan for the more
4869 * appropriate thing to do:
4870 *
4871 * - If nothing, we just sent a M_ABORT message to the
4872 * target to get rid of the useless SCSI bus ownership.
4873 * According to the specs, no tasks shall be affected.
4874 * - If the target is to be reset, we send it a M_RESET
4875 * message.
4876 * - If a logical UNIT is to be cleared , we send the
4877 * IDENTIFY(lun) + M_ABORT.
4878 * - If an untagged task is to be aborted, we send the
4879 * IDENTIFY(lun) + M_ABORT.
4880 * - If a tagged task is to be aborted, we send the
4881 * IDENTIFY(lun) + task attributes + M_ABORT_TAG.
4882 *
4883 * Once our 'kiss of death' :) message has been accepted
4884 * by the target, the SCRIPTS interrupts again
4885 * (SIR_ABORT_SENT). On this interrupt, we complete
4886 * all the CCBs that should have been aborted by the
4887 * target according to our message.
4888 */
static void sym_sir_task_recovery(hcb_p np, int num)
{
	SYM_QUEHEAD *qp;
	ccb_p cp;
	tcb_p tp;
	int target=-1, lun=-1, task;
	int i, k;

	/* 'num' is the SIR_* interrupt code raised by the SCRIPTS. */
	switch(num) {
	/*
	 * The SCRIPTS processor stopped before starting
	 * the next command in order to allow us to perform
	 * some task recovery.
	 */
	case SIR_SCRIPT_STOPPED:
		/*
		 * Do we have any target to reset or unit to clear ?
		 * Scan lun0 first, then the sparse lunmp[] table.
		 */
		for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
			tp = &np->target[i];
			if (tp->to_reset ||
			    (tp->lun0p && tp->lun0p->to_clear)) {
				target = i;
				break;
			}
			if (!tp->lunmp)
				continue;
			for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
				if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
					target = i;
					break;
				}
			}
			if (target != -1)
				break;
		}

		/*
		 * If not, walk the busy queue for any
		 * disconnected CCB to be aborted.
		 */
		if (target == -1) {
			FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
				cp = sym_que_entry(qp,struct sym_ccb,link_ccbq);
				if (cp->host_status != HS_DISCONNECT)
					continue;
				if (cp->to_abort) {
					target = cp->target;
					break;
				}
			}
		}

		/*
		 * If some target is to be selected,
		 * prepare and start the selection.
		 * (sel_for_abort SCRIPTS will raise SIR_TARGET_SELECTED.)
		 */
		if (target != -1) {
			tp = &np->target[target];
			np->abrt_sel.sel_id = target;
			np->abrt_sel.sel_scntl3 = tp->head.wval;
			np->abrt_sel.sel_sxfer = tp->head.sval;
			OUTL(nc_dsa, np->hcb_ba);
			OUTL_DSP (SCRIPTB_BA (np, sel_for_abort));
			return;
		}

		/*
		 * Now look for a CCB to abort that haven't started yet.
		 * Btw, the SCRIPTS processor is still stopped, so
		 * we are not in race.
		 */
		i = 0;
		cp = NULL;
		FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
			cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
			if (cp->host_status != HS_BUSY &&
			    cp->host_status != HS_NEGOTIATE)
				continue;
			if (!cp->to_abort)
				continue;
#ifdef SYM_CONF_IARB_SUPPORT
			/*
			 * If we are using IMMEDIATE ARBITRATION, we donnot
			 * want to cancel the last queued CCB, since the
			 * SCRIPTS may have anticipated the selection.
			 */
			if (cp == np->last_cp) {
				cp->to_abort = 0;
				continue;
			}
#endif
			i = 1;	/* Means we have found some */
			break;
		}
		if (!i) {
			/*
			 * We are done, so we donnot need
			 * to synchronize with the SCRIPTS anylonger.
			 * Remove the SEM flag from the ISTAT.
			 */
			np->istat_sem = 0;
			OUTB (nc_istat, SIGP);
			break;
		}
		/*
		 * Compute index of next position in the start
		 * queue the SCRIPTS intends to start and dequeue
		 * all CCBs for that device that haven't been started.
		 */
		i = (INL (nc_scratcha) - np->squeue_ba) / 4;
		i = sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);

		/*
		 * Make sure at least our IO to abort has been dequeued.
		 */
		assert(i && sym_get_cam_status(cp->cam_ccb) == CAM_REQUEUE_REQ);

		/*
		 * Keep track in cam status of the reason of the abort.
		 * (to_abort == 2 flags an abort due to a software timeout.)
		 */
		if (cp->to_abort == 2)
			sym_set_cam_status(cp->cam_ccb, CAM_CMD_TIMEOUT);
		else
			sym_set_cam_status(cp->cam_ccb, CAM_REQ_ABORTED);

		/*
		 * Complete with error everything that we have dequeued.
		 */
		sym_flush_comp_queue(np, 0);
		break;
	/*
	 * The SCRIPTS processor has selected a target
	 * we may have some manual recovery to perform for.
	 */
	case SIR_TARGET_SELECTED:
		target = (INB (nc_sdid) & 0xf);
		tp = &np->target[target];

		np->abrt_tbl.addr = cpu_to_scr(vtobus(np->abrt_msg));

		/*
		 * If the target is to be reset, prepare a
		 * M_RESET message and clear the to_reset flag
		 * since we donnot expect this operation to fail.
		 */
		if (tp->to_reset) {
			np->abrt_msg[0] = M_RESET;
			np->abrt_tbl.size = 1;
			tp->to_reset = 0;
			break;
		}

		/*
		 * Otherwise, look for some logical unit to be cleared.
		 */
		if (tp->lun0p && tp->lun0p->to_clear)
			lun = 0;
		else if (tp->lunmp) {
			for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
				if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
					lun = k;
					break;
				}
			}
		}

		/*
		 * If a logical unit is to be cleared, prepare
		 * an IDENTIFY(lun) + ABORT MESSAGE.
		 */
		if (lun != -1) {
			lcb_p lp = sym_lp(tp, lun);
			lp->to_clear = 0; /* We donnot expect to fail here */
			np->abrt_msg[0] = M_IDENTIFY | lun;
			np->abrt_msg[1] = M_ABORT;
			np->abrt_tbl.size = 2;
			break;
		}

		/*
		 * Otherwise, look for some disconnected job to
		 * abort for this target.
		 */
		i = 0;
		cp = NULL;
		FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
			cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
			if (cp->host_status != HS_DISCONNECT)
				continue;
			if (cp->target != target)
				continue;
			if (!cp->to_abort)
				continue;
			i = 1;	/* Means we have some */
			break;
		}

		/*
		 * If we have none, probably since the device has
		 * completed the command before we won abitration,
		 * send a M_ABORT message without IDENTIFY.
		 * According to the specs, the device must just
		 * disconnect the BUS and not abort any task.
		 */
		if (!i) {
			np->abrt_msg[0] = M_ABORT;
			np->abrt_tbl.size = 1;
			break;
		}

		/*
		 * We have some task to abort.
		 * Set the IDENTIFY(lun)
		 */
		np->abrt_msg[0] = M_IDENTIFY | cp->lun;

		/*
		 * If we want to abort an untagged command, we
		 * will send an IDENTIFY + M_ABORT.
		 * Otherwise (tagged command), we will send
		 * an IDENTIFY + task attributes + ABORT TAG.
		 * (scsi_smsg[1..2] hold the tag message bytes of the
		 * original command.)
		 */
		if (cp->tag == NO_TAG) {
			np->abrt_msg[1] = M_ABORT;
			np->abrt_tbl.size = 2;
		}
		else {
			np->abrt_msg[1] = cp->scsi_smsg[1];
			np->abrt_msg[2] = cp->scsi_smsg[2];
			np->abrt_msg[3] = M_ABORT_TAG;
			np->abrt_tbl.size = 4;
		}
		/*
		 * Keep track of software timeout condition, since the
		 * peripheral driver may not count retries on abort
		 * conditions not due to timeout.
		 */
		if (cp->to_abort == 2)
			sym_set_cam_status(cp->cam_ccb, CAM_CMD_TIMEOUT);
		cp->to_abort = 0; /* We donnot expect to fail here */
		break;

	/*
	 * The target has accepted our message and switched
	 * to BUS FREE phase as we expected.
	 */
	case SIR_ABORT_SENT:
		target = (INB (nc_sdid) & 0xf);
		tp = &np->target[target];

		/*
		** If we didn't abort anything, leave here.
		*/
		if (np->abrt_msg[0] == M_ABORT)
			break;

		/*
		 * If we sent a M_RESET, then a hardware reset has
		 * been performed by the target.
		 * - Reset everything to async 8 bit
		 * - Tell ourself to negotiate next time :-)
		 * - Prepare to clear all disconnected CCBs for
		 *   this target from our task list (lun=task=-1)
		 */
		lun = -1;
		task = -1;
		if (np->abrt_msg[0] == M_RESET) {
			tp->head.sval = 0;
			tp->head.wval = np->rv_scntl3;
			tp->head.uval = 0;
			tp->tinfo.current.period = 0;
			tp->tinfo.current.offset = 0;
			tp->tinfo.current.width = BUS_8_BIT;
			tp->tinfo.current.options = 0;
		}

		/*
		 * Otherwise, check for the LUN and TASK(s)
		 * concerned by the cancellation.
		 * If it is not ABORT_TAG then it is CLEAR_QUEUE
		 * or an ABORT message :-)
		 */
		else {
			lun = np->abrt_msg[0] & 0x3f;
			if (np->abrt_msg[1] == M_ABORT_TAG)
				task = np->abrt_msg[2];
		}

		/*
		 * Complete all the CCBs the device should have
		 * aborted due to our 'kiss of death' message.
		 */
		i = (INL (nc_scratcha) - np->squeue_ba) / 4;
		(void) sym_dequeue_from_squeue(np, i, target, lun, -1);
		(void) sym_clear_tasks(np, CAM_REQ_ABORTED, target, lun, task);
		sym_flush_comp_queue(np, 0);

		/*
		 * If we sent a BDR, make uper layer aware of that.
		 */
		if (np->abrt_msg[0] == M_RESET)
			xpt_async(AC_SENT_BDR, np->path, NULL);
		break;
	}

	/*
	 * Print to the log the message we intend to send.
	 */
	if (num == SIR_TARGET_SELECTED) {
		PRINT_TARGET(np, target);
		sym_printl_hex("control msgout:", np->abrt_msg,
			      np->abrt_tbl.size);
		np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size);
	}

	/*
	 * Let the SCRIPTS processor continue.
	 */
	OUTONB_STD ();
}
5210
5211 /*
5212 * Gerard's alchemy:) that deals with with the data
5213 * pointer for both MDP and the residual calculation.
5214 *
5215 * I didn't want to bloat the code by more than 200
5216 * lignes for the handling of both MDP and the residual.
5217 * This has been achieved by using a data pointer
5218 * representation consisting in an index in the data
5219 * array (dp_sg) and a negative offset (dp_ofs) that
5220 * have the following meaning:
5221 *
5222 * - dp_sg = SYM_CONF_MAX_SG
5223 * we are at the end of the data script.
5224 * - dp_sg < SYM_CONF_MAX_SG
5225 * dp_sg points to the next entry of the scatter array
5226 * we want to transfer.
5227 * - dp_ofs < 0
5228 * dp_ofs represents the residual of bytes of the
5229 * previous entry scatter entry we will send first.
5230 * - dp_ofs = 0
5231 * no residual to send first.
5232 *
5233 * The function sym_evaluate_dp() accepts an arbitray
5234 * offset (basically from the MDP message) and returns
5235 * the corresponding values of dp_sg and dp_ofs.
5236 */
static int sym_evaluate_dp(hcb_p np, ccb_p cp, u32 scr, int *ofs)
{
	u32 dp_scr;
	int dp_ofs, dp_sg, dp_sgmin;
	int tmp;
	struct sym_pmc *pm;

	/*
	 * Compute the resulted data pointer in term of a script
	 * address within some DATA script and a signed byte offset.
	 */
	dp_scr = scr;
	dp_ofs = *ofs;
	if (dp_scr == SCRIPTA_BA (np, pm0_data))
		pm = &cp->phys.pm0;
	else if (dp_scr == SCRIPTA_BA (np, pm1_data))
		pm = &cp->phys.pm1;
	else
		pm = NULL;

	/*
	 * If a phase-mismatch context is active, translate back to the
	 * saved return address and fold its pending size into the offset.
	 */
	if (pm) {
		dp_scr = scr_to_cpu(pm->ret);
		dp_ofs -= scr_to_cpu(pm->sg.size);
	}

	/*
	 * If we are auto-sensing, then we are done.
	 */
	if (cp->host_flags & HF_SENSE) {
		*ofs = dp_ofs;
		return 0;
	}

	/*
	 * Deduce the index of the sg entry.
	 * Keep track of the index of the first valid entry.
	 * If result is dp_sg = SYM_CONF_MAX_SG, then we are at the
	 * end of the data.
	 * (Each S/G move occupies 2 SCRIPT words = 8 bytes, hence 2*4.)
	 */
	tmp = scr_to_cpu(cp->phys.head.goalp);
	dp_sg = SYM_CONF_MAX_SG;
	if (dp_scr != tmp)
		dp_sg -= (tmp - 8 - (int)dp_scr) / (2*4);
	dp_sgmin = SYM_CONF_MAX_SG - cp->segments;

	/*
	 * Move to the sg entry the data pointer belongs to.
	 *
	 * If we are inside the data area, we expect result to be:
	 *
	 * Either,
	 *     dp_ofs = 0 and dp_sg is the index of the sg entry
	 *     the data pointer belongs to (or the end of the data)
	 * Or,
	 *     dp_ofs < 0 and dp_sg is the index of the sg entry
	 *     the data pointer belongs to + 1.
	 */
	if (dp_ofs < 0) {
		int n;
		/* Walk backwards, consuming entry sizes (low 24 bits). */
		while (dp_sg > dp_sgmin) {
			--dp_sg;
			tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
			n = dp_ofs + (tmp & 0xffffff);
			if (n > 0) {
				++dp_sg;
				break;
			}
			dp_ofs = n;
		}
	}
	else if (dp_ofs > 0) {
		/* Walk forwards until the positive offset is consumed. */
		while (dp_sg < SYM_CONF_MAX_SG) {
			tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
			dp_ofs -= (tmp & 0xffffff);
			++dp_sg;
			if (dp_ofs <= 0)
				break;
		}
	}

	/*
	 * Make sure the data pointer is inside the data area.
	 * If not, return some error.
	 */
	if	(dp_sg < dp_sgmin || (dp_sg == dp_sgmin && dp_ofs < 0))
		goto out_err;
	else if	(dp_sg > SYM_CONF_MAX_SG ||
		 (dp_sg == SYM_CONF_MAX_SG && dp_ofs > 0))
		goto out_err;

	/*
	 * Save the extreme pointer if needed.
	 * (Highest data position reached, used for residual bookkeeping.)
	 */
	if	(dp_sg > cp->ext_sg ||
		(dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) {
		cp->ext_sg  = dp_sg;
		cp->ext_ofs = dp_ofs;
	}

	/*
	 * Return data.
	 */
	*ofs = dp_ofs;
	return dp_sg;

out_err:
	return -1;
}
5345
5346 /*
5347 * chip handler for MODIFY DATA POINTER MESSAGE
5348 *
5349 * We also call this function on IGNORE WIDE RESIDUE
5350 * messages that do not match a SWIDE full condition.
5351 * Btw, we assume in that situation that such a message
5352 * is equivalent to a MODIFY DATA POINTER (offset=-1).
5353 */
static void sym_modify_dp(hcb_p np, ccb_p cp, int ofs)
{
	int dp_ofs = ofs;
	u32 dp_scr = INL (nc_temp);	/* current DATA script pointer */
	u32 dp_ret;
	u32 tmp;
	u_char hflags;
	int dp_sg;
	struct sym_pmc *pm;

	/*
	 * Not supported for auto-sense.
	 */
	if (cp->host_flags & HF_SENSE)
		goto out_reject;

	/*
	 * Apply our alchemy:) (see comments in sym_evaluate_dp()),
	 * to the resulted data pointer.
	 */
	dp_sg = sym_evaluate_dp(np, cp, dp_scr, &dp_ofs);
	if (dp_sg < 0)
		goto out_reject;

	/*
	 * And our alchemy:) allows to easily calculate the data
	 * script address we want to return for the next data phase.
	 * (goalp - 8 - remaining entries * 8 bytes per S/G move.)
	 */
	dp_ret = cpu_to_scr(cp->phys.head.goalp);
	dp_ret = dp_ret - 8 - (SYM_CONF_MAX_SG - dp_sg) * (2*4);

	/*
	 * If offset / scatter entry is zero we donnot need
	 * a context for the new current data pointer.
	 */
	if (dp_ofs == 0) {
		dp_scr = dp_ret;
		goto out_ok;
	}

	/*
	 * Get a context for the new current data pointer.
	 * (Pick the inactive PM save area, pm0 or pm1.)
	 */
	hflags = INB (HF_PRT);

	if (hflags & HF_DP_SAVED)
		hflags ^= HF_ACT_PM;

	if (!(hflags & HF_ACT_PM)) {
		pm = &cp->phys.pm0;
		dp_scr = SCRIPTA_BA (np, pm0_data);
	}
	else {
		pm = &cp->phys.pm1;
		dp_scr = SCRIPTA_BA (np, pm1_data);
	}

	hflags &= ~(HF_DP_SAVED);

	OUTB (HF_PRT, hflags);

	/*
	 * Set up the new current data pointer.
	 * ofs < 0 there, and for the next data phase, we
	 * want to transfer part of the data of the sg entry
	 * corresponding to index dp_sg-1 prior to returning
	 * to the main data script.
	 */
	pm->ret = cpu_to_scr(dp_ret);
	tmp  = scr_to_cpu(cp->phys.data[dp_sg-1].addr);
	tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs;
	pm->sg.addr = cpu_to_scr(tmp);
	pm->sg.size = cpu_to_scr(-dp_ofs);

out_ok:
	/* Restart SCRIPTS at 'clrack' with the new data pointer in TEMP. */
	OUTL (nc_temp, dp_scr);
	OUTL_DSP (SCRIPTA_BA (np, clrack));
	return;

out_reject:
	/* Reject the MODIFY DATA POINTER / IGNORE WIDE RESIDUE message. */
	OUTL_DSP (SCRIPTB_BA (np, msg_bad));
}
5436
5437 /*
5438 * chip calculation of the data residual.
5439 *
5440 * As I used to say, the requirement of data residual
5441 * in SCSI is broken, useless and cannot be achieved
5442 * without huge complexity.
5443 * But most OSes and even the official CAM require it.
5444 * When stupidity happens to be so widely spread inside
5445 * a community, it gets hard to convince.
5446 *
5447 * Anyway, I don't care, since I am not going to use
5448 * any software that considers this data residual as
5449 * a relevant information. :)
5450 */
static int sym_compute_residual(hcb_p np, ccb_p cp)
{
	int dp_sg, resid = 0;
	int dp_ofs = 0;

	/*
	 * Check for some data lost or just thrown away.
	 * We are not required to be quite accurate in this
	 * situation. Btw, if we are odd for output and the
	 * device claims some more data, it may well happen
	 * than our residual be zero. :-)
	 */
	if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) {
		if (cp->xerr_status & XE_EXTRA_DATA)
			resid -= cp->extra_bytes;
		if (cp->xerr_status & XE_SODL_UNRUN)
			++resid;
		if (cp->xerr_status & XE_SWIDE_OVRUN)
			--resid;
	}

	/*
	 * If all data has been transferred,
	 * there is no residual.
	 */
	if (cp->phys.head.lastp == cp->phys.head.goalp)
		return resid;

	/*
	 * If no data transfer occurs, or if the data
	 * pointer is weird, return full residual.
	 */
	if (cp->startp == cp->phys.head.lastp ||
	    sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp),
			    &dp_ofs) < 0) {
		return cp->data_len;
	}

	/*
	 * If we were auto-sensing, then we are done.
	 */
	if (cp->host_flags & HF_SENSE) {
		return -dp_ofs;
	}

	/*
	 * We are now full comfortable in the computation
	 * of the data residual (2's complement).
	 * Sum the untransferred part of the current SG entry
	 * plus all entries that were never reached.
	 */
	resid = -cp->ext_ofs;
	for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) {
		u_int tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
		/* only the low 24 bits hold the byte count —
		 * high byte presumably carries SCRIPTS bits; confirm */
		resid += (tmp & 0xffffff);
	}

	/*
	 * Hopefully, the result is not too wrong.
	 */
	return resid;
}
5511
5512 /*
5513 * Print out the content of a SCSI message.
5514 */
static int sym_show_msg (u_char *msg)
{
	u_char n;

	printf ("%x", msg[0]);
	if (msg[0] == M_EXTENDED) {
		/*
		 * Extended message: dump up to 7 following bytes,
		 * bounded by the message length byte msg[1].
		 */
		n = 1;
		while (n < 8 && n - 1 <= msg[1]) {
			printf ("-%x", msg[n]);
			n++;
		}
		return (n + 1);
	}
	if ((msg[0] & 0xf0) == 0x20) {
		/* Two-byte message group (0x20-0x2f). */
		printf ("-%x", msg[1]);
		return (2);
	}
	/* Single-byte message. */
	return (1);
}
5531
static void sym_print_msg (ccb_p cp, char *label, u_char *msg)
{
	/* Prefix the line with the address (unit:target) of this CCB. */
	PRINT_ADDR(cp);
	if (label != NULL)
		printf ("%s: ", label);
	/* Dump the raw message bytes, then terminate the line. */
	sym_show_msg (msg);
	printf (".\n");
}
5541
5542 /*
5543 * Negotiation for WIDE and SYNCHRONOUS DATA TRANSFER.
5544 *
5545 * When we try to negotiate, we append the negotiation message
5546 * to the identify and (maybe) simple tag message.
5547 * The host status field is set to HS_NEGOTIATE to mark this
5548 * situation.
5549 *
5550 * If the target doesn't answer this message immediately
5551 * (as required by the standard), the SIR_NEGO_FAILED interrupt
5552 * will be raised eventually.
5553 * The handler removes the HS_NEGOTIATE status, and sets the
5554 * negotiated value to the default (async / nowide).
5555 *
5556 * If we receive a matching answer immediately, we check it
5557 * for validity, and set the values.
5558 *
5559 * If we receive a Reject message immediately, we assume the
5560 * negotiation has failed, and fall back to standard values.
5561 *
5562 * If we receive a negotiation message while not in HS_NEGOTIATE
5563 * state, it's a target initiated negotiation. We prepare a
5564 * (hopefully) valid answer, set our parameters, and send back
5565 * this answer to the target.
5566 *
5567 * If the target doesn't fetch the answer (no message out phase),
5568 * we assume the negotiation has failed, and fall back to default
5569 * settings (SIR_NEGO_PROTO interrupt).
5570 *
5571 * When we set the values, we adjust them in all ccbs belonging
5572 * to this target, in the controller's register, and in the "phys"
5573 * field of the controller's struct sym_hcb.
5574 */
5575
5576 /*
5577 * chip handler for SYNCHRONOUS DATA TRANSFER REQUEST (SDTR) message.
5578 */
static void sym_sync_nego(hcb_p np, tcb_p tp, ccb_p cp)
{
	u_char chg, ofs, per, fak, div;
	int req = 1;	/* 1 = target-initiated request, 0 = answer to our request */

	/*
	 * Synchronous request message received.
	 */
	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_msg(cp, "sync msgin", np->msgin);
	}

	/*
	 * request or answer ?
	 * If we were negotiating (HS_NEGOTIATE), this SDTR is the
	 * target's answer; otherwise the target started it.
	 */
	if (INB (HS_PRT) == HS_NEGOTIATE) {
		OUTB (HS_PRT, HS_BUSY);
		if (cp->nego_status && cp->nego_status != NS_SYNC)
			goto reject_it;
		req = 0;
	}

	/*
	 * get requested values.
	 * SDTR layout: msgin[3] = transfer period factor, msgin[4] = offset.
	 */
	chg = 0;
	per = np->msgin[3];
	ofs = np->msgin[4];

	/*
	 * check values against our limits.
	 * For target-initiated requests, also clamp to user settings.
	 */
	if (ofs) {
		if (ofs > np->maxoffs)
			{chg = 1; ofs = np->maxoffs;}
		if (req) {
			if (ofs > tp->tinfo.user.offset)
				{chg = 1; ofs = tp->tinfo.user.offset;}
		}
	}

	if (ofs) {
		if (per < np->minsync)
			{chg = 1; per = np->minsync;}
		if (req) {
			if (per < tp->tinfo.user.period)
				{chg = 1; per = tp->tinfo.user.period;}
		}
	}

	div = fak = 0;
	if (ofs && sym_getsync(np, 0, per, &div, &fak) < 0)
		goto reject_it;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		PRINT_ADDR(cp);
		printf ("sdtr: ofs=%d per=%d div=%d fak=%d chg=%d.\n",
			ofs, per, div, fak, chg);
	}

	/*
	 * This was an answer message
	 */
	if (req == 0) {
		if (chg) 	/* Answer wasn't acceptable. */
			goto reject_it;
		sym_setsync (np, cp, ofs, per, div, fak);
		OUTL_DSP (SCRIPTA_BA (np, clrack));
		return;
	}

	/*
	 * It was a request. Set value and
	 * prepare an answer message
	 */
	sym_setsync (np, cp, ofs, per, div, fak);

	np->msgout[0] = M_EXTENDED;
	np->msgout[1] = 3;
	np->msgout[2] = M_X_SYNC_REQ;
	np->msgout[3] = per;
	np->msgout[4] = ofs;

	cp->nego_status = NS_SYNC;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_msg(cp, "sync msgout", np->msgout);
	}

	np->msgin [0] = M_NOOP;

	OUTL_DSP (SCRIPTB_BA (np, sdtr_resp));
	return;
reject_it:
	/* Fall back to asynchronous transfers and reject the message. */
	sym_setsync (np, cp, 0, 0, 0, 0);
	OUTL_DSP (SCRIPTB_BA (np, msg_bad));
}
5676
5677 /*
5678 * chip handler for PARALLEL PROTOCOL REQUEST (PPR) message.
5679 */
static void sym_ppr_nego(hcb_p np, tcb_p tp, ccb_p cp)
{
	u_char chg, ofs, per, fak, dt, div, wide;
	int req = 1;	/* 1 = target-initiated request, 0 = answer to ours */

	/*
	 * Synchronous request message received.
	 */
	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_msg(cp, "ppr msgin", np->msgin);
	}

	/*
	 * get requested values.
	 * PPR layout: msgin[3]=period, msgin[5]=offset,
	 * msgin[6]=width exponent, msgin[7]=protocol options.
	 */
	chg = 0;
	per = np->msgin[3];
	ofs = np->msgin[5];
	wide = np->msgin[6];
	dt = np->msgin[7] & PPR_OPT_DT;

	/*
	 * request or answer ?
	 */
	if (INB (HS_PRT) == HS_NEGOTIATE) {
		OUTB (HS_PRT, HS_BUSY);
		if (cp->nego_status && cp->nego_status != NS_PPR)
			goto reject_it;
		req = 0;
	}

	/*
	 * check values against our limits.
	 * DT transfers require a wide bus and ULTRA3 capable chip.
	 */
	if (wide > np->maxwide)
		{chg = 1; wide = np->maxwide;}
	if (!wide || !(np->features & FE_ULTRA3))
		dt &= ~PPR_OPT_DT;
	if (req) {
		if (wide > tp->tinfo.user.width)
			{chg = 1; wide = tp->tinfo.user.width;}
	}

	if (!(np->features & FE_U3EN))	/* Broken U3EN bit not supported */
		dt &= ~PPR_OPT_DT;

	/* Flag a change if any requested option other than DT was dropped. */
	if (dt != (np->msgin[7] & PPR_OPT_MASK)) chg = 1;

	if (ofs) {
		if (dt) {
			if (ofs > np->maxoffs_dt)
				{chg = 1; ofs = np->maxoffs_dt;}
		}
		else if (ofs > np->maxoffs)
			{chg = 1; ofs = np->maxoffs;}
		if (req) {
			if (ofs > tp->tinfo.user.offset)
				{chg = 1; ofs = tp->tinfo.user.offset;}
		}
	}

	if (ofs) {
		if (dt) {
			if (per < np->minsync_dt)
				{chg = 1; per = np->minsync_dt;}
		}
		else if (per < np->minsync)
			{chg = 1; per = np->minsync;}
		if (req) {
			if (per < tp->tinfo.user.period)
				{chg = 1; per = tp->tinfo.user.period;}
		}
	}

	div = fak = 0;
	if (ofs && sym_getsync(np, dt, per, &div, &fak) < 0)
		goto reject_it;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		PRINT_ADDR(cp);
		printf ("ppr: "
			"dt=%x ofs=%d per=%d wide=%d div=%d fak=%d chg=%d.\n",
			dt, ofs, per, wide, div, fak, chg);
	}

	/*
	 * It was an answer.
	 */
	if (req == 0) {
		if (chg) 	/* Answer wasn't acceptable */
			goto reject_it;
		sym_setpprot (np, cp, dt, ofs, per, wide, div, fak);
		OUTL_DSP (SCRIPTA_BA (np, clrack));
		return;
	}

	/*
	 * It was a request. Set value and
	 * prepare an answer message
	 */
	sym_setpprot (np, cp, dt, ofs, per, wide, div, fak);

	np->msgout[0] = M_EXTENDED;
	np->msgout[1] = 6;
	np->msgout[2] = M_X_PPR_REQ;
	np->msgout[3] = per;
	np->msgout[4] = 0;
	np->msgout[5] = ofs;
	np->msgout[6] = wide;
	np->msgout[7] = dt;

	cp->nego_status = NS_PPR;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_msg(cp, "ppr msgout", np->msgout);
	}

	np->msgin [0] = M_NOOP;

	OUTL_DSP (SCRIPTB_BA (np, ppr_resp));
	return;
reject_it:
	sym_setpprot (np, cp, 0, 0, 0, 0, 0, 0);
	OUTL_DSP (SCRIPTB_BA (np, msg_bad));
	/*
	 * If it was a device response that should result in
	 * ST, we may want to try a legacy negotiation later.
	 */
	if (!req && !dt) {
		tp->tinfo.goal.options = 0;
		tp->tinfo.goal.width = wide;
		tp->tinfo.goal.period = per;
		tp->tinfo.goal.offset = ofs;
	}
}
5815
5816 /*
5817 * chip handler for WIDE DATA TRANSFER REQUEST (WDTR) message.
5818 */
static void sym_wide_nego(hcb_p np, tcb_p tp, ccb_p cp)
{
	u_char chg, wide;
	int req = 1;	/* 1 = target-initiated request, 0 = answer to ours */

	/*
	 * Wide request message received.
	 */
	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_msg(cp, "wide msgin", np->msgin);
	}

	/*
	 * Is it a request from the device?
	 * If we were in HS_NEGOTIATE, this WDTR is the answer to ours.
	 */
	if (INB (HS_PRT) == HS_NEGOTIATE) {
		OUTB (HS_PRT, HS_BUSY);
		if (cp->nego_status && cp->nego_status != NS_WIDE)
			goto reject_it;
		req = 0;
	}

	/*
	 * get requested values.
	 * WDTR layout: msgin[3] = transfer width exponent.
	 */
	chg = 0;
	wide = np->msgin[3];

	/*
	 * check values against driver limits.
	 */
	if (wide > np->maxwide)
		{chg = 1; wide = np->maxwide;}
	if (req) {
		if (wide > tp->tinfo.user.width)
			{chg = 1; wide = tp->tinfo.user.width;}
	}

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		PRINT_ADDR(cp);
		printf ("wdtr: wide=%d chg=%d.\n", wide, chg);
	}

	/*
	 * This was an answer message
	 */
	if (req == 0) {
		if (chg)	/*  Answer wasn't acceptable. */
			goto reject_it;
		sym_setwide (np, cp, wide);

		/*
		 * Negotiate for SYNC immediately after WIDE response.
		 * This allows to negotiate for both WIDE and SYNC on
		 * a single SCSI command (Suggested by Justin Gibbs).
		 */
		if (tp->tinfo.goal.offset) {
			np->msgout[0] = M_EXTENDED;
			np->msgout[1] = 3;
			np->msgout[2] = M_X_SYNC_REQ;
			np->msgout[3] = tp->tinfo.goal.period;
			np->msgout[4] = tp->tinfo.goal.offset;

			if (DEBUG_FLAGS & DEBUG_NEGO) {
				sym_print_msg(cp, "sync msgout", np->msgout);
			}

			cp->nego_status = NS_SYNC;
			OUTB (HS_PRT, HS_NEGOTIATE);
			OUTL_DSP (SCRIPTB_BA (np, sdtr_resp));
			return;
		}

		OUTL_DSP (SCRIPTA_BA (np, clrack));
		return;
	}

	/*
	 * It was a request, set value and
	 * prepare an answer message
	 */
	sym_setwide (np, cp, wide);

	np->msgout[0] = M_EXTENDED;
	np->msgout[1] = 2;
	np->msgout[2] = M_X_WIDE_REQ;
	np->msgout[3] = wide;

	np->msgin [0] = M_NOOP;

	cp->nego_status = NS_WIDE;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_msg(cp, "wide msgout", np->msgout);
	}

	OUTL_DSP (SCRIPTB_BA (np, wdtr_resp));
	return;
reject_it:
	OUTL_DSP (SCRIPTB_BA (np, msg_bad));
}
5920
5921 /*
5922 * Reset SYNC or WIDE to default settings.
5923 *
5924 * Called when a negotiation does not succeed either
5925 * on rejection or on protocol error.
5926 *
5927 * If it was a PPR that made problems, we may want to
5928 * try a legacy negotiation later.
5929 */
static void sym_nego_default(hcb_p np, tcb_p tp, ccb_p cp)
{
	/*
	 * any error in negotiation:
	 * fall back to default mode.
	 */
	switch (cp->nego_status) {
	case NS_PPR:
#if 0
		sym_setpprot (np, cp, 0, 0, 0, 0, 0, 0);
#else
		/*
		 * Keep ST (non-DT) goals so that a later legacy
		 * WDTR/SDTR negotiation may still be attempted,
		 * but clamp them to values the chip supports.
		 */
		tp->tinfo.goal.options = 0;
		if (tp->tinfo.goal.period < np->minsync)
			tp->tinfo.goal.period = np->minsync;
		if (tp->tinfo.goal.offset > np->maxoffs)
			tp->tinfo.goal.offset = np->maxoffs;
#endif
		break;
	case NS_SYNC:
		sym_setsync (np, cp, 0, 0, 0, 0);	/* back to async */
		break;
	case NS_WIDE:
		sym_setwide (np, cp, 0);		/* back to narrow */
		break;
	}
	/* Clear any pending message and the negotiation state. */
	np->msgin [0] = M_NOOP;
	np->msgout[0] = M_NOOP;
	cp->nego_status = 0;
}
5959
5960 /*
5961 * chip handler for MESSAGE REJECT received in response to
5962 * a WIDE or SYNCHRONOUS negotiation.
5963 */
static void sym_nego_rejected(hcb_p np, tcb_p tp, ccb_p cp)
{
	/* Fall back to default transfer settings and leave negotiation. */
	sym_nego_default(np, tp, cp);
	OUTB (HS_PRT, HS_BUSY);
}
5969
5970 /*
5971 * chip exception handler for programmed interrupts.
5972 */
static void sym_int_sir (hcb_p np)
{
	u_char	num	= INB (nc_dsps);		/* interrupt number from SCRIPTS */
	u32	dsa	= INL (nc_dsa);			/* DSA identifies the active CCB */
	ccb_p	cp	= sym_ccb_from_dsa(np, dsa);	/* may be NULL for some interrupts */
	u_char	target	= INB (nc_sdid) & 0x0f;
	tcb_p	tp	= &np->target[target];
	int	tmp;

	SYM_LOCK_ASSERT(MA_OWNED);

	if (DEBUG_FLAGS & DEBUG_TINY) printf ("I#%d", num);

	switch (num) {
	/*
	 * Command has been completed with error condition
	 * or has been auto-sensed.
	 */
	case SIR_COMPLETE_ERROR:
		if (!cp)
			goto out;
		sym_complete_error(np, cp);
		return;
	/*
	 * The C code is currently trying to recover from something.
	 * Typically, user want to abort some command.
	 */
	case SIR_SCRIPT_STOPPED:
	case SIR_TARGET_SELECTED:
	case SIR_ABORT_SENT:
		sym_sir_task_recovery(np, num);
		return;
	/*
	 * The device didn't go to MSG OUT phase after having
	 * been selected with ATN. We donnot want to handle
	 * that.
	 */
	case SIR_SEL_ATN_NO_MSG_OUT:
		printf ("%s:%d: No MSG OUT phase after selection with ATN.\n",
			sym_name (np), target);
		goto out_stuck;
	/*
	 * The device didn't switch to MSG IN phase after
	 * having reseleted the initiator.
	 */
	case SIR_RESEL_NO_MSG_IN:
		printf ("%s:%d: No MSG IN phase after reselection.\n",
			sym_name (np), target);
		goto out_stuck;
	/*
	 * After reselection, the device sent a message that wasn't
	 * an IDENTIFY.
	 */
	case SIR_RESEL_NO_IDENTIFY:
		printf ("%s:%d: No IDENTIFY after reselection.\n",
			sym_name (np), target);
		goto out_stuck;
	/*
	 * The device reselected a LUN we donnot know about.
	 */
	case SIR_RESEL_BAD_LUN:
		np->msgout[0] = M_RESET;
		goto out;
	/*
	 * The device reselected for an untagged nexus and we
	 * haven't any.
	 */
	case SIR_RESEL_BAD_I_T_L:
		np->msgout[0] = M_ABORT;
		goto out;
	/*
	 * The device reselected for a tagged nexus that we donnot
	 * have.
	 */
	case SIR_RESEL_BAD_I_T_L_Q:
		np->msgout[0] = M_ABORT_TAG;
		goto out;
	/*
	 * The SCRIPTS let us know that the device has grabbed
	 * our message and will abort the job.
	 */
	case SIR_RESEL_ABORTED:
		np->lastmsg = np->msgout[0];
		np->msgout[0] = M_NOOP;
		printf ("%s:%d: message %x sent on bad reselection.\n",
			sym_name (np), target, np->lastmsg);
		goto out;
	/*
	 * The SCRIPTS let us know that a message has been
	 * successfully sent to the device.
	 */
	case SIR_MSG_OUT_DONE:
		np->lastmsg = np->msgout[0];
		np->msgout[0] = M_NOOP;
		/* Should we really care of that */
		if (np->lastmsg == M_PARITY || np->lastmsg == M_ID_ERROR) {
			if (cp) {
				cp->xerr_status &= ~XE_PARITY_ERR;
				if (!cp->xerr_status)
					OUTOFFB (HF_PRT, HF_EXT_ERR);
			}
		}
		goto out;
	/*
	 * The device didn't send a GOOD SCSI status.
	 * We may have some work to do prior to allow
	 * the SCRIPTS processor to continue.
	 */
	case SIR_BAD_SCSI_STATUS:
		if (!cp)
			goto out;
		sym_sir_bad_scsi_status(np, cp);
		return;
	/*
	 * We are asked by the SCRIPTS to prepare a
	 * REJECT message.
	 */
	case SIR_REJECT_TO_SEND:
		sym_print_msg(cp, "M_REJECT to send for ", np->msgin);
		np->msgout[0] = M_REJECT;
		goto out;
	/*
	 * We have been ODD at the end of a DATA IN
	 * transfer and the device didn't send a
	 * IGNORE WIDE RESIDUE message.
	 * It is a data overrun condition.
	 */
	case SIR_SWIDE_OVERRUN:
		if (cp) {
			OUTONB (HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_SWIDE_OVRUN;
		}
		goto out;
	/*
	 * We have been ODD at the end of a DATA OUT
	 * transfer.
	 * It is a data underrun condition.
	 */
	case SIR_SODL_UNDERRUN:
		if (cp) {
			OUTONB (HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_SODL_UNRUN;
		}
		goto out;
	/*
	 * The device wants us to transfer more data than
	 * expected or in the wrong direction.
	 * The number of extra bytes is in scratcha.
	 * It is a data overrun condition.
	 */
	case SIR_DATA_OVERRUN:
		if (cp) {
			OUTONB (HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_EXTRA_DATA;
			cp->extra_bytes += INL (nc_scratcha);
		}
		goto out;
	/*
	 * The device switched to an illegal phase (4/5).
	 */
	case SIR_BAD_PHASE:
		if (cp) {
			OUTONB (HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_BAD_PHASE;
		}
		goto out;
	/*
	 * We received a message.
	 */
	case SIR_MSG_RECEIVED:
		if (!cp)
			goto out_stuck;
		switch (np->msgin [0]) {
		/*
		 * We received an extended message.
		 * We handle MODIFY DATA POINTER, SDTR, WDTR
		 * and reject all other extended messages.
		 */
		case M_EXTENDED:
			switch (np->msgin [2]) {
			case M_X_MODIFY_DP:
				if (DEBUG_FLAGS & DEBUG_POINTER)
					sym_print_msg(cp,"modify DP",np->msgin);
				/* Rebuild the 32-bit big-endian offset from bytes 3..6. */
				tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) +
				      (np->msgin[5]<<8) + (np->msgin[6]);
				sym_modify_dp(np, cp, tmp);
				return;
			case M_X_SYNC_REQ:
				sym_sync_nego(np, tp, cp);
				return;
			case M_X_PPR_REQ:
				sym_ppr_nego(np, tp, cp);
				return;
			case M_X_WIDE_REQ:
				sym_wide_nego(np, tp, cp);
				return;
			default:
				goto out_reject;
			}
			break;
		/*
		 * We received a 1/2 byte message not handled from SCRIPTS.
		 * We are only expecting MESSAGE REJECT and IGNORE WIDE
		 * RESIDUE messages that haven't been anticipated by
		 * SCRIPTS on SWIDE full condition. Unanticipated IGNORE
		 * WIDE RESIDUE messages are aliased as MODIFY DP (-1).
		 */
		case M_IGN_RESIDUE:
			if (DEBUG_FLAGS & DEBUG_POINTER)
				sym_print_msg(cp,"ign wide residue", np->msgin);
			sym_modify_dp(np, cp, -1);
			return;
		case M_REJECT:
			if (INB (HS_PRT) == HS_NEGOTIATE)
				sym_nego_rejected(np, tp, cp);
			else {
				PRINT_ADDR(cp);
				printf ("M_REJECT received (%x:%x).\n",
					scr_to_cpu(np->lastmsg), np->msgout[0]);
			}
			goto out_clrack;
			break;
		default:
			goto out_reject;
		}
		break;
	/*
	 * We received an unknown message.
	 * Ignore all MSG IN phases and reject it.
	 */
	case SIR_MSG_WEIRD:
		sym_print_msg(cp, "WEIRD message received", np->msgin);
		OUTL_DSP (SCRIPTB_BA (np, msg_weird));
		return;
	/*
	 * Negotiation failed.
	 * Target does not send us the reply.
	 * Remove the HS_NEGOTIATE status.
	 */
	case SIR_NEGO_FAILED:
		OUTB (HS_PRT, HS_BUSY);
		/* FALLTHROUGH */
	/*
	 * Negotiation failed.
	 * Target does not want answer message.
	 */
	case SIR_NEGO_PROTO:
		if (!cp)
			goto out;
		sym_nego_default(np, tp, cp);
		goto out;
	}

out:
	/* Resume the SCRIPTS processor. */
	OUTONB_STD ();
	return;
out_reject:
	/* Ask SCRIPTS to send a MESSAGE REJECT. */
	OUTL_DSP (SCRIPTB_BA (np, msg_bad));
	return;
out_clrack:
	/* Ack the message and continue. */
	OUTL_DSP (SCRIPTA_BA (np, clrack));
	return;
out_stuck:
	/* Leave the SCRIPTS processor stopped; recovery is up to upper layers. */
	return;
}
6237
6238 /*
6239 * Acquire a control block
6240 */
static ccb_p sym_get_ccb (hcb_p np, u_char tn, u_char ln, u_char tag_order)
{
	tcb_p tp = &np->target[tn];		/* target control block */
	lcb_p lp = sym_lp(tp, ln);		/* lun control block, may be NULL */
	u_short tag = NO_TAG;
	SYM_QUEHEAD *qp;
	ccb_p cp = (ccb_p) NULL;

	/*
	 * Look for a free CCB
	 */
	if (sym_que_empty(&np->free_ccbq))
		goto out;
	qp = sym_remque_head(&np->free_ccbq);
	if (!qp)
		goto out;
	cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);

	/*
	 * If the LCB is not yet available and the LUN
	 * has been probed ok, try to allocate the LCB.
	 */
	if (!lp && sym_is_bit(tp->lun_map, ln)) {
		lp = sym_alloc_lcb(np, tn, ln);
		if (!lp)
			goto out_free;
	}

	/*
	 * If the LCB is not available here, then the
	 * logical unit is not yet discovered. For those
	 * ones only accept 1 SCSI IO per logical unit,
	 * since we cannot allow disconnections.
	 */
	if (!lp) {
		if (!sym_is_bit(tp->busy0_map, ln))
			sym_set_bit(tp->busy0_map, ln);
		else
			goto out_free;
	} else {
		/*
		 * If we have been asked for a tagged command
		 * (tag_order != 0), refuse to overlap with an
		 * existing untagged one.
		 */
		if (tag_order) {
			if (lp->busy_itl != 0)
				goto out_free;
			/*
			 * Allocate resources for tags if not yet.
			 */
			if (!lp->cb_tags) {
				sym_alloc_lcb_tags(np, tn, ln);
				if (!lp->cb_tags)
					goto out_free;
			}
			/*
			 * Get a tag for this SCSI IO and set up
			 * the CCB bus address for reselection,
			 * and count it for this LUN.
			 * Toggle reselect path to tagged.
			 */
			if (lp->busy_itlq < SYM_CONF_MAX_TASK) {
				tag = lp->cb_tags[lp->ia_tag];
				if (++lp->ia_tag == SYM_CONF_MAX_TASK)
					lp->ia_tag = 0;
				lp->itlq_tbl[tag] = cpu_to_scr(cp->ccb_ba);
				++lp->busy_itlq;
				lp->head.resel_sa =
					cpu_to_scr(SCRIPTA_BA (np, resel_tag));
			}
			else
				goto out_free;
		}
		/*
		 * This command will not be tagged.
		 * If we already have either a tagged or untagged
		 * one, refuse to overlap this untagged one.
		 */
		else {
			if (lp->busy_itlq != 0 || lp->busy_itl != 0)
				goto out_free;
			/*
			 * Count this nexus for this LUN.
			 * Set up the CCB bus address for reselection.
			 * Toggle reselect path to untagged.
			 */
			lp->busy_itl = 1;
			lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba);
			lp->head.resel_sa =
			      cpu_to_scr(SCRIPTA_BA (np, resel_no_tag));
		}
	}
	/*
	 * Put the CCB into the busy queue.
	 */
	sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);

	/*
	 * Remember all informations needed to free this CCB.
	 */
	cp->to_abort = 0;
	cp->tag	   = tag;
	cp->target = tn;
	cp->lun    = ln;

	if (DEBUG_FLAGS & DEBUG_TAGS) {
		PRINT_LUN(np, tn, ln);
		printf ("ccb @%p using tag %d.\n", cp, tag);
	}

out:
	return cp;
out_free:
	/* Undo the dequeue; caller gets NULL and may retry later. */
	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
	return NULL;
}
6357
6358 /*
6359 * Release one control block
6360 */
static void sym_free_ccb(hcb_p np, ccb_p cp)
{
	tcb_p tp = &np->target[cp->target];
	lcb_p lp = sym_lp(tp, cp->lun);

	if (DEBUG_FLAGS & DEBUG_TAGS) {
		PRINT_LUN(np, cp->target, cp->lun);
		printf ("ccb @%p freeing tag %d.\n", cp, cp->tag);
	}

	/*
	 * If LCB available,
	 */
	if (lp) {
		/*
		 * If tagged, release the tag, set the reselect path.
		 */
		if (cp->tag != NO_TAG) {
			/*
			 * Free the tag value by returning it to the
			 * circular tag allocation buffer.
			 */
			lp->cb_tags[lp->if_tag] = cp->tag;
			if (++lp->if_tag == SYM_CONF_MAX_TASK)
				lp->if_tag = 0;
			/*
			 * Make the reselect path invalid,
			 * and uncount this CCB.
			 */
			lp->itlq_tbl[cp->tag] = cpu_to_scr(np->bad_itlq_ba);
			--lp->busy_itlq;
		} else {	/* Untagged */
			/*
			 * Make the reselect path invalid,
			 * and uncount this CCB.
			 */
			lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);
			lp->busy_itl = 0;
		}
		/*
		 * If no JOB active, make the LUN reselect path invalid.
		 */
		if (lp->busy_itlq == 0 && lp->busy_itl == 0)
			lp->head.resel_sa =
				cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun));
	}
	/*
	 * Otherwise, we only accept 1 IO per LUN.
	 * Clear the bit that keeps track of this IO.
	 */
	else
		sym_clr_bit(tp->busy0_map, cp->lun);

	/*
	 * We donnot queue more than 1 ccb per target
	 * with negotiation at any time. If this ccb was
	 * used for negotiation, clear this info in the tcb.
	 */
	if (cp == tp->nego_cp)
		tp->nego_cp = NULL;

#ifdef SYM_CONF_IARB_SUPPORT
	/*
	 * If we just complete the last queued CCB,
	 * clear this info that is no longer relevant.
	 */
	if (cp == np->last_cp)
		np->last_cp = NULL;
#endif

	/*
	 * Unmap user data from DMA map if needed.
	 */
	if (cp->dmamapped) {
		bus_dmamap_unload(np->data_dmat, cp->dmamap);
		cp->dmamapped = 0;
	}

	/*
	 * Make this CCB available.
	 */
	cp->cam_ccb = NULL;
	cp->host_status = HS_IDLE;
	sym_remque(&cp->link_ccbq);
	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
}
6446
6447 /*
6448 * Allocate a CCB from memory and initialize its fixed part.
6449 */
static ccb_p sym_alloc_ccb(hcb_p np)
{
	ccb_p cp = NULL;
	int hcode;

	SYM_LOCK_ASSERT(MA_NOTOWNED);

	/*
	 * Prevent from allocating more CCBs than we can
	 * queue to the controller.
	 */
	if (np->actccbs >= SYM_CONF_MAX_START)
		return NULL;

	/*
	 * Allocate memory for this CCB.
	 */
	cp = sym_calloc_dma(sizeof(struct sym_ccb), "CCB");
	if (!cp)
		return NULL;

	/*
	 * Allocate a bounce buffer for sense data.
	 */
	cp->sns_bbuf = sym_calloc_dma(SYM_SNS_BBUF_LEN, "SNS_BBUF");
	if (!cp->sns_bbuf)
		goto out_free;

	/*
	 * Allocate a map for the DMA of user data.
	 */
	if (bus_dmamap_create(np->data_dmat, 0, &cp->dmamap))
		goto out_free;
	/*
	 * Count it.
	 */
	np->actccbs++;

	/*
	 * Initialize the callout (used for per-command timeouts).
	 */
	callout_init(&cp->ch, 1);

	/*
	 * Compute the bus address of this ccb.
	 */
	cp->ccb_ba = vtobus(cp);

	/*
	 * Insert this ccb into the hashed list, keyed by
	 * its bus address so it can be found back from DSA.
	 */
	hcode = CCB_HASH_CODE(cp->ccb_ba);
	cp->link_ccbh = np->ccbh[hcode];
	np->ccbh[hcode] = cp;

	/*
	 * Initialize the start and restart actions.
	 */
	cp->phys.head.go.start   = cpu_to_scr(SCRIPTA_BA (np, idle));
	cp->phys.head.go.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l));

 	/*
	 * Initialize some other fields.
	 */
	cp->phys.smsg_ext.addr = cpu_to_scr(HCB_BA(np, msgin[2]));

	/*
	 * Chain into free ccb queue.
	 */
	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);

	return cp;
out_free:
	/* Partial allocation failed: release what we got. */
	if (cp->sns_bbuf)
		sym_mfree_dma(cp->sns_bbuf, SYM_SNS_BBUF_LEN, "SNS_BBUF");
	sym_mfree_dma(cp, sizeof(*cp), "CCB");
	return NULL;
}
6528
6529 /*
6530 * Look up a CCB from a DSA value.
6531 */
sym_ccb_from_dsa(hcb_p np,u32 dsa)6532 static ccb_p sym_ccb_from_dsa(hcb_p np, u32 dsa)
6533 {
6534 int hcode;
6535 ccb_p cp;
6536
6537 hcode = CCB_HASH_CODE(dsa);
6538 cp = np->ccbh[hcode];
6539 while (cp) {
6540 if (cp->ccb_ba == dsa)
6541 break;
6542 cp = cp->link_ccbh;
6543 }
6544
6545 return cp;
6546 }
6547
6548 /*
6549 * Lun control block allocation and initialization.
6550 */
static lcb_p sym_alloc_lcb (hcb_p np, u_char tn, u_char ln)
{
	tcb_p tp = &np->target[tn];
	lcb_p lp = sym_lp(tp, ln);

	/*
	 * Already done, just return.
	 */
	if (lp)
		return lp;
	/*
	 * Check against some race.
	 */
	assert(!sym_is_bit(tp->busy0_map, ln));

	/*
	 * Allocate the LCB bus address array.
	 * Compute the bus address of this table.
	 * 256 bytes = 64 entries of 32-bit bus addresses.
	 */
	if (ln && !tp->luntbl) {
		int i;

		tp->luntbl = sym_calloc_dma(256, "LUNTBL");
		if (!tp->luntbl)
			goto fail;
		/* Point every LUN at the "bad lun" handler by default. */
		for (i = 0 ; i < 64 ; i++)
			tp->luntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa));
		tp->head.luntbl_sa = cpu_to_scr(vtobus(tp->luntbl));
	}

	/*
	 * Allocate the table of pointers for LUN(s) > 0, if needed.
	 */
	if (ln && !tp->lunmp) {
		tp->lunmp = sym_calloc(SYM_CONF_MAX_LUN * sizeof(lcb_p),
				   "LUNMP");
		if (!tp->lunmp)
			goto fail;
	}

	/*
	 * Allocate the lcb.
	 * Make it available to the chip.
	 */
	lp = sym_calloc_dma(sizeof(struct sym_lcb), "LCB");
	if (!lp)
		goto fail;
	if (ln) {
		tp->lunmp[ln] = lp;
		tp->luntbl[ln] = cpu_to_scr(vtobus(lp));
	}
	else {
		tp->lun0p = lp;
		tp->head.lun0_sa = cpu_to_scr(vtobus(lp));
	}

	/*
	 * Let the itl task point to error handling.
	 */
	lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);

	/*
	 * Set the reselect pattern to our default. :)
	 */
	lp->head.resel_sa = cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun));

	/*
	 * Set user capabilities.
	 */
	lp->user_flags = tp->usrflags & (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);

fail:
	/* Returns NULL when any allocation above failed. */
	return lp;
}
6625
6626 /*
6627 * Allocate LCB resources for tagged command queuing.
6628 */
static void sym_alloc_lcb_tags (hcb_p np, u_char tn, u_char ln)
{
	tcb_p tp = &np->target[tn];
	lcb_p lp = sym_lp(tp, ln);
	int i;

	/*
	 * If LCB not available, try to allocate it.
	 * Failure is reported to the caller through lp->cb_tags
	 * staying NULL.
	 */
	if (!lp && !(lp = sym_alloc_lcb(np, tn, ln)))
		return;

	/*
	 * Allocate the task table and the tag allocation
	 * circular buffer. We want both or none.
	 */
	lp->itlq_tbl = sym_calloc_dma(SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
	if (!lp->itlq_tbl)
		return;
	lp->cb_tags = sym_calloc(SYM_CONF_MAX_TASK, "CB_TAGS");
	if (!lp->cb_tags) {
		sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
		lp->itlq_tbl = NULL;
		return;
	}

	/*
	 * Initialize the task table with invalid entries.
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
		lp->itlq_tbl[i] = cpu_to_scr(np->notask_ba);

	/*
	 * Fill up the tag buffer with tag numbers.
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
		lp->cb_tags[i] = i;

	/*
	 * Make the task table available to SCRIPTS,
	 * And accept tagged commands now.
	 */
	lp->head.itlq_tbl_sa = cpu_to_scr(vtobus(lp->itlq_tbl));
}
6673
6674 /*
6675 * Test the pci bus snoop logic :-(
6676 *
6677 * Has to be called with interrupts disabled.
6678 */
6679 #ifndef SYM_CONF_IOMAPPED
static int sym_regtest (hcb_p np)
{
	register volatile u32 data;
	/*
	 * chip registers may NOT be cached.
	 * write 0xffffffff to a read only register area,
	 * and try to read it back.
	 */
	data = 0xffffffff;
	OUTL_OFF(offsetof(struct sym_reg, nc_dstat), data);
	data = INL_OFF(offsetof(struct sym_reg, nc_dstat));
	/*
	 * The read-only register cannot come back all-ones;
	 * the alternate (disabled) check below matches specific
	 * expected bits instead of just "not all ones".
	 * Returns 0 on success, 0x10 on failure.
	 */
#if 1
	if (data == 0xffffffff) {
#else
	if ((data & 0xe2f0fffd) != 0x02000080) {
#endif
		printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n",
			(unsigned) data);
		return (0x10);
	}
	return (0);
}
6702 #endif
6703
static int sym_snooptest (hcb_p np)
{
	u32	sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc, dstat;
	int	i, err=0;
	/*
	 * Returned error bits: 0x10 register test failed,
	 * 0x20 timeout, 0x40 bad SCRIPTS termination address,
	 * 0x80 fatal DMA error, and 1/2/4 for the individual
	 * host/chip data mismatches checked at the end.
	 */
#ifndef SYM_CONF_IOMAPPED
	err |= sym_regtest (np);
	if (err) return (err);
#endif
restart_test:
	/*
	 * Enable Master Parity Checking as we intend
	 * to enable it for normal operations.
	 */
	OUTB (nc_ctest4, (np->rv_ctest4 & MPEE));
	/*
	 * init
	 */
	pc  = SCRIPTB0_BA (np, snooptest);
	host_wr = 1;
	sym_wr  = 2;
	/*
	 * Set memory and register.
	 */
	np->cache = cpu_to_scr(host_wr);
	OUTL (nc_temp, sym_wr);
	/*
	 * Start script (exchange values)
	 */
	OUTL (nc_dsa, np->hcb_ba);
	OUTL_DSP (pc);
	/*
	 * Wait 'til done (with timeout)
	 */
	for (i = 0; i < SYM_SNOOP_TIMEOUT; i++)
		if (INB(nc_istat) & (INTF|SIP|DIP))
			break;
	if (i >= SYM_SNOOP_TIMEOUT) {
		printf ("CACHE TEST FAILED: timeout.\n");
		return (0x20);
	}
	/*
	 * Check for fatal DMA errors.
	 */
	dstat = INB (nc_dstat);
#if 1	/* Band aiding for broken hardwares that fail PCI parity */
	if ((dstat & MDPE) && (np->rv_ctest4 & MPEE)) {
		device_printf(np->device, "PCI DATA PARITY ERROR DETECTED - "
			"DISABLING MASTER DATA PARITY CHECKING.\n");
		np->rv_ctest4 &= ~MPEE;
		/* Retry the whole test with parity checking off. */
		goto restart_test;
	}
#endif
	if (dstat & (MDPE|BF|IID)) {
		device_printf(np->device,
		    "CACHE TEST FAILED: DMA error (dstat=0x%02x).\n", dstat);
		return (0x80);
	}
	/*
	 * Save termination position.
	 */
	pc = INL (nc_dsp);
	/*
	 * Read memory and register.
	 */
	host_rd = scr_to_cpu(np->cache);
	sym_rd  = INL (nc_scratcha);
	sym_bk  = INL (nc_temp);

	/*
	 * Check termination position.
	 */
	if (pc != SCRIPTB0_BA (np, snoopend)+8) {
		device_printf(np->device,
		    "CACHE TEST FAILED: script execution failed.\n");
		device_printf(np->device, "start=%08lx, pc=%08lx, end=%08lx\n",
		    (u_long)SCRIPTB0_BA(np, snooptest), (u_long)pc,
		    (u_long)SCRIPTB0_BA(np, snoopend) + 8);
		return (0x40);
	}
	/*
	 * Show results.
	 */
	if (host_wr != sym_rd) {
		device_printf(np->device,
		    "CACHE TEST FAILED: host wrote %d, chip read %d.\n",
		    (int)host_wr, (int)sym_rd);
		err |= 1;
	}
	if (host_rd != sym_wr) {
		device_printf(np->device,
		    "CACHE TEST FAILED: chip wrote %d, host read %d.\n",
		    (int)sym_wr, (int)host_rd);
		err |= 2;
	}
	if (sym_bk != sym_wr) {
		device_printf(np->device,
		    "CACHE TEST FAILED: chip wrote %d, read back %d.\n",
		    (int)sym_wr, (int)sym_bk);
		err |= 4;
	}

	return (err);
}
6807
6808 /*
6809 * Determine the chip's clock frequency.
6810 *
6811 * This is essential for the negotiation of the synchronous
6812 * transfer rate.
6813 *
6814 * Note: we have to return the correct value.
6815 * THERE IS NO SAFE DEFAULT VALUE.
6816 *
6817 * Most NCR/SYMBIOS boards are delivered with a 40 Mhz clock.
6818 * 53C860 and 53C875 rev. 1 support fast20 transfers but
6819 * do not have a clock doubler and so are provided with a
6820 * 80 MHz clock. All other fast20 boards incorporate a doubler
6821 * and so should be delivered with a 40 MHz clock.
6822 * The recent fast40 chips (895/896/895A/1010) use a 40 Mhz base
6823 * clock and provide a clock quadrupler (160 Mhz).
6824 */
6825
6826 /*
6827 * Select SCSI clock frequency
6828 */
static void sym_selectclock(hcb_p np, u_char scntl3)
{
	/*
	 * If multiplier not present or not selected, leave here.
	 */
	if (np->multiplier <= 1) {
		OUTB(nc_scntl3, scntl3);
		return;
	}

	if (sym_verbose >= 2)
		device_printf(np->device, "enabling clock multiplier\n");

	OUTB(nc_stest1, DBLEN);	   /* Enable clock multiplier		  */
	/*
	 * Wait for the LCKFRQ bit to be set if supported by the chip.
	 * Otherwise wait 20 micro-seconds.
	 * NOTE(review): 20 polls x 20us gives a ~400us upper bound
	 * before giving up on frequency lock.
	 */
	if (np->features & FE_LCKFRQ) {
		int i = 20;
		while (!(INB(nc_stest4) & LCKFRQ) && --i > 0)
			UDELAY (20);
		if (!i)
			device_printf(np->device,
			    "the chip cannot lock the frequency\n");
	} else
		UDELAY (20);
	/* The SCSI clock must be halted while switching its source. */
	OUTB(nc_stest3, HSC);		/* Halt the scsi clock		*/
	OUTB(nc_scntl3,	scntl3);
	OUTB(nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier	*/
	OUTB(nc_stest3, 0x00);		/* Restart scsi clock 		*/
}
6861
6862 /*
6863 * calculate SCSI clock frequency (in KHz)
6864 */
static unsigned getfreq (hcb_p np, int gen)
{
	unsigned int ms = 0;
	unsigned int f;

	/*
	 * Measure GEN timer delay in order
	 * to calculate SCSI clock frequency
	 *
	 * This code will never execute too
	 * many loop iterations (if DELAY is
	 * reasonably correct). It could get
	 * too low a delay (too high a freq.)
	 * if the CPU is slow executing the
	 * loop for some reason (an NMI, for
	 * example). For this reason, when
	 * multiple measurements are made,
	 * the caller should trust the higher
	 * delay (lower frequency returned).
	 */
	OUTW (nc_sien , 0);	/* mask all scsi interrupts */
	(void) INW (nc_sist);	/* clear pending scsi interrupt */
	OUTB (nc_dien , 0);	/* mask all dma interrupts */
	(void) INW (nc_sist);	/* another one, just to be sure :) */
	OUTB (nc_scntl3, 4);	/* set pre-scaler to divide by 3 */
	OUTB (nc_stime1, 0);	/* disable general purpose timer */
	OUTB (nc_stime1, gen);	/* set to nominal delay of 1<<gen * 125us */
	while (!(INW(nc_sist) & GEN) && ms++ < 100000)
		UDELAY (1000);	/* count ms */
	OUTB (nc_stime1, 0);	/* disable general purpose timer */
	/*
	 * set prescaler to divide by whatever 0 means
	 * 0 ought to choose divide by 2, but appears
	 * to set divide by 3.5 mode in my 53c810 ...
	 */
	OUTB (nc_scntl3, 0);

	/*
	 * adjust for prescaler, and convert into KHz
	 */
	f = ms ? ((1 << gen) * 4340) / ms : 0;

	if (sym_verbose >= 2)
		device_printf(np->device, "Delay (GEN=%d): %u msec, %u KHz\n",
		    gen, ms, f);

	return f;
}
6913
6914 static unsigned sym_getfreq (hcb_p np)
6915 {
6916 u_int f1, f2;
6917 int gen = 11;
6918
6919 (void) getfreq (np, gen); /* throw away first result */
6920 f1 = getfreq (np, gen);
6921 f2 = getfreq (np, gen);
6922 if (f1 > f2) f1 = f2; /* trust lower result */
6923 return f1;
6924 }
6925
6926 /*
6927 * Get/probe chip SCSI clock frequency
6928 */
static void sym_getclock (hcb_p np, int mult)
{
	unsigned char scntl3 = np->sv_scntl3;
	unsigned char stest1 = np->sv_stest1;
	unsigned f1;

	/*
	 * On return, np->multiplier and np->clock_khz are set.
	 *
	 * For the C10 core, assume 40 MHz.
	 */
	if (np->features & FE_C10) {
		np->multiplier = mult;
		np->clock_khz = 40000 * mult;
		return;
	}

	np->multiplier = 1;
	f1 = 40000;
	/*
	 * True with 875/895/896/895A with clock multiplier selected
	 */
	if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) {
		if (sym_verbose >= 2)
			device_printf(np->device, "clock multiplier found\n");
		np->multiplier	= mult;
	}

	/*
	 * If multiplier not found or scntl3 not 7,5,3,
	 * reset chip and get frequency from general purpose timer.
	 * Otherwise trust scntl3 BIOS setting.
	 */
	if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) {
		OUTB (nc_stest1, 0);		/* make sure doubler is OFF */
		f1 = sym_getfreq (np);

		if (sym_verbose)
			device_printf(np->device, "chip clock is %uKHz\n", f1);

		/* Snap the measurement to the nearest nominal value. */
		if	(f1 <	45000)		f1 =  40000;
		else if (f1 <	55000)		f1 =  50000;
		else				f1 =  80000;

		/* An 80 MHz base clock implies no multiplier in use. */
		if (f1 < 80000 && mult > 1) {
			if (sym_verbose >= 2)
				device_printf(np->device,
				    "clock multiplier assumed\n");
			np->multiplier	= mult;
		}
	} else {
		/* Derive base frequency from the BIOS scntl3 divisor. */
		if	((scntl3 & 7) == 3)	f1 =  40000;
		else if	((scntl3 & 7) == 5)	f1 =  80000;
		else 				f1 = 160000;

		f1 /= np->multiplier;
	}

	/*
	 * Compute controller synchronous parameters.
	 */
	f1		*= np->multiplier;
	np->clock_khz	= f1;
}
6991
6992 /*
6993 * Get/probe PCI clock frequency
6994 */
static int sym_getpciclock (hcb_p np)
{
	int f = 0;

	/*
	 * Measure the PCI bus clock (in KHz) by feeding it to the
	 * chip as SCSI clock and reusing the frequency counter.
	 * Returns 0 when no measurement was possible.
	 *
	 * For the C1010-33, this doesn't work.
	 * For the C1010-66, this will be tested when I'll have
	 * such a beast to play with.
	 */
	if (!(np->features & FE_C10)) {
		OUTB (nc_stest1, SCLK);	/* Use the PCI clock as SCSI clock */
		f = (int) sym_getfreq (np);
		OUTB (nc_stest1, 0);
	}
	np->pciclk_khz = f;

	return f;
}
7013
7014 /*============= DRIVER ACTION/COMPLETION ====================*/
7015
7016 /*
7017 * Print something that tells about extended errors.
7018 */
7019 static void sym_print_xerr(ccb_p cp, int x_status)
7020 {
7021 if (x_status & XE_PARITY_ERR) {
7022 PRINT_ADDR(cp);
7023 printf ("unrecovered SCSI parity error.\n");
7024 }
7025 if (x_status & XE_EXTRA_DATA) {
7026 PRINT_ADDR(cp);
7027 printf ("extraneous data discarded.\n");
7028 }
7029 if (x_status & XE_BAD_PHASE) {
7030 PRINT_ADDR(cp);
7031 printf ("illegal scsi phase (4/5).\n");
7032 }
7033 if (x_status & XE_SODL_UNRUN) {
7034 PRINT_ADDR(cp);
7035 printf ("ODD transfer in DATA OUT phase.\n");
7036 }
7037 if (x_status & XE_SWIDE_OVRUN) {
7038 PRINT_ADDR(cp);
7039 printf ("ODD transfer in DATA IN phase.\n");
7040 }
7041 }
7042
7043 /*
7044 * Choose the more appropriate CAM status if
7045 * the IO encountered an extended error.
7046 */
7047 static int sym_xerr_cam_status(int cam_status, int x_status)
7048 {
7049 if (x_status) {
7050 if (x_status & XE_PARITY_ERR)
7051 cam_status = CAM_UNCOR_PARITY;
7052 else if (x_status &(XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN))
7053 cam_status = CAM_DATA_RUN_ERR;
7054 else if (x_status & XE_BAD_PHASE)
7055 cam_status = CAM_REQ_CMP_ERR;
7056 else
7057 cam_status = CAM_REQ_CMP_ERR;
7058 }
7059 return cam_status;
7060 }
7061
7062 /*
 * Complete execution of a SCSI command with extended
7064 * error, SCSI status error, or having been auto-sensed.
7065 *
7066 * The SCRIPTS processor is not running there, so we
7067 * can safely access IO registers and remove JOBs from
7068 * the START queue.
7069 * SCRATCHA is assumed to have been loaded with STARTPOS
7070 * before the SCRIPTS called the C code.
7071 */
static void sym_complete_error (hcb_p np, ccb_p cp)
{
	struct ccb_scsiio *csio;
	u_int cam_status;
	int i, sense_returned;

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 * Paranoid check. :)
	 */
	if (!cp || !cp->cam_ccb)
		return;

	if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_RESULT)) {
		printf ("CCB=%lx STAT=%x/%x/%x DEV=%d/%d\n", (unsigned long)cp,
			cp->host_status, cp->ssss_status, cp->host_flags,
			cp->target, cp->lun);
		MDELAY(100);
	}

	/*
	 * Get CAM command pointer.
	 */
	csio = &cp->cam_ccb->csio;

	/*
	 * Check for extended errors.
	 */
	if (cp->xerr_status) {
		if (sym_verbose)
			sym_print_xerr(cp, cp->xerr_status);
		/* An extended error downgrades an otherwise OK status. */
		if (cp->host_status == HS_COMPLETE)
			cp->host_status = HS_COMP_ERR;
	}

	/*
	 * Calculate the residual.
	 */
	csio->sense_resid = 0;
	csio->resid = sym_compute_residual(np, cp);

	if (!SYM_CONF_RESIDUAL_SUPPORT) {/* If user does not want residuals */
		csio->resid  = 0;	/* throw them away. :)		   */
		cp->sv_resid = 0;
	}

	if (cp->host_flags & HF_SENSE) {		/* Auto sense     */
		/*
		 * The CCB carried a REQUEST SENSE on behalf of the
		 * original command: restore the original command's
		 * status/residual and report the sense data.
		 */
		csio->scsi_status = cp->sv_scsi_status;	/* Restore status */
		csio->sense_resid = csio->resid;	/* Swap residuals */
		csio->resid       = cp->sv_resid;
		cp->sv_resid	  = 0;
		if (sym_verbose && cp->sv_xerr_status)
			sym_print_xerr(cp, cp->sv_xerr_status);
		if (cp->host_status == HS_COMPLETE &&
		    cp->ssss_status == S_GOOD &&
		    cp->xerr_status == 0) {
			cam_status = sym_xerr_cam_status(CAM_SCSI_STATUS_ERROR,
							 cp->sv_xerr_status);
			cam_status |= CAM_AUTOSNS_VALID;
			/*
			 * Bounce back the sense data to user and
			 * fix the residual.
			 */
			bzero(&csio->sense_data, sizeof(csio->sense_data));
			sense_returned = SYM_SNS_BBUF_LEN - csio->sense_resid;
			if (sense_returned < csio->sense_len)
				csio->sense_resid = csio->sense_len -
				    sense_returned;
			else
				csio->sense_resid = 0;
			memcpy(&csio->sense_data, cp->sns_bbuf,
			    MIN(csio->sense_len, sense_returned));
#if 0
			/*
			 * If the device reports a UNIT ATTENTION condition
			 * due to a RESET condition, we should consider all
			 * disconnect CCBs for this unit as aborted.
			 */
			if (1) {
				u_char *p;
				p  = (u_char *) csio->sense_data;
				if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29)
					sym_clear_tasks(np, CAM_REQ_ABORTED,
							cp->target,cp->lun, -1);
			}
#endif
		}
		else
			cam_status = CAM_AUTOSENSE_FAIL;
	}
	else if (cp->host_status == HS_COMPLETE) {	/* Bad SCSI status */
		csio->scsi_status = cp->ssss_status;
		cam_status = CAM_SCSI_STATUS_ERROR;
	}
	else if (cp->host_status == HS_SEL_TIMEOUT)	/* Selection timeout */
		cam_status = CAM_SEL_TIMEOUT;
	else if (cp->host_status == HS_UNEXPECTED)	/* Unexpected BUS FREE*/
		cam_status = CAM_UNEXP_BUSFREE;
	else {						/* Extended error */
		if (sym_verbose) {
			PRINT_ADDR(cp);
			printf ("COMMAND FAILED (%x %x %x).\n",
				cp->host_status, cp->ssss_status,
				cp->xerr_status);
		}
		csio->scsi_status = cp->ssss_status;
		/*
		 * Set the most appropriate value for CAM status.
		 */
		cam_status = sym_xerr_cam_status(CAM_REQ_CMP_ERR,
						 cp->xerr_status);
	}

	/*
	 * Dequeue all queued CCBs for that device
	 * not yet started by SCRIPTS.
	 * SCRATCHA holds STARTPOS (see function header comment).
	 */
	i = (INL (nc_scratcha) - np->squeue_ba) / 4;
	(void) sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);

	/*
	 * Restart the SCRIPTS processor.
	 */
	OUTL_DSP (SCRIPTA_BA (np, start));

	/*
	 * Synchronize DMA map if needed.
	 */
	if (cp->dmamapped) {
		bus_dmamap_sync(np->data_dmat, cp->dmamap,
		    (cp->dmamapped == SYM_DMA_READ ?
			BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
	}
	/*
	 * Add this one to the COMP queue.
	 * Complete all those commands with either error
	 * or requeue condition.
	 */
	sym_set_cam_status((union ccb *) csio, cam_status);
	sym_remque(&cp->link_ccbq);
	sym_insque_head(&cp->link_ccbq, &np->comp_ccbq);
	sym_flush_comp_queue(np, 0);
}
7216
7217 /*
7218 * Complete execution of a successful SCSI command.
7219 *
7220 * Only successful commands go to the DONE queue,
7221 * since we need to have the SCRIPTS processor
7222 * stopped on any error condition.
7223 * The SCRIPTS processor is running while we are
7224 * completing successful commands.
7225 */
static void sym_complete_ok (hcb_p np, ccb_p cp)
{
	struct ccb_scsiio *csio;
	tcb_p tp;
	lcb_p lp;

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 * Paranoid check. :)
	 */
	if (!cp || !cp->cam_ccb)
		return;
	assert (cp->host_status == HS_COMPLETE);

	/*
	 * Get command, target and lun pointers.
	 */
	csio = &cp->cam_ccb->csio;
	tp = &np->target[cp->target];
	lp = sym_lp(tp, cp->lun);

	/*
	 * Assume device discovered on first success.
	 */
	if (!lp)
		sym_set_bit(tp->lun_map, cp->lun);

	/*
	 * If all data have been transferred, given than no
	 * extended error did occur, there is no residual.
	 * lastp == goalp means the data script ran to its end.
	 */
	csio->resid = 0;
	if (cp->phys.head.lastp != cp->phys.head.goalp)
		csio->resid = sym_compute_residual(np, cp);

	/*
	 * Wrong transfer residuals may be worse than just always
	 * returning zero. User can disable this feature from
	 * sym_conf.h. Residual support is enabled by default.
	 */
	if (!SYM_CONF_RESIDUAL_SUPPORT)
		csio->resid  = 0;

	/*
	 * Synchronize DMA map if needed.
	 */
	if (cp->dmamapped) {
		bus_dmamap_sync(np->data_dmat, cp->dmamap,
		    (cp->dmamapped == SYM_DMA_READ ?
			BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
	}
	/*
	 * Set status and complete the command.
	 */
	csio->scsi_status = cp->ssss_status;
	sym_set_cam_status((union ccb *) csio, CAM_REQ_CMP);
	sym_xpt_done(np, (union ccb *) csio, cp);
	sym_free_ccb(np, cp);
}
7286
7287 /*
7288 * Our callout handler
7289 */
7290 static void sym_callout(void *arg)
7291 {
7292 union ccb *ccb = (union ccb *) arg;
7293 hcb_p np = ccb->ccb_h.sym_hcb_ptr;
7294
7295 /*
7296 * Check that the CAM CCB is still queued.
7297 */
7298 if (!np)
7299 return;
7300
7301 SYM_LOCK();
7302
7303 switch(ccb->ccb_h.func_code) {
7304 case XPT_SCSI_IO:
7305 (void) sym_abort_scsiio(np, ccb, 1);
7306 break;
7307 default:
7308 break;
7309 }
7310
7311 SYM_UNLOCK();
7312 }
7313
7314 /*
7315 * Abort an SCSI IO.
7316 */
static int sym_abort_scsiio(hcb_p np, union ccb *ccb, int timed_out)
{
	ccb_p cp;
	SYM_QUEHEAD *qp;

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 * Look up our CCB control block.
	 * Returns -1 when the CCB is not found busy (or is
	 * still waiting for DMA mapping), 0 otherwise.
	 */
	cp = NULL;
	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
		ccb_p cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		if (cp2->cam_ccb == ccb) {
			cp = cp2;
			break;
		}
	}
	if (!cp || cp->host_status == HS_WAIT)
		return -1;

	/*
	 * If a previous abort didn't succeed in time,
	 * perform a BUS reset.
	 */
	if (cp->to_abort) {
		sym_reset_scsi_bus(np, 1);
		return 0;
	}

	/*
	 * Mark the CCB for abort and allow time for the abort
	 * to complete before escalating (10s callout).
	 * `2' requests an abort after timeout, `1' a plain one.
	 */
	cp->to_abort = timed_out ? 2 : 1;
	callout_reset(&cp->ch, 10 * hz, sym_callout, (caddr_t) ccb);

	/*
	 * Tell the SCRIPTS processor to stop and synchronize with us.
	 */
	np->istat_sem = SEM;
	OUTB (nc_istat, SIGP|SEM);
	return 0;
}
7360
7361 /*
7362 * Reset a SCSI device (all LUNs of a target).
7363 */
static void sym_reset_dev(hcb_p np, union ccb *ccb)
{
	tcb_p tp;
	struct ccb_hdr *ccb_h = &ccb->ccb_h;

	SYM_LOCK_ASSERT(MA_OWNED);

	/* Reject our own address and out-of-range target/lun. */
	if (ccb_h->target_id   == np->myaddr ||
	    ccb_h->target_id   >= SYM_CONF_MAX_TARGET ||
	    ccb_h->target_lun  >= SYM_CONF_MAX_LUN) {
		sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE);
		return;
	}

	tp = &np->target[ccb_h->target_id];

	/*
	 * Flag the target for reset and signal the SCRIPTS
	 * processor; the actual reset happens asynchronously.
	 */
	tp->to_reset = 1;
	sym_xpt_done2(np, ccb, CAM_REQ_CMP);

	np->istat_sem = SEM;
	OUTB (nc_istat, SIGP|SEM);
}
7386
7387 /*
7388 * SIM action entry point.
7389 */
static void sym_action(struct cam_sim *sim, union ccb *ccb)
{
	hcb_p	np;
	tcb_p	tp;
	lcb_p	lp;
	ccb_p	cp;
	int 	tmp;
	u_char	idmsg, *msgptr;
	u_int   msglen;
	struct	ccb_scsiio *csio;
	struct	ccb_hdr  *ccb_h;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("sym_action\n"));

	/*
	 * Retrieve our controller data structure.
	 */
	np = (hcb_p) cam_sim_softc(sim);

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 * The common case is SCSI IO.
	 * We deal with other ones elsewhere.
	 */
	if (ccb->ccb_h.func_code != XPT_SCSI_IO) {
		sym_action2(sim, ccb);
		return;
	}
	csio  = &ccb->csio;
	ccb_h = &csio->ccb_h;

	/*
	 * Work around races.
	 */
	if ((ccb_h->status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		xpt_done(ccb);
		return;
	}

	/*
	 * Minimal checkings, so that we will not
	 * go outside our tables.
	 */
	if (ccb_h->target_id   == np->myaddr ||
	    ccb_h->target_id   >= SYM_CONF_MAX_TARGET ||
	    ccb_h->target_lun  >= SYM_CONF_MAX_LUN) {
		sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE);
		return;
	}

	/*
	 * Retrieve the target and lun descriptors.
	 * lp may be NULL if the LUN has not been seen yet.
	 */
	tp = &np->target[ccb_h->target_id];
	lp = sym_lp(tp, ccb_h->target_lun);

	/*
	 * Complete the 1st INQUIRY command with error
	 * condition if the device is flagged NOSCAN
	 * at BOOT in the NVRAM. This may speed up
	 * the boot and maintain coherency with BIOS
	 * device numbering. Clearing the flag allows
	 * user to rescan skipped devices later.
	 * We also return error for devices not flagged
	 * for SCAN LUNS in the NVRAM since some mono-lun
	 * devices behave badly when asked for some non
	 * zero LUN. Btw, this is an absolute hack.:-)
	 */
	if (!(ccb_h->flags & CAM_CDB_PHYS) &&
	    (0x12 == ((ccb_h->flags & CAM_CDB_POINTER) ?
		  csio->cdb_io.cdb_ptr[0] : csio->cdb_io.cdb_bytes[0]))) {
		if ((tp->usrflags & SYM_SCAN_BOOT_DISABLED) ||
		    ((tp->usrflags & SYM_SCAN_LUNS_DISABLED) &&
		     ccb_h->target_lun != 0)) {
			tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED;
			sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE);
			return;
		}
	}

	/*
	 * Get a control block for this IO.
	 * `tmp' tells sym_get_ccb whether a tag is wanted.
	 */
	tmp = ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0);
	cp = sym_get_ccb(np, ccb_h->target_id, ccb_h->target_lun, tmp);
	if (!cp) {
		sym_xpt_done2(np, ccb, CAM_RESRC_UNAVAIL);
		return;
	}

	/*
	 * Keep track of the IO in our CCB.
	 */
	cp->cam_ccb = ccb;

	/*
	 * Build the IDENTIFY message.
	 * Bit 0x40 grants the disconnect privilege.
	 */
	idmsg = M_IDENTIFY | cp->lun;
	if (cp->tag != NO_TAG || (lp && (lp->current_flags & SYM_DISC_ENABLED)))
		idmsg |= 0x40;

	msgptr = cp->scsi_smsg;
	msglen = 0;
	msgptr[msglen++] = idmsg;

	/*
	 * Build the tag message if present.
	 */
	if (cp->tag != NO_TAG) {
		u_char order = csio->tag_action;

		switch(order) {
		case M_ORDERED_TAG:
			break;
		case M_HEAD_TAG:
			break;
		default:
			order = M_SIMPLE_TAG;
		}
		msgptr[msglen++] = order;

		/*
		 * For less than 128 tags, actual tags are numbered
		 * 1,3,5,..2*MAXTAGS+1, since we may have to deal
		 * with devices that have problems with #TAG 0 or too
		 * great #TAG numbers. For more tags (up to 256),
		 * we use directly our tag number.
		 */
#if SYM_CONF_MAX_TASK > (512/4)
		msgptr[msglen++] = cp->tag;
#else
		msgptr[msglen++] = (cp->tag << 1) + 1;
#endif
	}

	/*
	 * Build a negotiation message if needed.
	 * (nego_status is filled by sym_prepare_nego())
	 */
	cp->nego_status = 0;
	if (tp->tinfo.current.width   != tp->tinfo.goal.width  ||
	    tp->tinfo.current.period  != tp->tinfo.goal.period ||
	    tp->tinfo.current.offset  != tp->tinfo.goal.offset ||
	    tp->tinfo.current.options != tp->tinfo.goal.options) {
		if (!tp->nego_cp && lp)
			msglen += sym_prepare_nego(np, cp, 0, msgptr + msglen);
	}

	/*
	 * Fill in our ccb
	 */

	/*
	 * Startqueue
	 */
	cp->phys.head.go.start   = cpu_to_scr(SCRIPTA_BA (np, select));
	cp->phys.head.go.restart = cpu_to_scr(SCRIPTA_BA (np, resel_dsa));

	/*
	 * select
	 */
	cp->phys.select.sel_id		= cp->target;
	cp->phys.select.sel_scntl3	= tp->head.wval;
	cp->phys.select.sel_sxfer	= tp->head.sval;
	cp->phys.select.sel_scntl4	= tp->head.uval;

	/*
	 * message
	 */
	cp->phys.smsg.addr	= cpu_to_scr(CCB_BA (cp, scsi_smsg));
	cp->phys.smsg.size	= cpu_to_scr(msglen);

	/*
	 * command
	 */
	if (sym_setup_cdb(np, csio, cp) < 0) {
		sym_xpt_done(np, ccb, cp);
		sym_free_ccb(np, cp);
		return;
	}

	/*
	 * status
	 */
#if	0	/* Provision */
	cp->actualquirks	= tp->quirks;
#endif
	cp->actualquirks	= SYM_QUIRK_AUTOSAVE;
	cp->host_status		= cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
	cp->ssss_status		= S_ILLEGAL;
	cp->xerr_status		= 0;
	cp->host_flags		= 0;
	cp->extra_bytes		= 0;

	/*
	 * extreme data pointer.
	 * shall be positive, so -1 is lower than lowest.:)
	 */
	cp->ext_sg  = -1;
	cp->ext_ofs = 0;

	/*
	 * Build the data descriptor block
	 * and start the IO.
	 */
	sym_setup_data_and_start(np, csio, cp);
}
7599
7600 /*
7601 * Setup buffers and pointers that address the CDB.
7602 * I bet, physical CDBs will never be used on the planet,
7603 * since they can be bounced without significant overhead.
7604 */
static int sym_setup_cdb(hcb_p np, struct ccb_scsiio *csio, ccb_p cp)
{
	struct ccb_hdr *ccb_h;
	u32	cmd_ba;
	int	cmd_len;

	SYM_LOCK_ASSERT(MA_OWNED);

	ccb_h = &csio->ccb_h;

	/*
	 * CDB is 16 bytes max.
	 * Returns 0 on success, -1 with the CAM status already
	 * set to CAM_REQ_INVALID on failure.
	 */
	if (csio->cdb_len > sizeof(cp->cdb_buf)) {
		sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
		return -1;
	}
	cmd_len = csio->cdb_len;

	if (ccb_h->flags & CAM_CDB_POINTER) {
		/* CDB is a pointer */
		if (!(ccb_h->flags & CAM_CDB_PHYS)) {
			/* CDB pointer is virtual */
			memcpy(cp->cdb_buf, csio->cdb_io.cdb_ptr, cmd_len);
			cmd_ba = CCB_BA (cp, cdb_buf[0]);
		} else {
			/* CDB pointer is physical: not supported. */
#if 0
			cmd_ba = ((u32)csio->cdb_io.cdb_ptr) & 0xffffffff;
#else
			sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
			return -1;
#endif
		}
	} else {
		/* CDB is in the CAM ccb (buffer) */
		memcpy(cp->cdb_buf, csio->cdb_io.cdb_bytes, cmd_len);
		cmd_ba = CCB_BA (cp, cdb_buf[0]);
	}

	cp->phys.cmd.addr	= cpu_to_scr(cmd_ba);
	cp->phys.cmd.size	= cpu_to_scr(cmd_len);

	return 0;
}
7650
7651 /*
7652 * Set up data pointers used by SCRIPTS.
7653 */
static void __inline
sym_setup_data_pointers(hcb_p np, ccb_p cp, int dir)
{
	u32 lastp, goalp;

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 * No segments means no data.
	 */
	if (!cp->segments)
		dir = CAM_DIR_NONE;

	/*
	 * Set the data pointer.
	 * `goalp' is where the data script ends; `lastp' is
	 * backed off by 8 bytes (one script instruction) per
	 * scatter segment, so SCRIPTS executes exactly
	 * cp->segments move instructions.
	 */
	switch(dir) {
	case CAM_DIR_OUT:
		goalp = SCRIPTA_BA (np, data_out2) + 8;
		lastp = goalp - 8 - (cp->segments * (2*4));
		break;
	case CAM_DIR_IN:
		cp->host_flags |= HF_DATA_IN;
		goalp = SCRIPTA_BA (np, data_in2) + 8;
		lastp = goalp - 8 - (cp->segments * (2*4));
		break;
	case CAM_DIR_NONE:
	default:
		lastp = goalp = SCRIPTB_BA (np, no_data);
		break;
	}

	cp->phys.head.lastp = cpu_to_scr(lastp);
	cp->phys.head.goalp = cpu_to_scr(goalp);
	cp->phys.head.savep = cpu_to_scr(lastp);
	cp->startp	    = cp->phys.head.savep;
}
7691
7692 /*
7693 * Call back routine for the DMA map service.
7694 * If bounce buffers are used (why ?), we may sleep and then
7695 * be called there in another context.
7696 */
static void
sym_execute_ccb(void *arg, bus_dma_segment_t *psegs, int nsegs, int error)
{
	ccb_p	cp;
	hcb_p	np;
	union	ccb *ccb;

	cp  = (ccb_p) arg;
	ccb = cp->cam_ccb;
	np  = (hcb_p) cp->arg;

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 * Deal with weird races: the CCB may already have
	 * been completed or aborted while the DMA map load
	 * was deferred.
	 */
	if (sym_get_cam_status(ccb) != CAM_REQ_INPROG)
		goto out_abort;

	/*
	 * Deal with weird errors.
	 */
	if (error) {
		cp->dmamapped = 0;
		sym_set_cam_status(cp->cam_ccb, CAM_REQ_ABORTED);
		goto out_abort;
	}

	/*
	 * Build the data descriptor for the chip.
	 */
	if (nsegs) {
		int retv;
		/* 896 rev 1 requires to be careful about boundaries */
		if (np->device_id == PCI_ID_SYM53C896 && np->revision_id <= 1)
			retv = sym_scatter_sg_physical(np, cp, psegs, nsegs);
		else
			retv = sym_fast_scatter_sg_physical(np,cp, psegs,nsegs);
		if (retv < 0) {
			sym_set_cam_status(cp->cam_ccb, CAM_REQ_TOO_BIG);
			goto out_abort;
		}
	}

	/*
	 * Synchronize the DMA map only if we have
	 * actually mapped the data.
	 */
	if (cp->dmamapped) {
		bus_dmamap_sync(np->data_dmat, cp->dmamap,
		    (cp->dmamapped == SYM_DMA_READ ?
			BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
	}

	/*
	 * Set host status to busy state.
	 * May have been set back to HS_WAIT to avoid a race.
	 */
	cp->host_status	= cp->nego_status ? HS_NEGOTIATE : HS_BUSY;

	/*
	 * Set data pointers.
	 */
	sym_setup_data_pointers(np, cp, (ccb->ccb_h.flags & CAM_DIR_MASK));

	/*
	 * Enqueue this IO in our pending queue.
	 */
	sym_enqueue_cam_ccb(cp);

	/*
	 * When `#ifed 1', the code below makes the driver
	 * panic on the first attempt to write to a SCSI device.
	 * It is the first test we want to do after a driver
	 * change that does not seem obviously safe. :)
	 */
#if 0
	switch (cp->cdb_buf[0]) {
	case 0x0A: case 0x2A: case 0xAA:
		panic("XXXXXXXXXXXXX WRITE NOT YET ALLOWED XXXXXXXXXXXXXX\n");
		MDELAY(10000);
		break;
	default:
		break;
	}
#endif
	/*
	 * Activate this job.
	 */
	sym_put_start_queue(np, cp);
	return;
out_abort:
	sym_xpt_done(np, ccb, cp);
	sym_free_ccb(np, cp);
}
7792
7793 /*
7794 * How complex it gets to deal with the data in CAM.
7795 * The Bus Dma stuff makes things still more complex.
7796 */
static void
sym_setup_data_and_start(hcb_p np, struct ccb_scsiio *csio, ccb_p cp)
{
	struct ccb_hdr *ccb_h;
	int dir, retv;

	SYM_LOCK_ASSERT(MA_OWNED);

	ccb_h = &csio->ccb_h;

	/*
	 * Now deal with the data.
	 */
	cp->data_len = csio->dxfer_len;
	cp->arg      = np;

	/*
	 * No direction means no data.
	 */
	dir = (ccb_h->flags & CAM_DIR_MASK);
	if (dir == CAM_DIR_NONE) {
		sym_execute_ccb(cp, NULL, 0, 0);
		return;
	}

	cp->dmamapped = (dir == CAM_DIR_IN) ?  SYM_DMA_READ : SYM_DMA_WRITE;
	/*
	 * Load the DMA map; sym_execute_ccb() is invoked as the
	 * callback, possibly deferred (EINPROGRESS), in which
	 * case the SIM queue is frozen until the load completes.
	 */
	retv = bus_dmamap_load_ccb(np->data_dmat, cp->dmamap,
			       (union ccb *)csio, sym_execute_ccb, cp, 0);
	if (retv == EINPROGRESS) {
		cp->host_status	= HS_WAIT;
		xpt_freeze_simq(np->sim, 1);
		csio->ccb_h.status |= CAM_RELEASE_SIMQ;
	}
}
7831
7832 /*
7833 * Move the scatter list to our data block.
7834 */
7835 static int
7836 sym_fast_scatter_sg_physical(hcb_p np, ccb_p cp,
7837 bus_dma_segment_t *psegs, int nsegs)
7838 {
7839 struct sym_tblmove *data;
7840 bus_dma_segment_t *psegs2;
7841
7842 SYM_LOCK_ASSERT(MA_OWNED);
7843
7844 if (nsegs > SYM_CONF_MAX_SG)
7845 return -1;
7846
7847 data = &cp->phys.data[SYM_CONF_MAX_SG-1];
7848 psegs2 = &psegs[nsegs-1];
7849 cp->segments = nsegs;
7850
7851 while (1) {
7852 data->addr = cpu_to_scr(psegs2->ds_addr);
7853 data->size = cpu_to_scr(psegs2->ds_len);
7854 if (DEBUG_FLAGS & DEBUG_SCATTER) {
7855 device_printf(np->device,
7856 "scatter: paddr=%lx len=%ld\n",
7857 (long)psegs2->ds_addr, (long)psegs2->ds_len);
7858 }
7859 if (psegs2 != psegs) {
7860 --data;
7861 --psegs2;
7862 continue;
7863 }
7864 break;
7865 }
7866 return 0;
7867 }
7868
/*
 * Scatter a SG list with physical addresses into bus addressable chunks.
 *
 * Walks the segment list backwards, splitting each segment at
 * SYM_CONF_DMA_BOUNDARY so no emitted chunk crosses such a boundary,
 * and packs the chunks at the tail of cp->phys.data[].  Returns 0 on
 * success, -1 when more than SYM_CONF_MAX_SG chunks would be needed.
 */
static int
sym_scatter_sg_physical(hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs)
{
	u_long ps, pe, pn;	/* current chunk start/end/next-cut addresses */
	u_long k;		/* size of the chunk being emitted */
	int s, t;		/* table slot index / source segment index */

	SYM_LOCK_ASSERT(MA_OWNED);

	/* Start from the last input segment and the last table slot. */
	s = SYM_CONF_MAX_SG - 1;
	t = nsegs - 1;
	ps = psegs[t].ds_addr;
	pe = ps + psegs[t].ds_len;

	while (s >= 0) {
		/*
		 * Cut at the highest DMA boundary strictly below `pe',
		 * but never before the start of the segment.
		 */
		pn = rounddown2(pe - 1, SYM_CONF_DMA_BOUNDARY);
		if (pn <= ps)
			pn = ps;
		k = pe - pn;
		if (DEBUG_FLAGS & DEBUG_SCATTER) {
			device_printf(np->device,
			    "scatter: paddr=%lx len=%ld\n", pn, k);
		}
		cp->phys.data[s].addr = cpu_to_scr(pn);
		cp->phys.data[s].size = cpu_to_scr(k);
		--s;
		if (pn == ps) {
			/* Segment fully consumed: advance to previous one. */
			if (--t < 0)
				break;
			ps = psegs[t].ds_addr;
			pe = ps + psegs[t].ds_len;
		}
		else
			pe = pn;	/* more of this segment remains */
	}

	/* Number of chunks actually stored in the table. */
	cp->segments = SYM_CONF_MAX_SG - 1 - s;

	/* t >= 0 means we ran out of slots before consuming every segment. */
	return t >= 0 ? -1 : 0;
}
7912
/*
 * SIM action for non performance critical stuff.
 *
 * Handles every CAM function code other than SCSI I/O: transfer
 * negotiation get/set, path inquiry, geometry, abort and resets.
 * Called with the SIM lock held.
 */
static void sym_action2(struct cam_sim *sim, union ccb *ccb)
{
	union ccb *abort_ccb;
	struct ccb_hdr *ccb_h;
	struct ccb_pathinq *cpi;
	struct ccb_trans_settings *cts;
	struct sym_trans *tip;
	hcb_p np;
	tcb_p tp;
	lcb_p lp;
	u_char dflags;

	/*
	 * Retrieve our controller data structure.
	 */
	np = (hcb_p) cam_sim_softc(sim);

	SYM_LOCK_ASSERT(MA_OWNED);

	ccb_h = &ccb->ccb_h;

	switch (ccb_h->func_code) {
	case XPT_SET_TRAN_SETTINGS:
		cts = &ccb->cts;
		tp = &np->target[ccb_h->target_id];

		/*
		 * Update SPI transport settings in TARGET control block.
		 * Update SCSI device settings in LUN control block.
		 */
		lp = sym_lp(tp, ccb_h->target_lun);
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			sym_update_trans(np, &tp->tinfo.goal, cts);
			if (lp)
				sym_update_dflags(np, &lp->current_flags, cts);
		}
		if (cts->type == CTS_TYPE_USER_SETTINGS) {
			sym_update_trans(np, &tp->tinfo.user, cts);
			if (lp)
				sym_update_dflags(np, &lp->user_flags, cts);
		}

		sym_xpt_done2(np, ccb, CAM_REQ_CMP);
		break;
	case XPT_GET_TRAN_SETTINGS:
		cts = &ccb->cts;
		tp = &np->target[ccb_h->target_id];
		lp = sym_lp(tp, ccb_h->target_lun);

#define	cts__scsi (&cts->proto_specific.scsi)
#define	cts__spi  (&cts->xport_specific.spi)
		/* Pick current vs user settings; fall back to target
		 * user flags when no LUN control block exists yet. */
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			tip = &tp->tinfo.current;
			dflags = lp ? lp->current_flags : 0;
		}
		else {
			tip = &tp->tinfo.user;
			dflags = lp ? lp->user_flags : tp->usrflags;
		}

		cts->protocol = PROTO_SCSI;
		cts->transport = XPORT_SPI;
		cts->protocol_version = tip->scsi_version;
		cts->transport_version = tip->spi_version;

		cts__spi->sync_period = tip->period;
		cts__spi->sync_offset = tip->offset;
		cts__spi->bus_width = tip->width;
		cts__spi->ppr_options = tip->options;

		cts__spi->valid = CTS_SPI_VALID_SYNC_RATE
		                | CTS_SPI_VALID_SYNC_OFFSET
		                | CTS_SPI_VALID_BUS_WIDTH
		                | CTS_SPI_VALID_PPR_OPTIONS;

		cts__spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
		if (dflags & SYM_DISC_ENABLED)
			cts__spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
		cts__spi->valid |= CTS_SPI_VALID_DISC;

		cts__scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
		if (dflags & SYM_TAGS_ENABLED)
			cts__scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
		cts__scsi->valid |= CTS_SCSI_VALID_TQ;
#undef	cts__spi
#undef	cts__scsi
		sym_xpt_done2(np, ccb, CAM_REQ_CMP);
		break;
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		sym_xpt_done2(np, ccb, CAM_REQ_CMP);
		break;
	case XPT_PATH_INQ:
		cpi = &ccb->cpi;
		cpi->version_num = 1;
		cpi->hba_inquiry = PI_MDP_ABLE|PI_SDTR_ABLE|PI_TAG_ABLE;
		if ((np->features & FE_WIDE) != 0)
			cpi->hba_inquiry |= PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_UNMAPPED;
		if (np->usrflags & SYM_SCAN_TARGETS_HILO)
			cpi->hba_misc |= PIM_SCANHILO;
		if (np->usrflags & SYM_AVOID_BUS_RESET)
			cpi->hba_misc |= PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = (np->features & FE_WIDE) ? 15 : 7;
		/* Semantic note: LUN number max = max number of LUNs - 1 */
		cpi->max_lun = SYM_CONF_MAX_LUN-1;
		if (SYM_SETUP_MAX_LUN < SYM_CONF_MAX_LUN)
			cpi->max_lun = SYM_SETUP_MAX_LUN-1;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->initiator_id = np->myaddr;
		cpi->base_transfer_speed = 3300;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Symbios", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);

		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST;
		if (np->features & FE_ULTRA3) {
			cpi->transport_version = 3;
			cpi->xport_specific.spi.ppr_options =
			    SID_SPI_CLOCK_DT_ST;
		}
		cpi->maxio = SYM_CONF_MAX_SG * PAGE_SIZE;
		sym_xpt_done2(np, ccb, CAM_REQ_CMP);
		break;
	case XPT_ABORT:
		abort_ccb = ccb->cab.abort_ccb;
		switch(abort_ccb->ccb_h.func_code) {
		case XPT_SCSI_IO:
			if (sym_abort_scsiio(np, abort_ccb, 0) == 0) {
				sym_xpt_done2(np, ccb, CAM_REQ_CMP);
				break;
			}
			/* FALLTHROUGH -- abort failed, report UA_ABORT */
		default:
			sym_xpt_done2(np, ccb, CAM_UA_ABORT);
			break;
		}
		break;
	case XPT_RESET_DEV:
		sym_reset_dev(np, ccb);
		break;
	case XPT_RESET_BUS:
		sym_reset_scsi_bus(np, 0);
		if (sym_verbose) {
			xpt_print_path(np->path);
			printf("SCSI BUS reset delivered.\n");
		}
		/* Re-initialize the chip after the bus reset. */
		sym_init (np, 1);
		sym_xpt_done2(np, ccb, CAM_REQ_CMP);
		break;
	case XPT_TERM_IO:
	default:
		sym_xpt_done2(np, ccb, CAM_REQ_INVALID);
		break;
	}
}
8078
8079 /*
8080 * Asynchronous notification handler.
8081 */
8082 static void
8083 sym_async(void *cb_arg, u32 code, struct cam_path *path, void *args __unused)
8084 {
8085 hcb_p np;
8086 struct cam_sim *sim;
8087 u_int tn;
8088 tcb_p tp;
8089
8090 sim = (struct cam_sim *) cb_arg;
8091 np = (hcb_p) cam_sim_softc(sim);
8092
8093 SYM_LOCK_ASSERT(MA_OWNED);
8094
8095 switch (code) {
8096 case AC_LOST_DEVICE:
8097 tn = xpt_path_target_id(path);
8098 if (tn >= SYM_CONF_MAX_TARGET)
8099 break;
8100
8101 tp = &np->target[tn];
8102
8103 tp->to_reset = 0;
8104 tp->head.sval = 0;
8105 tp->head.wval = np->rv_scntl3;
8106 tp->head.uval = 0;
8107
8108 tp->tinfo.current.period = tp->tinfo.goal.period = 0;
8109 tp->tinfo.current.offset = tp->tinfo.goal.offset = 0;
8110 tp->tinfo.current.width = tp->tinfo.goal.width = BUS_8_BIT;
8111 tp->tinfo.current.options = tp->tinfo.goal.options = 0;
8112
8113 break;
8114 default:
8115 break;
8116 }
8117 }
8118
/*
 * Update transfer settings of a target.
 *
 * Copies the fields flagged valid in `cts' into `tip', then clamps the
 * result successively against driver configuration limits, the actual
 * controller bus width, DT capability, and controller sync limits.
 */
static void sym_update_trans(hcb_p np, struct sym_trans *tip,
			    struct ccb_trans_settings *cts)
{

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 * Update the infos.
	 */
#define cts__spi (&cts->xport_specific.spi)
	if ((cts__spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
		tip->width = cts__spi->bus_width;
	if ((cts__spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)
		tip->offset = cts__spi->sync_offset;
	if ((cts__spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
		tip->period = cts__spi->sync_period;
	if ((cts__spi->valid & CTS_SPI_VALID_PPR_OPTIONS) != 0)
		tip->options = (cts__spi->ppr_options & PPR_OPT_DT);
	if (cts->protocol_version != PROTO_VERSION_UNSPECIFIED &&
	    cts->protocol_version != PROTO_VERSION_UNKNOWN)
		tip->scsi_version = cts->protocol_version;
	if (cts->transport_version != XPORT_VERSION_UNSPECIFIED &&
	    cts->transport_version != XPORT_VERSION_UNKNOWN)
		tip->spi_version = cts->transport_version;
#undef cts__spi
	/*
	 * Scale against driver configuration limits.
	 */
	if (tip->width > SYM_SETUP_MAX_WIDE) tip->width = SYM_SETUP_MAX_WIDE;
	if (tip->period && tip->offset) {
		if (tip->offset > SYM_SETUP_MAX_OFFS) tip->offset = SYM_SETUP_MAX_OFFS;
		if (tip->period < SYM_SETUP_MIN_SYNC) tip->period = SYM_SETUP_MIN_SYNC;
	} else {
		/* Synchronous transfer requires both period and offset. */
		tip->offset = 0;
		tip->period = 0;
	}

	/*
	 * Scale against actual controller BUS width.
	 */
	if (tip->width > np->maxwide)
		tip->width = np->maxwide;

	/*
	 * Only accept DT if controller supports and SYNC/WIDE asked.
	 */
	if (!((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) ||
	    !(tip->width == BUS_16_BIT && tip->offset)) {
		tip->options &= ~PPR_OPT_DT;
	}

	/*
	 * Scale period factor and offset against controller limits.
	 * DT and ST modes have distinct min/max bounds.
	 */
	if (tip->offset && tip->period) {
		if (tip->options & PPR_OPT_DT) {
			if (tip->period < np->minsync_dt)
				tip->period = np->minsync_dt;
			if (tip->period > np->maxsync_dt)
				tip->period = np->maxsync_dt;
			if (tip->offset > np->maxoffs_dt)
				tip->offset = np->maxoffs_dt;
		}
		else {
			if (tip->period < np->minsync)
				tip->period = np->minsync;
			if (tip->period > np->maxsync)
				tip->period = np->maxsync;
			if (tip->offset > np->maxoffs)
				tip->offset = np->maxoffs;
		}
	}
}
8195
8196 /*
8197 * Update flags for a device (logical unit).
8198 */
8199 static void
8200 sym_update_dflags(hcb_p np, u_char *flags, struct ccb_trans_settings *cts)
8201 {
8202
8203 SYM_LOCK_ASSERT(MA_OWNED);
8204
8205 #define cts__scsi (&cts->proto_specific.scsi)
8206 #define cts__spi (&cts->xport_specific.spi)
8207 if ((cts__spi->valid & CTS_SPI_VALID_DISC) != 0) {
8208 if ((cts__spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
8209 *flags |= SYM_DISC_ENABLED;
8210 else
8211 *flags &= ~SYM_DISC_ENABLED;
8212 }
8213
8214 if ((cts__scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
8215 if ((cts__scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
8216 *flags |= SYM_TAGS_ENABLED;
8217 else
8218 *flags &= ~SYM_TAGS_ENABLED;
8219 }
8220 #undef cts__spi
8221 #undef cts__scsi
8222 }
8223
/*============= DRIVER INITIALISATION ==================*/

/* Newbus device interface methods for the sym PCI driver. */
static device_method_t sym_pci_methods[] = {
	DEVMETHOD(device_probe,	 sym_pci_probe),
	DEVMETHOD(device_attach, sym_pci_attach),
	DEVMETHOD(device_detach, sym_pci_detach),
	DEVMETHOD_END
};
8232
/*
 * Driver declaration.  Softc size is 1 because the driver allocates
 * its own HCB and installs it with device_set_softc() in attach.
 */
static driver_t sym_pci_driver = {
	"sym",
	sym_pci_methods,
	1	/* no softc */
};

DRIVER_MODULE(sym, pci, sym_pci_driver, NULL, NULL);
MODULE_DEPEND(sym, cam, 1, 1, 1);
MODULE_DEPEND(sym, pci, 1, 1, 1);
8242
/*
 * Table of supported chips, keyed by PCI device ID and revision.
 * For a given device ID, entries MUST stay ordered by ascending
 * revision cap: sym_find_pci_chip() returns the first entry whose
 * revision_id is >= the probed chip's revision.
 */
static const struct sym_pci_chip sym_pci_dev_table[] = {
 {PCI_ID_SYM53C810, 0x0f, "810", 4, 8, 4, 64,
 FE_ERL}
 ,
#ifdef SYM_DEBUG_GENERIC_SUPPORT
 {PCI_ID_SYM53C810, 0xff, "810a", 4, 8, 4, 1,
 FE_BOF}
 ,
#else
 {PCI_ID_SYM53C810, 0xff, "810a", 4, 8, 4, 1,
 FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF}
 ,
#endif
 {PCI_ID_SYM53C815, 0xff, "815", 4, 8, 4, 64,
 FE_BOF|FE_ERL}
 ,
 {PCI_ID_SYM53C825, 0x0f, "825", 6, 8, 4, 64,
 FE_WIDE|FE_BOF|FE_ERL|FE_DIFF}
 ,
 {PCI_ID_SYM53C825, 0xff, "825a", 6, 8, 4, 2,
 FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM|FE_DIFF}
 ,
 {PCI_ID_SYM53C860, 0xff, "860", 4, 8, 5, 1,
 FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN}
 ,
 {PCI_ID_SYM53C875, 0x01, "875", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_CLK80|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF}
 ,
 {PCI_ID_SYM53C875, 0xff, "875", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF}
 ,
 {PCI_ID_SYM53C875_2, 0xff, "875", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF}
 ,
 {PCI_ID_SYM53C885, 0xff, "885", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF}
 ,
#ifdef SYM_DEBUG_GENERIC_SUPPORT
 {PCI_ID_SYM53C895, 0xff, "895", 6, 31, 7, 2,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|
 FE_RAM|FE_LCKFRQ}
 ,
#else
 {PCI_ID_SYM53C895, 0xff, "895", 6, 31, 7, 2,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_LCKFRQ}
 ,
#endif
 {PCI_ID_SYM53C896, 0xff, "896", 6, 31, 7, 4,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
 ,
 {PCI_ID_SYM53C895A, 0xff, "895a", 6, 31, 7, 4,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
 ,
 {PCI_ID_LSI53C1010, 0x00, "1010-33", 6, 31, 7, 8,
 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC|
 FE_C10}
 ,
 {PCI_ID_LSI53C1010, 0xff, "1010-33", 6, 31, 7, 8,
 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC|
 FE_C10|FE_U3EN}
 ,
 {PCI_ID_LSI53C1010_2, 0xff, "1010-66", 6, 31, 7, 8,
 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_66MHZ|FE_CRC|
 FE_C10|FE_U3EN}
 ,
 {PCI_ID_LSI53C1510D, 0xff, "1510d", 6, 31, 7, 4,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_IO256|FE_LEDC}
};
8322
8323 /*
8324 * Look up the chip table.
8325 *
8326 * Return a pointer to the chip entry if found,
8327 * zero otherwise.
8328 */
8329 static const struct sym_pci_chip *
8330 sym_find_pci_chip(device_t dev)
8331 {
8332 const struct sym_pci_chip *chip;
8333 int i;
8334 u_short device_id;
8335 u_char revision;
8336
8337 if (pci_get_vendor(dev) != PCI_VENDOR_NCR)
8338 return NULL;
8339
8340 device_id = pci_get_device(dev);
8341 revision = pci_get_revid(dev);
8342
8343 for (i = 0; i < nitems(sym_pci_dev_table); i++) {
8344 chip = &sym_pci_dev_table[i];
8345 if (device_id != chip->device_id)
8346 continue;
8347 if (revision > chip->revision_id)
8348 continue;
8349 return chip;
8350 }
8351
8352 return NULL;
8353 }
8354
8355 /*
8356 * Tell upper layer if the chip is supported.
8357 */
8358 static int
8359 sym_pci_probe(device_t dev)
8360 {
8361 const struct sym_pci_chip *chip;
8362
8363 chip = sym_find_pci_chip(dev);
8364 if (chip && sym_find_firmware(chip)) {
8365 device_set_desc(dev, chip->name);
8366 return BUS_PROBE_DEFAULT;
8367 }
8368 return ENXIO;
8369 }
8370
8371 /*
8372 * Attach a sym53c8xx device.
8373 */
8374 static int
8375 sym_pci_attach(device_t dev)
8376 {
8377 const struct sym_pci_chip *chip;
8378 u_short command;
8379 u_char cachelnsz;
8380 struct sym_hcb *np = NULL;
8381 struct sym_nvram nvram;
8382 const struct sym_fw *fw = NULL;
8383 int i;
8384 bus_dma_tag_t bus_dmat;
8385
8386 bus_dmat = bus_get_dma_tag(dev);
8387
8388 /*
8389 * Only probed devices should be attached.
8390 * We just enjoy being paranoid. :)
8391 */
8392 chip = sym_find_pci_chip(dev);
8393 if (chip == NULL || (fw = sym_find_firmware(chip)) == NULL)
8394 return (ENXIO);
8395
8396 /*
8397 * Allocate immediately the host control block,
8398 * since we are only expecting to succeed. :)
8399 * We keep track in the HCB of all the resources that
8400 * are to be released on error.
8401 */
8402 np = __sym_calloc_dma(bus_dmat, sizeof(*np), "HCB");
8403 if (np)
8404 np->bus_dmat = bus_dmat;
8405 else
8406 return (ENXIO);
8407 device_set_softc(dev, np);
8408
8409 SYM_LOCK_INIT();
8410
8411 /*
8412 * Copy some useful infos to the HCB.
8413 */
8414 np->hcb_ba = vtobus(np);
8415 np->verbose = bootverbose;
8416 np->device = dev;
8417 np->device_id = pci_get_device(dev);
8418 np->revision_id = pci_get_revid(dev);
8419 np->features = chip->features;
8420 np->clock_divn = chip->nr_divisor;
8421 np->maxoffs = chip->offset_max;
8422 np->maxburst = chip->burst_max;
8423 np->scripta_sz = fw->a_size;
8424 np->scriptb_sz = fw->b_size;
8425 np->fw_setup = fw->setup;
8426 np->fw_patch = fw->patch;
8427 np->fw_name = fw->name;
8428
8429 #ifdef __amd64__
8430 np->target = sym_calloc_dma(SYM_CONF_MAX_TARGET * sizeof(*(np->target)),
8431 "TARGET");
8432 if (!np->target)
8433 goto attach_failed;
8434 #endif
8435
8436 /*
8437 * Initialize the CCB free and busy queues.
8438 */
8439 sym_que_init(&np->free_ccbq);
8440 sym_que_init(&np->busy_ccbq);
8441 sym_que_init(&np->comp_ccbq);
8442 sym_que_init(&np->cam_ccbq);
8443
8444 /*
8445 * Allocate a tag for the DMA of user data.
8446 */
8447 if (bus_dma_tag_create(np->bus_dmat, 1, SYM_CONF_DMA_BOUNDARY,
8448 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
8449 BUS_SPACE_MAXSIZE_32BIT, SYM_CONF_MAX_SG, SYM_CONF_DMA_BOUNDARY,
8450 0, busdma_lock_mutex, &np->mtx, &np->data_dmat)) {
8451 device_printf(dev, "failed to create DMA tag.\n");
8452 goto attach_failed;
8453 }
8454
8455 /*
8456 * Read and apply some fix-ups to the PCI COMMAND
8457 * register. We want the chip to be enabled for:
8458 * - BUS mastering
8459 * - PCI parity checking (reporting would also be fine)
8460 * - Write And Invalidate.
8461 */
8462 command = pci_read_config(dev, PCIR_COMMAND, 2);
8463 command |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_PERRESPEN |
8464 PCIM_CMD_MWRICEN;
8465 pci_write_config(dev, PCIR_COMMAND, command, 2);
8466
8467 /*
8468 * Let the device know about the cache line size,
8469 * if it doesn't yet.
8470 */
8471 cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
8472 if (!cachelnsz) {
8473 cachelnsz = 8;
8474 pci_write_config(dev, PCIR_CACHELNSZ, cachelnsz, 1);
8475 }
8476
8477 /*
8478 * Alloc/get/map/retrieve everything that deals with MMIO.
8479 */
8480 i = SYM_PCI_MMIO;
8481 np->mmio_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i,
8482 RF_ACTIVE);
8483 if (!np->mmio_res) {
8484 device_printf(dev, "failed to allocate MMIO resources\n");
8485 goto attach_failed;
8486 }
8487 np->mmio_ba = rman_get_start(np->mmio_res);
8488
8489 /*
8490 * Allocate the IRQ.
8491 */
8492 i = 0;
8493 np->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i,
8494 RF_ACTIVE | RF_SHAREABLE);
8495 if (!np->irq_res) {
8496 device_printf(dev, "failed to allocate IRQ resource\n");
8497 goto attach_failed;
8498 }
8499
8500 #ifdef SYM_CONF_IOMAPPED
8501 /*
8502 * User want us to use normal IO with PCI.
8503 * Alloc/get/map/retrieve everything that deals with IO.
8504 */
8505 i = SYM_PCI_IO;
8506 np->io_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &i, RF_ACTIVE);
8507 if (!np->io_res) {
8508 device_printf(dev, "failed to allocate IO resources\n");
8509 goto attach_failed;
8510 }
8511
8512 #endif /* SYM_CONF_IOMAPPED */
8513
8514 /*
8515 * If the chip has RAM.
8516 * Alloc/get/map/retrieve the corresponding resources.
8517 */
8518 if (np->features & (FE_RAM|FE_RAM8K)) {
8519 int regs_id = SYM_PCI_RAM;
8520 if (np->features & FE_64BIT)
8521 regs_id = SYM_PCI_RAM64;
8522 np->ram_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
8523 ®s_id, RF_ACTIVE);
8524 if (!np->ram_res) {
8525 device_printf(dev,"failed to allocate RAM resources\n");
8526 goto attach_failed;
8527 }
8528 np->ram_id = regs_id;
8529 np->ram_ba = rman_get_start(np->ram_res);
8530 }
8531
8532 /*
8533 * Save setting of some IO registers, so we will
8534 * be able to probe specific implementations.
8535 */
8536 sym_save_initial_setting (np);
8537
8538 /*
8539 * Reset the chip now, since it has been reported
8540 * that SCSI clock calibration may not work properly
8541 * if the chip is currently active.
8542 */
8543 sym_chip_reset (np);
8544
8545 /*
8546 * Try to read the user set-up.
8547 */
8548 (void) sym_read_nvram(np, &nvram);
8549
8550 /*
8551 * Prepare controller and devices settings, according
8552 * to chip features, user set-up and driver set-up.
8553 */
8554 (void) sym_prepare_setting(np, &nvram);
8555
8556 /*
8557 * Check the PCI clock frequency.
8558 * Must be performed after prepare_setting since it destroys
8559 * STEST1 that is used to probe for the clock doubler.
8560 */
8561 i = sym_getpciclock(np);
8562 if (i > 37000)
8563 device_printf(dev, "PCI BUS clock seems too high: %u KHz.\n",i);
8564
8565 /*
8566 * Allocate the start queue.
8567 */
8568 np->squeue = (u32 *) sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"SQUEUE");
8569 if (!np->squeue)
8570 goto attach_failed;
8571 np->squeue_ba = vtobus(np->squeue);
8572
8573 /*
8574 * Allocate the done queue.
8575 */
8576 np->dqueue = (u32 *) sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"DQUEUE");
8577 if (!np->dqueue)
8578 goto attach_failed;
8579 np->dqueue_ba = vtobus(np->dqueue);
8580
8581 /*
8582 * Allocate the target bus address array.
8583 */
8584 np->targtbl = (u32 *) sym_calloc_dma(256, "TARGTBL");
8585 if (!np->targtbl)
8586 goto attach_failed;
8587 np->targtbl_ba = vtobus(np->targtbl);
8588
8589 /*
8590 * Allocate SCRIPTS areas.
8591 */
8592 np->scripta0 = sym_calloc_dma(np->scripta_sz, "SCRIPTA0");
8593 np->scriptb0 = sym_calloc_dma(np->scriptb_sz, "SCRIPTB0");
8594 if (!np->scripta0 || !np->scriptb0)
8595 goto attach_failed;
8596
8597 /*
8598 * Allocate the CCBs. We need at least ONE.
8599 */
8600 for (i = 0; sym_alloc_ccb(np) != NULL; i++)
8601 ;
8602 if (i < 1)
8603 goto attach_failed;
8604
8605 /*
8606 * Calculate BUS addresses where we are going
8607 * to load the SCRIPTS.
8608 */
8609 np->scripta_ba = vtobus(np->scripta0);
8610 np->scriptb_ba = vtobus(np->scriptb0);
8611 np->scriptb0_ba = np->scriptb_ba;
8612
8613 if (np->ram_ba) {
8614 np->scripta_ba = np->ram_ba;
8615 if (np->features & FE_RAM8K) {
8616 np->ram_ws = 8192;
8617 np->scriptb_ba = np->scripta_ba + 4096;
8618 #ifdef __LP64__
8619 np->scr_ram_seg = cpu_to_scr(np->scripta_ba >> 32);
8620 #endif
8621 }
8622 else
8623 np->ram_ws = 4096;
8624 }
8625
8626 /*
8627 * Copy scripts to controller instance.
8628 */
8629 memcpy(np->scripta0, fw->a_base, np->scripta_sz);
8630 memcpy(np->scriptb0, fw->b_base, np->scriptb_sz);
8631
8632 /*
8633 * Setup variable parts in scripts and compute
8634 * scripts bus addresses used from the C code.
8635 */
8636 np->fw_setup(np, fw);
8637
8638 /*
8639 * Bind SCRIPTS with physical addresses usable by the
8640 * SCRIPTS processor (as seen from the BUS = BUS addresses).
8641 */
8642 sym_fw_bind_script(np, (u32 *) np->scripta0, np->scripta_sz);
8643 sym_fw_bind_script(np, (u32 *) np->scriptb0, np->scriptb_sz);
8644
8645 #ifdef SYM_CONF_IARB_SUPPORT
8646 /*
8647 * If user wants IARB to be set when we win arbitration
8648 * and have other jobs, compute the max number of consecutive
8649 * settings of IARB hints before we leave devices a chance to
8650 * arbitrate for reselection.
8651 */
8652 #ifdef SYM_SETUP_IARB_MAX
8653 np->iarb_max = SYM_SETUP_IARB_MAX;
8654 #else
8655 np->iarb_max = 4;
8656 #endif
8657 #endif
8658
8659 /*
8660 * Prepare the idle and invalid task actions.
8661 */
8662 np->idletask.start = cpu_to_scr(SCRIPTA_BA (np, idle));
8663 np->idletask.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l));
8664 np->idletask_ba = vtobus(&np->idletask);
8665
8666 np->notask.start = cpu_to_scr(SCRIPTA_BA (np, idle));
8667 np->notask.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l));
8668 np->notask_ba = vtobus(&np->notask);
8669
8670 np->bad_itl.start = cpu_to_scr(SCRIPTA_BA (np, idle));
8671 np->bad_itl.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l));
8672 np->bad_itl_ba = vtobus(&np->bad_itl);
8673
8674 np->bad_itlq.start = cpu_to_scr(SCRIPTA_BA (np, idle));
8675 np->bad_itlq.restart = cpu_to_scr(SCRIPTB_BA (np,bad_i_t_l_q));
8676 np->bad_itlq_ba = vtobus(&np->bad_itlq);
8677
8678 /*
8679 * Allocate and prepare the lun JUMP table that is used
8680 * for a target prior the probing of devices (bad lun table).
8681 * A private table will be allocated for the target on the
8682 * first INQUIRY response received.
8683 */
8684 np->badluntbl = sym_calloc_dma(256, "BADLUNTBL");
8685 if (!np->badluntbl)
8686 goto attach_failed;
8687
8688 np->badlun_sa = cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun));
8689 for (i = 0 ; i < 64 ; i++) /* 64 luns/target, no less */
8690 np->badluntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa));
8691
8692 /*
8693 * Prepare the bus address array that contains the bus
8694 * address of each target control block.
8695 * For now, assume all logical units are wrong. :)
8696 */
8697 for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
8698 np->targtbl[i] = cpu_to_scr(vtobus(&np->target[i]));
8699 np->target[i].head.luntbl_sa =
8700 cpu_to_scr(vtobus(np->badluntbl));
8701 np->target[i].head.lun0_sa =
8702 cpu_to_scr(vtobus(&np->badlun_sa));
8703 }
8704
8705 /*
8706 * Now check the cache handling of the pci chipset.
8707 */
8708 if (sym_snooptest (np)) {
8709 device_printf(dev, "CACHE INCORRECTLY CONFIGURED.\n");
8710 goto attach_failed;
8711 }
8712
8713 /*
8714 * Now deal with CAM.
8715 * Hopefully, we will succeed with that one.:)
8716 */
8717 if (!sym_cam_attach(np))
8718 goto attach_failed;
8719
8720 /*
8721 * Sigh! we are done.
8722 */
8723 return 0;
8724
8725 /*
8726 * We have failed.
8727 * We will try to free all the resources we have
8728 * allocated, but if we are a boot device, this
8729 * will not help that much.;)
8730 */
8731 attach_failed:
8732 if (np)
8733 sym_pci_detach(dev);
8734 return ENXIO;
8735 }
8736
/*
 * Detach a device by freeing everything that has been allocated for it.
 *
 * Also used as the error-unwind path of sym_pci_attach(), so every
 * free is guarded: fields that were never allocated are still zero.
 * CAM is torn down first so no new I/O arrives during the teardown.
 */
static int
sym_pci_detach(device_t dev)
{
	hcb_p np;
	SYM_QUEHEAD *qp;
	ccb_p cp;
	tcb_p tp;
	lcb_p lp;
	int target, lun;

	np = device_get_softc(dev);

	/*
	 * First free CAM resources.
	 */
	sym_cam_free(np);

	/*
	 * Now everything should be quiet for us to
	 * free other resources.
	 */
	if (np->ram_res)
		bus_release_resource(np->device, SYS_RES_MEMORY,
		    np->ram_id, np->ram_res);
	if (np->mmio_res)
		bus_release_resource(np->device, SYS_RES_MEMORY,
		    SYM_PCI_MMIO, np->mmio_res);
	if (np->io_res)
		bus_release_resource(np->device, SYS_RES_IOPORT,
		    SYM_PCI_IO, np->io_res);
	if (np->irq_res)
		bus_release_resource(np->device, SYS_RES_IRQ,
		    0, np->irq_res);

	if (np->scriptb0)
		sym_mfree_dma(np->scriptb0, np->scriptb_sz, "SCRIPTB0");
	if (np->scripta0)
		sym_mfree_dma(np->scripta0, np->scripta_sz, "SCRIPTA0");
	if (np->squeue)
		sym_mfree_dma(np->squeue, sizeof(u32)*(MAX_QUEUE*2), "SQUEUE");
	if (np->dqueue)
		sym_mfree_dma(np->dqueue, sizeof(u32)*(MAX_QUEUE*2), "DQUEUE");

	/* Free every CCB and its DMA map / sense bounce buffer. */
	while ((qp = sym_remque_head(&np->free_ccbq)) != NULL) {
		cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		bus_dmamap_destroy(np->data_dmat, cp->dmamap);
		sym_mfree_dma(cp->sns_bbuf, SYM_SNS_BBUF_LEN, "SNS_BBUF");
		sym_mfree_dma(cp, sizeof(*cp), "CCB");
	}

	if (np->badluntbl)
		sym_mfree_dma(np->badluntbl, 256,"BADLUNTBL");

	/* Free per-LUN control blocks and tag state for every target. */
	for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) {
		tp = &np->target[target];
		for (lun = 0 ; lun < SYM_CONF_MAX_LUN ; lun++) {
			lp = sym_lp(tp, lun);
			if (!lp)
				continue;
			if (lp->itlq_tbl)
				sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4,
				    "ITLQ_TBL");
			if (lp->cb_tags)
				sym_mfree(lp->cb_tags, SYM_CONF_MAX_TASK,
				    "CB_TAGS");
			sym_mfree_dma(lp, sizeof(*lp), "LCB");
		}
#if SYM_CONF_MAX_LUN > 1
		if (tp->lunmp)
			sym_mfree(tp->lunmp, SYM_CONF_MAX_LUN*sizeof(lcb_p),
			    "LUNMP");
#endif
	}
#ifdef __amd64__
	if (np->target)
		sym_mfree_dma(np->target,
		    SYM_CONF_MAX_TARGET * sizeof(*(np->target)), "TARGET");
#endif
	if (np->targtbl)
		sym_mfree_dma(np->targtbl, 256, "TARGTBL");
	if (np->data_dmat)
		bus_dma_tag_destroy(np->data_dmat);
	if (SYM_LOCK_INITIALIZED() != 0)
		SYM_LOCK_DESTROY();
	device_set_softc(np->device, NULL);
	/* The HCB itself goes last: it holds the bookkeeping above. */
	sym_mfree_dma(np, sizeof(*np), "HCB");

	return (0);
}
8829
/*
 * Allocate CAM resources and register a bus to CAM.
 *
 * Returns 1 on success, 0 on failure (after sym_cam_free() has
 * released anything recorded into the HCB).
 */
static int sym_cam_attach(hcb_p np)
{
	struct cam_devq *devq = NULL;
	struct cam_sim *sim = NULL;
	struct cam_path *path = NULL;
	int err;

	/*
	 * Establish our interrupt handler.
	 */
	err = bus_setup_intr(np->device, np->irq_res,
	    INTR_ENTROPY | INTR_MPSAFE | INTR_TYPE_CAM,
	    NULL, sym_intr, np, &np->intr);
	if (err) {
		/*
		 * NOTE(review): this goto reaches `fail' before SYM_LOCK()
		 * is taken, yet the fail path calls SYM_UNLOCK() -- confirm
		 * the lock macros tolerate an unlock without a prior lock.
		 */
		device_printf(np->device, "bus_setup_intr() failed: %d\n",
		    err);
		goto fail;
	}

	/*
	 * Create the device queue for our sym SIM.
	 */
	devq = cam_simq_alloc(SYM_CONF_MAX_START);
	if (!devq)
		goto fail;

	/*
	 * Construct our SIM entry.
	 * NOTE(review): if cam_sim_alloc() fails, `devq' is not released
	 * on the fail path -- presumably a leak; verify against
	 * cam_simq_free()/cam_sim_alloc() ownership rules.
	 */
	sim = cam_sim_alloc(sym_action, sym_poll, "sym", np,
	    device_get_unit(np->device),
	    &np->mtx, 1, SYM_SETUP_MAX_TAG, devq);
	if (!sim)
		goto fail;

	SYM_LOCK();

	if (xpt_bus_register(sim, np->device, 0) != CAM_SUCCESS)
		goto fail;
	/* From here on the SIM belongs to the HCB (freed by sym_cam_free). */
	np->sim = sim;
	sim = NULL;

	if (xpt_create_path(&path, NULL,
	    cam_sim_path(np->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		goto fail;
	}
	np->path = path;

	/*
	 * Establish our async notification handler.
	 */
	if (xpt_register_async(AC_LOST_DEVICE, sym_async, np->sim, path) !=
	    CAM_REQ_CMP)
		goto fail;

	/*
	 * Start the chip now, without resetting the BUS, since
	 * it seems that this must stay under control of CAM.
	 * With LVD/SE capable chips and BUS in SE mode, we may
	 * get a spurious SMBC interrupt.
	 */
	sym_init (np, 0);

	SYM_UNLOCK();

	return 1;
fail:
	SYM_UNLOCK();

	/* Releases interrupt handler, path and SIM recorded in the HCB. */
	sym_cam_free(np);

	return 0;
}
8907
/*
 * Free everything that deals with CAM.
 *
 * Tears down the interrupt handler first (while unlocked), then the
 * wildcard path and the SIM under the SIM lock.  Safe to call with
 * some or all of the fields still NULL (partial attach).
 */
static void sym_cam_free(hcb_p np)
{

	SYM_LOCK_ASSERT(MA_NOTOWNED);

	if (np->intr) {
		bus_teardown_intr(np->device, np->irq_res, np->intr);
		np->intr = NULL;
	}

	SYM_LOCK();

	if (np->path) {
		/* Announce device loss before dropping the path. */
		xpt_async(AC_LOST_DEVICE, np->path, NULL);
		xpt_free_path(np->path);
		np->path = NULL;
	}
	if (np->sim) {
		xpt_bus_deregister(cam_sim_path(np->sim));
		/* free_devq TRUE also releases the devq attached to the SIM. */
		cam_sim_free(np->sim, /*free_devq*/ TRUE);
		np->sim = NULL;
	}

	SYM_UNLOCK();
}
8936
/*============ OPTIONAL NVRAM SUPPORT =================*/
8938
8939 /*
8940 * Get host setup from NVRAM.
8941 */
8942 static void sym_nvram_setup_host (hcb_p np, struct sym_nvram *nvram)
8943 {
8944 #ifdef SYM_CONF_NVRAM_SUPPORT
8945 /*
8946 * Get parity checking, host ID, verbose mode
8947 * and miscellaneous host flags from NVRAM.
8948 */
8949 switch(nvram->type) {
8950 case SYM_SYMBIOS_NVRAM:
8951 if (!(nvram->data.Symbios.flags & SYMBIOS_PARITY_ENABLE))
8952 np->rv_scntl0 &= ~0x0a;
8953 np->myaddr = nvram->data.Symbios.host_id & 0x0f;
8954 if (nvram->data.Symbios.flags & SYMBIOS_VERBOSE_MSGS)
8955 np->verbose += 1;
8956 if (nvram->data.Symbios.flags1 & SYMBIOS_SCAN_HI_LO)
8957 np->usrflags |= SYM_SCAN_TARGETS_HILO;
8958 if (nvram->data.Symbios.flags2 & SYMBIOS_AVOID_BUS_RESET)
8959 np->usrflags |= SYM_AVOID_BUS_RESET;
8960 break;
8961 case SYM_TEKRAM_NVRAM:
8962 np->myaddr = nvram->data.Tekram.host_id & 0x0f;
8963 break;
8964 default:
8965 break;
8966 }
8967 #endif
8968 }
8969
8970 /*
8971 * Get target setup from NVRAM.
8972 */
8973 #ifdef SYM_CONF_NVRAM_SUPPORT
8974 static void sym_Symbios_setup_target(hcb_p np,int target, Symbios_nvram *nvram);
8975 static void sym_Tekram_setup_target(hcb_p np,int target, Tekram_nvram *nvram);
8976 #endif
8977
8978 static void
8979 sym_nvram_setup_target (hcb_p np, int target, struct sym_nvram *nvp)
8980 {
8981 #ifdef SYM_CONF_NVRAM_SUPPORT
8982 switch(nvp->type) {
8983 case SYM_SYMBIOS_NVRAM:
8984 sym_Symbios_setup_target (np, target, &nvp->data.Symbios);
8985 break;
8986 case SYM_TEKRAM_NVRAM:
8987 sym_Tekram_setup_target (np, target, &nvp->data.Tekram);
8988 break;
8989 default:
8990 break;
8991 }
8992 #endif
8993 }
8994
8995 #ifdef SYM_CONF_NVRAM_SUPPORT
8996 /*
8997 * Get target set-up from Symbios format NVRAM.
8998 */
8999 static void
9000 sym_Symbios_setup_target(hcb_p np, int target, Symbios_nvram *nvram)
9001 {
9002 tcb_p tp = &np->target[target];
9003 Symbios_target *tn = &nvram->target[target];
9004
9005 tp->tinfo.user.period = tn->sync_period ? (tn->sync_period + 3) / 4 : 0;
9006 tp->tinfo.user.width = tn->bus_width == 0x10 ? BUS_16_BIT : BUS_8_BIT;
9007 tp->usrtags =
9008 (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? SYM_SETUP_MAX_TAG : 0;
9009
9010 if (!(tn->flags & SYMBIOS_DISCONNECT_ENABLE))
9011 tp->usrflags &= ~SYM_DISC_ENABLED;
9012 if (!(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME))
9013 tp->usrflags |= SYM_SCAN_BOOT_DISABLED;
9014 if (!(tn->flags & SYMBIOS_SCAN_LUNS))
9015 tp->usrflags |= SYM_SCAN_LUNS_DISABLED;
9016 }
9017
9018 /*
9019 * Get target set-up from Tekram format NVRAM.
9020 */
9021 static void
9022 sym_Tekram_setup_target(hcb_p np, int target, Tekram_nvram *nvram)
9023 {
9024 tcb_p tp = &np->target[target];
9025 struct Tekram_target *tn = &nvram->target[target];
9026 int i;
9027
9028 if (tn->flags & TEKRAM_SYNC_NEGO) {
9029 i = tn->sync_index & 0xf;
9030 tp->tinfo.user.period = Tekram_sync[i];
9031 }
9032
9033 tp->tinfo.user.width =
9034 (tn->flags & TEKRAM_WIDE_NEGO) ? BUS_16_BIT : BUS_8_BIT;
9035
9036 if (tn->flags & TEKRAM_TAGGED_COMMANDS) {
9037 tp->usrtags = 2 << nvram->max_tags_index;
9038 }
9039
9040 if (tn->flags & TEKRAM_DISCONNECT_ENABLE)
9041 tp->usrflags |= SYM_DISC_ENABLED;
9042
9043 /* If any device does not support parity, we will not use this option */
9044 if (!(tn->flags & TEKRAM_PARITY_CHECK))
9045 np->rv_scntl0 &= ~0x0a; /* SCSI parity checking disabled */
9046 }
9047
9048 #ifdef SYM_CONF_DEBUG_NVRAM
9049 /*
9050 * Dump Symbios format NVRAM for debugging purpose.
9051 */
9052 static void sym_display_Symbios_nvram(hcb_p np, Symbios_nvram *nvram)
9053 {
9054 int i;
9055
9056 /* display Symbios nvram host data */
9057 device_printf(np->device, "HOST ID=%d%s%s%s%s%s%s\n",
9058 nvram->host_id & 0x0f,
9059 (nvram->flags & SYMBIOS_SCAM_ENABLE) ? " SCAM" : "",
9060 (nvram->flags & SYMBIOS_PARITY_ENABLE) ? " PARITY" : "",
9061 (nvram->flags & SYMBIOS_VERBOSE_MSGS) ? " VERBOSE" : "",
9062 (nvram->flags & SYMBIOS_CHS_MAPPING) ? " CHS_ALT" : "",
9063 (nvram->flags2 & SYMBIOS_AVOID_BUS_RESET) ? " NO_RESET" : "",
9064 (nvram->flags1 & SYMBIOS_SCAN_HI_LO) ? " HI_LO" : "");
9065
9066 /* display Symbios nvram drive data */
9067 for (i = 0 ; i < 15 ; i++) {
9068 struct Symbios_target *tn = &nvram->target[i];
9069 printf("%s-%d:%s%s%s%s WIDTH=%d SYNC=%d TMO=%d\n",
9070 sym_name(np), i,
9071 (tn->flags & SYMBIOS_DISCONNECT_ENABLE) ? " DISC" : "",
9072 (tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME) ? " SCAN_BOOT" : "",
9073 (tn->flags & SYMBIOS_SCAN_LUNS) ? " SCAN_LUNS" : "",
9074 (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? " TCQ" : "",
9075 tn->bus_width,
9076 tn->sync_period / 4,
9077 tn->timeout);
9078 }
9079 }
9080
9081 /*
9082 * Dump TEKRAM format NVRAM for debugging purpose.
9083 */
9084 static const u_char Tekram_boot_delay[7] = {3, 5, 10, 20, 30, 60, 120};
9085 static void sym_display_Tekram_nvram(hcb_p np, Tekram_nvram *nvram)
9086 {
9087 int i, tags, boot_delay;
9088 char *rem;
9089
9090 /* display Tekram nvram host data */
9091 tags = 2 << nvram->max_tags_index;
9092 boot_delay = 0;
9093 if (nvram->boot_delay_index < 6)
9094 boot_delay = Tekram_boot_delay[nvram->boot_delay_index];
9095 switch((nvram->flags & TEKRAM_REMOVABLE_FLAGS) >> 6) {
9096 default:
9097 case 0: rem = ""; break;
9098 case 1: rem = " REMOVABLE=boot device"; break;
9099 case 2: rem = " REMOVABLE=all"; break;
9100 }
9101
9102 device_printf(np->device,
9103 "HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n",
9104 nvram->host_id & 0x0f,
9105 (nvram->flags1 & SYMBIOS_SCAM_ENABLE) ? " SCAM" : "",
9106 (nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES" : "",
9107 (nvram->flags & TEKRAM_DRIVES_SUP_1GB) ? " >1GB" : "",
9108 (nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET" : "",
9109 (nvram->flags & TEKRAM_ACTIVE_NEGATION) ? " ACT_NEG" : "",
9110 (nvram->flags & TEKRAM_IMMEDIATE_SEEK) ? " IMM_SEEK" : "",
9111 (nvram->flags & TEKRAM_SCAN_LUNS) ? " SCAN_LUNS" : "",
9112 (nvram->flags1 & TEKRAM_F2_F6_ENABLED) ? " F2_F6" : "",
9113 rem, boot_delay, tags);
9114
9115 /* display Tekram nvram drive data */
9116 for (i = 0; i <= 15; i++) {
9117 int sync, j;
9118 struct Tekram_target *tn = &nvram->target[i];
9119 j = tn->sync_index & 0xf;
9120 sync = Tekram_sync[j];
9121 printf("%s-%d:%s%s%s%s%s%s PERIOD=%d\n",
9122 sym_name(np), i,
9123 (tn->flags & TEKRAM_PARITY_CHECK) ? " PARITY" : "",
9124 (tn->flags & TEKRAM_SYNC_NEGO) ? " SYNC" : "",
9125 (tn->flags & TEKRAM_DISCONNECT_ENABLE) ? " DISC" : "",
9126 (tn->flags & TEKRAM_START_CMD) ? " START" : "",
9127 (tn->flags & TEKRAM_TAGGED_COMMANDS) ? " TCQ" : "",
9128 (tn->flags & TEKRAM_WIDE_NEGO) ? " WIDE" : "",
9129 sync);
9130 }
9131 }
9132 #endif /* SYM_CONF_DEBUG_NVRAM */
9133 #endif /* SYM_CONF_NVRAM_SUPPORT */
9134
9135 /*
9136 * Try reading Symbios or Tekram NVRAM
9137 */
9138 #ifdef SYM_CONF_NVRAM_SUPPORT
9139 static int sym_read_Symbios_nvram (hcb_p np, Symbios_nvram *nvram);
9140 static int sym_read_Tekram_nvram (hcb_p np, Tekram_nvram *nvram);
9141 #endif
9142
/*
 * Probe for an NVRAM image and record its type in nvp->type.
 * Returns the type found (0 if none, or if support is compiled out).
 */
static int sym_read_nvram(hcb_p np, struct sym_nvram *nvp)
{
#ifdef SYM_CONF_NVRAM_SUPPORT
	/*
	 * Try to read SYMBIOS nvram.
	 * Try to read TEKRAM nvram if Symbios nvram not found.
	 */
	if (SYM_SETUP_SYMBIOS_NVRAM &&
	    !sym_read_Symbios_nvram (np, &nvp->data.Symbios)) {
		nvp->type = SYM_SYMBIOS_NVRAM;
#ifdef SYM_CONF_DEBUG_NVRAM
		sym_display_Symbios_nvram(np, &nvp->data.Symbios);
#endif
	}
	else if (SYM_SETUP_TEKRAM_NVRAM &&
		!sym_read_Tekram_nvram (np, &nvp->data.Tekram)) {
		nvp->type = SYM_TEKRAM_NVRAM;
#ifdef SYM_CONF_DEBUG_NVRAM
		sym_display_Tekram_nvram(np, &nvp->data.Tekram);
#endif
	}
	else
		nvp->type = 0;
#else
	nvp->type = 0;
#endif
	return nvp->type;
}
9171
9172 #ifdef SYM_CONF_NVRAM_SUPPORT
9173 /*
9174 * 24C16 EEPROM reading.
9175 *
9176 * GPOI0 - data in/data out
9177 * GPIO1 - clock
9178 * Symbios NVRAM wiring now also used by Tekram.
9179 */
9180
9181 #define SET_BIT 0
9182 #define CLR_BIT 1
9183 #define SET_CLK 2
9184 #define CLR_CLK 3
9185
9186 /*
9187 * Set/clear data/clock bit in GPIO0
9188 */
9189 static void S24C16_set_bit(hcb_p np, u_char write_bit, u_char *gpreg,
9190 int bit_mode)
9191 {
9192 UDELAY (5);
9193 switch (bit_mode){
9194 case SET_BIT:
9195 *gpreg |= write_bit;
9196 break;
9197 case CLR_BIT:
9198 *gpreg &= 0xfe;
9199 break;
9200 case SET_CLK:
9201 *gpreg |= 0x02;
9202 break;
9203 case CLR_CLK:
9204 *gpreg &= 0xfd;
9205 break;
9206 }
9207 OUTB (nc_gpreg, *gpreg);
9208 UDELAY (5);
9209 }
9210
9211 /*
9212 * Send START condition to NVRAM to wake it up.
9213 */
9214 static void S24C16_start(hcb_p np, u_char *gpreg)
9215 {
9216 S24C16_set_bit(np, 1, gpreg, SET_BIT);
9217 S24C16_set_bit(np, 0, gpreg, SET_CLK);
9218 S24C16_set_bit(np, 0, gpreg, CLR_BIT);
9219 S24C16_set_bit(np, 0, gpreg, CLR_CLK);
9220 }
9221
9222 /*
9223 * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZzzzz!!
9224 */
9225 static void S24C16_stop(hcb_p np, u_char *gpreg)
9226 {
9227 S24C16_set_bit(np, 0, gpreg, SET_CLK);
9228 S24C16_set_bit(np, 1, gpreg, SET_BIT);
9229 }
9230
9231 /*
9232 * Read or write a bit to the NVRAM,
9233 * read if GPIO0 input else write if GPIO0 output
9234 */
9235 static void S24C16_do_bit(hcb_p np, u_char *read_bit, u_char write_bit,
9236 u_char *gpreg)
9237 {
9238 S24C16_set_bit(np, write_bit, gpreg, SET_BIT);
9239 S24C16_set_bit(np, 0, gpreg, SET_CLK);
9240 if (read_bit)
9241 *read_bit = INB (nc_gpreg);
9242 S24C16_set_bit(np, 0, gpreg, CLR_CLK);
9243 S24C16_set_bit(np, 0, gpreg, CLR_BIT);
9244 }
9245
9246 /*
9247 * Output an ACK to the NVRAM after reading,
9248 * change GPIO0 to output and when done back to an input
9249 */
9250 static void S24C16_write_ack(hcb_p np, u_char write_bit, u_char *gpreg,
9251 u_char *gpcntl)
9252 {
9253 OUTB (nc_gpcntl, *gpcntl & 0xfe);
9254 S24C16_do_bit(np, 0, write_bit, gpreg);
9255 OUTB (nc_gpcntl, *gpcntl);
9256 }
9257
9258 /*
9259 * Input an ACK from NVRAM after writing,
9260 * change GPIO0 to input and when done back to an output
9261 */
9262 static void S24C16_read_ack(hcb_p np, u_char *read_bit, u_char *gpreg,
9263 u_char *gpcntl)
9264 {
9265 OUTB (nc_gpcntl, *gpcntl | 0x01);
9266 S24C16_do_bit(np, read_bit, 1, gpreg);
9267 OUTB (nc_gpcntl, *gpcntl);
9268 }
9269
9270 /*
9271 * WRITE a byte to the NVRAM and then get an ACK to see it was accepted OK,
9272 * GPIO0 must already be set as an output
9273 */
9274 static void S24C16_write_byte(hcb_p np, u_char *ack_data, u_char write_data,
9275 u_char *gpreg, u_char *gpcntl)
9276 {
9277 int x;
9278
9279 for (x = 0; x < 8; x++)
9280 S24C16_do_bit(np, 0, (write_data >> (7 - x)) & 0x01, gpreg);
9281
9282 S24C16_read_ack(np, ack_data, gpreg, gpcntl);
9283 }
9284
9285 /*
9286 * READ a byte from the NVRAM and then send an ACK to say we have got it,
9287 * GPIO0 must already be set as an input
9288 */
9289 static void S24C16_read_byte(hcb_p np, u_char *read_data, u_char ack_data,
9290 u_char *gpreg, u_char *gpcntl)
9291 {
9292 int x;
9293 u_char read_bit;
9294
9295 *read_data = 0;
9296 for (x = 0; x < 8; x++) {
9297 S24C16_do_bit(np, &read_bit, 1, gpreg);
9298 *read_data |= ((read_bit & 0x01) << (7 - x));
9299 }
9300
9301 S24C16_write_ack(np, ack_data, gpreg, gpcntl);
9302 }
9303
9304 /*
9305 * Read 'len' bytes starting at 'offset'.
9306 */
9307 static int sym_read_S24C16_nvram (hcb_p np, int offset, u_char *data, int len)
9308 {
9309 u_char gpcntl, gpreg;
9310 u_char old_gpcntl, old_gpreg;
9311 u_char ack_data;
9312 int retv = 1;
9313 int x;
9314
9315 /* save current state of GPCNTL and GPREG */
9316 old_gpreg = INB (nc_gpreg);
9317 old_gpcntl = INB (nc_gpcntl);
9318 gpcntl = old_gpcntl & 0x1c;
9319
9320 /* set up GPREG & GPCNTL to set GPIO0 and GPIO1 in to known state */
9321 OUTB (nc_gpreg, old_gpreg);
9322 OUTB (nc_gpcntl, gpcntl);
9323
9324 /* this is to set NVRAM into a known state with GPIO0/1 both low */
9325 gpreg = old_gpreg;
9326 S24C16_set_bit(np, 0, &gpreg, CLR_CLK);
9327 S24C16_set_bit(np, 0, &gpreg, CLR_BIT);
9328
9329 /* now set NVRAM inactive with GPIO0/1 both high */
9330 S24C16_stop(np, &gpreg);
9331
9332 /* activate NVRAM */
9333 S24C16_start(np, &gpreg);
9334
9335 /* write device code and random address MSB */
9336 S24C16_write_byte(np, &ack_data,
9337 0xa0 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
9338 if (ack_data & 0x01)
9339 goto out;
9340
9341 /* write random address LSB */
9342 S24C16_write_byte(np, &ack_data,
9343 offset & 0xff, &gpreg, &gpcntl);
9344 if (ack_data & 0x01)
9345 goto out;
9346
9347 /* regenerate START state to set up for reading */
9348 S24C16_start(np, &gpreg);
9349
9350 /* rewrite device code and address MSB with read bit set (lsb = 0x01) */
9351 S24C16_write_byte(np, &ack_data,
9352 0xa1 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
9353 if (ack_data & 0x01)
9354 goto out;
9355
9356 /* now set up GPIO0 for inputting data */
9357 gpcntl |= 0x01;
9358 OUTB (nc_gpcntl, gpcntl);
9359
9360 /* input all requested data - only part of total NVRAM */
9361 for (x = 0; x < len; x++)
9362 S24C16_read_byte(np, &data[x], (x == (len-1)), &gpreg, &gpcntl);
9363
9364 /* finally put NVRAM back in inactive mode */
9365 gpcntl &= 0xfe;
9366 OUTB (nc_gpcntl, gpcntl);
9367 S24C16_stop(np, &gpreg);
9368 retv = 0;
9369 out:
9370 /* return GPIO0/1 to original states after having accessed NVRAM */
9371 OUTB (nc_gpcntl, old_gpcntl);
9372 OUTB (nc_gpreg, old_gpreg);
9373
9374 return retv;
9375 }
9376
9377 #undef SET_BIT /* 0 */
9378 #undef CLR_BIT /* 1 */
9379 #undef SET_CLK /* 2 */
9380 #undef CLR_CLK /* 3 */
9381
9382 /*
9383 * Try reading Symbios NVRAM.
9384 * Return 0 if OK.
9385 */
9386 static int sym_read_Symbios_nvram (hcb_p np, Symbios_nvram *nvram)
9387 {
9388 static u_char Symbios_trailer[6] = {0xfe, 0xfe, 0, 0, 0, 0};
9389 u_char *data = (u_char *) nvram;
9390 int len = sizeof(*nvram);
9391 u_short csum;
9392 int x;
9393
9394 /* probe the 24c16 and read the SYMBIOS 24c16 area */
9395 if (sym_read_S24C16_nvram (np, SYMBIOS_NVRAM_ADDRESS, data, len))
9396 return 1;
9397
9398 /* check valid NVRAM signature, verify byte count and checksum */
9399 if (nvram->type != 0 ||
9400 bcmp(nvram->trailer, Symbios_trailer, 6) ||
9401 nvram->byte_count != len - 12)
9402 return 1;
9403
9404 /* verify checksum */
9405 for (x = 6, csum = 0; x < len - 6; x++)
9406 csum += data[x];
9407 if (csum != nvram->checksum)
9408 return 1;
9409
9410 return 0;
9411 }
9412
9413 /*
9414 * 93C46 EEPROM reading.
9415 *
9416 * GPOI0 - data in
9417 * GPIO1 - data out
9418 * GPIO2 - clock
9419 * GPIO4 - chip select
9420 *
9421 * Used by Tekram.
9422 */
9423
9424 /*
9425 * Pulse clock bit in GPIO0
9426 */
9427 static void T93C46_Clk(hcb_p np, u_char *gpreg)
9428 {
9429 OUTB (nc_gpreg, *gpreg | 0x04);
9430 UDELAY (2);
9431 OUTB (nc_gpreg, *gpreg);
9432 }
9433
9434 /*
9435 * Read bit from NVRAM
9436 */
9437 static void T93C46_Read_Bit(hcb_p np, u_char *read_bit, u_char *gpreg)
9438 {
9439 UDELAY (2);
9440 T93C46_Clk(np, gpreg);
9441 *read_bit = INB (nc_gpreg);
9442 }
9443
9444 /*
9445 * Write bit to GPIO0
9446 */
9447 static void T93C46_Write_Bit(hcb_p np, u_char write_bit, u_char *gpreg)
9448 {
9449 if (write_bit & 0x01)
9450 *gpreg |= 0x02;
9451 else
9452 *gpreg &= 0xfd;
9453
9454 *gpreg |= 0x10;
9455
9456 OUTB (nc_gpreg, *gpreg);
9457 UDELAY (2);
9458
9459 T93C46_Clk(np, gpreg);
9460 }
9461
9462 /*
9463 * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZZzzz!!
9464 */
9465 static void T93C46_Stop(hcb_p np, u_char *gpreg)
9466 {
9467 *gpreg &= 0xef;
9468 OUTB (nc_gpreg, *gpreg);
9469 UDELAY (2);
9470
9471 T93C46_Clk(np, gpreg);
9472 }
9473
9474 /*
9475 * Send read command and address to NVRAM
9476 */
9477 static void T93C46_Send_Command(hcb_p np, u_short write_data,
9478 u_char *read_bit, u_char *gpreg)
9479 {
9480 int x;
9481
9482 /* send 9 bits, start bit (1), command (2), address (6) */
9483 for (x = 0; x < 9; x++)
9484 T93C46_Write_Bit(np, (u_char) (write_data >> (8 - x)), gpreg);
9485
9486 *read_bit = INB (nc_gpreg);
9487 }
9488
9489 /*
9490 * READ 2 bytes from the NVRAM
9491 */
9492 static void T93C46_Read_Word(hcb_p np, u_short *nvram_data, u_char *gpreg)
9493 {
9494 int x;
9495 u_char read_bit;
9496
9497 *nvram_data = 0;
9498 for (x = 0; x < 16; x++) {
9499 T93C46_Read_Bit(np, &read_bit, gpreg);
9500
9501 if (read_bit & 0x01)
9502 *nvram_data |= (0x01 << (15 - x));
9503 else
9504 *nvram_data &= ~(0x01 << (15 - x));
9505 }
9506 }
9507
9508 /*
9509 * Read Tekram NvRAM data.
9510 */
9511 static int T93C46_Read_Data(hcb_p np, u_short *data,int len,u_char *gpreg)
9512 {
9513 u_char read_bit;
9514 int x;
9515
9516 for (x = 0; x < len; x++) {
9517 /* output read command and address */
9518 T93C46_Send_Command(np, 0x180 | x, &read_bit, gpreg);
9519 if (read_bit & 0x01)
9520 return 1; /* Bad */
9521 T93C46_Read_Word(np, &data[x], gpreg);
9522 T93C46_Stop(np, gpreg);
9523 }
9524
9525 return 0;
9526 }
9527
9528 /*
9529 * Try reading 93C46 Tekram NVRAM.
9530 */
9531 static int sym_read_T93C46_nvram (hcb_p np, Tekram_nvram *nvram)
9532 {
9533 u_char gpcntl, gpreg;
9534 u_char old_gpcntl, old_gpreg;
9535 int retv = 1;
9536
9537 /* save current state of GPCNTL and GPREG */
9538 old_gpreg = INB (nc_gpreg);
9539 old_gpcntl = INB (nc_gpcntl);
9540
9541 /* set up GPREG & GPCNTL to set GPIO0/1/2/4 in to known state, 0 in,
9542 1/2/4 out */
9543 gpreg = old_gpreg & 0xe9;
9544 OUTB (nc_gpreg, gpreg);
9545 gpcntl = (old_gpcntl & 0xe9) | 0x09;
9546 OUTB (nc_gpcntl, gpcntl);
9547
9548 /* input all of NVRAM, 64 words */
9549 retv = T93C46_Read_Data(np, (u_short *) nvram,
9550 sizeof(*nvram) / sizeof(short), &gpreg);
9551
9552 /* return GPIO0/1/2/4 to original states after having accessed NVRAM */
9553 OUTB (nc_gpcntl, old_gpcntl);
9554 OUTB (nc_gpreg, old_gpreg);
9555
9556 return retv;
9557 }
9558
9559 /*
9560 * Try reading Tekram NVRAM.
9561 * Return 0 if OK.
9562 */
9563 static int sym_read_Tekram_nvram (hcb_p np, Tekram_nvram *nvram)
9564 {
9565 u_char *data = (u_char *) nvram;
9566 int len = sizeof(*nvram);
9567 u_short csum;
9568 int x;
9569
9570 switch (np->device_id) {
9571 case PCI_ID_SYM53C885:
9572 case PCI_ID_SYM53C895:
9573 case PCI_ID_SYM53C896:
9574 x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
9575 data, len);
9576 break;
9577 case PCI_ID_SYM53C875:
9578 x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
9579 data, len);
9580 if (!x)
9581 break;
9582 default:
9583 x = sym_read_T93C46_nvram(np, nvram);
9584 break;
9585 }
9586 if (x)
9587 return 1;
9588
9589 /* verify checksum */
9590 for (x = 0, csum = 0; x < len - 1; x += 2)
9591 csum += data[x] + (data[x+1] << 8);
9592 if (csum != 0x1234)
9593 return 1;
9594
9595 return 0;
9596 }
9597
9598 #endif /* SYM_CONF_NVRAM_SUPPORT */
9599