1 /*- 2 * Device driver optimized for the Symbios/LSI 53C896/53C895A/53C1010 3 * PCI-SCSI controllers. 4 * 5 * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr> 6 * 7 * This driver also supports the following Symbios/LSI PCI-SCSI chips: 8 * 53C810A, 53C825A, 53C860, 53C875, 53C876, 53C885, 53C895, 9 * 53C810, 53C815, 53C825 and the 53C1510D is 53C8XX mode. 10 * 11 * 12 * This driver for FreeBSD-CAM is derived from the Linux sym53c8xx driver. 13 * Copyright (C) 1998-1999 Gerard Roudier 14 * 15 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been 16 * a port of the FreeBSD ncr driver to Linux-1.2.13. 17 * 18 * The original ncr driver has been written for 386bsd and FreeBSD by 19 * Wolfgang Stanglmeier <wolf@cologne.de> 20 * Stefan Esser <se@mi.Uni-Koeln.de> 21 * Copyright (C) 1994 Wolfgang Stanglmeier 22 * 23 * The initialisation code, and part of the code that addresses 24 * FreeBSD-CAM services is based on the aic7xxx driver for FreeBSD-CAM 25 * written by Justin T. Gibbs. 26 * 27 * Other major contributions: 28 * 29 * NVRAM detection and reading. 30 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk> 31 * 32 *----------------------------------------------------------------------------- 33 * 34 * Redistribution and use in source and binary forms, with or without 35 * modification, are permitted provided that the following conditions 36 * are met: 37 * 1. Redistributions of source code must retain the above copyright 38 * notice, this list of conditions and the following disclaimer. 39 * 2. Redistributions in binary form must reproduce the above copyright 40 * notice, this list of conditions and the following disclaimer in the 41 * documentation and/or other materials provided with the distribution. 42 * 3. The name of the author may not be used to endorse or promote products 43 * derived from this software without specific prior written permission. 
44 * 45 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 48 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 49 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 55 * SUCH DAMAGE. 56 */ 57 58 #include <sys/cdefs.h> 59 __FBSDID("$FreeBSD$"); 60 61 #define SYM_DRIVER_NAME "sym-1.6.5-20000902" 62 63 /* #define SYM_DEBUG_GENERIC_SUPPORT */ 64 /* #define CAM_NEW_TRAN_CODE */ 65 66 #include <sys/param.h> 67 68 /* 69 * Driver configuration options. 70 */ 71 #include "opt_sym.h" 72 #include <dev/sym/sym_conf.h> 73 74 75 #include <sys/systm.h> 76 #include <sys/malloc.h> 77 #include <sys/endian.h> 78 #include <sys/kernel.h> 79 #include <sys/lock.h> 80 #include <sys/mutex.h> 81 #include <sys/module.h> 82 #include <sys/bus.h> 83 84 #include <sys/proc.h> 85 86 #include <dev/pci/pcireg.h> 87 #include <dev/pci/pcivar.h> 88 89 #include <machine/bus_memio.h> 90 /* 91 * Only include bus_pio if needed. 92 * This avoids bus space primitives to be uselessly bloated 93 * by out-of-age PIO operations. 
 */
#ifdef SYM_CONF_IOMAPPED
#include <machine/bus_pio.h>
#endif
#include <machine/bus.h>

#include <machine/resource.h>
#include <sys/rman.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

/* Short and quite clear integer types */
typedef int8_t    s8;
typedef int16_t   s16;
typedef int32_t   s32;
typedef u_int8_t  u8;
typedef u_int16_t u16;
typedef u_int32_t u32;

/*
 * From 'cam.error_recovery_diffs.20010313.context' patch.
 */
#ifdef	CAM_NEW_TRAN_CODE
#define	FreeBSD_New_Tran_Settings
#endif	/* CAM_NEW_TRAN_CODE */

/*
 * Driver definitions.
 */
#include <dev/sym/sym_defs.h>
#include <dev/sym/sym_fw.h>

/*
 * IA32 architecture does not reorder STORES and prevents
 * LOADS from passing STORES. It is called `program order'
 * by Intel and allows device drivers to deal with memory
 * ordering by only ensuring that the code is not reordered
 * by the compiler when ordering is required.
 * Other architectures implement a weaker ordering that
 * requires memory barriers (and also IO barriers when they
 * make sense) to be used.
 */

#if defined __i386__ || defined __amd64__
/* x86: program order suffices; an empty statement only blocks compiler reordering. */
#define MEMORY_BARRIER()	do { ; } while(0)
#elif defined __alpha__
#define MEMORY_BARRIER()	alpha_mb()
#elif defined __powerpc__
#define MEMORY_BARRIER()	__asm__ volatile("eieio; sync" : : : "memory")
#elif defined __ia64__
#define MEMORY_BARRIER()	__asm__ volatile("mf.a; mf" : : : "memory")
#elif defined __sparc64__
#define MEMORY_BARRIER()	__asm__ volatile("membar #Sync" : : : "memory")
#else
#error	"Not supported platform"
#endif

/*
 * Portable but silly implemented byte order primitives.
 * We define the primitives we need, since FreeBSD doesn't
 * seem to have them yet.
 */
#if	BYTE_ORDER == BIG_ENDIAN

#define	__revb16(x) (	(((u16)(x) & (u16)0x00ffU) << 8) | \
			(((u16)(x) & (u16)0xff00U) >> 8)	)
#define	__revb32(x) (	(((u32)(x) & 0x000000ffU) << 24) | \
			(((u32)(x) & 0x0000ff00U) <<  8) | \
			(((u32)(x) & 0x00ff0000U) >>  8) | \
			(((u32)(x) & 0xff000000U) >> 24)	)

#define	__htole16(v)	__revb16(v)
#define	__htole32(v)	__revb32(v)
#define	__le16toh(v)	__htole16(v)
#define	__le32toh(v)	__htole32(v)

/*
 * Inline function forms: evaluate the argument exactly once,
 * unlike the macros above which expand it several times.
 */
static __inline u16	_htole16(u16 v) { return __htole16(v); }
static __inline u32	_htole32(u32 v) { return __htole32(v); }
#define	_le16toh	_htole16
#define	_le32toh	_htole32

#else	/* LITTLE ENDIAN */

/* Host already matches SCRIPTS/PCI little-endian layout: all no-ops. */
#define	__htole16(v)	(v)
#define	__htole32(v)	(v)
#define	__le16toh(v)	(v)
#define	__le32toh(v)	(v)

#define	_htole16(v)	(v)
#define	_htole32(v)	(v)
#define	_le16toh(v)	(v)
#define	_le32toh(v)	(v)

#endif	/* BYTE_ORDER */

/*
 * A la VMS/CAM-3 queue management.
 */

/* Circular doubly-linked list node; an empty queue links to itself. */
typedef struct sym_quehead {
	struct sym_quehead *flink;	/* Forward  pointer */
	struct sym_quehead *blink;	/* Backward pointer */
} SYM_QUEHEAD;

/* Initialize a queue head to the empty state (both links self-referent). */
#define sym_que_init(ptr) do { \
	(ptr)->flink = (ptr); (ptr)->blink = (ptr); \
} while (0)

/* Return the first element of the queue, or 0 if the queue is empty. */
static __inline struct sym_quehead *sym_que_first(struct sym_quehead *head)
{
	return (head->flink == head) ? 0 : head->flink;
}

/* Return the last element of the queue, or 0 if the queue is empty. */
static __inline struct sym_quehead *sym_que_last(struct sym_quehead *head)
{
	return (head->blink == head) ? 0 : head->blink;
}

/* Link 'new' between the adjacent nodes 'blink' and 'flink'. */
static __inline void __sym_que_add(struct sym_quehead * new,
	struct sym_quehead * blink,
	struct sym_quehead * flink)
{
	flink->blink	= new;
	new->flink	= flink;
	new->blink	= blink;
	blink->flink	= new;
}

/* Unlink the element(s) sitting between 'blink' and 'flink'. */
static __inline void __sym_que_del(struct sym_quehead * blink,
	struct sym_quehead * flink)
{
	flink->blink = blink;
	blink->flink = flink;
}

static __inline int sym_que_empty(struct sym_quehead *head)
{
	return head->flink == head;
}

/*
 * Splice the whole of 'list' at the front of 'head'.
 * Note: 'list' itself is left untouched (still points at the
 * moved elements); callers must re-init it if they reuse it.
 */
static __inline void sym_que_splice(struct sym_quehead *list,
	struct sym_quehead *head)
{
	struct sym_quehead *first = list->flink;

	if (first != list) {
		struct sym_quehead *last = list->blink;
		struct sym_quehead *at   = head->flink;

		first->blink = head;
		head->flink  = first;

		last->flink = at;
		at->blink   = last;
	}
}

/* container_of-style lookup: map a queue link back to its enclosing struct. */
#define sym_que_entry(ptr, type, member) \
	((type *)((char *)(ptr)-(unsigned int)(&((type *)0)->member)))


#define sym_insque(new, pos)		__sym_que_add(new, pos, (pos)->flink)

#define sym_remque(el)			__sym_que_del((el)->blink, (el)->flink)

#define sym_insque_head(new, head)	__sym_que_add(new, head, (head)->flink)

/* Remove and return the first element, or 0 if the queue is empty. */
static __inline struct sym_quehead *sym_remque_head(struct sym_quehead *head)
{
	struct sym_quehead *elem = head->flink;

	if (elem != head)
		__sym_que_del(head, elem->flink);
	else
		elem = 0;
	return elem;
}

#define sym_insque_tail(new, head)	__sym_que_add(new, (head)->blink, head)

/* Remove and return the last element, or 0 if the queue is empty. */
static __inline struct sym_quehead *sym_remque_tail(struct sym_quehead *head)
{
	struct sym_quehead *elem = head->blink;

	if (elem != head)
		__sym_que_del(elem->blink, head);
	else
		elem = 0;
	return elem;
}

/*
 * This one may be useful.
 */
#define FOR_EACH_QUEUED_ELEMENT(head, qp) \
	for (qp = (head)->flink; qp != (head); qp = qp->flink)
/*
 * FreeBSD does not offer our kind of queue in the CAM CCB.
 * So, we have to cast.
 */
#define sym_qptr(p)	((struct sym_quehead *) (p))

/*
 * Simple bitmap operations on arrays of u32 words.
 */
#define sym_set_bit(p, n)	(((u32 *)(p))[(n)>>5] |=  (1<<((n)&0x1f)))
#define sym_clr_bit(p, n)	(((u32 *)(p))[(n)>>5] &= ~(1<<((n)&0x1f)))
#define sym_is_bit(p, n)	(((u32 *)(p))[(n)>>5] &   (1<<((n)&0x1f)))

/*
 * Number of tasks per device we want to handle.
 */
#if	SYM_CONF_MAX_TAG_ORDER > 8
#error	"more than 256 tags per logical unit not allowed."
#endif
#define	SYM_CONF_MAX_TASK	(1<<SYM_CONF_MAX_TAG_ORDER)

/*
 * Donnot use more tasks that we can handle.
 */
#ifndef	SYM_CONF_MAX_TAG
#define	SYM_CONF_MAX_TAG	SYM_CONF_MAX_TASK
#endif
#if	SYM_CONF_MAX_TAG > SYM_CONF_MAX_TASK
#undef	SYM_CONF_MAX_TAG
#define	SYM_CONF_MAX_TAG	SYM_CONF_MAX_TASK
#endif

/*
 *    This one means 'NO TAG for this job'
 */
#define NO_TAG	(256)

/*
 * Number of SCSI targets.
 */
#if	SYM_CONF_MAX_TARGET > 16
#error	"more than 16 targets not allowed."
#endif

/*
 * Number of logical units per target.
 */
#if	SYM_CONF_MAX_LUN > 64
#error	"more than 64 logical units per target not allowed."
#endif

/*
 * Asynchronous pre-scaler (ns). Shall be 40 for
 * the SCSI timings to be compliant.
 */
#define SYM_CONF_MIN_ASYNC (40)

/*
 * Number of entries in the START and DONE queues.
 *
 * We limit to 1 PAGE in order to succeed allocation of
 * these queues. Each entry is 8 bytes long (2 DWORDS).
 */
#ifdef	SYM_CONF_MAX_START
#define	SYM_CONF_MAX_QUEUE (SYM_CONF_MAX_START+2)
#else
#define	SYM_CONF_MAX_QUEUE (7*SYM_CONF_MAX_TASK+2)
#define	SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
#endif

#if	SYM_CONF_MAX_QUEUE > PAGE_SIZE/8
#undef	SYM_CONF_MAX_QUEUE
#define	SYM_CONF_MAX_QUEUE PAGE_SIZE/8
#undef	SYM_CONF_MAX_START
#define	SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
#endif

/*
 * For this one, we want a short name :-)
 */
#define MAX_QUEUE	SYM_CONF_MAX_QUEUE

/*
 * Active debugging tags and verbosity.
 */
#define DEBUG_ALLOC	(0x0001)
#define DEBUG_PHASE	(0x0002)
#define DEBUG_POLL	(0x0004)
#define DEBUG_QUEUE	(0x0008)
#define DEBUG_RESULT	(0x0010)
#define DEBUG_SCATTER	(0x0020)
#define DEBUG_SCRIPT	(0x0040)
#define DEBUG_TINY	(0x0080)
#define DEBUG_TIMING	(0x0100)
#define DEBUG_NEGO	(0x0200)
#define DEBUG_TAGS	(0x0400)
#define DEBUG_POINTER	(0x0800)

#if 0
static int sym_debug = 0;
#define DEBUG_FLAGS sym_debug
#else
/* #define DEBUG_FLAGS (0x0631) */
#define DEBUG_FLAGS	(0x0000)

#endif
#define sym_verbose	(np->verbose)

/*
 * Insert a delay in micro-seconds and milli-seconds.
 */
static void UDELAY(int us) { DELAY(us); }
static void MDELAY(int ms) { while (ms--) UDELAY(1000); }

/*
 * Simple power of two buddy-like allocator.
 *
 * This simple code is not intended to be fast, but to
 * provide power of 2 aligned memory allocations.
 * Since the SCRIPTS processor only supplies 8 bit arithmetic,
 * this allocator allows simple and fast address calculations
 * from the SCRIPTS code. In addition, cache line alignment
 * is guaranteed for power of 2 cache line size.
 *
 * This allocator has been developped for the Linux sym53c8xx
 * driver, since this O/S does not provide naturally aligned
 * allocations.
 * It has the advantage of allowing the driver to use private
 * pages of memory that will be useful if we ever need to deal
 * with IO MMUs for PCI.
 */

#define MEMO_SHIFT	4	/* 16 bytes minimum memory chunk */
#ifndef __amd64__
#define MEMO_PAGE_ORDER	0	/* 1 PAGE  maximum */
#else
#define MEMO_PAGE_ORDER	1	/* 2 PAGEs maximum on amd64 */
#endif
#if 0
#define MEMO_FREE_UNUSED	/* Free unused pages immediately */
#endif
#define MEMO_WARN	1
#define MEMO_CLUSTER_SHIFT	(PAGE_SHIFT+MEMO_PAGE_ORDER)
#define MEMO_CLUSTER_SIZE	(1UL << MEMO_CLUSTER_SHIFT)
#define MEMO_CLUSTER_MASK	(MEMO_CLUSTER_SIZE-1)

#ifndef __amd64__
#define get_pages()		malloc(MEMO_CLUSTER_SIZE, M_DEVBUF, M_NOWAIT)
#define free_pages(p)		free((p), M_DEVBUF)
#else
/* amd64: chip DMA is 32-bit, so clusters must live below 4GB. */
#define get_pages()	contigmalloc(MEMO_CLUSTER_SIZE, M_DEVBUF, \
				     0, 0, 1LL << 32, PAGE_SIZE, 1LL << 32)
#define free_pages(p)	contigfree((p), MEMO_CLUSTER_SIZE, M_DEVBUF)
#endif

typedef u_long m_addr_t;	/* Enough bits to bit-hack addresses */

typedef struct m_link {		/* Link between free memory chunks */
	struct m_link *next;
} m_link_s;

typedef struct m_vtob {		/* Virtual to Bus address translation */
	struct m_vtob	*next;
	bus_dmamap_t	dmamap;	/* Map for this chunk */
	m_addr_t	vaddr;	/* Virtual address */
	m_addr_t	baddr;	/* Bus physical address */
} m_vtob_s;
/* Hash this stuff a bit to speed up translations */
#define VTOB_HASH_SHIFT		5
#define VTOB_HASH_SIZE		(1UL << VTOB_HASH_SHIFT)
#define VTOB_HASH_MASK		(VTOB_HASH_SIZE-1)
#define VTOB_HASH_CODE(m)	\
	((((m_addr_t) (m)) >> MEMO_CLUSTER_SHIFT) & VTOB_HASH_MASK)

typedef struct m_pool {		/* Memory pool of a given kind */
	bus_dma_tag_t	 dev_dmat;	/* Identifies the pool */
	bus_dma_tag_t	 dmat;		/* Tag for our fixed allocations */
	m_addr_t (*getp)(struct m_pool *);	/* Get a new cluster */
#ifdef	MEMO_FREE_UNUSED
	void (*freep)(struct m_pool *, m_addr_t);	/* Release a cluster */
#endif
#define M_GETP()	mp->getp(mp)
#define M_FREEP(p)	mp->freep(mp, p)
	int nump;			/* Number of clusters in use */
	m_vtob_s *(vtob[VTOB_HASH_SIZE]);	/* Virtual-to-bus hash table */
	struct m_pool *next;
	/* One free list per power-of-2 order, from MEMO_SHIFT up to a cluster. */
	struct m_link h[MEMO_CLUSTER_SHIFT - MEMO_SHIFT + 1];
} m_pool_s;

/*
 * Allocate a power-of-2 sized, power-of-2 aligned chunk from pool 'mp'.
 * Returns 0 on failure or when 'size' exceeds one cluster.
 */
static void *___sym_malloc(m_pool_s *mp, int size)
{
	int i = 0;
	int s = (1 << MEMO_SHIFT);
	int j;
	m_addr_t a;
	m_link_s *h = mp->h;

	if (size > MEMO_CLUSTER_SIZE)
		return 0;

	/*
	 * Round the request up to the next power of 2 (>= 16 bytes);
	 * 'i' is the index of the matching free list, 's' its chunk size.
	 */
	while (size > s) {
		s <<= 1;
		++i;
	}

	/*
	 * Find the first non-empty free list at order >= i. If every
	 * list up to cluster size is empty, grab a fresh cluster from
	 * the pool backend.
	 */
	j = i;
	while (!h[j].next) {
		if (s == MEMO_CLUSTER_SIZE) {
			h[j].next = (m_link_s *) M_GETP();
			if (h[j].next)
				h[j].next->next = 0;
			break;
		}
		++j;
		s <<= 1;
	}
	a = (m_addr_t) h[j].next;
	if (a) {
		h[j].next = h[j].next->next;
		/*
		 * Split the chunk down to the requested order, pushing
		 * the unused upper half (the "buddy") of each split on
		 * its own free list.
		 */
		while (j > i) {
			j -= 1;
			s >>= 1;
			h[j].next = (m_link_s *) (a+s);
			h[j].next->next = 0;
		}
	}
#ifdef DEBUG
	printf("___sym_malloc(%d) = %p\n", size, (void *) a);
#endif
	return (void *) a;
}

/*
 * Return a chunk obtained from ___sym_malloc() to pool 'mp',
 * coalescing it with its free buddy at each order when possible.
 * 'size' must match the size passed at allocation time.
 */
static void ___sym_mfree(m_pool_s *mp, void *ptr, int size)
{
	int i = 0;
	int s = (1 << MEMO_SHIFT);
	m_link_s *q;
	m_addr_t a, b;
	m_link_s *h = mp->h;

#ifdef DEBUG
	printf("___sym_mfree(%p, %d)\n", ptr, size);
#endif

	if (size > MEMO_CLUSTER_SIZE)
		return;

	/* Recover the order of the chunk from its size. */
	while (size > s) {
		s <<= 1;
		++i;
	}

	a = (m_addr_t) ptr;

	while (1) {
#ifdef MEMO_FREE_UNUSED
		/* A fully coalesced cluster goes back to the pool backend. */
		if (s == MEMO_CLUSTER_SIZE) {
			M_FREEP(a);
			break;
		}
#endif
		/* The buddy's address differs only in bit 's'. */
		b = a ^ s;
		q = &h[i];
		/* Search the free list at this order for the buddy. */
		while (q->next && q->next != (m_link_s *) b) {
			q = q->next;
		}
		if (!q->next) {
			/* Buddy is busy: just push the chunk on the free list. */
			((m_link_s *) a)->next = h[i].next;
			h[i].next = (m_link_s *) a;
			break;
		}
		/* Buddy is free: unlink it, merge, and retry one order up. */
		q->next = q->next->next;
		a = a & b;
		s <<= 1;
		++i;
	}
}

/*
 * Allocate from 'mp' and zero the returned memory.
 * Prints a warning on failure when MEMO_WARN is set in 'uflags'.
 */
static void *__sym_calloc2(m_pool_s *mp, int size, char *name, int uflags)
{
	void *p;

	p = ___sym_malloc(mp, size);

	if (DEBUG_FLAGS & DEBUG_ALLOC)
		printf ("new %-10s[%4d] @%p.\n", name, size, p);

	if (p)
		bzero(p, size);
	else if (uflags & MEMO_WARN)
		printf ("__sym_calloc2: failed to allocate %s[%d]\n", name, size);

	return p;
}

#define __sym_calloc(mp, s, n)	__sym_calloc2(mp, s, n, MEMO_WARN)

/* Free memory obtained from __sym_calloc(), with optional debug trace. */
static void __sym_mfree(m_pool_s *mp, void *ptr, int size, char *name)
{
	if (DEBUG_FLAGS & DEBUG_ALLOC)
		printf ("freeing %-10s[%4d] @%p.\n", name, size, ptr);

	___sym_mfree(mp, ptr, size);

}

/*
 * Default memory pool we donnot need to involve in DMA.
 */
/*
 * With the `bus dma abstraction', we use a separate pool for
 * memory we donnot need to involve in DMA.
 */
/* Cluster getter for the non-DMA pool: plain kernel pages. */
static m_addr_t ___mp0_getp(m_pool_s *mp)
{
	m_addr_t m = (m_addr_t) get_pages();
	if (m)
		++mp->nump;
	return m;
}

#ifdef	MEMO_FREE_UNUSED
static void ___mp0_freep(m_pool_s *mp, m_addr_t m)
{
	free_pages(m);
	--mp->nump;
}
#endif

#ifdef	MEMO_FREE_UNUSED
static m_pool_s mp0 = {0, 0, ___mp0_getp, ___mp0_freep};
#else
static m_pool_s mp0 = {0, 0, ___mp0_getp};
#endif


/*
 * Actual memory allocation routine for non-DMAed memory.
 */
static void *sym_calloc(int size, char *name)
{
	void *m;
	/* Lock */
	m = __sym_calloc(&mp0, size, name);
	/* Unlock */
	return m;
}

/*
 * Actual memory allocation routine for non-DMAed memory.
 */
static void sym_mfree(void *ptr, int size, char *name)
{
	/* Lock */
	__sym_mfree(&mp0, ptr, size, name);
	/* Unlock */
}

/*
 * DMAable pools.
661 */ 662 /* 663 * With `bus dma abstraction', we use a separate pool per parent 664 * BUS handle. A reverse table (hashed) is maintained for virtual 665 * to BUS address translation. 666 */ 667 static void getbaddrcb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 668 { 669 bus_addr_t *baddr; 670 baddr = (bus_addr_t *)arg; 671 *baddr = segs->ds_addr; 672 } 673 674 static m_addr_t ___dma_getp(m_pool_s *mp) 675 { 676 m_vtob_s *vbp; 677 void *vaddr = 0; 678 bus_addr_t baddr = 0; 679 680 vbp = __sym_calloc(&mp0, sizeof(*vbp), "VTOB"); 681 if (!vbp) 682 goto out_err; 683 684 if (bus_dmamem_alloc(mp->dmat, &vaddr, 685 BUS_DMA_NOWAIT, &vbp->dmamap)) 686 goto out_err; 687 bus_dmamap_load(mp->dmat, vbp->dmamap, vaddr, 688 MEMO_CLUSTER_SIZE, getbaddrcb, &baddr, 0); 689 if (baddr) { 690 int hc = VTOB_HASH_CODE(vaddr); 691 vbp->vaddr = (m_addr_t) vaddr; 692 vbp->baddr = (m_addr_t) baddr; 693 vbp->next = mp->vtob[hc]; 694 mp->vtob[hc] = vbp; 695 ++mp->nump; 696 return (m_addr_t) vaddr; 697 } 698 out_err: 699 if (baddr) 700 bus_dmamap_unload(mp->dmat, vbp->dmamap); 701 if (vaddr) 702 bus_dmamem_free(mp->dmat, vaddr, vbp->dmamap); 703 if (vbp->dmamap) 704 bus_dmamap_destroy(mp->dmat, vbp->dmamap); 705 if (vbp) 706 __sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB"); 707 return 0; 708 } 709 710 #ifdef MEMO_FREE_UNUSED 711 static void ___dma_freep(m_pool_s *mp, m_addr_t m) 712 { 713 m_vtob_s **vbpp, *vbp; 714 int hc = VTOB_HASH_CODE(m); 715 716 vbpp = &mp->vtob[hc]; 717 while (*vbpp && (*vbpp)->vaddr != m) 718 vbpp = &(*vbpp)->next; 719 if (*vbpp) { 720 vbp = *vbpp; 721 *vbpp = (*vbpp)->next; 722 bus_dmamap_unload(mp->dmat, vbp->dmamap); 723 bus_dmamem_free(mp->dmat, (void *) vbp->vaddr, vbp->dmamap); 724 bus_dmamap_destroy(mp->dmat, vbp->dmamap); 725 __sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB"); 726 --mp->nump; 727 } 728 } 729 #endif 730 731 static __inline m_pool_s *___get_dma_pool(bus_dma_tag_t dev_dmat) 732 { 733 m_pool_s *mp; 734 for (mp = mp0.next; mp && mp->dev_dmat != 
dev_dmat; mp = mp->next); 735 return mp; 736 } 737 738 static m_pool_s *___cre_dma_pool(bus_dma_tag_t dev_dmat) 739 { 740 m_pool_s *mp = 0; 741 742 mp = __sym_calloc(&mp0, sizeof(*mp), "MPOOL"); 743 if (mp) { 744 mp->dev_dmat = dev_dmat; 745 if (!bus_dma_tag_create(dev_dmat, 1, MEMO_CLUSTER_SIZE, 746 BUS_SPACE_MAXADDR_32BIT, 747 BUS_SPACE_MAXADDR_32BIT, 748 NULL, NULL, MEMO_CLUSTER_SIZE, 1, 749 MEMO_CLUSTER_SIZE, 0, 750 busdma_lock_mutex, &Giant, &mp->dmat)) { 751 mp->getp = ___dma_getp; 752 #ifdef MEMO_FREE_UNUSED 753 mp->freep = ___dma_freep; 754 #endif 755 mp->next = mp0.next; 756 mp0.next = mp; 757 return mp; 758 } 759 } 760 if (mp) 761 __sym_mfree(&mp0, mp, sizeof(*mp), "MPOOL"); 762 return 0; 763 } 764 765 #ifdef MEMO_FREE_UNUSED 766 static void ___del_dma_pool(m_pool_s *p) 767 { 768 struct m_pool **pp = &mp0.next; 769 770 while (*pp && *pp != p) 771 pp = &(*pp)->next; 772 if (*pp) { 773 *pp = (*pp)->next; 774 bus_dma_tag_destroy(p->dmat); 775 __sym_mfree(&mp0, p, sizeof(*p), "MPOOL"); 776 } 777 } 778 #endif 779 780 static void *__sym_calloc_dma(bus_dma_tag_t dev_dmat, int size, char *name) 781 { 782 struct m_pool *mp; 783 void *m = 0; 784 785 /* Lock */ 786 mp = ___get_dma_pool(dev_dmat); 787 if (!mp) 788 mp = ___cre_dma_pool(dev_dmat); 789 if (mp) 790 m = __sym_calloc(mp, size, name); 791 #ifdef MEMO_FREE_UNUSED 792 if (mp && !mp->nump) 793 ___del_dma_pool(mp); 794 #endif 795 /* Unlock */ 796 797 return m; 798 } 799 800 static void 801 __sym_mfree_dma(bus_dma_tag_t dev_dmat, void *m, int size, char *name) 802 { 803 struct m_pool *mp; 804 805 /* Lock */ 806 mp = ___get_dma_pool(dev_dmat); 807 if (mp) 808 __sym_mfree(mp, m, size, name); 809 #ifdef MEMO_FREE_UNUSED 810 if (mp && !mp->nump) 811 ___del_dma_pool(mp); 812 #endif 813 /* Unlock */ 814 } 815 816 static m_addr_t __vtobus(bus_dma_tag_t dev_dmat, void *m) 817 { 818 m_pool_s *mp; 819 int hc = VTOB_HASH_CODE(m); 820 m_vtob_s *vp = 0; 821 m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK; 822 823 /* Lock */ 
824 mp = ___get_dma_pool(dev_dmat); 825 if (mp) { 826 vp = mp->vtob[hc]; 827 while (vp && (m_addr_t) vp->vaddr != a) 828 vp = vp->next; 829 } 830 /* Unlock */ 831 if (!vp) 832 panic("sym: VTOBUS FAILED!\n"); 833 return vp ? vp->baddr + (((m_addr_t) m) - a) : 0; 834 } 835 836 837 /* 838 * Verbs for DMAable memory handling. 839 * The _uvptv_ macro avoids a nasty warning about pointer to volatile 840 * being discarded. 841 */ 842 #define _uvptv_(p) ((void *)((vm_offset_t)(p))) 843 #define _sym_calloc_dma(np, s, n) __sym_calloc_dma(np->bus_dmat, s, n) 844 #define _sym_mfree_dma(np, p, s, n) \ 845 __sym_mfree_dma(np->bus_dmat, _uvptv_(p), s, n) 846 #define sym_calloc_dma(s, n) _sym_calloc_dma(np, s, n) 847 #define sym_mfree_dma(p, s, n) _sym_mfree_dma(np, p, s, n) 848 #define _vtobus(np, p) __vtobus(np->bus_dmat, _uvptv_(p)) 849 #define vtobus(p) _vtobus(np, p) 850 851 852 /* 853 * Print a buffer in hexadecimal format. 854 */ 855 static void sym_printb_hex (u_char *p, int n) 856 { 857 while (n-- > 0) 858 printf (" %x", *p++); 859 } 860 861 /* 862 * Same with a label at beginning and .\n at end. 863 */ 864 static void sym_printl_hex (char *label, u_char *p, int n) 865 { 866 printf ("%s", label); 867 sym_printb_hex (p, n); 868 printf (".\n"); 869 } 870 871 /* 872 * Return a string for SCSI BUS mode. 873 */ 874 static char *sym_scsi_bus_mode(int mode) 875 { 876 switch(mode) { 877 case SMODE_HVD: return "HVD"; 878 case SMODE_SE: return "SE"; 879 case SMODE_LVD: return "LVD"; 880 } 881 return "??"; 882 } 883 884 /* 885 * Some poor and bogus sync table that refers to Tekram NVRAM layout. 886 */ 887 #ifdef SYM_CONF_NVRAM_SUPPORT 888 static u_char Tekram_sync[16] = 889 {25,31,37,43, 50,62,75,125, 12,15,18,21, 6,7,9,10}; 890 #endif 891 892 /* 893 * Union of supported NVRAM formats. 
 */
struct sym_nvram {
	int type;		/* SYM_*_NVRAM tag below, or 0 if none found */
#define	SYM_SYMBIOS_NVRAM	(1)
#define	SYM_TEKRAM_NVRAM	(2)
#ifdef	SYM_CONF_NVRAM_SUPPORT
	union {
		Symbios_nvram Symbios;
		Tekram_nvram Tekram;
	} data;
#endif
};

/*
 * This one is hopefully useless, but actually useful. :-)
 */
#ifndef assert
#define	assert(expression) { \
	if (!(expression)) { \
		(void)panic( \
			"assertion \"%s\" failed: file \"%s\", line %d\n", \
			#expression, \
			__FILE__, __LINE__); \
	} \
}
#endif

/*
 * Some provision for a possible big endian mode supported by
 * Symbios chips (never seen, by the way).
 * For now, this stuff does not deserve any comments. :)
 */

#define sym_offb(o)	(o)
#define sym_offw(o)	(o)

/*
 * Some provision for support for BIG ENDIAN CPU.
 * Btw, FreeBSD does not seem to be ready yet for big endian.
 *
 * SCRIPTS words are little-endian; these convert between CPU
 * and SCRIPTS byte order.
 */

#if	BYTE_ORDER == BIG_ENDIAN
#define cpu_to_scr(dw)	_htole32(dw)
#define scr_to_cpu(dw)	_le32toh(dw)
#else
#define cpu_to_scr(dw)	(dw)
#define scr_to_cpu(dw)	(dw)
#endif

/*
 * Access to the chip IO registers and on-chip RAM.
 * We use the `bus space' interface under FreeBSD-4 and
 * later kernel versions.
 */


#if defined(SYM_CONF_IOMAPPED)

/* Port IO mapped chip access. */
#define INB_OFF(o)	bus_space_read_1(np->io_tag, np->io_bsh, o)
#define INW_OFF(o)	bus_space_read_2(np->io_tag, np->io_bsh, o)
#define INL_OFF(o)	bus_space_read_4(np->io_tag, np->io_bsh, o)

#define OUTB_OFF(o, v)	bus_space_write_1(np->io_tag, np->io_bsh, o, (v))
#define OUTW_OFF(o, v)	bus_space_write_2(np->io_tag, np->io_bsh, o, (v))
#define OUTL_OFF(o, v)	bus_space_write_4(np->io_tag, np->io_bsh, o, (v))

#else	/* Memory mapped IO */

#define INB_OFF(o)	bus_space_read_1(np->mmio_tag, np->mmio_bsh, o)
#define INW_OFF(o)	bus_space_read_2(np->mmio_tag, np->mmio_bsh, o)
#define INL_OFF(o)	bus_space_read_4(np->mmio_tag, np->mmio_bsh, o)

#define OUTB_OFF(o, v)	bus_space_write_1(np->mmio_tag, np->mmio_bsh, o, (v))
#define OUTW_OFF(o, v)	bus_space_write_2(np->mmio_tag, np->mmio_bsh, o, (v))
#define OUTL_OFF(o, v)	bus_space_write_4(np->mmio_tag, np->mmio_bsh, o, (v))

#endif	/* SYM_CONF_IOMAPPED */

/* Copy a byte buffer into the chip's on-board RAM. */
#define OUTRAM_OFF(o, a, l)	\
	bus_space_write_region_1(np->ram_tag, np->ram_bsh, o, (a), (l))


/*
 * Common definitions for both bus space and legacy IO methods.
 * The register name 'r' is a field of struct sym_reg.
 */
#define INB(r)		INB_OFF(offsetof(struct sym_reg,r))
#define INW(r)		INW_OFF(offsetof(struct sym_reg,r))
#define INL(r)		INL_OFF(offsetof(struct sym_reg,r))

#define OUTB(r, v)	OUTB_OFF(offsetof(struct sym_reg,r), (v))
#define OUTW(r, v)	OUTW_OFF(offsetof(struct sym_reg,r), (v))
#define OUTL(r, v)	OUTL_OFF(offsetof(struct sym_reg,r), (v))

/* Read-modify-write helpers: set/clear mask bits in a register. */
#define OUTONB(r, m)	OUTB(r, INB(r) | (m))
#define OUTOFFB(r, m)	OUTB(r, INB(r) & ~(m))
#define OUTONW(r, m)	OUTW(r, INW(r) | (m))
#define OUTOFFW(r, m)	OUTW(r, INW(r) & ~(m))
#define OUTONL(r, m)	OUTL(r, INL(r) | (m))
#define OUTOFFL(r, m)	OUTL(r, INL(r) & ~(m))

/*
 * We normally want the chip to have a consistent view
 * of driver internal data structures when we restart it.
 * Thus these macros.
 */
#define OUTL_DSP(v)				\
	do {					\
		MEMORY_BARRIER();		\
		OUTL (nc_dsp, (v));		\
	} while (0)

#define OUTONB_STD()				\
	do {					\
		MEMORY_BARRIER();		\
		OUTONB (nc_dcntl, (STD|NOCOM));	\
	} while (0)

/*
 * Command control block states.
 */
#define HS_IDLE		(0)
#define HS_BUSY		(1)
#define HS_NEGOTIATE	(2)	/* sync/wide data transfer*/
#define HS_DISCONNECT	(3)	/* Disconnected by target */
#define HS_WAIT		(4)	/* waiting for resource	  */

/* States with HS_DONEMASK set denote a completed command. */
#define HS_DONEMASK	(0x80)
#define HS_COMPLETE	(4|HS_DONEMASK)
#define HS_SEL_TIMEOUT	(5|HS_DONEMASK)	/* Selection timeout      */
#define HS_UNEXPECTED	(6|HS_DONEMASK)	/* Unexpected disconnect  */
#define HS_COMP_ERR	(7|HS_DONEMASK)	/* Completed with error	  */

/*
 * Software Interrupt Codes
 * (raised by SCRIPTS to request service from the C code).
 */
#define	SIR_BAD_SCSI_STATUS	(1)
#define	SIR_SEL_ATN_NO_MSG_OUT	(2)
#define	SIR_MSG_RECEIVED	(3)
#define	SIR_MSG_WEIRD		(4)
#define	SIR_NEGO_FAILED		(5)
#define	SIR_NEGO_PROTO		(6)
#define	SIR_SCRIPT_STOPPED	(7)
#define	SIR_REJECT_TO_SEND	(8)
#define	SIR_SWIDE_OVERRUN	(9)
#define	SIR_SODL_UNDERRUN	(10)
#define	SIR_RESEL_NO_MSG_IN	(11)
#define	SIR_RESEL_NO_IDENTIFY	(12)
#define	SIR_RESEL_BAD_LUN	(13)
#define	SIR_TARGET_SELECTED	(14)
#define	SIR_RESEL_BAD_I_T_L	(15)
#define	SIR_RESEL_BAD_I_T_L_Q	(16)
#define	SIR_ABORT_SENT		(17)
#define	SIR_RESEL_ABORTED	(18)
#define	SIR_MSG_OUT_DONE	(19)
#define	SIR_COMPLETE_ERROR	(20)
#define	SIR_DATA_OVERRUN	(21)
#define	SIR_BAD_PHASE		(22)
#define	SIR_MAX			(22)

/*
 * Extended error bit codes.
 * xerr_status field of struct sym_ccb.
 */
#define	XE_EXTRA_DATA	(1)	/* unexpected data phase	 */
#define	XE_BAD_PHASE	(1<<1)	/* illegal phase (4/5)		 */
#define	XE_PARITY_ERR	(1<<2)	/* unrecovered SCSI parity error */
#define	XE_SODL_UNRUN	(1<<3)	/* ODD transfer in DATA OUT phase */
#define	XE_SWIDE_OVRUN	(1<<4)	/* ODD transfer in DATA IN phase */

/*
 * Negotiation status.
 * nego_status field of struct sym_ccb.
 */
#define NS_SYNC		(1)
#define NS_WIDE		(2)
#define NS_PPR		(3)

/*
 * A CCB hashed table is used to retrieve CCB address
 * from DSA value.
 */
#define CCB_HASH_SHIFT		8
#define CCB_HASH_SIZE		(1UL << CCB_HASH_SHIFT)
#define CCB_HASH_MASK		(CCB_HASH_SIZE-1)
#define CCB_HASH_CODE(dsa)	(((dsa) >> 9) & CCB_HASH_MASK)

/*
 * Device flags.
 */
#define SYM_DISC_ENABLED	(1)
#define SYM_TAGS_ENABLED	(1<<1)
#define SYM_SCAN_BOOT_DISABLED	(1<<2)
#define SYM_SCAN_LUNS_DISABLED	(1<<3)

/*
 * Host adapter miscellaneous flags.
 */
#define SYM_AVOID_BUS_RESET	(1)
#define SYM_SCAN_TARGETS_HILO	(1<<1)

/*
 * Device quirks.
 * Some devices, for example the CHEETAH 2 LVD, disconnects without
 * saving the DATA POINTER then reselects and terminates the IO.
 * On reselection, the automatic RESTORE DATA POINTER makes the
 * CURRENT DATA POINTER not point at the end of the IO.
 * This behaviour just breaks our calculation of the residual.
 * For now, we just force an AUTO SAVE on disconnection and will
 * fix that in a further driver version.
 */
#define SYM_QUIRK_AUTOSAVE 1

/*
 * Misc.
 */
#define SYM_SNOOP_TIMEOUT (10000000)
#define SYM_PCI_IO	PCIR_BAR(0)
#define SYM_PCI_MMIO	PCIR_BAR(1)
#define SYM_PCI_RAM	PCIR_BAR(2)
#define SYM_PCI_RAM64	PCIR_BAR(3)

/*
 * Back-pointer from the CAM CCB to our data structures.
 */
#define sym_hcb_ptr	spriv_ptr0
/* #define sym_ccb_ptr	spriv_ptr1 */

/*
 * We mostly have to deal with pointers.
 * Thus these typedef's.
 */
typedef struct sym_tcb *tcb_p;
typedef struct sym_lcb *lcb_p;
typedef struct sym_ccb *ccb_p;
typedef struct sym_hcb *hcb_p;

/*
 * Gather negotiable parameters value
 */
struct sym_trans {
#ifdef	FreeBSD_New_Tran_Settings
	u8 scsi_version;
	u8 spi_version;
#endif
	u8 period;
	u8 offset;
	u8 width;
	u8 options;	/* PPR options */
};

/*
 * Per-target transfer settings: the negotiated (current), wanted
 * (goal) and user-configured (user) parameter sets.
 */
struct sym_tinfo {
	struct sym_trans current;
	struct sym_trans goal;
	struct sym_trans user;
};

#define BUS_8_BIT	MSG_EXT_WDTR_BUS_8_BIT
#define BUS_16_BIT	MSG_EXT_WDTR_BUS_16_BIT

/*
 * Global TCB HEADER.
 *
 * Due to lack of indirect addressing on earlier NCR chips,
 * this substructure is copied from the TCB to a global
 * address after selection.
 * For SYMBIOS chips that support LOAD/STORE this copy is
 * not needed and thus not performed.
 */
struct sym_tcbh {
	/*
	 * Scripts bus addresses of LUN table accessed from scripts.
	 * LUN #0 is a special case, since multi-lun devices are rare,
	 * and we want to speed-up the general case and not waste
	 * resources.
	 */
	u32	luntbl_sa;	/* bus address of this table	*/
	u32	lun0_sa;	/* bus address of LCB #0	*/

	/*
	 * Actual SYNC/WIDE IO registers value for this target.
	 * 'sval', 'wval' and 'uval' are read from SCRIPTS and
	 * so have alignment constraints.
	 */
/*0*/	u_char	uval;		/* -> SCNTL4 register		*/
/*1*/	u_char	sval;		/* -> SXFER  io register	*/
/*2*/	u_char	filler1;
/*3*/	u_char	wval;		/* -> SCNTL3 io register	*/
};

/*
 * Target Control Block
 */
struct sym_tcb {
	/*
	 * TCB header.
	 * Assumed at offset 0.
	 */
/*0*/	struct sym_tcbh head;

	/*
	 * LUN table used by the SCRIPTS processor.
	 * An array of bus addresses is used on reselection.
	 */
	u32	*luntbl;	/* LCBs bus address table	*/

	/*
	 * LUN table used by the C code.
	 */
	lcb_p	lun0p;		/* LCB of LUN #0 (usual case)	*/
#if SYM_CONF_MAX_LUN > 1
	lcb_p	*lunmp;		/* Other LCBs [1..MAX_LUN]	*/
#endif

	/*
	 * Bitmap that tells about LUNs that succeeded at least
	 * 1 IO and therefore assumed to be a real device.
	 * Avoid useless allocation of the LCB structure.
	 */
	u32	lun_map[(SYM_CONF_MAX_LUN+31)/32];

	/*
	 * Bitmap that tells about LUNs that haven't yet an LCB
	 * allocated (not discovered or LCB allocation failed).
	 */
	u32	busy0_map[(SYM_CONF_MAX_LUN+31)/32];

	/*
	 * Transfer capabilities (SIP)
	 */
	struct sym_tinfo tinfo;

	/*
	 * Keep track of the CCB used for the negotiation in order
	 * to ensure that only 1 negotiation is queued at a time.
	 */
	ccb_p   nego_cp;	/* CCB used for the nego	*/

	/*
	 * Set when we want to reset the device.
	 */
	u_char	to_reset;

	/*
	 * Other user settable limits and options.
	 * These limits are read from the NVRAM if present.
	 */
	u_char	usrflags;
	u_short	usrtags;
};

/*
 * Global LCB HEADER.
 *
 * Due to lack of indirect addressing on earlier NCR chips,
 * this substructure is copied from the LCB to a global
 * address after selection.
 * For SYMBIOS chips that support LOAD/STORE this copy is
 * not needed and thus not performed.
 */
struct sym_lcbh {
	/*
	 * SCRIPTS address jumped by SCRIPTS on reselection.
	 * For not probed logical units, this address points to
	 * SCRIPTS that deal with bad LU handling (must be at
	 * offset zero of the LCB for that reason).
	 */
/*0*/	u32	resel_sa;

	/*
	 * Task (bus address of a CCB) read from SCRIPTS that points
	 * to the unique ITL nexus allowed to be disconnected.
	 */
	u32	itl_task_sa;

	/*
	 * Task table bus address (read from SCRIPTS).
	 */
	u32	itlq_tbl_sa;
};

/*
 * Logical Unit Control Block
 */
struct sym_lcb {
	/*
	 * TCB header.
	 * Assumed at offset 0.
	 */
/*0*/	struct sym_lcbh head;

	/*
	 * Task table read from SCRIPTS that contains pointers to
	 * ITLQ nexuses. The bus address read from SCRIPTS is
	 * inside the header.
	 */
	u32	*itlq_tbl;	/* Kernel virtual address	*/

	/*
	 * Busy CCBs management.
	 */
	u_short	busy_itlq;	/* Number of busy tagged CCBs	*/
	u_short	busy_itl;	/* Number of busy untagged CCBs	*/

	/*
	 * Circular tag allocation buffer.
	 */
	u_short	ia_tag;		/* Tag allocation index		*/
	u_short	if_tag;		/* Tag release index		*/
	u_char	*cb_tags;	/* Circular tags buffer		*/

	/*
	 * Set when we want to clear all tasks.
	 */
	u_char to_clear;

	/*
	 * Capabilities.
	 */
	u_char	user_flags;
	u_char	current_flags;
};

/*
 * Action from SCRIPTS on a task.
 * Is part of the CCB, but is also used separately to plug
 * error handling action to perform from SCRIPTS.
 */
struct sym_actscr {
	u32	start;		/* Jumped by SCRIPTS after selection	*/
	u32	restart;	/* Jumped by SCRIPTS on reselection	*/
};

/*
 * Phase mismatch context.
 *
 * It is part of the CCB and is used as parameters for the
 * DATA pointer. We need two contexts to handle correctly the
 * SAVED DATA POINTER.
 */
struct sym_pmc {
	struct	sym_tblmove sg;	/* Updated interrupted SG block	*/
	u32	ret;		/* SCRIPT return address	*/
};

/*
 * LUN control block lookup.
 * We use a direct pointer for LUN #0, and a table of
 * pointers which is only allocated for devices that support
 * LUN(s) > 0.
 */
#if SYM_CONF_MAX_LUN <= 1
#define	sym_lp(np, tp, lun) (!lun) ? (tp)->lun0p : 0
#else
#define	sym_lp(np, tp, lun) \
	(!lun) ? (tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[(lun)] : 0
#endif

/*
 * Status are used by the host and the script processor.
 *
 * The last four bytes (status[4]) are copied to the
 * scratchb register (declared as scr0..scr3) just after the
 * select/reselect, and copied back just after disconnecting.
 * Inside the script the XX_REG are used.
 */

/*
 * Last four bytes (script)
 */
#define  QU_REG	scr0
#define  HS_REG	scr1
#define  HS_PRT	nc_scr1
#define  SS_REG	scr2
#define  SS_PRT	nc_scr2
#define  HF_REG	scr3
#define  HF_PRT	nc_scr3

/*
 * Last four bytes (host)
 */
#define  actualquirks  phys.head.status[0]
#define  host_status   phys.head.status[1]
#define  ssss_status   phys.head.status[2]
#define  host_flags    phys.head.status[3]

/*
 * Host flags
 */
#define HF_IN_PM0	1u
#define HF_IN_PM1	(1u<<1)
#define HF_ACT_PM	(1u<<2)
#define HF_DP_SAVED	(1u<<3)
#define HF_SENSE	(1u<<4)
#define HF_EXT_ERR	(1u<<5)
#define HF_DATA_IN	(1u<<6)
#ifdef SYM_CONF_IARB_SUPPORT
#define HF_HINT_IARB	(1u<<7)
#endif

/*
 * Global CCB HEADER.
 *
 * Due to lack of indirect addressing on earlier NCR chips,
 * this substructure is copied from the ccb to a global
 * address after selection (or reselection) and copied back
 * before disconnect.
 * For SYMBIOS chips that support LOAD/STORE this copy is
 * not needed and thus not performed.
 */
struct sym_ccbh {
	/*
	 * Start and restart SCRIPTS addresses (must be at 0).
	 */
/*0*/	struct sym_actscr go;

	/*
	 * SCRIPTS jump address that deal with data pointers.
	 * 'savep' points to the position in the script responsible
	 * for the actual transfer of data.
	 * It's written on reception of a SAVE_DATA_POINTER message.
	 */
	u32	savep;		/* Jump address to saved data pointer	*/
	u32	lastp;		/* SCRIPTS address at end of data	*/
	u32	goalp;		/* Not accessed for now from SCRIPTS	*/

	/*
	 * Status fields.
	 */
	u8	status[4];
};

/*
 * Data Structure Block
 *
 * During execution of a ccb by the script processor, the
 * DSA (data structure address) register points to this
 * substructure of the ccb.
 */
struct sym_dsb {
	/*
	 * CCB header.
	 * Also assumed at offset 0 of the sym_ccb structure.
	 */
/*0*/	struct sym_ccbh head;

	/*
	 * Phase mismatch contexts.
	 * We need two to handle correctly the SAVED DATA POINTER.
	 * MUST BOTH BE AT OFFSET < 256, due to using 8 bit arithmetic
	 * for address calculation from SCRIPTS.
	 */
	struct sym_pmc pm0;
	struct sym_pmc pm1;

	/*
	 * Table data for Script
	 */
	struct sym_tblsel  select;
	struct sym_tblmove smsg;
	struct sym_tblmove smsg_ext;
	struct sym_tblmove cmd;
	struct sym_tblmove sense;
	struct sym_tblmove wresid;
	struct sym_tblmove data [SYM_CONF_MAX_SG];
};

/*
 * Our Command Control Block
 */
struct sym_ccb {
	/*
	 * This is the data structure which is pointed by the DSA
	 * register when it is executed by the script processor.
	 * It must be the first entry.
	 */
	struct sym_dsb phys;

	/*
	 * Pointer to CAM ccb and related stuff.
	 */
	union ccb *cam_ccb;	/* CAM scsiio ccb		*/
	u8	cdb_buf[16];	/* Copy of CDB			*/
	u8	*sns_bbuf;	/* Bounce buffer for sense data	*/
#define SYM_SNS_BBUF_LEN sizeof(struct scsi_sense_data)
	int	data_len;	/* Total data length		*/
	int	segments;	/* Number of SG segments	*/

	/*
	 * Miscellaneous status'.
	 */
	u_char	nego_status;	/* Negotiation status		*/
	u_char	xerr_status;	/* Extended error flags		*/
	u32	extra_bytes;	/* Extraneous bytes transferred	*/

	/*
	 * Message areas.
	 * We prepare a message to be sent after selection.
	 * We may use a second one if the command is rescheduled
	 * due to CHECK_CONDITION or COMMAND TERMINATED.
	 * Contents are IDENTIFY and SIMPLE_TAG.
	 * While negotiating sync or wide transfer,
	 * a SDTR or WDTR message is appended.
	 */
	u_char	scsi_smsg [12];
	u_char	scsi_smsg2[12];

	/*
	 * Auto request sense related fields.
	 */
	u_char	sensecmd[6];	/* Request Sense command	*/
	u_char	sv_scsi_status;	/* Saved SCSI status 		*/
	u_char	sv_xerr_status;	/* Saved extended status	*/
	int	sv_resid;	/* Saved residual		*/

	/*
	 * Map for the DMA of user data.
	 */
	void	*arg;		/* Argument for some callback	*/
	bus_dmamap_t dmamap;	/* DMA map for user data	*/
	u_char	dmamapped;
#define SYM_DMA_NONE	0
#define SYM_DMA_READ	1
#define SYM_DMA_WRITE	2

	/*
	 * Other fields.
	 */
	u32	ccb_ba;		/* BUS address of this CCB	*/
	u_short	tag;		/* Tag for this transfer	*/
				/*       NO_TAG means no tag	*/
	u_char	target;
	u_char	lun;
	ccb_p	link_ccbh;	/* Host adapter CCB hash chain	*/
	SYM_QUEHEAD
		link_ccbq;	/* Link to free/busy CCB queue	*/
	u32	startp;		/* Initial data pointer		*/
	int	ext_sg;		/* Extreme data pointer, used	*/
	int	ext_ofs;	/* to calculate the residual.	*/
	u_char	to_abort;	/* Want this IO to be aborted	*/
};

#define CCB_BA(cp,lbl)	(cp->ccb_ba + offsetof(struct sym_ccb, lbl))

/*
 * Host Control Block
 */
struct sym_hcb {
	/*
	 * Global headers.
	 * Due to poorness of addressing capabilities, earlier
	 * chips (810, 815, 825) copy part of the data structures
	 * (CCB, TCB and LCB) in fixed areas.
	 */
#ifdef	SYM_CONF_GENERIC_SUPPORT
	struct sym_ccbh	ccb_head;
	struct sym_tcbh	tcb_head;
	struct sym_lcbh	lcb_head;
#endif
	/*
	 * Idle task and invalid task actions and
	 * their bus addresses.
	 */
	struct sym_actscr idletask, notask, bad_itl, bad_itlq;
	vm_offset_t idletask_ba, notask_ba, bad_itl_ba, bad_itlq_ba;

	/*
	 * Dummy lun table to protect us against target
	 * returning bad lun number on reselection.
	 */
	u32	*badluntbl;	/* Table physical address	*/
	u32	badlun_sa;	/* SCRIPT handler BUS address	*/

	/*
	 * Bus address of this host control block.
	 */
	u32	hcb_ba;

	/*
	 * Bit 32-63 of the on-chip RAM bus address in LE format.
	 * The START_RAM64 script loads the MMRS and MMWS from this
	 * field.
	 */
	u32	scr_ram_seg;

	/*
	 * Chip and controller identification.
	 */
	device_t device;
	int	unit;
	char	inst_name[8];

	/*
	 * Initial value of some IO register bits.
	 * These values are assumed to have been set by BIOS, and may
	 * be used to probe adapter implementation differences.
	 */
	u_char	sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest3, sv_ctest4,
		sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4, sv_scntl4,
		sv_stest1;

	/*
	 * Actual initial value of IO register bits used by the
	 * driver. They are loaded at initialisation according to
	 * features that are to be enabled/disabled.
	 */
	u_char	rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest3, rv_ctest4,
		rv_ctest5, rv_stest2, rv_ccntl0, rv_ccntl1, rv_scntl4;

	/*
	 * Target data.
	 */
	struct sym_tcb	target[SYM_CONF_MAX_TARGET];

	/*
	 * Target control block bus address array used by the SCRIPT
	 * on reselection.
	 */
	u32		*targtbl;
	u32		targtbl_ba;

	/*
	 * CAM SIM information for this instance.
	 */
	struct cam_sim	*sim;
	struct cam_path	*path;

	/*
	 * Allocated hardware resources.
	 */
	struct resource	*irq_res;
	struct resource	*io_res;
	struct resource	*mmio_res;
	struct resource	*ram_res;
	int		ram_id;
	void		*intr;

	/*
	 * Bus stuff.
	 *
	 * My understanding of PCI is that all agents must share the
	 * same addressing range and model.
	 * But some hardware architecture guys provide complex and
	 * brain-dead designs that break this assumption.
	 * This driver only supports PCI compliant implementations and
	 * deals with part of the BUS stuff complexity only to fit O/S
	 * requirements.
	 */
	bus_space_handle_t	io_bsh;
	bus_space_tag_t		io_tag;
	bus_space_handle_t	mmio_bsh;
	bus_space_tag_t		mmio_tag;
	bus_space_handle_t	ram_bsh;
	bus_space_tag_t		ram_tag;

	/*
	 * DMA stuff.
	 */
	bus_dma_tag_t	bus_dmat;	/* DMA tag from parent BUS	*/
	bus_dma_tag_t	data_dmat;	/* DMA tag for user data	*/

	/*
	 * Virtual and physical bus addresses of the chip.
	 */
	vm_offset_t	mmio_va;	/* MMIO kernel virtual address	*/
	vm_offset_t	mmio_pa;	/* MMIO CPU physical address	*/
	vm_offset_t	mmio_ba;	/* MMIO BUS address		*/
	int		mmio_ws;	/* MMIO Window size		*/

	vm_offset_t	ram_va;		/* RAM kernel virtual address	*/
	vm_offset_t	ram_pa;		/* RAM CPU physical address	*/
	vm_offset_t	ram_ba;		/* RAM BUS address		*/
	int		ram_ws;		/* RAM window size		*/
	u32		io_port;	/* IO port address		*/

	/*
	 * SCRIPTS virtual and physical bus addresses.
	 * 'script'  is loaded in the on-chip RAM if present.
	 * 'scripth' stays in main memory for all chips except the
	 * 53C895A, 53C896 and 53C1010 that provide 8K on-chip RAM.
	 */
	u_char		*scripta0;	/* Copies of script and scripth	*/
	u_char		*scriptb0;	/* Copies of script and scripth	*/
	vm_offset_t	scripta_ba;	/* Actual script and scripth	*/
	vm_offset_t	scriptb_ba;	/*  bus addresses.		*/
	vm_offset_t	scriptb0_ba;
	u_short		scripta_sz;	/* Actual size of script A	*/
	u_short		scriptb_sz;	/* Actual size of script B	*/

	/*
	 * Bus addresses, setup and patch methods for
	 * the selected firmware.
	 */
	struct sym_fwa_ba fwa_bas;	/* Useful SCRIPTA bus addresses	*/
	struct sym_fwb_ba fwb_bas;	/* Useful SCRIPTB bus addresses	*/
	void		(*fw_setup)(hcb_p np, struct sym_fw *fw);
	void		(*fw_patch)(hcb_p np);
	char		*fw_name;

	/*
	 * General controller parameters and configuration.
	 */
	u_short	device_id;	/* PCI device id		*/
	u_char	revision_id;	/* PCI device revision id	*/
	u_int	features;	/* Chip features map		*/
	u_char	myaddr;		/* SCSI id of the adapter	*/
	u_char	maxburst;	/* log base 2 of dwords burst	*/
	u_char	maxwide;	/* Maximum transfer width	*/
	u_char	minsync;	/* Min sync period factor (ST)	*/
	u_char	maxsync;	/* Max sync period factor (ST)	*/
	u_char	maxoffs;	/* Max scsi offset        (ST)	*/
	u_char	minsync_dt;	/* Min sync period factor (DT)	*/
	u_char	maxsync_dt;	/* Max sync period factor (DT)	*/
	u_char	maxoffs_dt;	/* Max scsi offset        (DT)	*/
	u_char	multiplier;	/* Clock multiplier (1,2,4)	*/
	u_char	clock_divn;	/* Number of clock divisors	*/
	u32	clock_khz;	/* SCSI clock frequency in KHz	*/
	u32	pciclk_khz;	/* Estimated PCI clock  in KHz	*/

	/*
	 * Start queue management.
	 * It is filled up by the host processor and accessed by the
	 * SCRIPTS processor in order to start SCSI commands.
	 */
	volatile		/* Prevent code optimizations	*/
	u32	*squeue;	/* Start queue virtual address	*/
	u32	squeue_ba;	/* Start queue BUS address	*/
	u_short	squeueput;	/* Next free slot of the queue	*/
	u_short	actccbs;	/* Number of allocated CCBs	*/

	/*
	 * Command completion queue.
	 * It is the same size as the start queue to avoid overflow.
	 */
	u_short	dqueueget;	/* Next position to scan	*/
	volatile		/* Prevent code optimizations	*/
	u32	*dqueue;	/* Completion (done) queue	*/
	u32	dqueue_ba;	/* Done queue BUS address	*/

	/*
	 * Miscellaneous buffers accessed by the scripts-processor.
	 * They shall be DWORD aligned, because they may be read or
	 * written with a script command.
	 */
	u_char	msgout[8];	/* Buffer for MESSAGE OUT 	*/
	u_char	msgin [8];	/* Buffer for MESSAGE IN	*/
	u32	lastmsg;	/* Last SCSI message sent	*/
	u_char	scratch;	/* Scratch for SCSI receive	*/

	/*
	 * Miscellaneous configuration and status parameters.
	 */
	u_char	usrflags;	/* Miscellaneous user flags	*/
	u_char	scsi_mode;	/* Current SCSI BUS mode	*/
	u_char	verbose;	/* Verbosity for this controller*/
	u32	cache;		/* Used for cache test at init.	*/

	/*
	 * CCB lists and queue.
	 */
	ccb_p ccbh[CCB_HASH_SIZE];	/* CCB hashed by DSA value	*/
	SYM_QUEHEAD	free_ccbq;	/* Queue of available CCBs	*/
	SYM_QUEHEAD	busy_ccbq;	/* Queue of busy CCBs		*/

	/*
	 * During error handling and/or recovery,
	 * active CCBs that are to be completed with
	 * error or requeued are moved from the busy_ccbq
	 * to the comp_ccbq prior to completion.
	 */
	SYM_QUEHEAD	comp_ccbq;

	/*
	 * CAM CCB pending queue.
	 */
	SYM_QUEHEAD	cam_ccbq;

	/*
	 * IMMEDIATE ARBITRATION (IARB) control.
	 *
	 * We keep track in 'last_cp' of the last CCB that has been
	 * queued to the SCRIPTS processor and clear 'last_cp' when
	 * this CCB completes. If last_cp is not zero at the moment
	 * we queue a new CCB, we set a flag in 'last_cp' that is
	 * used by the SCRIPTS as a hint for setting IARB.
	 * We do not set more than 'iarb_max' consecutive hints for
	 * IARB in order to leave devices a chance to reselect.
	 * By the way, any non zero value of 'iarb_max' is unfair. :)
	 */
#ifdef SYM_CONF_IARB_SUPPORT
	u_short		iarb_max;	/* Max. # consecutive IARB hints*/
	u_short		iarb_count;	/* Actual # of these hints	*/
	ccb_p		last_cp;
#endif

	/*
	 * Command abort handling.
	 * We need to synchronize tightly with the SCRIPTS
	 * processor in order to handle things correctly.
	 */
	u_char		abrt_msg[4];	/* Message to send buffer	*/
	struct sym_tblmove abrt_tbl;	/* Table for the MOV of it 	*/
	struct sym_tblsel  abrt_sel;	/* Sync params for selection	*/
	u_char		istat_sem;	/* Tells the chip to stop (SEM)	*/
};

#define HCB_BA(np, lbl)	 (np->hcb_ba + offsetof(struct sym_hcb, lbl))

/*
 * Return the name of the controller.
 */
static __inline char *sym_name(hcb_p np)
{
	return np->inst_name;
}

/*--------------------------------------------------------------------------*/
/*------------------------------ FIRMWARES ---------------------------------*/
/*--------------------------------------------------------------------------*/

/*
 * This stuff will be moved to a separate source file when
 * the driver will be broken into several source modules.
 */

/*
 * Macros used for all firmwares.
 */
#define	SYM_GEN_A(s, label)	((short) offsetof(s, label)),
#define	SYM_GEN_B(s, label)	((short) offsetof(s, label)),
#define	PADDR_A(label)		SYM_GEN_PADDR_A(struct SYM_FWA_SCR, label)
#define	PADDR_B(label)		SYM_GEN_PADDR_B(struct SYM_FWB_SCR, label)


#ifdef	SYM_CONF_GENERIC_SUPPORT
/*
 * Allocate firmware #1 script area.
 */
#define	SYM_FWA_SCR		sym_fw1a_scr
#define	SYM_FWB_SCR		sym_fw1b_scr
#include <dev/sym/sym_fw1.h>
struct sym_fwa_ofs sym_fw1a_ofs = {
	SYM_GEN_FW_A(struct SYM_FWA_SCR)
};
struct sym_fwb_ofs sym_fw1b_ofs = {
	SYM_GEN_FW_B(struct SYM_FWB_SCR)
};
#undef	SYM_FWA_SCR
#undef	SYM_FWB_SCR
#endif	/* SYM_CONF_GENERIC_SUPPORT */

/*
 * Allocate firmware #2 script area.
 */
#define	SYM_FWA_SCR		sym_fw2a_scr
#define	SYM_FWB_SCR		sym_fw2b_scr
#include <dev/sym/sym_fw2.h>
struct sym_fwa_ofs sym_fw2a_ofs = {
	SYM_GEN_FW_A(struct SYM_FWA_SCR)
};
struct sym_fwb_ofs sym_fw2b_ofs = {
	SYM_GEN_FW_B(struct SYM_FWB_SCR)
	SYM_GEN_B(struct SYM_FWB_SCR, start64)
	SYM_GEN_B(struct SYM_FWB_SCR, pm_handle)
};
#undef	SYM_FWA_SCR
#undef	SYM_FWB_SCR

#undef	SYM_GEN_A
#undef	SYM_GEN_B
#undef	PADDR_A
#undef	PADDR_B

#ifdef	SYM_CONF_GENERIC_SUPPORT
/*
 * Patch routine for firmware #1.
 *
 * Adjusts the generic (non-LOAD/STORE) SCRIPTS image in host
 * memory to match the chip features and driver configuration
 * before it is bound and downloaded.
 */
static void
sym_fw1_patch(hcb_p np)
{
	struct sym_fw1a_scr *scripta0;
	struct sym_fw1b_scr *scriptb0;

	scripta0 = (struct sym_fw1a_scr *) np->scripta0;
	scriptb0 = (struct sym_fw1b_scr *) np->scriptb0;

	/*
	 * Remove LED support if not needed.
	 */
	if (!(np->features & FE_LED0)) {
		scripta0->idle[0]	= cpu_to_scr(SCR_NO_OP);
		scripta0->reselected[0]	= cpu_to_scr(SCR_NO_OP);
		scripta0->start[0]	= cpu_to_scr(SCR_NO_OP);
	}

#ifdef SYM_CONF_IARB_SUPPORT
	/*
	 * If user does not want to use IMMEDIATE ARBITRATION
	 * when we are reselected while attempting to arbitrate,
	 * patch the SCRIPTS accordingly with a SCRIPT NO_OP.
	 */
	if (!SYM_CONF_SET_IARB_ON_ARB_LOST)
		scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP);
#endif
	/*
	 * Patch some data in SCRIPTS.
	 * - start and done queue initial bus address.
	 * - target bus address table bus address.
	 */
	scriptb0->startpos[0]	= cpu_to_scr(np->squeue_ba);
	scriptb0->done_pos[0]	= cpu_to_scr(np->dqueue_ba);
	scriptb0->targtbl[0]	= cpu_to_scr(np->targtbl_ba);
}
#endif	/* SYM_CONF_GENERIC_SUPPORT */

/*
 * Patch routine for firmware #2.
 */
static void
sym_fw2_patch(hcb_p np)
{
	struct sym_fw2a_scr *scripta0;
	struct sym_fw2b_scr *scriptb0;

	scripta0 = (struct sym_fw2a_scr *) np->scripta0;
	scriptb0 = (struct sym_fw2b_scr *) np->scriptb0;

	/*
	 * Remove LED support if not needed.
	 */
	if (!(np->features & FE_LED0)) {
		scripta0->idle[0]	= cpu_to_scr(SCR_NO_OP);
		scripta0->reselected[0]	= cpu_to_scr(SCR_NO_OP);
		scripta0->start[0]	= cpu_to_scr(SCR_NO_OP);
	}

#ifdef SYM_CONF_IARB_SUPPORT
	/*
	 * If user does not want to use IMMEDIATE ARBITRATION
	 * when we are reselected while attempting to arbitrate,
	 * patch the SCRIPTS accordingly with a SCRIPT NO_OP.
	 */
	if (!SYM_CONF_SET_IARB_ON_ARB_LOST)
		scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP);
#endif
	/*
	 * Patch some variable in SCRIPTS.
	 * - start and done queue initial bus address.
	 * - target bus address table bus address.
	 */
	scriptb0->startpos[0]	= cpu_to_scr(np->squeue_ba);
	scriptb0->done_pos[0]	= cpu_to_scr(np->dqueue_ba);
	scriptb0->targtbl[0]	= cpu_to_scr(np->targtbl_ba);

	/*
	 * Remove the load of SCNTL4 on reselection if not a C10.
	 */
	if (!(np->features & FE_C10)) {
		scripta0->resel_scntl4[0] = cpu_to_scr(SCR_NO_OP);
		scripta0->resel_scntl4[1] = cpu_to_scr(0);
	}

	/*
	 * Remove a couple of work-arounds specific to C1010 if
	 * they are not desirable. See `sym_fw2.h' for more details.
	 */
	if (!(np->device_id == PCI_ID_LSI53C1010_2 &&
	      np->revision_id < 0x1 &&
	      np->pciclk_khz < 60000)) {
		scripta0->datao_phase[0] = cpu_to_scr(SCR_NO_OP);
		scripta0->datao_phase[1] = cpu_to_scr(0);
	}
	if (!(np->device_id == PCI_ID_LSI53C1010 &&
	      /* np->revision_id < 0xff */ 1)) {
		scripta0->sel_done[0] = cpu_to_scr(SCR_NO_OP);
		scripta0->sel_done[1] = cpu_to_scr(0);
	}

	/*
	 * Patch some other variables in SCRIPTS.
	 * These ones are loaded by the SCRIPTS processor.
	 */
	scriptb0->pm0_data_addr[0] =
		cpu_to_scr(np->scripta_ba +
			   offsetof(struct sym_fw2a_scr, pm0_data));
	scriptb0->pm1_data_addr[0] =
		cpu_to_scr(np->scripta_ba +
			   offsetof(struct sym_fw2a_scr, pm1_data));
}

/*
 * Fill the data area in scripts.
 * To be done for all firmwares.
 *
 * Writes one CHMOV table-indirect instruction pair per SG slot
 * into the DATA IN ('in') and DATA OUT ('out') script areas.
 */
static void
sym_fw_fill_data (u32 *in, u32 *out)
{
	int	i;

	for (i = 0; i < SYM_CONF_MAX_SG; i++) {
		*in++  = SCR_CHMOV_TBL ^ SCR_DATA_IN;
		*in++  = offsetof (struct sym_dsb, data[i]);
		*out++ = SCR_CHMOV_TBL ^ SCR_DATA_OUT;
		*out++ = offsetof (struct sym_dsb, data[i]);
	}
}

/*
 * Setup useful script bus addresses.
 * To be done for all firmwares.
 *
 * The fwa_bas/fwb_bas structures are treated as flat u32 arrays
 * that parallel the u_short offset tables built with SYM_GEN_FW_A/B,
 * so each bus address is simply script base + offset.
 */
static void
sym_fw_setup_bus_addresses(hcb_p np, struct sym_fw *fw)
{
	u32 *pa;
	u_short *po;
	int i;

	/*
	 * Build the bus address table for script A
	 * from the script A offset table.
	 */
	po = (u_short *) fw->a_ofs;
	pa = (u32 *) &np->fwa_bas;
	for (i = 0 ; i < sizeof(np->fwa_bas)/sizeof(u32) ; i++)
		pa[i] = np->scripta_ba + po[i];

	/*
	 * Same for script B.
	 */
	po = (u_short *) fw->b_ofs;
	pa = (u32 *) &np->fwb_bas;
	for (i = 0 ; i < sizeof(np->fwb_bas)/sizeof(u32) ; i++)
		pa[i] = np->scriptb_ba + po[i];
}

#ifdef	SYM_CONF_GENERIC_SUPPORT
/*
 * Setup routine for firmware #1.
 */
static void
sym_fw1_setup(hcb_p np, struct sym_fw *fw)
{
	struct sym_fw1a_scr *scripta0;
	struct sym_fw1b_scr *scriptb0;

	scripta0 = (struct sym_fw1a_scr *) np->scripta0;
	scriptb0 = (struct sym_fw1b_scr *) np->scriptb0;

	/*
	 * Fill variable parts in scripts.
	 */
	sym_fw_fill_data(scripta0->data_in, scripta0->data_out);

	/*
	 * Setup bus addresses used from the C code..
	 */
	sym_fw_setup_bus_addresses(np, fw);
}
#endif	/* SYM_CONF_GENERIC_SUPPORT */

/*
 * Setup routine for firmware #2.
 */
static void
sym_fw2_setup(hcb_p np, struct sym_fw *fw)
{
	struct sym_fw2a_scr *scripta0;
	struct sym_fw2b_scr *scriptb0;

	scripta0 = (struct sym_fw2a_scr *) np->scripta0;
	scriptb0 = (struct sym_fw2b_scr *) np->scriptb0;

	/*
	 * Fill variable parts in scripts.
	 */
	sym_fw_fill_data(scripta0->data_in, scripta0->data_out);

	/*
	 * Setup bus addresses used from the C code..
	 */
	sym_fw_setup_bus_addresses(np, fw);
}

/*
 * Allocate firmware descriptors.
 */
#ifdef	SYM_CONF_GENERIC_SUPPORT
static struct sym_fw sym_fw1 = SYM_FW_ENTRY(sym_fw1, "NCR-generic");
#endif	/* SYM_CONF_GENERIC_SUPPORT */
static struct sym_fw sym_fw2 = SYM_FW_ENTRY(sym_fw2, "LOAD/STORE-based");

/*
 * Find the most appropriate firmware for a chip.
2084 */ 2085 static struct sym_fw * 2086 sym_find_firmware(struct sym_pci_chip *chip) 2087 { 2088 if (chip->features & FE_LDSTR) 2089 return &sym_fw2; 2090 #ifdef SYM_CONF_GENERIC_SUPPORT 2091 else if (!(chip->features & (FE_PFEN|FE_NOPM|FE_DAC))) 2092 return &sym_fw1; 2093 #endif 2094 else 2095 return 0; 2096 } 2097 2098 /* 2099 * Bind a script to physical addresses. 2100 */ 2101 static void sym_fw_bind_script (hcb_p np, u32 *start, int len) 2102 { 2103 u32 opcode, new, old, tmp1, tmp2; 2104 u32 *end, *cur; 2105 int relocs; 2106 2107 cur = start; 2108 end = start + len/4; 2109 2110 while (cur < end) { 2111 2112 opcode = *cur; 2113 2114 /* 2115 * If we forget to change the length 2116 * in scripts, a field will be 2117 * padded with 0. This is an illegal 2118 * command. 2119 */ 2120 if (opcode == 0) { 2121 printf ("%s: ERROR0 IN SCRIPT at %d.\n", 2122 sym_name(np), (int) (cur-start)); 2123 MDELAY (10000); 2124 ++cur; 2125 continue; 2126 }; 2127 2128 /* 2129 * We use the bogus value 0xf00ff00f ;-) 2130 * to reserve data area in SCRIPTS. 2131 */ 2132 if (opcode == SCR_DATA_ZERO) { 2133 *cur++ = 0; 2134 continue; 2135 } 2136 2137 if (DEBUG_FLAGS & DEBUG_SCRIPT) 2138 printf ("%d: <%x>\n", (int) (cur-start), 2139 (unsigned)opcode); 2140 2141 /* 2142 * We don't have to decode ALL commands 2143 */ 2144 switch (opcode >> 28) { 2145 case 0xf: 2146 /* 2147 * LOAD / STORE DSA relative, don't relocate. 2148 */ 2149 relocs = 0; 2150 break; 2151 case 0xe: 2152 /* 2153 * LOAD / STORE absolute. 2154 */ 2155 relocs = 1; 2156 break; 2157 case 0xc: 2158 /* 2159 * COPY has TWO arguments. 2160 */ 2161 relocs = 2; 2162 tmp1 = cur[1]; 2163 tmp2 = cur[2]; 2164 if ((tmp1 ^ tmp2) & 3) { 2165 printf ("%s: ERROR1 IN SCRIPT at %d.\n", 2166 sym_name(np), (int) (cur-start)); 2167 MDELAY (10000); 2168 } 2169 /* 2170 * If PREFETCH feature not enabled, remove 2171 * the NO FLUSH bit if present. 
2172 */ 2173 if ((opcode & SCR_NO_FLUSH) && 2174 !(np->features & FE_PFEN)) { 2175 opcode = (opcode & ~SCR_NO_FLUSH); 2176 } 2177 break; 2178 case 0x0: 2179 /* 2180 * MOVE/CHMOV (absolute address) 2181 */ 2182 if (!(np->features & FE_WIDE)) 2183 opcode = (opcode | OPC_MOVE); 2184 relocs = 1; 2185 break; 2186 case 0x1: 2187 /* 2188 * MOVE/CHMOV (table indirect) 2189 */ 2190 if (!(np->features & FE_WIDE)) 2191 opcode = (opcode | OPC_MOVE); 2192 relocs = 0; 2193 break; 2194 case 0x8: 2195 /* 2196 * JUMP / CALL 2197 * dont't relocate if relative :-) 2198 */ 2199 if (opcode & 0x00800000) 2200 relocs = 0; 2201 else if ((opcode & 0xf8400000) == 0x80400000)/*JUMP64*/ 2202 relocs = 2; 2203 else 2204 relocs = 1; 2205 break; 2206 case 0x4: 2207 case 0x5: 2208 case 0x6: 2209 case 0x7: 2210 relocs = 1; 2211 break; 2212 default: 2213 relocs = 0; 2214 break; 2215 }; 2216 2217 /* 2218 * Scriptify:) the opcode. 2219 */ 2220 *cur++ = cpu_to_scr(opcode); 2221 2222 /* 2223 * If no relocation, assume 1 argument 2224 * and just scriptize:) it. 2225 */ 2226 if (!relocs) { 2227 *cur = cpu_to_scr(*cur); 2228 ++cur; 2229 continue; 2230 } 2231 2232 /* 2233 * Otherwise performs all needed relocations. 2234 */ 2235 while (relocs--) { 2236 old = *cur; 2237 2238 switch (old & RELOC_MASK) { 2239 case RELOC_REGISTER: 2240 new = (old & ~RELOC_MASK) + np->mmio_ba; 2241 break; 2242 case RELOC_LABEL_A: 2243 new = (old & ~RELOC_MASK) + np->scripta_ba; 2244 break; 2245 case RELOC_LABEL_B: 2246 new = (old & ~RELOC_MASK) + np->scriptb_ba; 2247 break; 2248 case RELOC_SOFTC: 2249 new = (old & ~RELOC_MASK) + np->hcb_ba; 2250 break; 2251 case 0: 2252 /* 2253 * Don't relocate a 0 address. 2254 * They are mostly used for patched or 2255 * script self-modified areas. 
2256 */ 2257 if (old == 0) { 2258 new = old; 2259 break; 2260 } 2261 /* fall through */ 2262 default: 2263 new = 0; 2264 panic("sym_fw_bind_script: " 2265 "weird relocation %x\n", old); 2266 break; 2267 } 2268 2269 *cur++ = cpu_to_scr(new); 2270 } 2271 }; 2272 } 2273 2274 /*--------------------------------------------------------------------------*/ 2275 /*--------------------------- END OF FIRMARES -----------------------------*/ 2276 /*--------------------------------------------------------------------------*/ 2277 2278 /* 2279 * Function prototypes. 2280 */ 2281 static void sym_save_initial_setting (hcb_p np); 2282 static int sym_prepare_setting (hcb_p np, struct sym_nvram *nvram); 2283 static int sym_prepare_nego (hcb_p np, ccb_p cp, int nego, u_char *msgptr); 2284 static void sym_put_start_queue (hcb_p np, ccb_p cp); 2285 static void sym_chip_reset (hcb_p np); 2286 static void sym_soft_reset (hcb_p np); 2287 static void sym_start_reset (hcb_p np); 2288 static int sym_reset_scsi_bus (hcb_p np, int enab_int); 2289 static int sym_wakeup_done (hcb_p np); 2290 static void sym_flush_busy_queue (hcb_p np, int cam_status); 2291 static void sym_flush_comp_queue (hcb_p np, int cam_status); 2292 static void sym_init (hcb_p np, int reason); 2293 static int sym_getsync(hcb_p np, u_char dt, u_char sfac, u_char *divp, 2294 u_char *fakp); 2295 static void sym_setsync (hcb_p np, ccb_p cp, u_char ofs, u_char per, 2296 u_char div, u_char fak); 2297 static void sym_setwide (hcb_p np, ccb_p cp, u_char wide); 2298 static void sym_setpprot(hcb_p np, ccb_p cp, u_char dt, u_char ofs, 2299 u_char per, u_char wide, u_char div, u_char fak); 2300 static void sym_settrans(hcb_p np, ccb_p cp, u_char dt, u_char ofs, 2301 u_char per, u_char wide, u_char div, u_char fak); 2302 static void sym_log_hard_error (hcb_p np, u_short sist, u_char dstat); 2303 static void sym_intr (void *arg); 2304 static void sym_poll (struct cam_sim *sim); 2305 static void sym_recover_scsi_int (hcb_p np, u_char 
hsts); 2306 static void sym_int_sto (hcb_p np); 2307 static void sym_int_udc (hcb_p np); 2308 static void sym_int_sbmc (hcb_p np); 2309 static void sym_int_par (hcb_p np, u_short sist); 2310 static void sym_int_ma (hcb_p np); 2311 static int sym_dequeue_from_squeue(hcb_p np, int i, int target, int lun, 2312 int task); 2313 static void sym_sir_bad_scsi_status (hcb_p np, int num, ccb_p cp); 2314 static int sym_clear_tasks (hcb_p np, int status, int targ, int lun, int task); 2315 static void sym_sir_task_recovery (hcb_p np, int num); 2316 static int sym_evaluate_dp (hcb_p np, ccb_p cp, u32 scr, int *ofs); 2317 static void sym_modify_dp (hcb_p np, tcb_p tp, ccb_p cp, int ofs); 2318 static int sym_compute_residual (hcb_p np, ccb_p cp); 2319 static int sym_show_msg (u_char * msg); 2320 static void sym_print_msg (ccb_p cp, char *label, u_char *msg); 2321 static void sym_sync_nego (hcb_p np, tcb_p tp, ccb_p cp); 2322 static void sym_ppr_nego (hcb_p np, tcb_p tp, ccb_p cp); 2323 static void sym_wide_nego (hcb_p np, tcb_p tp, ccb_p cp); 2324 static void sym_nego_default (hcb_p np, tcb_p tp, ccb_p cp); 2325 static void sym_nego_rejected (hcb_p np, tcb_p tp, ccb_p cp); 2326 static void sym_int_sir (hcb_p np); 2327 static void sym_free_ccb (hcb_p np, ccb_p cp); 2328 static ccb_p sym_get_ccb (hcb_p np, u_char tn, u_char ln, u_char tag_order); 2329 static ccb_p sym_alloc_ccb (hcb_p np); 2330 static ccb_p sym_ccb_from_dsa (hcb_p np, u32 dsa); 2331 static lcb_p sym_alloc_lcb (hcb_p np, u_char tn, u_char ln); 2332 static void sym_alloc_lcb_tags (hcb_p np, u_char tn, u_char ln); 2333 static int sym_snooptest (hcb_p np); 2334 static void sym_selectclock(hcb_p np, u_char scntl3); 2335 static void sym_getclock (hcb_p np, int mult); 2336 static int sym_getpciclock (hcb_p np); 2337 static void sym_complete_ok (hcb_p np, ccb_p cp); 2338 static void sym_complete_error (hcb_p np, ccb_p cp); 2339 static void sym_timeout (void *arg); 2340 static int sym_abort_scsiio (hcb_p np, union ccb *ccb, 
					int timed_out);
static void sym_reset_dev (hcb_p np, union ccb *ccb);
static void sym_action (struct cam_sim *sim, union ccb *ccb);
static void sym_action1 (struct cam_sim *sim, union ccb *ccb);
static int sym_setup_cdb (hcb_p np, struct ccb_scsiio *csio, ccb_p cp);
static void sym_setup_data_and_start (hcb_p np, struct ccb_scsiio *csio,
				      ccb_p cp);
static int sym_fast_scatter_sg_physical(hcb_p np, ccb_p cp,
					bus_dma_segment_t *psegs, int nsegs);
static int sym_scatter_sg_physical (hcb_p np, ccb_p cp,
				    bus_dma_segment_t *psegs, int nsegs);
static void sym_action2 (struct cam_sim *sim, union ccb *ccb);
static void sym_update_trans (hcb_p np, tcb_p tp, struct sym_trans *tip,
			      struct ccb_trans_settings *cts);
static void sym_update_dflags(hcb_p np, u_char *flags,
			      struct ccb_trans_settings *cts);

/* PCI glue: chip identification, probe and attach entry points. */
static struct sym_pci_chip *sym_find_pci_chip (device_t dev);
static int sym_pci_probe (device_t dev);
static int sym_pci_attach (device_t dev);

static void sym_pci_free (hcb_p np);
static int sym_cam_attach (hcb_p np);
static void sym_cam_free (hcb_p np);

/* NVRAM handling: host and per-target settings read from on-board NVRAM. */
static void sym_nvram_setup_host (hcb_p np, struct sym_nvram *nvram);
static void sym_nvram_setup_target (hcb_p np, int targ, struct sym_nvram *nvp);
static int sym_read_nvram (hcb_p np, struct sym_nvram *nvp);

/*
 *  Print something which allows to retrieve the controller type,
 *  unit, target, lun concerned by a kernel message.
 */
static void PRINT_TARGET (hcb_p np, int target)
{
	printf ("%s:%d:", sym_name(np), target);
}

/* Same as PRINT_TARGET, with the lun appended. */
static void PRINT_LUN(hcb_p np, int target, int lun)
{
	printf ("%s:%d:%d:", sym_name(np), target, lun);
}

/* Print the CAM path of a CCB; silently does nothing without a CAM CCB. */
static void PRINT_ADDR (ccb_p cp)
{
	if (cp && cp->cam_ccb)
		xpt_print_path(cp->cam_ccb->ccb_h.path);
}

/*
 *  Take into account this ccb in the freeze count.
 */
static void sym_freeze_cam_ccb(union ccb *ccb)
{
	/*
	 *  Freeze the device queue once per CCB unless the caller
	 *  disabled freezing (CAM_DEV_QFRZDIS) or the queue is
	 *  already frozen on behalf of this CCB (CAM_DEV_QFRZN).
	 */
	if (!(ccb->ccb_h.flags & CAM_DEV_QFRZDIS)) {
		if (!(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
		}
	}
}

/*
 *  Set the status field of a CAM CCB.
 */
static __inline void sym_set_cam_status(union ccb *ccb, cam_status status)
{
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	ccb->ccb_h.status |= status;
}

/*
 *  Get the status field of a CAM CCB.
 */
static __inline int sym_get_cam_status(union ccb *ccb)
{
	return ccb->ccb_h.status & CAM_STATUS_MASK;
}

/*
 *  Enqueue a CAM CCB.
 *
 *  Arms the per-CCB timeout (old-style timeout(9) API; timeout is
 *  expressed in milliseconds by CAM, converted here to ticks) and
 *  links the CCB on the HCB busy queue.
 */
static void sym_enqueue_cam_ccb(hcb_p np, union ccb *ccb)
{
	assert(!(ccb->ccb_h.status & CAM_SIM_QUEUED));
	ccb->ccb_h.status = CAM_REQ_INPROG;

	ccb->ccb_h.timeout_ch = timeout(sym_timeout, (caddr_t) ccb,
				ccb->ccb_h.timeout*hz/1000);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	ccb->ccb_h.sym_hcb_ptr = np;

	sym_insque_tail(sym_qptr(&ccb->ccb_h.sim_links), &np->cam_ccbq);
}

/*
 *  Complete a pending CAM CCB.
 *
 *  Undoes everything sym_enqueue_cam_ccb() did (timer, queue
 *  linkage, flags), optionally freezes the device queue, then
 *  hands the CCB back to the XPT layer.
 */
static void sym_xpt_done(hcb_p np, union ccb *ccb)
{
	if (ccb->ccb_h.status & CAM_SIM_QUEUED) {
		untimeout(sym_timeout, (caddr_t) ccb, ccb->ccb_h.timeout_ch);
		sym_remque(sym_qptr(&ccb->ccb_h.sim_links));
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.sym_hcb_ptr = 0;
	}
	if (ccb->ccb_h.flags & CAM_DEV_QFREEZE)
		sym_freeze_cam_ccb(ccb);
	xpt_done(ccb);
}

/* Convenience wrapper: set the CAM status, then complete the CCB. */
static void sym_xpt_done2(hcb_p np, union ccb *ccb, int cam_status)
{
	sym_set_cam_status(ccb, cam_status);
	sym_xpt_done(np, ccb);
}

/*
 *  SYMBIOS chip clock divisor table.
 *
 *  Divisors are multiplied by 10,000,000 in order to make
 *  calculations more simple.
2462 */ 2463 #define _5M 5000000 2464 static u32 div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M}; 2465 2466 /* 2467 * SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64, 2468 * 128 transfers. All chips support at least 16 transfers 2469 * bursts. The 825A, 875 and 895 chips support bursts of up 2470 * to 128 transfers and the 895A and 896 support bursts of up 2471 * to 64 transfers. All other chips support up to 16 2472 * transfers bursts. 2473 * 2474 * For PCI 32 bit data transfers each transfer is a DWORD. 2475 * It is a QUADWORD (8 bytes) for PCI 64 bit data transfers. 2476 * 2477 * We use log base 2 (burst length) as internal code, with 2478 * value 0 meaning "burst disabled". 2479 */ 2480 2481 /* 2482 * Burst length from burst code. 2483 */ 2484 #define burst_length(bc) (!(bc))? 0 : 1 << (bc) 2485 2486 /* 2487 * Burst code from io register bits. 2488 */ 2489 #define burst_code(dmode, ctest4, ctest5) \ 2490 (ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1 2491 2492 /* 2493 * Set initial io register bits from burst code. 2494 */ 2495 static __inline void sym_init_burst(hcb_p np, u_char bc) 2496 { 2497 np->rv_ctest4 &= ~0x80; 2498 np->rv_dmode &= ~(0x3 << 6); 2499 np->rv_ctest5 &= ~0x4; 2500 2501 if (!bc) { 2502 np->rv_ctest4 |= 0x80; 2503 } 2504 else { 2505 --bc; 2506 np->rv_dmode |= ((bc & 0x3) << 6); 2507 np->rv_ctest5 |= (bc & 0x4); 2508 } 2509 } 2510 2511 2512 /* 2513 * Print out the list of targets that have some flag disabled by user. 2514 */ 2515 static void sym_print_targets_flag(hcb_p np, int mask, char *msg) 2516 { 2517 int cnt; 2518 int i; 2519 2520 for (cnt = 0, i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { 2521 if (i == np->myaddr) 2522 continue; 2523 if (np->target[i].usrflags & mask) { 2524 if (!cnt++) 2525 printf("%s: %s disabled for targets", 2526 sym_name(np), msg); 2527 printf(" %d", i); 2528 } 2529 } 2530 if (cnt) 2531 printf(".\n"); 2532 } 2533 2534 /* 2535 * Save initial settings of some IO registers. 
 *  Assumed to have been set by BIOS.
 *  We cannot reset the chip prior to reading the
 *  IO registers, since informations will be lost.
 *  Since the SCRIPTS processor may be running, this
 *  is not safe on paper, but it seems to work quite
 *  well. :)
 */
static void sym_save_initial_setting (hcb_p np)
{
	/* Masks keep only the bits this driver later restores/merges. */
	np->sv_scntl0	= INB(nc_scntl0) & 0x0a;
	np->sv_scntl3	= INB(nc_scntl3) & 0x07;
	np->sv_dmode	= INB(nc_dmode)  & 0xce;
	np->sv_dcntl	= INB(nc_dcntl)  & 0xa8;
	np->sv_ctest3	= INB(nc_ctest3) & 0x01;
	np->sv_ctest4	= INB(nc_ctest4) & 0x80;
	np->sv_gpcntl	= INB(nc_gpcntl);
	np->sv_stest1	= INB(nc_stest1);
	np->sv_stest2	= INB(nc_stest2) & 0x20;
	np->sv_stest4	= INB(nc_stest4);
	if (np->features & FE_C10) {	/* Always large DMA fifo + ultra3 */
		np->sv_scntl4	= INB(nc_scntl4);
		np->sv_ctest5	= INB(nc_ctest5) & 0x04;
	}
	else
		np->sv_ctest5	= INB(nc_ctest5) & 0x24;
}

/*
 *  Prepare io register values used by sym_init() according
 *  to selected and supported features.
 *
 *  Always returns 0.  Fills in the np->rv_* "result value"
 *  register images, clock/sync limits and per-target defaults.
 */
static int sym_prepare_setting(hcb_p np, struct sym_nvram *nvram)
{
	u_char	burst_max;
	u32	period;
	int i;

	/*
	 *  Wide ?
	 */
	np->maxwide	= (np->features & FE_WIDE)? 1 : 0;

	/*
	 *  Get the frequency of the chip's clock.
	 */
	if	(np->features & FE_QUAD)
		np->multiplier	= 4;
	else if	(np->features & FE_DBLR)
		np->multiplier	= 2;
	else
		np->multiplier	= 1;

	np->clock_khz	= (np->features & FE_CLK80)? 80000 : 40000;
	np->clock_khz	*= np->multiplier;

	/* Measure the actual clock unless it is the plain 40 MHz one. */
	if (np->clock_khz != 40000)
		sym_getclock(np, np->multiplier);

	/*
	 * Divisor to be used for async (timer pre-scaler).
	 * Pick the smallest divisor fast enough for SYM_CONF_MIN_ASYNC.
	 */
	i = np->clock_divn - 1;
	while (--i >= 0) {
		if (10ul * SYM_CONF_MIN_ASYNC * np->clock_khz > div_10M[i]) {
			++i;
			break;
		}
	}
	np->rv_scntl3 = i+1;

	/*
	 * The C1010 uses hardwired divisors for async.
	 * So, we just throw away, the async. divisor.:-)
	 */
	if (np->features & FE_C10)
		np->rv_scntl3 = 0;

	/*
	 * Minimum synchronous period factor supported by the chip.
	 * Btw, 'period' is in tenths of nanoseconds.
	 */
	period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz;
	if	(period <= 250)		np->minsync = 10;
	else if	(period <= 303)		np->minsync = 11;
	else if	(period <= 500)		np->minsync = 12;
	else				np->minsync = (period + 40 - 1) / 40;

	/*
	 * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2).
	 */
	if	(np->minsync < 25 &&
		 !(np->features & (FE_ULTRA|FE_ULTRA2|FE_ULTRA3)))
		np->minsync = 25;
	else if	(np->minsync < 12 &&
		 !(np->features & (FE_ULTRA2|FE_ULTRA3)))
		np->minsync = 12;

	/*
	 * Maximum synchronous period factor supported by the chip.
	 */
	period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz);
	np->maxsync = period > 2540 ? 254 : period / 10;

	/*
	 * If chip is a C1010, guess the sync limits in DT mode.
	 */
	if ((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) {
		if (np->clock_khz == 160000) {
			np->minsync_dt = 9;
			np->maxsync_dt = 50;
			np->maxoffs_dt = 62;
		}
	}

	/*
	 *  64 bit addressing  (895A/896/1010) ?
	 */
	if (np->features & FE_DAC)
#if BITS_PER_LONG > 32
		np->rv_ccntl1	|= (XTIMOD | EXTIBMV);
#else
		np->rv_ccntl1	|= (DDAC);
#endif

	/*
	 *  Phase mismatch handled by SCRIPTS (895A/896/1010) ?
	 */
	if (np->features & FE_NOPM)
		np->rv_ccntl0	|= (ENPMJ);

	/*
	 *  C1010 Errata.
	 *  In dual channel mode, contention occurs if internal cycles
	 *  are used. Disable internal cycles.
	 */
	if (np->device_id == PCI_ID_LSI53C1010 &&
	    np->revision_id < 0x2)
		np->rv_ccntl0	|= DILS;

	/*
	 *  Select burst length (dwords).
	 *  255 means "use whatever the BIOS left in the registers".
	 */
	burst_max	= SYM_SETUP_BURST_ORDER;
	if (burst_max == 255)
		burst_max = burst_code(np->sv_dmode, np->sv_ctest4,
				       np->sv_ctest5);
	if (burst_max > 7)
		burst_max = 7;
	if (burst_max > np->maxburst)
		burst_max = np->maxburst;

	/*
	 *  DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2.
	 *  This chip and the 860 Rev 1 may wrongly use PCI cache line
	 *  based transactions on LOAD/STORE instructions. So we have
	 *  to prevent these chips from using such PCI transactions in
	 *  this driver. The generic ncr driver that does not use
	 *  LOAD/STORE instructions does not need this work-around.
	 */
	if ((np->device_id == PCI_ID_SYM53C810 &&
	     np->revision_id >= 0x10 && np->revision_id <= 0x11) ||
	    (np->device_id == PCI_ID_SYM53C860 &&
	     np->revision_id <= 0x1))
		np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP);

	/*
	 *  Select all supported special features.
	 *  If we are using on-board RAM for scripts, prefetch (PFEN)
	 *  does not help, but burst op fetch (BOF) does.
	 *  Disabling PFEN makes sure BOF will be used.
	 */
	if (np->features & FE_ERL)
		np->rv_dmode	|= ERL;		/* Enable Read Line */
	if (np->features & FE_BOF)
		np->rv_dmode	|= BOF;		/* Burst Opcode Fetch */
	if (np->features & FE_ERMP)
		np->rv_dmode	|= ERMP;	/* Enable Read Multiple */
#if 1
	if ((np->features & FE_PFEN) && !np->ram_ba)
#else
	if (np->features & FE_PFEN)
#endif
		np->rv_dcntl	|= PFEN;	/* Prefetch Enable */
	if (np->features & FE_CLSE)
		np->rv_dcntl	|= CLSE;	/* Cache Line Size Enable */
	if (np->features & FE_WRIE)
		np->rv_ctest3	|= WRIE;	/* Write and Invalidate */
	if (np->features & FE_DFS)
		np->rv_ctest5	|= DFS;		/* Dma Fifo Size */

	/*
	 *  Select some other
	 */
	if (SYM_SETUP_PCI_PARITY)
		np->rv_ctest4	|= MPEE; /* Master parity checking */
	if (SYM_SETUP_SCSI_PARITY)
		np->rv_scntl0	|= 0x0a; /* full arb., ena parity, par->ATN */

	/*
	 *  Get parity checking, host ID and verbose mode from NVRAM.
	 *  255 is a sentinel meaning "not set by NVRAM".
	 */
	np->myaddr = 255;
	sym_nvram_setup_host (np, nvram);

	/*
	 *  Get SCSI addr of host adapter (set by bios?).
	 */
	if (np->myaddr == 255) {
		np->myaddr = INB(nc_scid) & 0x07;
		if (!np->myaddr)
			np->myaddr = SYM_SETUP_HOST_ID;
	}

	/*
	 *  Prepare initial io register bits for burst length
	 */
	sym_init_burst(np, burst_max);

	/*
	 *  Set SCSI BUS mode.
	 *  - LVD capable chips (895/895A/896/1010) report the
	 *    current BUS mode through the STEST4 IO register.
	 *  - For previous generation chips (825/825A/875),
	 *    user has to tell us how to check against HVD,
	 *    since a 100% safe algorithm is not possible.
	 */
	np->scsi_mode = SMODE_SE;
	if (np->features & (FE_ULTRA2|FE_ULTRA3))
		np->scsi_mode = (np->sv_stest4 & SMODE);
	else if	(np->features & FE_DIFF) {
		if (SYM_SETUP_SCSI_DIFF == 1) {
			if (np->sv_scntl3) {
				if (np->sv_stest2 & 0x20)
					np->scsi_mode = SMODE_HVD;
			}
			else if (nvram->type == SYM_SYMBIOS_NVRAM) {
				if (!(INB(nc_gpreg) & 0x08))
					np->scsi_mode = SMODE_HVD;
			}
		}
		else if	(SYM_SETUP_SCSI_DIFF == 2)
			np->scsi_mode = SMODE_HVD;
	}
	if (np->scsi_mode == SMODE_HVD)
		np->rv_stest2 |= 0x20;

	/*
	 *  Set LED support from SCRIPTS.
	 *  Ignore this feature for boards known to use a
	 *  specific GPIO wiring and for the 895A, 896
	 *  and 1010 that drive the LED directly.
	 */
	if ((SYM_SETUP_SCSI_LED ||
	     (nvram->type == SYM_SYMBIOS_NVRAM ||
	      (nvram->type == SYM_TEKRAM_NVRAM &&
	       np->device_id == PCI_ID_SYM53C895))) &&
	    !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01))
		np->features |= FE_LED0;

	/*
	 *  Set irq mode.
	 */
	switch(SYM_SETUP_IRQ_MODE & 3) {
	case 2:
		np->rv_dcntl	|= IRQM;
		break;
	case 1:
		np->rv_dcntl	|= (np->sv_dcntl & IRQM);
		break;
	default:
		break;
	}

	/*
	 *  Configure targets according to driver setup.
	 *  If NVRAM present get targets setup from NVRAM.
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
		tcb_p tp = &np->target[i];

#ifdef	FreeBSD_New_Tran_Settings
		tp->tinfo.user.scsi_version = tp->tinfo.current.scsi_version= 2;
		tp->tinfo.user.spi_version  = tp->tinfo.current.spi_version = 2;
#endif
		tp->tinfo.user.period = np->minsync;
		tp->tinfo.user.offset = np->maxoffs;
		tp->tinfo.user.width  = np->maxwide ? BUS_16_BIT : BUS_8_BIT;
		tp->usrflags |= (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);
		tp->usrtags = SYM_SETUP_MAX_TAG;

		sym_nvram_setup_target (np, i, nvram);

		/*
		 *  For now, guess PPR/DT support from the period
		 *  and BUS width.
		 */
		if (np->features & FE_ULTRA3) {
			if (tp->tinfo.user.period <= 9 &&
			    tp->tinfo.user.width == BUS_16_BIT) {
				tp->tinfo.user.options |= PPR_OPT_DT;
				tp->tinfo.user.offset = np->maxoffs_dt;
#ifdef	FreeBSD_New_Tran_Settings
				tp->tinfo.user.spi_version = 3;
#endif
			}
		}

		if (!tp->usrtags)
			tp->usrflags &= ~SYM_TAGS_ENABLED;
	}

	/*
	 *  Let user know about the settings.
	 */
	i = nvram->type;
	printf("%s: %s NVRAM, ID %d, Fast-%d, %s, %s\n", sym_name(np),
		i == SYM_SYMBIOS_NVRAM ? "Symbios" :
		(i == SYM_TEKRAM_NVRAM ? "Tekram" : "No"),
		np->myaddr,
		(np->features & FE_ULTRA3) ? 80 :
		(np->features & FE_ULTRA2) ? 40 :
		(np->features & FE_ULTRA)  ? 20 : 10,
		sym_scsi_bus_mode(np->scsi_mode),
		(np->rv_scntl0 & 0xa) ? "parity checking" : "NO parity");
	/*
	 *  Tell him more on demand.
	 */
	if (sym_verbose) {
		printf("%s: %s IRQ line driver%s\n",
			sym_name(np),
			np->rv_dcntl & IRQM ? "totem pole" : "open drain",
			np->ram_ba ? ", using on-chip SRAM" : "");
		printf("%s: using %s firmware.\n", sym_name(np), np->fw_name);
		if (np->features & FE_NOPM)
			printf("%s: handling phase mismatch from SCRIPTS.\n",
			       sym_name(np));
	}
	/*
	 *  And still more.
	 */
	if (sym_verbose > 1) {
		printf ("%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
			"(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
			sym_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl,
			np->sv_ctest3, np->sv_ctest4, np->sv_ctest5);

		printf ("%s: final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
			"(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
			sym_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl,
			np->rv_ctest3, np->rv_ctest4, np->rv_ctest5);
	}
	/*
	 *  Let user be aware of targets that have some disable flags set.
	 */
	sym_print_targets_flag(np, SYM_SCAN_BOOT_DISABLED, "SCAN AT BOOT");
	if (sym_verbose)
		sym_print_targets_flag(np, SYM_SCAN_LUNS_DISABLED,
				       "SCAN FOR LUNS");

	return 0;
}

/*
 *  Prepare the next negotiation message if needed.
 *
 *  Fill in the part of message buffer that contains the
 *  negotiation and the nego_status field of the CCB.
 *  Returns the size of the message in bytes.
 */

static int sym_prepare_nego(hcb_p np, ccb_p cp, int nego, u_char *msgptr)
{
	tcb_p tp = &np->target[cp->target];
	int msglen = 0;

	/*
	 *  Early C1010 chips need a work-around for DT
	 *  data transfer to work.
	 */
	if (!(np->features & FE_U3EN))
		tp->tinfo.goal.options = 0;
	/*
	 *  negotiate using PPR ?
	 */
	if (tp->tinfo.goal.options & PPR_OPT_MASK)
		nego = NS_PPR;
	/*
	 *  negotiate wide transfers ?
	 */
	else if (tp->tinfo.current.width != tp->tinfo.goal.width)
		nego = NS_WIDE;
	/*
	 *  negotiate synchronous transfers?
	 */
	else if (tp->tinfo.current.period != tp->tinfo.goal.period ||
		 tp->tinfo.current.offset != tp->tinfo.goal.offset)
		nego = NS_SYNC;

	/* Build the extended message bytes for the chosen negotiation. */
	switch (nego) {
	case NS_SYNC:
		msgptr[msglen++] = M_EXTENDED;
		msgptr[msglen++] = 3;
		msgptr[msglen++] = M_X_SYNC_REQ;
		msgptr[msglen++] = tp->tinfo.goal.period;
		msgptr[msglen++] = tp->tinfo.goal.offset;
		break;
	case NS_WIDE:
		msgptr[msglen++] = M_EXTENDED;
		msgptr[msglen++] = 2;
		msgptr[msglen++] = M_X_WIDE_REQ;
		msgptr[msglen++] = tp->tinfo.goal.width;
		break;
	case NS_PPR:
		msgptr[msglen++] = M_EXTENDED;
		msgptr[msglen++] = 6;
		msgptr[msglen++] = M_X_PPR_REQ;
		msgptr[msglen++] = tp->tinfo.goal.period;
		msgptr[msglen++] = 0;
		msgptr[msglen++] = tp->tinfo.goal.offset;
		msgptr[msglen++] = tp->tinfo.goal.width;
		msgptr[msglen++] = tp->tinfo.goal.options & PPR_OPT_DT;
		break;
	};

	cp->nego_status = nego;

	if (nego) {
		tp->nego_cp = cp; /* Keep track a nego will be performed */
		if (DEBUG_FLAGS & DEBUG_NEGO) {
			sym_print_msg(cp, nego == NS_SYNC ? "sync msgout" :
					  nego == NS_WIDE ? "wide msgout" :
					  "ppr msgout", msgptr);
		};
	};

	return msglen;
}

/*
 *  Insert a job into the start queue.
 */
static void sym_put_start_queue(hcb_p np, ccb_p cp)
{
	u_short	qidx;

#ifdef SYM_CONF_IARB_SUPPORT
	/*
	 *  If the previously queued CCB is not yet done,
	 *  set the IARB hint. The SCRIPTS will go with IARB
	 *  for this job when starting the previous one.
	 *  We leave devices a chance to win arbitration by
	 *  not using more than 'iarb_max' consecutive
	 *  immediate arbitrations.
	 */
	if (np->last_cp && np->iarb_count < np->iarb_max) {
		np->last_cp->host_flags |= HF_HINT_IARB;
		++np->iarb_count;
	}
	else
		np->iarb_count = 0;
	np->last_cp = cp;
#endif

	/*
	 *  Insert first the idle task and then our job.
	 *  The MB should ensure proper ordering.
	 */
	qidx = np->squeueput + 2;
	if (qidx >= MAX_QUEUE*2) qidx = 0;

	np->squeue [qidx] = cpu_to_scr(np->idletask_ba);
	MEMORY_BARRIER();
	np->squeue [np->squeueput] = cpu_to_scr(cp->ccb_ba);

	np->squeueput = qidx;

	if (DEBUG_FLAGS & DEBUG_QUEUE)
		printf ("%s: queuepos=%d.\n", sym_name (np), np->squeueput);

	/*
	 *  Script processor may be waiting for reselect.
	 *  Wake it up.
	 */
	MEMORY_BARRIER();
	OUTB (nc_istat, SIGP|np->istat_sem);
}


/*
 *  Soft reset the chip.
 *
 *  Raising SRST when the chip is running may cause
 *  problems on dual function chips (see below).
 *  On the other hand, LVD devices need some delay
 *  to settle and report actual BUS mode in STEST4.
 */
static void sym_chip_reset (hcb_p np)
{
	OUTB (nc_istat, SRST);
	UDELAY (10);
	OUTB (nc_istat, 0);
	UDELAY(2000);	/* For BUS MODE to settle */
}

/*
 *  Soft reset the chip.
 *
 *  Some 896 and 876 chip revisions may hang-up if we set
 *  the SRST (soft reset) bit at the wrong time when SCRIPTS
 *  are running.
 *  So, we need to abort the current operation prior to
 *  soft resetting the chip.
3048 */ 3049 static void sym_soft_reset (hcb_p np) 3050 { 3051 u_char istat; 3052 int i; 3053 3054 OUTB (nc_istat, CABRT); 3055 for (i = 1000000 ; i ; --i) { 3056 istat = INB (nc_istat); 3057 if (istat & SIP) { 3058 INW (nc_sist); 3059 continue; 3060 } 3061 if (istat & DIP) { 3062 OUTB (nc_istat, 0); 3063 INB (nc_dstat); 3064 break; 3065 } 3066 } 3067 if (!i) 3068 printf("%s: unable to abort current chip operation.\n", 3069 sym_name(np)); 3070 sym_chip_reset (np); 3071 } 3072 3073 /* 3074 * Start reset process. 3075 * 3076 * The interrupt handler will reinitialize the chip. 3077 */ 3078 static void sym_start_reset(hcb_p np) 3079 { 3080 (void) sym_reset_scsi_bus(np, 1); 3081 } 3082 3083 static int sym_reset_scsi_bus(hcb_p np, int enab_int) 3084 { 3085 u32 term; 3086 int retv = 0; 3087 3088 sym_soft_reset(np); /* Soft reset the chip */ 3089 if (enab_int) 3090 OUTW (nc_sien, RST); 3091 /* 3092 * Enable Tolerant, reset IRQD if present and 3093 * properly set IRQ mode, prior to resetting the bus. 3094 */ 3095 OUTB (nc_stest3, TE); 3096 OUTB (nc_dcntl, (np->rv_dcntl & IRQM)); 3097 OUTB (nc_scntl1, CRST); 3098 UDELAY (200); 3099 3100 if (!SYM_SETUP_SCSI_BUS_CHECK) 3101 goto out; 3102 /* 3103 * Check for no terminators or SCSI bus shorts to ground. 3104 * Read SCSI data bus, data parity bits and control signals. 3105 * We are expecting RESET to be TRUE and other signals to be 3106 * FALSE. 
3107 */ 3108 term = INB(nc_sstat0); 3109 term = ((term & 2) << 7) + ((term & 1) << 17); /* rst sdp0 */ 3110 term |= ((INB(nc_sstat2) & 0x01) << 26) | /* sdp1 */ 3111 ((INW(nc_sbdl) & 0xff) << 9) | /* d7-0 */ 3112 ((INW(nc_sbdl) & 0xff00) << 10) | /* d15-8 */ 3113 INB(nc_sbcl); /* req ack bsy sel atn msg cd io */ 3114 3115 if (!(np->features & FE_WIDE)) 3116 term &= 0x3ffff; 3117 3118 if (term != (2<<7)) { 3119 printf("%s: suspicious SCSI data while resetting the BUS.\n", 3120 sym_name(np)); 3121 printf("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = " 3122 "0x%lx, expecting 0x%lx\n", 3123 sym_name(np), 3124 (np->features & FE_WIDE) ? "dp1,d15-8," : "", 3125 (u_long)term, (u_long)(2<<7)); 3126 if (SYM_SETUP_SCSI_BUS_CHECK == 1) 3127 retv = 1; 3128 } 3129 out: 3130 OUTB (nc_scntl1, 0); 3131 /* MDELAY(100); */ 3132 return retv; 3133 } 3134 3135 /* 3136 * The chip may have completed jobs. Look at the DONE QUEUE. 3137 * 3138 * On architectures that may reorder LOAD/STORE operations, 3139 * a memory barrier may be needed after the reading of the 3140 * so-called `flag' and prior to dealing with the data. 3141 */ 3142 static int sym_wakeup_done (hcb_p np) 3143 { 3144 ccb_p cp; 3145 int i, n; 3146 u32 dsa; 3147 3148 n = 0; 3149 i = np->dqueueget; 3150 while (1) { 3151 dsa = scr_to_cpu(np->dqueue[i]); 3152 if (!dsa) 3153 break; 3154 np->dqueue[i] = 0; 3155 if ((i = i+2) >= MAX_QUEUE*2) 3156 i = 0; 3157 3158 cp = sym_ccb_from_dsa(np, dsa); 3159 if (cp) { 3160 MEMORY_BARRIER(); 3161 sym_complete_ok (np, cp); 3162 ++n; 3163 } 3164 else 3165 printf ("%s: bad DSA (%x) in done queue.\n", 3166 sym_name(np), (u_int) dsa); 3167 } 3168 np->dqueueget = i; 3169 3170 return n; 3171 } 3172 3173 /* 3174 * Complete all active CCBs with error. 3175 * Used on CHIP/SCSI RESET. 3176 */ 3177 static void sym_flush_busy_queue (hcb_p np, int cam_status) 3178 { 3179 /* 3180 * Move all active CCBs to the COMP queue 3181 * and flush this queue. 
3182 */ 3183 sym_que_splice(&np->busy_ccbq, &np->comp_ccbq); 3184 sym_que_init(&np->busy_ccbq); 3185 sym_flush_comp_queue(np, cam_status); 3186 } 3187 3188 /* 3189 * Start chip. 3190 * 3191 * 'reason' means: 3192 * 0: initialisation. 3193 * 1: SCSI BUS RESET delivered or received. 3194 * 2: SCSI BUS MODE changed. 3195 */ 3196 static void sym_init (hcb_p np, int reason) 3197 { 3198 int i; 3199 u32 phys; 3200 3201 /* 3202 * Reset chip if asked, otherwise just clear fifos. 3203 */ 3204 if (reason == 1) 3205 sym_soft_reset(np); 3206 else { 3207 OUTB (nc_stest3, TE|CSF); 3208 OUTONB (nc_ctest3, CLF); 3209 } 3210 3211 /* 3212 * Clear Start Queue 3213 */ 3214 phys = np->squeue_ba; 3215 for (i = 0; i < MAX_QUEUE*2; i += 2) { 3216 np->squeue[i] = cpu_to_scr(np->idletask_ba); 3217 np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4); 3218 } 3219 np->squeue[MAX_QUEUE*2-1] = cpu_to_scr(phys); 3220 3221 /* 3222 * Start at first entry. 3223 */ 3224 np->squeueput = 0; 3225 3226 /* 3227 * Clear Done Queue 3228 */ 3229 phys = np->dqueue_ba; 3230 for (i = 0; i < MAX_QUEUE*2; i += 2) { 3231 np->dqueue[i] = 0; 3232 np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4); 3233 } 3234 np->dqueue[MAX_QUEUE*2-1] = cpu_to_scr(phys); 3235 3236 /* 3237 * Start at first entry. 3238 */ 3239 np->dqueueget = 0; 3240 3241 /* 3242 * Install patches in scripts. 3243 * This also let point to first position the start 3244 * and done queue pointers used from SCRIPTS. 3245 */ 3246 np->fw_patch(np); 3247 3248 /* 3249 * Wakeup all pending jobs. 3250 */ 3251 sym_flush_busy_queue(np, CAM_SCSI_BUS_RESET); 3252 3253 /* 3254 * Init chip. 3255 */ 3256 OUTB (nc_istat, 0x00 ); /* Remove Reset, abort */ 3257 UDELAY (2000); /* The 895 needs time for the bus mode to settle */ 3258 3259 OUTB (nc_scntl0, np->rv_scntl0 | 0xc0); 3260 /* full arb., ena parity, par->ATN */ 3261 OUTB (nc_scntl1, 0x00); /* odd parity, and remove CRST!! 
*/ 3262 3263 sym_selectclock(np, np->rv_scntl3); /* Select SCSI clock */ 3264 3265 OUTB (nc_scid , RRE|np->myaddr); /* Adapter SCSI address */ 3266 OUTW (nc_respid, 1ul<<np->myaddr); /* Id to respond to */ 3267 OUTB (nc_istat , SIGP ); /* Signal Process */ 3268 OUTB (nc_dmode , np->rv_dmode); /* Burst length, dma mode */ 3269 OUTB (nc_ctest5, np->rv_ctest5); /* Large fifo + large burst */ 3270 3271 OUTB (nc_dcntl , NOCOM|np->rv_dcntl); /* Protect SFBR */ 3272 OUTB (nc_ctest3, np->rv_ctest3); /* Write and invalidate */ 3273 OUTB (nc_ctest4, np->rv_ctest4); /* Master parity checking */ 3274 3275 /* Extended Sreq/Sack filtering not supported on the C10 */ 3276 if (np->features & FE_C10) 3277 OUTB (nc_stest2, np->rv_stest2); 3278 else 3279 OUTB (nc_stest2, EXT|np->rv_stest2); 3280 3281 OUTB (nc_stest3, TE); /* TolerANT enable */ 3282 OUTB (nc_stime0, 0x0c); /* HTH disabled STO 0.25 sec */ 3283 3284 /* 3285 * For now, disable AIP generation on C1010-66. 3286 */ 3287 if (np->device_id == PCI_ID_LSI53C1010_2) 3288 OUTB (nc_aipcntl1, DISAIP); 3289 3290 /* 3291 * C10101 Errata. 3292 * Errant SGE's when in narrow. Write bits 4 & 5 of 3293 * STEST1 register to disable SGE. We probably should do 3294 * that from SCRIPTS for each selection/reselection, but 3295 * I just don't want. :) 3296 */ 3297 if (np->device_id == PCI_ID_LSI53C1010 && 3298 /* np->revision_id < 0xff */ 1) 3299 OUTB (nc_stest1, INB(nc_stest1) | 0x30); 3300 3301 /* 3302 * DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2. 3303 * Disable overlapped arbitration for some dual function devices, 3304 * regardless revision id (kind of post-chip-design feature. ;-)) 3305 */ 3306 if (np->device_id == PCI_ID_SYM53C875) 3307 OUTB (nc_ctest0, (1<<5)); 3308 else if (np->device_id == PCI_ID_SYM53C896) 3309 np->rv_ccntl0 |= DPR; 3310 3311 /* 3312 * Write CCNTL0/CCNTL1 for chips capable of 64 bit addressing 3313 * and/or hardware phase mismatch, since only such chips 3314 * seem to support those IO registers. 
3315 */ 3316 if (np->features & (FE_DAC|FE_NOPM)) { 3317 OUTB (nc_ccntl0, np->rv_ccntl0); 3318 OUTB (nc_ccntl1, np->rv_ccntl1); 3319 } 3320 3321 /* 3322 * If phase mismatch handled by scripts (895A/896/1010), 3323 * set PM jump addresses. 3324 */ 3325 if (np->features & FE_NOPM) { 3326 OUTL (nc_pmjad1, SCRIPTB_BA (np, pm_handle)); 3327 OUTL (nc_pmjad2, SCRIPTB_BA (np, pm_handle)); 3328 } 3329 3330 /* 3331 * Enable GPIO0 pin for writing if LED support from SCRIPTS. 3332 * Also set GPIO5 and clear GPIO6 if hardware LED control. 3333 */ 3334 if (np->features & FE_LED0) 3335 OUTB(nc_gpcntl, INB(nc_gpcntl) & ~0x01); 3336 else if (np->features & FE_LEDC) 3337 OUTB(nc_gpcntl, (INB(nc_gpcntl) & ~0x41) | 0x20); 3338 3339 /* 3340 * enable ints 3341 */ 3342 OUTW (nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR); 3343 OUTB (nc_dien , MDPE|BF|SSI|SIR|IID); 3344 3345 /* 3346 * For 895/6 enable SBMC interrupt and save current SCSI bus mode. 3347 * Try to eat the spurious SBMC interrupt that may occur when 3348 * we reset the chip but not the SCSI BUS (at initialization). 3349 */ 3350 if (np->features & (FE_ULTRA2|FE_ULTRA3)) { 3351 OUTONW (nc_sien, SBMC); 3352 if (reason == 0) { 3353 MDELAY(100); 3354 INW (nc_sist); 3355 } 3356 np->scsi_mode = INB (nc_stest4) & SMODE; 3357 } 3358 3359 /* 3360 * Fill in target structure. 3361 * Reinitialize usrsync. 3362 * Reinitialize usrwide. 3363 * Prepare sync negotiation according to actual SCSI bus mode. 3364 */ 3365 for (i=0;i<SYM_CONF_MAX_TARGET;i++) { 3366 tcb_p tp = &np->target[i]; 3367 3368 tp->to_reset = 0; 3369 tp->head.sval = 0; 3370 tp->head.wval = np->rv_scntl3; 3371 tp->head.uval = 0; 3372 3373 tp->tinfo.current.period = 0; 3374 tp->tinfo.current.offset = 0; 3375 tp->tinfo.current.width = BUS_8_BIT; 3376 tp->tinfo.current.options = 0; 3377 } 3378 3379 /* 3380 * Download SCSI SCRIPTS to on-chip RAM if present, 3381 * and start script processor. 
3382 */ 3383 if (np->ram_ba) { 3384 if (sym_verbose > 1) 3385 printf ("%s: Downloading SCSI SCRIPTS.\n", 3386 sym_name(np)); 3387 if (np->ram_ws == 8192) { 3388 OUTRAM_OFF(4096, np->scriptb0, np->scriptb_sz); 3389 OUTL (nc_mmws, np->scr_ram_seg); 3390 OUTL (nc_mmrs, np->scr_ram_seg); 3391 OUTL (nc_sfs, np->scr_ram_seg); 3392 phys = SCRIPTB_BA (np, start64); 3393 } 3394 else 3395 phys = SCRIPTA_BA (np, init); 3396 OUTRAM_OFF(0, np->scripta0, np->scripta_sz); 3397 } 3398 else 3399 phys = SCRIPTA_BA (np, init); 3400 3401 np->istat_sem = 0; 3402 3403 OUTL (nc_dsa, np->hcb_ba); 3404 OUTL_DSP (phys); 3405 3406 /* 3407 * Notify the XPT about the RESET condition. 3408 */ 3409 if (reason != 0) 3410 xpt_async(AC_BUS_RESET, np->path, NULL); 3411 } 3412 3413 /* 3414 * Get clock factor and sync divisor for a given 3415 * synchronous factor period. 3416 */ 3417 static int 3418 sym_getsync(hcb_p np, u_char dt, u_char sfac, u_char *divp, u_char *fakp) 3419 { 3420 u32 clk = np->clock_khz; /* SCSI clock frequency in kHz */ 3421 int div = np->clock_divn; /* Number of divisors supported */ 3422 u32 fak; /* Sync factor in sxfer */ 3423 u32 per; /* Period in tenths of ns */ 3424 u32 kpc; /* (per * clk) */ 3425 int ret; 3426 3427 /* 3428 * Compute the synchronous period in tenths of nano-seconds 3429 */ 3430 if (dt && sfac <= 9) per = 125; 3431 else if (sfac <= 10) per = 250; 3432 else if (sfac == 11) per = 303; 3433 else if (sfac == 12) per = 500; 3434 else per = 40 * sfac; 3435 ret = per; 3436 3437 kpc = per * clk; 3438 if (dt) 3439 kpc <<= 1; 3440 3441 /* 3442 * For earliest C10 revision 0, we cannot use extra 3443 * clocks for the setting of the SCSI clocking. 3444 * Note that this limits the lowest sync data transfer 3445 * to 5 Mega-transfers per second and may result in 3446 * using higher clock divisors. 
3447 */ 3448 #if 1 3449 if ((np->features & (FE_C10|FE_U3EN)) == FE_C10) { 3450 /* 3451 * Look for the lowest clock divisor that allows an 3452 * output speed not faster than the period. 3453 */ 3454 while (div > 0) { 3455 --div; 3456 if (kpc > (div_10M[div] << 2)) { 3457 ++div; 3458 break; 3459 } 3460 } 3461 fak = 0; /* No extra clocks */ 3462 if (div == np->clock_divn) { /* Are we too fast ? */ 3463 ret = -1; 3464 } 3465 *divp = div; 3466 *fakp = fak; 3467 return ret; 3468 } 3469 #endif 3470 3471 /* 3472 * Look for the greatest clock divisor that allows an 3473 * input speed faster than the period. 3474 */ 3475 while (div-- > 0) 3476 if (kpc >= (div_10M[div] << 2)) break; 3477 3478 /* 3479 * Calculate the lowest clock factor that allows an output 3480 * speed not faster than the period, and the max output speed. 3481 * If fak >= 1 we will set both XCLKH_ST and XCLKH_DT. 3482 * If fak >= 2 we will also set XCLKS_ST and XCLKS_DT. 3483 */ 3484 if (dt) { 3485 fak = (kpc - 1) / (div_10M[div] << 1) + 1 - 2; 3486 /* ret = ((2+fak)*div_10M[div])/np->clock_khz; */ 3487 } 3488 else { 3489 fak = (kpc - 1) / div_10M[div] + 1 - 4; 3490 /* ret = ((4+fak)*div_10M[div])/np->clock_khz; */ 3491 } 3492 3493 /* 3494 * Check against our hardware limits, or bugs :). 3495 */ 3496 if (fak < 0) {fak = 0; ret = -1;} 3497 if (fak > 2) {fak = 2; ret = -1;} 3498 3499 /* 3500 * Compute and return sync parameters. 3501 */ 3502 *divp = div; 3503 *fakp = fak; 3504 3505 return ret; 3506 } 3507 3508 /* 3509 * Tell the SCSI layer about the new transfer parameters. 
 */
static void
sym_xpt_async_transfer_neg(hcb_p np, int target, u_int spi_valid)
{
	struct ccb_trans_settings cts;
	struct cam_path *path;
	int sts;
	tcb_p tp = &np->target[target];

	/* Build a wildcard-lun path for this target; bail out quietly
	 * if the XPT cannot create one. */
	sts = xpt_create_path(&path, NULL, cam_sim_path(np->sim), target,
	                      CAM_LUN_WILDCARD);
	if (sts != CAM_REQ_CMP)
		return;

	bzero(&cts, sizeof(cts));

/* Fill the settings using whichever CAM transport API this tree uses. */
#ifdef	FreeBSD_New_Tran_Settings
#define	cts__scsi (cts.proto_specific.scsi)
#define	cts__spi  (cts.xport_specific.spi)

	cts.type      = CTS_TYPE_CURRENT_SETTINGS;
	cts.protocol  = PROTO_SCSI;
	cts.transport = XPORT_SPI;
	cts.protocol_version  = tp->tinfo.current.scsi_version;
	cts.transport_version = tp->tinfo.current.spi_version;

	cts__spi.valid = spi_valid;
	if (spi_valid & CTS_SPI_VALID_SYNC_RATE)
		cts__spi.sync_period = tp->tinfo.current.period;
	if (spi_valid & CTS_SPI_VALID_SYNC_OFFSET)
		cts__spi.sync_offset = tp->tinfo.current.offset;
	if (spi_valid & CTS_SPI_VALID_BUS_WIDTH)
		cts__spi.bus_width = tp->tinfo.current.width;
	if (spi_valid & CTS_SPI_VALID_PPR_OPTIONS)
		cts__spi.ppr_options = tp->tinfo.current.options;
#undef cts__spi
#undef cts__scsi
#else
	cts.valid = spi_valid;
	if (spi_valid & CCB_TRANS_SYNC_RATE_VALID)
		cts.sync_period = tp->tinfo.current.period;
	if (spi_valid & CCB_TRANS_SYNC_OFFSET_VALID)
		cts.sync_offset = tp->tinfo.current.offset;
	if (spi_valid & CCB_TRANS_BUS_WIDTH_VALID)
		cts.bus_width = tp->tinfo.current.width;
#endif
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	xpt_async(AC_TRANSFER_NEG, path, &cts);
	xpt_free_path(path);
}

/*
 *  Masks of the settings a WDTR/SDTR/PPR answer makes valid,
 *  spelled for both the new and the old transport settings API.
 */
#ifdef FreeBSD_New_Tran_Settings
#define SYM_SPI_VALID_WDTR		\
	CTS_SPI_VALID_BUS_WIDTH |	\
	CTS_SPI_VALID_SYNC_RATE |	\
	CTS_SPI_VALID_SYNC_OFFSET
#define SYM_SPI_VALID_SDTR		\
	CTS_SPI_VALID_SYNC_RATE |	\
	CTS_SPI_VALID_SYNC_OFFSET

#define SYM_SPI_VALID_PPR		\
	CTS_SPI_VALID_PPR_OPTIONS |	\
	CTS_SPI_VALID_BUS_WIDTH |	\
	CTS_SPI_VALID_SYNC_RATE |	\
	CTS_SPI_VALID_SYNC_OFFSET
#else
#define SYM_SPI_VALID_WDTR		\
	CCB_TRANS_BUS_WIDTH_VALID |	\
	CCB_TRANS_SYNC_RATE_VALID |	\
	CCB_TRANS_SYNC_OFFSET_VALID
#define SYM_SPI_VALID_SDTR		\
	CCB_TRANS_SYNC_RATE_VALID |	\
	CCB_TRANS_SYNC_OFFSET_VALID
#define SYM_SPI_VALID_PPR		\
	CCB_TRANS_BUS_WIDTH_VALID |	\
	CCB_TRANS_SYNC_RATE_VALID |	\
	CCB_TRANS_SYNC_OFFSET_VALID
#endif

/*
 *  We received a WDTR.
 *  Let everything be aware of the changes.
 */
static void sym_setwide(hcb_p np, ccb_p cp, u_char wide)
{
	tcb_p tp = &np->target[cp->target];

	sym_settrans(np, cp, 0, 0, 0, wide, 0, 0);

	/*
	 *  Tell the SCSI layer about the new transfer parameters.
	 *  A width change resets sync parameters to async.
	 */
	tp->tinfo.goal.width = tp->tinfo.current.width = wide;
	tp->tinfo.current.offset = 0;
	tp->tinfo.current.period = 0;
	tp->tinfo.current.options = 0;

	sym_xpt_async_transfer_neg(np, cp->target, SYM_SPI_VALID_WDTR);
}

/*
 *  We received a SDTR.
 *  Let everything be aware of the changes.
 */
static void
sym_setsync(hcb_p np, ccb_p cp, u_char ofs, u_char per, u_char div, u_char fak)
{
	tcb_p tp = &np->target[cp->target];
	/* Current width is taken from the selection register image. */
	u_char wide = (cp->phys.select.sel_scntl3 & EWS) ? 1 : 0;

	sym_settrans(np, cp, 0, ofs, per, wide, div, fak);

	/*
	 *  Tell the SCSI layer about the new transfer parameters.
	 */
	tp->tinfo.goal.period	= tp->tinfo.current.period  = per;
	tp->tinfo.goal.offset	= tp->tinfo.current.offset  = ofs;
	tp->tinfo.goal.options	= tp->tinfo.current.options = 0;

	sym_xpt_async_transfer_neg(np, cp->target, SYM_SPI_VALID_SDTR);
}

/*
 *  We received a PPR.
 *  Let everything be aware of the changes.
 */
static void sym_setpprot(hcb_p np, ccb_p cp, u_char dt, u_char ofs,
			 u_char per, u_char wide, u_char div, u_char fak)
{
	tcb_p tp = &np->target[cp->target];

	sym_settrans(np, cp, dt, ofs, per, wide, div, fak);

	/*
	 * Tell the SCSI layer about the new transfer parameters.
	 * PPR negotiates everything at once: width, period, offset
	 * and the protocol options (DT) are all updated.
	 */
	tp->tinfo.goal.width	= tp->tinfo.current.width  = wide;
	tp->tinfo.goal.period	= tp->tinfo.current.period = per;
	tp->tinfo.goal.offset	= tp->tinfo.current.offset = ofs;
	tp->tinfo.goal.options	= tp->tinfo.current.options = dt;

	sym_xpt_async_transfer_neg(np, cp->target, SYM_SPI_VALID_PPR);
}

/*
 * Switch trans mode for current job and its target.
 *
 * Computes the new SXFER (sval), SCNTL3 (wval) and SCNTL4 (uval)
 * register images for the target, writes them to the chip if they
 * changed, and patches the select context of every busy CCB of the
 * same target so future (re)selections use the new parameters.
 * The C1010 (FE_C10) keeps its extra clock/ultra3 bits in SCNTL4;
 * older chips encode them in SXFER/SCNTL3 instead.
 */
static void sym_settrans(hcb_p np, ccb_p cp, u_char dt, u_char ofs,
			 u_char per, u_char wide, u_char div, u_char fak)
{
	SYM_QUEHEAD *qp;
	union ccb *ccb;
	tcb_p tp;
	u_char target = INB (nc_sdid) & 0x0f;	/* currently selected target */
	u_char sval, wval, uval;

	assert (cp);
	if (!cp) return;
	ccb = cp->cam_ccb;
	assert (ccb);
	if (!ccb) return;
	assert (target == (cp->target & 0xf));
	tp = &np->target[target];

	/* Start from the current register images for this target. */
	sval = tp->head.sval;
	wval = tp->head.wval;
	uval = tp->head.uval;

#if 0
	printf("XXXX sval=%x wval=%x uval=%x (%x)\n",
		sval, wval, uval, np->rv_scntl3);
#endif
	/*
	 * Set the offset.
	 * The C10 family has a 6-bit offset field, older chips 5 bits.
	 */
	if (!(np->features & FE_C10))
		sval = (sval & ~0x1f) | ofs;
	else
		sval = (sval & ~0x3f) | ofs;

	/*
	 * Set the sync divisor and extra clock factor.
	 * Only meaningful when an offset is set (synchronous mode).
	 */
	if (ofs != 0) {
		wval = (wval & ~0x70) | ((div+1) << 4);
		if (!(np->features & FE_C10))
			sval = (sval & ~0xe0) | (fak << 5);
		else {
			uval = uval & ~(XCLKH_ST|XCLKH_DT|XCLKS_ST|XCLKS_DT);
			if (fak >= 1) uval |= (XCLKH_ST|XCLKH_DT);
			if (fak >= 2) uval |= (XCLKS_ST|XCLKS_DT);
		}
	}

	/*
	 * Set the bus width.
	 */
	wval = wval & ~EWS;
	if (wide != 0)
		wval |= EWS;

	/*
	 * Set misc. ultra enable bits.
	 * C10: Ultra3 (DT) enable lives in SCNTL4; requires FE_U3EN.
	 * Others: ULTRA bit in SCNTL3 for periods <= 12 (i.e. fast-20+).
	 */
	if (np->features & FE_C10) {
		uval = uval & ~(U3EN|AIPCKEN);
		if (dt)	{
			assert(np->features & FE_U3EN);
			uval |= U3EN;
		}
	}
	else {
		wval = wval & ~ULTRA;
		if (per <= 12)	wval |= ULTRA;
	}

	/*
	 * Stop there if sync parameters are unchanged.
	 */
	if (tp->head.sval == sval &&
	    tp->head.wval == wval &&
	    tp->head.uval == uval)
		return;
	tp->head.sval = sval;
	tp->head.wval = wval;
	tp->head.uval = uval;

	/*
	 * Disable extended Sreq/Sack filtering if per < 50.
	 * Not supported on the C1010.
	 */
	if (per < 50 && !(np->features & FE_C10))
		OUTOFFB (nc_stest2, EXT);

	/*
	 * set actual value and sync_status
	 */
	OUTB (nc_sxfer,  tp->head.sval);
	OUTB (nc_scntl3, tp->head.wval);

	if (np->features & FE_C10) {
		OUTB (nc_scntl4, tp->head.uval);
	}

	/*
	 * patch ALL busy ccbs of this target.
	 * The select context is what the SCRIPTS load on (re)selection,
	 * so every in-flight CCB must carry the new register images.
	 */
	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
		cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		if (cp->target != target)
			continue;
		cp->phys.select.sel_scntl3 = tp->head.wval;
		cp->phys.select.sel_sxfer  = tp->head.sval;
		if (np->features & FE_C10) {
			cp->phys.select.sel_scntl4 = tp->head.uval;
		}
	}
}

/*
 * log message for real hard errors
 *
 * sym0 targ 0?: ERROR (ds:si) (so-si-sd) (sxfer/scntl3) @ name (dsp:dbc).
 * 	      reg: r0 r1 r2 r3 r4 r5 r6 ..... rf.
 *
 * exception register:
 * 	ds:	dstat
 * 	si:	sist
 *
 * SCSI bus lines:
 * 	so:	control lines as driven by chip.
 * 	si:	control lines as seen by chip.
 * 	sd:	scsi data lines as seen by chip.
3782 * 3783 * wide/fastmode: 3784 * sxfer: (see the manual) 3785 * scntl3: (see the manual) 3786 * 3787 * current script command: 3788 * dsp: script address (relative to start of script). 3789 * dbc: first word of script command. 3790 * 3791 * First 24 register of the chip: 3792 * r0..rf 3793 */ 3794 static void sym_log_hard_error(hcb_p np, u_short sist, u_char dstat) 3795 { 3796 u32 dsp; 3797 int script_ofs; 3798 int script_size; 3799 char *script_name; 3800 u_char *script_base; 3801 int i; 3802 3803 dsp = INL (nc_dsp); 3804 3805 if (dsp > np->scripta_ba && 3806 dsp <= np->scripta_ba + np->scripta_sz) { 3807 script_ofs = dsp - np->scripta_ba; 3808 script_size = np->scripta_sz; 3809 script_base = (u_char *) np->scripta0; 3810 script_name = "scripta"; 3811 } 3812 else if (np->scriptb_ba < dsp && 3813 dsp <= np->scriptb_ba + np->scriptb_sz) { 3814 script_ofs = dsp - np->scriptb_ba; 3815 script_size = np->scriptb_sz; 3816 script_base = (u_char *) np->scriptb0; 3817 script_name = "scriptb"; 3818 } else { 3819 script_ofs = dsp; 3820 script_size = 0; 3821 script_base = 0; 3822 script_name = "mem"; 3823 } 3824 3825 printf ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x) @ (%s %x:%08x).\n", 3826 sym_name (np), (unsigned)INB (nc_sdid)&0x0f, dstat, sist, 3827 (unsigned)INB (nc_socl), (unsigned)INB (nc_sbcl), 3828 (unsigned)INB (nc_sbdl), (unsigned)INB (nc_sxfer), 3829 (unsigned)INB (nc_scntl3), script_name, script_ofs, 3830 (unsigned)INL (nc_dbc)); 3831 3832 if (((script_ofs & 3) == 0) && 3833 (unsigned)script_ofs < script_size) { 3834 printf ("%s: script cmd = %08x\n", sym_name(np), 3835 scr_to_cpu((int) *(u32 *)(script_base + script_ofs))); 3836 } 3837 3838 printf ("%s: regdump:", sym_name(np)); 3839 for (i=0; i<24;i++) 3840 printf (" %02x", (unsigned)INB_OFF(i)); 3841 printf (".\n"); 3842 3843 /* 3844 * PCI BUS error, read the PCI ststus register. 
3845 */ 3846 if (dstat & (MDPE|BF)) { 3847 u_short pci_sts; 3848 pci_sts = pci_read_config(np->device, PCIR_STATUS, 2); 3849 if (pci_sts & 0xf900) { 3850 pci_write_config(np->device, PCIR_STATUS, pci_sts, 2); 3851 printf("%s: PCI STATUS = 0x%04x\n", 3852 sym_name(np), pci_sts & 0xf900); 3853 } 3854 } 3855 } 3856 3857 /* 3858 * chip interrupt handler 3859 * 3860 * In normal situations, interrupt conditions occur one at 3861 * a time. But when something bad happens on the SCSI BUS, 3862 * the chip may raise several interrupt flags before 3863 * stopping and interrupting the CPU. The additionnal 3864 * interrupt flags are stacked in some extra registers 3865 * after the SIP and/or DIP flag has been raised in the 3866 * ISTAT. After the CPU has read the interrupt condition 3867 * flag from SIST or DSTAT, the chip unstacks the other 3868 * interrupt flags and sets the corresponding bits in 3869 * SIST or DSTAT. Since the chip starts stacking once the 3870 * SIP or DIP flag is set, there is a small window of time 3871 * where the stacking does not occur. 3872 * 3873 * Typically, multiple interrupt conditions may happen in 3874 * the following situations: 3875 * 3876 * - SCSI parity error + Phase mismatch (PAR|MA) 3877 * When a parity error is detected in input phase 3878 * and the device switches to msg-in phase inside a 3879 * block MOV. 3880 * - SCSI parity error + Unexpected disconnect (PAR|UDC) 3881 * When a stupid device does not want to handle the 3882 * recovery of an SCSI parity error. 3883 * - Some combinations of STO, PAR, UDC, ... 3884 * When using non compliant SCSI stuff, when user is 3885 * doing non compliant hot tampering on the BUS, when 3886 * something really bad happens to a device, etc ... 3887 * 3888 * The heuristic suggested by SYMBIOS to handle 3889 * multiple interrupts is to try unstacking all 3890 * interrupts conditions and to handle them on some 3891 * priority based on error severity. 
3892 * This will work when the unstacking has been 3893 * successful, but we cannot be 100 % sure of that, 3894 * since the CPU may have been faster to unstack than 3895 * the chip is able to stack. Hmmm ... But it seems that 3896 * such a situation is very unlikely to happen. 3897 * 3898 * If this happen, for example STO caught by the CPU 3899 * then UDC happenning before the CPU have restarted 3900 * the SCRIPTS, the driver may wrongly complete the 3901 * same command on UDC, since the SCRIPTS didn't restart 3902 * and the DSA still points to the same command. 3903 * We avoid this situation by setting the DSA to an 3904 * invalid value when the CCB is completed and before 3905 * restarting the SCRIPTS. 3906 * 3907 * Another issue is that we need some section of our 3908 * recovery procedures to be somehow uninterruptible but 3909 * the SCRIPTS processor does not provides such a 3910 * feature. For this reason, we handle recovery preferently 3911 * from the C code and check against some SCRIPTS critical 3912 * sections from the C code. 3913 * 3914 * Hopefully, the interrupt handling of the driver is now 3915 * able to resist to weird BUS error conditions, but donnot 3916 * ask me for any guarantee that it will never fail. :-) 3917 * Use at your own decision and risk. 3918 */ 3919 3920 static void sym_intr1 (hcb_p np) 3921 { 3922 u_char istat, istatc; 3923 u_char dstat; 3924 u_short sist; 3925 3926 /* 3927 * interrupt on the fly ? 3928 * 3929 * A `dummy read' is needed to ensure that the 3930 * clear of the INTF flag reaches the device 3931 * before the scanning of the DONE queue. 
3932 */ 3933 istat = INB (nc_istat); 3934 if (istat & INTF) { 3935 OUTB (nc_istat, (istat & SIGP) | INTF | np->istat_sem); 3936 istat = INB (nc_istat); /* DUMMY READ */ 3937 if (DEBUG_FLAGS & DEBUG_TINY) printf ("F "); 3938 (void)sym_wakeup_done (np); 3939 }; 3940 3941 if (!(istat & (SIP|DIP))) 3942 return; 3943 3944 #if 0 /* We should never get this one */ 3945 if (istat & CABRT) 3946 OUTB (nc_istat, CABRT); 3947 #endif 3948 3949 /* 3950 * PAR and MA interrupts may occur at the same time, 3951 * and we need to know of both in order to handle 3952 * this situation properly. We try to unstack SCSI 3953 * interrupts for that reason. BTW, I dislike a LOT 3954 * such a loop inside the interrupt routine. 3955 * Even if DMA interrupt stacking is very unlikely to 3956 * happen, we also try unstacking these ones, since 3957 * this has no performance impact. 3958 */ 3959 sist = 0; 3960 dstat = 0; 3961 istatc = istat; 3962 do { 3963 if (istatc & SIP) 3964 sist |= INW (nc_sist); 3965 if (istatc & DIP) 3966 dstat |= INB (nc_dstat); 3967 istatc = INB (nc_istat); 3968 istat |= istatc; 3969 } while (istatc & (SIP|DIP)); 3970 3971 if (DEBUG_FLAGS & DEBUG_TINY) 3972 printf ("<%d|%x:%x|%x:%x>", 3973 (int)INB(nc_scr0), 3974 dstat,sist, 3975 (unsigned)INL(nc_dsp), 3976 (unsigned)INL(nc_dbc)); 3977 /* 3978 * On paper, a memory barrier may be needed here. 3979 * And since we are paranoid ... :) 3980 */ 3981 MEMORY_BARRIER(); 3982 3983 /* 3984 * First, interrupts we want to service cleanly. 3985 * 3986 * Phase mismatch (MA) is the most frequent interrupt 3987 * for chip earlier than the 896 and so we have to service 3988 * it as quickly as possible. 3989 * A SCSI parity error (PAR) may be combined with a phase 3990 * mismatch condition (MA). 3991 * Programmed interrupts (SIR) are used to call the C code 3992 * from SCRIPTS. 3993 * The single step interrupt (SSI) is not used in this 3994 * driver. 
3995 */ 3996 if (!(sist & (STO|GEN|HTH|SGE|UDC|SBMC|RST)) && 3997 !(dstat & (MDPE|BF|ABRT|IID))) { 3998 if (sist & PAR) sym_int_par (np, sist); 3999 else if (sist & MA) sym_int_ma (np); 4000 else if (dstat & SIR) sym_int_sir (np); 4001 else if (dstat & SSI) OUTONB_STD (); 4002 else goto unknown_int; 4003 return; 4004 }; 4005 4006 /* 4007 * Now, interrupts that donnot happen in normal 4008 * situations and that we may need to recover from. 4009 * 4010 * On SCSI RESET (RST), we reset everything. 4011 * On SCSI BUS MODE CHANGE (SBMC), we complete all 4012 * active CCBs with RESET status, prepare all devices 4013 * for negotiating again and restart the SCRIPTS. 4014 * On STO and UDC, we complete the CCB with the corres- 4015 * ponding status and restart the SCRIPTS. 4016 */ 4017 if (sist & RST) { 4018 xpt_print_path(np->path); 4019 printf("SCSI BUS reset detected.\n"); 4020 sym_init (np, 1); 4021 return; 4022 }; 4023 4024 OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ 4025 OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */ 4026 4027 if (!(sist & (GEN|HTH|SGE)) && 4028 !(dstat & (MDPE|BF|ABRT|IID))) { 4029 if (sist & SBMC) sym_int_sbmc (np); 4030 else if (sist & STO) sym_int_sto (np); 4031 else if (sist & UDC) sym_int_udc (np); 4032 else goto unknown_int; 4033 return; 4034 }; 4035 4036 /* 4037 * Now, interrupts we are not able to recover cleanly. 4038 * 4039 * Log message for hard errors. 4040 * Reset everything. 4041 */ 4042 4043 sym_log_hard_error(np, sist, dstat); 4044 4045 if ((sist & (GEN|HTH|SGE)) || 4046 (dstat & (MDPE|BF|ABRT|IID))) { 4047 sym_start_reset(np); 4048 return; 4049 }; 4050 4051 unknown_int: 4052 /* 4053 * We just miss the cause of the interrupt. :( 4054 * Print a message. The timeout will do the real work. 
4055 */ 4056 printf( "%s: unknown interrupt(s) ignored, " 4057 "ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n", 4058 sym_name(np), istat, dstat, sist); 4059 } 4060 4061 static void sym_intr(void *arg) 4062 { 4063 if (DEBUG_FLAGS & DEBUG_TINY) printf ("["); 4064 sym_intr1((hcb_p) arg); 4065 if (DEBUG_FLAGS & DEBUG_TINY) printf ("]"); 4066 return; 4067 } 4068 4069 static void sym_poll(struct cam_sim *sim) 4070 { 4071 int s = splcam(); 4072 sym_intr(cam_sim_softc(sim)); 4073 splx(s); 4074 } 4075 4076 4077 /* 4078 * generic recovery from scsi interrupt 4079 * 4080 * The doc says that when the chip gets an SCSI interrupt, 4081 * it tries to stop in an orderly fashion, by completing 4082 * an instruction fetch that had started or by flushing 4083 * the DMA fifo for a write to memory that was executing. 4084 * Such a fashion is not enough to know if the instruction 4085 * that was just before the current DSP value has been 4086 * executed or not. 4087 * 4088 * There are some small SCRIPTS sections that deal with 4089 * the start queue and the done queue that may break any 4090 * assomption from the C code if we are interrupted 4091 * inside, so we reset if this happens. Btw, since these 4092 * SCRIPTS sections are executed while the SCRIPTS hasn't 4093 * started SCSI operations, it is very unlikely to happen. 4094 * 4095 * All the driver data structures are supposed to be 4096 * allocated from the same 4 GB memory window, so there 4097 * is a 1 to 1 relationship between DSA and driver data 4098 * structures. Since we are careful :) to invalidate the 4099 * DSA when we complete a command or when the SCRIPTS 4100 * pushes a DSA into a queue, we can trust it when it 4101 * points to a CCB. 
4102 */ 4103 static void sym_recover_scsi_int (hcb_p np, u_char hsts) 4104 { 4105 u32 dsp = INL (nc_dsp); 4106 u32 dsa = INL (nc_dsa); 4107 ccb_p cp = sym_ccb_from_dsa(np, dsa); 4108 4109 /* 4110 * If we haven't been interrupted inside the SCRIPTS 4111 * critical pathes, we can safely restart the SCRIPTS 4112 * and trust the DSA value if it matches a CCB. 4113 */ 4114 if ((!(dsp > SCRIPTA_BA (np, getjob_begin) && 4115 dsp < SCRIPTA_BA (np, getjob_end) + 1)) && 4116 (!(dsp > SCRIPTA_BA (np, ungetjob) && 4117 dsp < SCRIPTA_BA (np, reselect) + 1)) && 4118 (!(dsp > SCRIPTB_BA (np, sel_for_abort) && 4119 dsp < SCRIPTB_BA (np, sel_for_abort_1) + 1)) && 4120 (!(dsp > SCRIPTA_BA (np, done) && 4121 dsp < SCRIPTA_BA (np, done_end) + 1))) { 4122 OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ 4123 OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */ 4124 /* 4125 * If we have a CCB, let the SCRIPTS call us back for 4126 * the handling of the error with SCRATCHA filled with 4127 * STARTPOS. This way, we will be able to freeze the 4128 * device queue and requeue awaiting IOs. 4129 */ 4130 if (cp) { 4131 cp->host_status = hsts; 4132 OUTL_DSP (SCRIPTA_BA (np, complete_error)); 4133 } 4134 /* 4135 * Otherwise just restart the SCRIPTS. 
4136 */ 4137 else { 4138 OUTL (nc_dsa, 0xffffff); 4139 OUTL_DSP (SCRIPTA_BA (np, start)); 4140 } 4141 } 4142 else 4143 goto reset_all; 4144 4145 return; 4146 4147 reset_all: 4148 sym_start_reset(np); 4149 } 4150 4151 /* 4152 * chip exception handler for selection timeout 4153 */ 4154 static void sym_int_sto (hcb_p np) 4155 { 4156 u32 dsp = INL (nc_dsp); 4157 4158 if (DEBUG_FLAGS & DEBUG_TINY) printf ("T"); 4159 4160 if (dsp == SCRIPTA_BA (np, wf_sel_done) + 8) 4161 sym_recover_scsi_int(np, HS_SEL_TIMEOUT); 4162 else 4163 sym_start_reset(np); 4164 } 4165 4166 /* 4167 * chip exception handler for unexpected disconnect 4168 */ 4169 static void sym_int_udc (hcb_p np) 4170 { 4171 printf ("%s: unexpected disconnect\n", sym_name(np)); 4172 sym_recover_scsi_int(np, HS_UNEXPECTED); 4173 } 4174 4175 /* 4176 * chip exception handler for SCSI bus mode change 4177 * 4178 * spi2-r12 11.2.3 says a transceiver mode change must 4179 * generate a reset event and a device that detects a reset 4180 * event shall initiate a hard reset. It says also that a 4181 * device that detects a mode change shall set data transfer 4182 * mode to eight bit asynchronous, etc... 4183 * So, just reinitializing all except chip should be enough. 4184 */ 4185 static void sym_int_sbmc (hcb_p np) 4186 { 4187 u_char scsi_mode = INB (nc_stest4) & SMODE; 4188 4189 /* 4190 * Notify user. 4191 */ 4192 xpt_print_path(np->path); 4193 printf("SCSI BUS mode change from %s to %s.\n", 4194 sym_scsi_bus_mode(np->scsi_mode), sym_scsi_bus_mode(scsi_mode)); 4195 4196 /* 4197 * Should suspend command processing for a few seconds and 4198 * reinitialize all except the chip. 4199 */ 4200 sym_init (np, 2); 4201 } 4202 4203 /* 4204 * chip exception handler for SCSI parity error. 4205 * 4206 * When the chip detects a SCSI parity error and is 4207 * currently executing a (CH)MOV instruction, it does 4208 * not interrupt immediately, but tries to finish the 4209 * transfer of the current scatter entry before 4210 * interrupting. 
 * The following situations may occur:
 *
 * - The complete scatter entry has been transferred
 *   without the device having changed phase.
 *   The chip will then interrupt with the DSP pointing
 *   to the instruction that follows the MOV.
 *
 * - A phase mismatch occurs before the MOV finished
 *   and phase errors are to be handled by the C code.
 *   The chip will then interrupt with both PAR and MA
 *   conditions set.
 *
 * - A phase mismatch occurs before the MOV finished and
 *   phase errors are to be handled by SCRIPTS.
 *   The chip will load the DSP with the phase mismatch
 *   JUMP address and interrupt the host processor.
 */
static void sym_int_par (hcb_p np, u_short sist)
{
	u_char	hsts	= INB (HS_PRT);
	u32	dsp	= INL (nc_dsp);
	u32	dbc	= INL (nc_dbc);
	u32	dsa	= INL (nc_dsa);
	u_char	sbcl	= INB (nc_sbcl);
	u_char	cmd	= dbc >> 24;
	int phase	= cmd & 7;
	ccb_p	cp	= sym_ccb_from_dsa(np, dsa);

	printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n",
		sym_name(np), hsts, dbc, sbcl);

	/*
	 * Check that the chip is connected to the SCSI BUS.
	 */
	if (!(INB (nc_scntl1) & ISCON)) {
		sym_recover_scsi_int(np, HS_UNEXPECTED);
		return;
	}

	/*
	 * If the nexus is not clearly identified, reset the bus.
	 * We will try to do better later.
	 */
	if (!cp)
		goto reset_all;

	/*
	 * Check instruction was a MOV, direction was INPUT and
	 * ATN is asserted.
	 */
	if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8))
		goto reset_all;

	/*
	 * Keep track of the parity error.
	 */
	OUTONB (HF_PRT, HF_EXT_ERR);
	cp->xerr_status |= XE_PARITY_ERR;

	/*
	 * Prepare the message to send to the device.
	 * MSG IN phase (7) gets MESSAGE PARITY ERROR, any other
	 * input phase gets INITIATOR DETECTED ERROR.
	 */
	np->msgout[0] = (phase == 7) ? M_PARITY : M_ID_ERROR;

	/*
	 * If the old phase was DATA IN phase, we have to deal with
	 * the 3 situations described above.
	 * For other input phases (MSG IN and STATUS), the device
	 * must resend the whole thing that failed parity checking
	 * or signal error. So, jumping to dispatcher should be OK.
	 */
	if (phase == 1 || phase == 5) {
		/* Phase mismatch handled by SCRIPTS */
		if (dsp == SCRIPTB_BA (np, pm_handle))
			OUTL_DSP (dsp);
		/* Phase mismatch handled by the C code */
		else if (sist & MA)
			sym_int_ma (np);
		/* No phase mismatch occurred */
		else {
			OUTL (nc_temp, dsp);
			OUTL_DSP (SCRIPTA_BA (np, dispatch));
		}
	}
	else
		OUTL_DSP (SCRIPTA_BA (np, clrack));
	return;

reset_all:
	sym_start_reset(np);
	return;
}

/*
 * chip exception handler for phase errors.
 *
 * We have to construct a new transfer descriptor,
 * to transfer the rest of the current block.
 */
static void sym_int_ma (hcb_p np)
{
	u32	dbc;
	u32	rest;
	u32	dsp;
	u32	dsa;
	u32	nxtdsp;
	u32	*vdsp;
	u32	oadr, olen;
	u32	*tblp;
	u32	newcmd;
	u_int	delta;
	u_char	cmd;
	u_char	hflags, hflags0;
	struct	sym_pmc *pm;
	ccb_p	cp;

	dsp	= INL (nc_dsp);
	dbc	= INL (nc_dbc);
	dsa	= INL (nc_dsa);

	cmd	= dbc >> 24;		/* interrupted SCRIPTS opcode byte */
	rest	= dbc & 0xffffff;	/* byte count not yet transferred */
	delta	= 0;

	/*
	 * locate matching cp if any.
	 */
	cp = sym_ccb_from_dsa(np, dsa);

	/*
	 * Do not take into account dma fifo and various buffers in
	 * INPUT phase since the chip flushes everything before
	 * raising the MA interrupt for interrupted INPUT phases.
	 * For DATA IN phase, we will check for the SWIDE later.
	 */
	if ((cmd & 7) != 1 && (cmd & 7) != 5) {
		u_char ss0, ss2;

		if (np->features & FE_DFBC)
			delta = INW (nc_dfbc);
		else {
			u32 dfifo;

			/*
			 * Read DFIFO, CTEST[4-6] using 1 PCI bus ownership.
			 */
			dfifo = INL(nc_dfifo);

			/*
			 * Calculate remaining bytes in DMA fifo.
			 * (CTEST5 = dfifo >> 16)
			 */
			if (dfifo & (DFS << 16))
				delta = ((((dfifo >> 8) & 0x300) |
				          (dfifo & 0xff)) - rest) & 0x3ff;
			else
				delta = ((dfifo & 0xff) - rest) & 0x7f;
		}

		/*
		 * The data in the dma fifo has not been transfered to
		 * the target -> add the amount to the rest
		 * and clear the data.
		 * Check the sstat2 register in case of wide transfer.
		 */
		rest += delta;
		ss0  = INB (nc_sstat0);
		if (ss0 & OLF) rest++;
		if (!(np->features & FE_C10))
			if (ss0 & ORF) rest++;
		if (cp && (cp->phys.select.sel_scntl3 & EWS)) {
			ss2 = INB (nc_sstat2);
			if (ss2 & OLF1) rest++;
			if (!(np->features & FE_C10))
				if (ss2 & ORF1) rest++;
		};

		/*
		 * Clear fifos.
		 */
		OUTB (nc_ctest3, np->rv_ctest3 | CLF);	/* dma fifo  */
		OUTB (nc_stest3, TE|CSF);		/* scsi fifo */
	}

	/*
	 * log the information
	 */
	if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE))
		printf ("P%x%x RL=%d D=%d ", cmd&7, INB(nc_sbcl)&7,
			(unsigned) rest, (unsigned) delta);

	/*
	 * try to find the interrupted script command,
	 * and the address at which to continue.
	 * vdsp is the CPU-visible address of the instruction that
	 * the DSP follows (DSP points past the 8-byte instruction).
	 */
	vdsp	= 0;
	nxtdsp	= 0;
	if	(dsp >  np->scripta_ba &&
		 dsp <= np->scripta_ba + np->scripta_sz) {
		vdsp = (u32 *)((char*)np->scripta0 + (dsp-np->scripta_ba-8));
		nxtdsp = dsp;
	}
	else if (dsp >  np->scriptb_ba &&
		 dsp <= np->scriptb_ba + np->scriptb_sz) {
		vdsp = (u32 *)((char*)np->scriptb0 + (dsp-np->scriptb_ba-8));
		nxtdsp = dsp;
	}

	/*
	 * log the information
	 */
	if (DEBUG_FLAGS & DEBUG_PHASE) {
		printf ("\nCP=%p DSP=%x NXT=%x VDSP=%p CMD=%x ",
			cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd);
	};

	if (!vdsp) {
		printf ("%s: interrupted SCRIPT address not found.\n",
			sym_name (np));
		goto reset_all;
	}

	if (!cp) {
		printf ("%s: SCSI phase error fixup: CCB already dequeued.\n",
			sym_name (np));
		goto reset_all;
	}

	/*
	 * get old startaddress and old length.
	 */
	oadr = scr_to_cpu(vdsp[1]);

	if (cmd & 0x10) {	/* Table indirect */
		tblp = (u32 *) ((char*) &cp->phys + oadr);
		olen = scr_to_cpu(tblp[0]);
		oadr = scr_to_cpu(tblp[1]);
	} else {
		tblp = (u32 *) 0;
		olen = scr_to_cpu(vdsp[0]) & 0xffffff;
	};

	if (DEBUG_FLAGS & DEBUG_PHASE) {
		printf ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n",
			(unsigned) (scr_to_cpu(vdsp[0]) >> 24),
			tblp,
			(unsigned) olen,
			(unsigned) oadr);
	};

	/*
	 * check cmd against assumed interrupted script command.
	 * If dt data phase, the MOVE instruction hasn't bit 4 of
	 * the phase.
	 */
	if (((cmd & 2) ? cmd : (cmd & ~4)) != (scr_to_cpu(vdsp[0]) >> 24)) {
		PRINT_ADDR(cp);
		printf ("internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n",
			(unsigned)cmd, (unsigned)scr_to_cpu(vdsp[0]) >> 24);

		goto reset_all;
	};

	/*
	 * if old phase not dataphase, leave here.
	 */
	if (cmd & 2) {
		PRINT_ADDR(cp);
		printf ("phase change %x-%x %d@%08x resid=%d.\n",
			cmd&7, INB(nc_sbcl)&7, (unsigned)olen,
			(unsigned)oadr, (unsigned)rest);
		goto unexpected_phase;
	};

	/*
	 * Choose the correct PM save area.
	 *
	 * Look at the PM_SAVE SCRIPT if you want to understand
	 * this stuff. The equivalent code is implemented in
	 * SCRIPTS for the 895A, 896 and 1010 that are able to
	 * handle PM from the SCRIPTS processor.
	 */
	hflags0 = INB (HF_PRT);
	hflags = hflags0;

	if (hflags & (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)) {
		if (hflags & HF_IN_PM0)
			nxtdsp = scr_to_cpu(cp->phys.pm0.ret);
		else if	(hflags & HF_IN_PM1)
			nxtdsp = scr_to_cpu(cp->phys.pm1.ret);

		if (hflags & HF_DP_SAVED)
			hflags ^= HF_ACT_PM;
	}

	if (!(hflags & HF_ACT_PM)) {
		pm = &cp->phys.pm0;
		newcmd = SCRIPTA_BA (np, pm0_data);
	}
	else {
		pm = &cp->phys.pm1;
		newcmd = SCRIPTA_BA (np, pm1_data);
	}

	hflags &= ~(HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED);
	if (hflags != hflags0)
		OUTB (HF_PRT, hflags);

	/*
	 * fillin the phase mismatch context
	 */
	pm->sg.addr = cpu_to_scr(oadr + olen - rest);
	pm->sg.size = cpu_to_scr(rest);
	pm->ret     = cpu_to_scr(nxtdsp);

	/*
	 * If we have a SWIDE,
	 * - prepare the address to write the SWIDE from SCRIPTS,
	 * - compute the SCRIPTS address to restart from,
	 * - move current data pointer context by one byte.
	 */
	nxtdsp = SCRIPTA_BA (np, dispatch);
	if ((cmd & 7) == 1 && cp && (cp->phys.select.sel_scntl3 & EWS) &&
	    (INB (nc_scntl2) & WSR)) {
		u32 tmp;

		/*
		 * Set up the table indirect for the MOVE
		 * of the residual byte and adjust the data
		 * pointer context.
		 */
		tmp = scr_to_cpu(pm->sg.addr);
		cp->phys.wresid.addr = cpu_to_scr(tmp);
		pm->sg.addr = cpu_to_scr(tmp + 1);
		tmp = scr_to_cpu(pm->sg.size);
		cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1);
		pm->sg.size = cpu_to_scr(tmp - 1);

		/*
		 * If only the residual byte is to be moved,
		 * no PM context is needed.
		 */
		if ((tmp&0xffffff) == 1)
			newcmd = pm->ret;

		/*
		 * Prepare the address of SCRIPTS that will
		 * move the residual byte to memory.
		 */
		nxtdsp = SCRIPTB_BA (np, wsr_ma_helper);
	}

	if (DEBUG_FLAGS & DEBUG_PHASE) {
		PRINT_ADDR(cp);
		printf ("PM %x %x %x / %x %x %x.\n",
			hflags0, hflags, newcmd,
			(unsigned)scr_to_cpu(pm->sg.addr),
			(unsigned)scr_to_cpu(pm->sg.size),
			(unsigned)scr_to_cpu(pm->ret));
	}

	/*
	 * Restart the SCRIPTS processor.
	 */
	OUTL (nc_temp, newcmd);
	OUTL_DSP (nxtdsp);
	return;

	/*
	 * Unexpected phase changes that occurs when the current phase
	 * is not a DATA IN or DATA OUT phase are due to error conditions.
	 * Such event may only happen when the SCRIPTS is using a
	 * multibyte SCSI MOVE.
	 *
	 * Phase change		Some possible cause
	 *
	 * COMMAND  --> MSG IN	SCSI parity error detected by target.
	 * COMMAND  --> STATUS	Bad command or refused by target.
	 * MSG OUT  --> MSG IN     Message rejected by target.
	 * MSG OUT  --> COMMAND    Bogus target that discards extended
	 *			negotiation messages.
	 *
	 * The code below does not care of the new phase and so
	 * trusts the target. Why to annoy it ?
	 * If the interrupted phase is COMMAND phase, we restart at
	 * dispatcher.
	 * If a target does not get all the messages after selection,
	 * the code assumes blindly that the target discards extended
	 * messages and clears the negotiation status.
	 * If the target does not want all our response to negotiation,
	 * we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids
	 * bloat for such a should_not_happen situation).
	 * In all other situation, we reset the BUS.
	 * Are these assumptions reasonnable ? (Wait and see ...)
	 */
unexpected_phase:
	dsp -= 8;	/* point back at the interrupted instruction */
	nxtdsp = 0;

	switch (cmd & 7) {
	case 2:	/* COMMAND phase */
		nxtdsp = SCRIPTA_BA (np, dispatch);
		break;
#if 0
	case 3:	/* STATUS  phase */
		nxtdsp = SCRIPTA_BA (np, dispatch);
		break;
#endif
	case 6:	/* MSG OUT phase */
		/*
		 * If the device may want to use untagged when we want
		 * tagged, we prepare an IDENTIFY without disc. granted,
		 * since we will not be able to handle reselect.
		 * Otherwise, we just don't care.
		 * (cp is known non-NULL here: checked above.)
		 */
		if	(dsp == SCRIPTA_BA (np, send_ident)) {
			if (cp->tag != NO_TAG && olen - rest <= 3) {
				cp->host_status = HS_BUSY;
				np->msgout[0] = M_IDENTIFY | cp->lun;
				nxtdsp = SCRIPTB_BA (np, ident_break_atn);
			}
			else
				nxtdsp = SCRIPTB_BA (np, ident_break);
		}
		else if	(dsp == SCRIPTB_BA (np, send_wdtr) ||
			 dsp == SCRIPTB_BA (np, send_sdtr) ||
			 dsp == SCRIPTB_BA (np, send_ppr)) {
			nxtdsp = SCRIPTB_BA (np, nego_bad_phase);
		}
		break;
#if 0
	case 7:	/* MSG IN  phase */
		nxtdsp = SCRIPTA_BA (np, clrack);
		break;
#endif
	}

	if (nxtdsp) {
		OUTL_DSP (nxtdsp);
		return;
	}

reset_all:
	sym_start_reset(np);
}

/*
 * Dequeue from the START queue all CCBs that match
 * a given target/lun/task condition (-1 means all),
 * and move them from the BUSY queue to the COMP queue
 * with CAM_REQUEUE_REQ status condition.
 * This function is used during error handling/recovery.
 * It is called with SCRIPTS not running.
 */
static int
sym_dequeue_from_squeue(hcb_p np, int i, int target, int lun, int task)
{
	int j;
	ccb_p cp;

	/*
	 * Make sure the starting index is within range.
	 * (each job occupies two 32-bit slots, hence 2*MAX_QUEUE)
	 */
	assert((i >= 0) && (i < 2*MAX_QUEUE));

	/*
	 * Walk until end of START queue and dequeue every job
	 * that matches the target/lun/task condition.
	 * 'i' reads ahead, 'j' is the write-back position: jobs that
	 * do NOT match are compacted toward 'j' so the surviving
	 * entries stay contiguous in the circular queue.
	 */
	j = i;
	while (i != np->squeueput) {
		cp = sym_ccb_from_dsa(np, scr_to_cpu(np->squeue[i]));
		assert(cp);
#ifdef SYM_CONF_IARB_SUPPORT
		/* Forget hints for IARB, they may be no longer relevant */
		cp->host_flags &= ~HF_HINT_IARB;
#endif
		if ((target == -1 || cp->target == target) &&
		    (lun    == -1 || cp->lun    == lun)    &&
		    (task   == -1 || cp->tag    == task)) {
			/* Match: requeue to CAM and move to the COMP queue */
			sym_set_cam_status(cp->cam_ccb, CAM_REQUEUE_REQ);
			sym_remque(&cp->link_ccbq);
			sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
		}
		else {
			/* No match: keep the job, compacting if a gap opened */
			if (i != j)
				np->squeue[j] = np->squeue[i];
			if ((j += 2) >= MAX_QUEUE*2) j = 0;
		}
		if ((i += 2) >= MAX_QUEUE*2) i = 0;
	}
	if (i != j)		/* Copy back the idle task if needed */
		np->squeue[j] = np->squeue[i];
	np->squeueput = j;	/* Update our current start queue pointer */

	/* Number of jobs dequeued (two slots per job) */
	return (i - j) / 2;
}

/*
 * Complete all CCBs queued to the COMP queue.
 *
 * These CCBs are assumed:
 * - Not to be referenced either by devices or
 *   SCRIPTS-related queues and data.
 * - To have to be completed with an error condition
 *   or requeued.
 *
 * The device queue freeze count is incremented
 * for each CCB that does not prevent this.
 * This function is called when all CCBs involved
 * in error handling/recovery have been reaped.
4721 */ 4722 static void 4723 sym_flush_comp_queue(hcb_p np, int cam_status) 4724 { 4725 SYM_QUEHEAD *qp; 4726 ccb_p cp; 4727 4728 while ((qp = sym_remque_head(&np->comp_ccbq)) != 0) { 4729 union ccb *ccb; 4730 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); 4731 sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); 4732 /* Leave quiet CCBs waiting for resources */ 4733 if (cp->host_status == HS_WAIT) 4734 continue; 4735 ccb = cp->cam_ccb; 4736 if (cam_status) 4737 sym_set_cam_status(ccb, cam_status); 4738 sym_free_ccb(np, cp); 4739 sym_freeze_cam_ccb(ccb); 4740 sym_xpt_done(np, ccb); 4741 } 4742 } 4743 4744 /* 4745 * chip handler for bad SCSI status condition 4746 * 4747 * In case of bad SCSI status, we unqueue all the tasks 4748 * currently queued to the controller but not yet started 4749 * and then restart the SCRIPTS processor immediately. 4750 * 4751 * QUEUE FULL and BUSY conditions are handled the same way. 4752 * Basically all the not yet started tasks are requeued in 4753 * device queue and the queue is frozen until a completion. 4754 * 4755 * For CHECK CONDITION and COMMAND TERMINATED status, we use 4756 * the CCB of the failed command to prepare a REQUEST SENSE 4757 * SCSI command and queue it to the controller queue. 4758 * 4759 * SCRATCHA is assumed to have been loaded with STARTPOS 4760 * before the SCRIPTS called the C code. 4761 */ 4762 static void sym_sir_bad_scsi_status(hcb_p np, int num, ccb_p cp) 4763 { 4764 tcb_p tp = &np->target[cp->target]; 4765 u32 startp; 4766 u_char s_status = cp->ssss_status; 4767 u_char h_flags = cp->host_flags; 4768 int msglen; 4769 int nego; 4770 int i; 4771 4772 /* 4773 * Compute the index of the next job to start from SCRIPTS. 4774 */ 4775 i = (INL (nc_scratcha) - np->squeue_ba) / 4; 4776 4777 /* 4778 * The last CCB queued used for IARB hint may be 4779 * no longer relevant. Forget it. 
4780 */ 4781 #ifdef SYM_CONF_IARB_SUPPORT 4782 if (np->last_cp) 4783 np->last_cp = 0; 4784 #endif 4785 4786 /* 4787 * Now deal with the SCSI status. 4788 */ 4789 switch(s_status) { 4790 case S_BUSY: 4791 case S_QUEUE_FULL: 4792 if (sym_verbose >= 2) { 4793 PRINT_ADDR(cp); 4794 printf (s_status == S_BUSY ? "BUSY" : "QUEUE FULL\n"); 4795 } 4796 default: /* S_INT, S_INT_COND_MET, S_CONFLICT */ 4797 sym_complete_error (np, cp); 4798 break; 4799 case S_TERMINATED: 4800 case S_CHECK_COND: 4801 /* 4802 * If we get an SCSI error when requesting sense, give up. 4803 */ 4804 if (h_flags & HF_SENSE) { 4805 sym_complete_error (np, cp); 4806 break; 4807 } 4808 4809 /* 4810 * Dequeue all queued CCBs for that device not yet started, 4811 * and restart the SCRIPTS processor immediately. 4812 */ 4813 (void) sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1); 4814 OUTL_DSP (SCRIPTA_BA (np, start)); 4815 4816 /* 4817 * Save some info of the actual IO. 4818 * Compute the data residual. 4819 */ 4820 cp->sv_scsi_status = cp->ssss_status; 4821 cp->sv_xerr_status = cp->xerr_status; 4822 cp->sv_resid = sym_compute_residual(np, cp); 4823 4824 /* 4825 * Prepare all needed data structures for 4826 * requesting sense data. 4827 */ 4828 4829 /* 4830 * identify message 4831 */ 4832 cp->scsi_smsg2[0] = M_IDENTIFY | cp->lun; 4833 msglen = 1; 4834 4835 /* 4836 * If we are currently using anything different from 4837 * async. 8 bit data transfers with that target, 4838 * start a negotiation, since the device may want 4839 * to report us a UNIT ATTENTION condition due to 4840 * a cause we currently ignore, and we donnot want 4841 * to be stuck with WIDE and/or SYNC data transfer. 4842 * 4843 * cp->nego_status is filled by sym_prepare_nego(). 
4844 */ 4845 cp->nego_status = 0; 4846 nego = 0; 4847 if (tp->tinfo.current.options & PPR_OPT_MASK) 4848 nego = NS_PPR; 4849 else if (tp->tinfo.current.width != BUS_8_BIT) 4850 nego = NS_WIDE; 4851 else if (tp->tinfo.current.offset != 0) 4852 nego = NS_SYNC; 4853 if (nego) 4854 msglen += 4855 sym_prepare_nego (np,cp, nego, &cp->scsi_smsg2[msglen]); 4856 /* 4857 * Message table indirect structure. 4858 */ 4859 cp->phys.smsg.addr = cpu_to_scr(CCB_BA (cp, scsi_smsg2)); 4860 cp->phys.smsg.size = cpu_to_scr(msglen); 4861 4862 /* 4863 * sense command 4864 */ 4865 cp->phys.cmd.addr = cpu_to_scr(CCB_BA (cp, sensecmd)); 4866 cp->phys.cmd.size = cpu_to_scr(6); 4867 4868 /* 4869 * patch requested size into sense command 4870 */ 4871 cp->sensecmd[0] = 0x03; 4872 cp->sensecmd[1] = cp->lun << 5; 4873 #ifdef FreeBSD_New_Tran_Settings 4874 if (tp->tinfo.current.scsi_version > 2 || cp->lun > 7) 4875 cp->sensecmd[1] = 0; 4876 #endif 4877 cp->sensecmd[4] = SYM_SNS_BBUF_LEN; 4878 cp->data_len = SYM_SNS_BBUF_LEN; 4879 4880 /* 4881 * sense data 4882 */ 4883 bzero(cp->sns_bbuf, SYM_SNS_BBUF_LEN); 4884 cp->phys.sense.addr = cpu_to_scr(vtobus(cp->sns_bbuf)); 4885 cp->phys.sense.size = cpu_to_scr(SYM_SNS_BBUF_LEN); 4886 4887 /* 4888 * requeue the command. 4889 */ 4890 startp = SCRIPTB_BA (np, sdata_in); 4891 4892 cp->phys.head.savep = cpu_to_scr(startp); 4893 cp->phys.head.goalp = cpu_to_scr(startp + 16); 4894 cp->phys.head.lastp = cpu_to_scr(startp); 4895 cp->startp = cpu_to_scr(startp); 4896 4897 cp->actualquirks = SYM_QUIRK_AUTOSAVE; 4898 cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; 4899 cp->ssss_status = S_ILLEGAL; 4900 cp->host_flags = (HF_SENSE|HF_DATA_IN); 4901 cp->xerr_status = 0; 4902 cp->extra_bytes = 0; 4903 4904 cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA (np, select)); 4905 4906 /* 4907 * Requeue the command. 4908 */ 4909 sym_put_start_queue(np, cp); 4910 4911 /* 4912 * Give back to upper layer everything we have dequeued. 
4913 */ 4914 sym_flush_comp_queue(np, 0); 4915 break; 4916 } 4917 } 4918 4919 /* 4920 * After a device has accepted some management message 4921 * as BUS DEVICE RESET, ABORT TASK, etc ..., or when 4922 * a device signals a UNIT ATTENTION condition, some 4923 * tasks are thrown away by the device. We are required 4924 * to reflect that on our tasks list since the device 4925 * will never complete these tasks. 4926 * 4927 * This function move from the BUSY queue to the COMP 4928 * queue all disconnected CCBs for a given target that 4929 * match the following criteria: 4930 * - lun=-1 means any logical UNIT otherwise a given one. 4931 * - task=-1 means any task, otherwise a given one. 4932 */ 4933 static int 4934 sym_clear_tasks(hcb_p np, int cam_status, int target, int lun, int task) 4935 { 4936 SYM_QUEHEAD qtmp, *qp; 4937 int i = 0; 4938 ccb_p cp; 4939 4940 /* 4941 * Move the entire BUSY queue to our temporary queue. 4942 */ 4943 sym_que_init(&qtmp); 4944 sym_que_splice(&np->busy_ccbq, &qtmp); 4945 sym_que_init(&np->busy_ccbq); 4946 4947 /* 4948 * Put all CCBs that matches our criteria into 4949 * the COMP queue and put back other ones into 4950 * the BUSY queue. 
4951 */ 4952 while ((qp = sym_remque_head(&qtmp)) != 0) { 4953 union ccb *ccb; 4954 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); 4955 ccb = cp->cam_ccb; 4956 if (cp->host_status != HS_DISCONNECT || 4957 cp->target != target || 4958 (lun != -1 && cp->lun != lun) || 4959 (task != -1 && 4960 (cp->tag != NO_TAG && cp->scsi_smsg[2] != task))) { 4961 sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); 4962 continue; 4963 } 4964 sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); 4965 4966 /* Preserve the software timeout condition */ 4967 if (sym_get_cam_status(ccb) != CAM_CMD_TIMEOUT) 4968 sym_set_cam_status(ccb, cam_status); 4969 ++i; 4970 #if 0 4971 printf("XXXX TASK @%p CLEARED\n", cp); 4972 #endif 4973 } 4974 return i; 4975 } 4976 4977 /* 4978 * chip handler for TASKS recovery 4979 * 4980 * We cannot safely abort a command, while the SCRIPTS 4981 * processor is running, since we just would be in race 4982 * with it. 4983 * 4984 * As long as we have tasks to abort, we keep the SEM 4985 * bit set in the ISTAT. When this bit is set, the 4986 * SCRIPTS processor interrupts (SIR_SCRIPT_STOPPED) 4987 * each time it enters the scheduler. 4988 * 4989 * If we have to reset a target, clear tasks of a unit, 4990 * or to perform the abort of a disconnected job, we 4991 * restart the SCRIPTS for selecting the target. Once 4992 * selected, the SCRIPTS interrupts (SIR_TARGET_SELECTED). 4993 * If it loses arbitration, the SCRIPTS will interrupt again 4994 * the next time it will enter its scheduler, and so on ... 4995 * 4996 * On SIR_TARGET_SELECTED, we scan for the more 4997 * appropriate thing to do: 4998 * 4999 * - If nothing, we just sent a M_ABORT message to the 5000 * target to get rid of the useless SCSI bus ownership. 5001 * According to the specs, no tasks shall be affected. 5002 * - If the target is to be reset, we send it a M_RESET 5003 * message. 5004 * - If a logical UNIT is to be cleared , we send the 5005 * IDENTIFY(lun) + M_ABORT. 
 * - If an untagged task is to be aborted, we send the
 *   IDENTIFY(lun) + M_ABORT.
 * - If a tagged task is to be aborted, we send the
 *   IDENTIFY(lun) + task attributes + M_ABORT_TAG.
 *
 * Once our 'kiss of death' :) message has been accepted
 * by the target, the SCRIPTS interrupts again
 * (SIR_ABORT_SENT). On this interrupt, we complete
 * all the CCBs that should have been aborted by the
 * target according to our message.
 */
static void sym_sir_task_recovery(hcb_p np, int num)
{
	SYM_QUEHEAD *qp;
	ccb_p cp;
	tcb_p tp;
	int target=-1, lun=-1, task;
	int i, k;

	switch(num) {
	/*
	 * The SCRIPTS processor stopped before starting
	 * the next command in order to allow us to perform
	 * some task recovery.
	 */
	case SIR_SCRIPT_STOPPED:
		/*
		 * Do we have any target to reset or unit to clear ?
		 */
		for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
			tp = &np->target[i];
			if (tp->to_reset ||
			    (tp->lun0p && tp->lun0p->to_clear)) {
				target = i;
				break;
			}
			if (!tp->lunmp)
				continue;
			for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
				if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
					target	= i;
					break;
				}
			}
			if (target != -1)
				break;
		}

		/*
		 * If not, walk the busy queue for any
		 * disconnected CCB to be aborted.
		 */
		if (target == -1) {
			FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
				cp = sym_que_entry(qp,struct sym_ccb,link_ccbq);
				if (cp->host_status != HS_DISCONNECT)
					continue;
				if (cp->to_abort) {
					target = cp->target;
					break;
				}
			}
		}

		/*
		 * If some target is to be selected,
		 * prepare and start the selection.
		 */
		if (target != -1) {
			tp = &np->target[target];
			np->abrt_sel.sel_id	= target;
			np->abrt_sel.sel_scntl3 = tp->head.wval;
			np->abrt_sel.sel_sxfer  = tp->head.sval;
			OUTL(nc_dsa, np->hcb_ba);
			OUTL_DSP (SCRIPTB_BA (np, sel_for_abort));
			return;
		}

		/*
		 * Now look for a CCB to abort that hasn't started yet.
		 * Btw, the SCRIPTS processor is still stopped, so
		 * we are not in race.
		 */
		i = 0;
		cp = 0;
		FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
			cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
			if (cp->host_status != HS_BUSY &&
			    cp->host_status != HS_NEGOTIATE)
				continue;
			if (!cp->to_abort)
				continue;
#ifdef SYM_CONF_IARB_SUPPORT
			/*
			 * If we are using IMMEDIATE ARBITRATION, we do not
			 * want to cancel the last queued CCB, since the
			 * SCRIPTS may have anticipated the selection.
			 */
			if (cp == np->last_cp) {
				cp->to_abort = 0;
				continue;
			}
#endif
			i = 1;	/* Means we have found some */
			break;
		}
		if (!i) {
			/*
			 * We are done, so we do not need
			 * to synchronize with the SCRIPTS any longer.
			 * Remove the SEM flag from the ISTAT.
			 */
			np->istat_sem = 0;
			OUTB (nc_istat, SIGP);
			break;
		}
		/*
		 * Compute index of next position in the start
		 * queue the SCRIPTS intends to start and dequeue
		 * all CCBs for that device that haven't been started.
		 */
		i = (INL (nc_scratcha) - np->squeue_ba) / 4;
		i = sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);

		/*
		 * Make sure at least our IO to abort has been dequeued.
		 */
		assert(i && sym_get_cam_status(cp->cam_ccb) == CAM_REQUEUE_REQ);

		/*
		 * Keep track in cam status of the reason of the abort.
		 * (to_abort == 2 flags a software-timeout-initiated abort)
		 */
		if (cp->to_abort == 2)
			sym_set_cam_status(cp->cam_ccb, CAM_CMD_TIMEOUT);
		else
			sym_set_cam_status(cp->cam_ccb, CAM_REQ_ABORTED);

		/*
		 * Complete with error everything that we have dequeued.
		 */
		sym_flush_comp_queue(np, 0);
		break;
	/*
	 * The SCRIPTS processor has selected a target
	 * we may have some manual recovery to perform for.
	 */
	case SIR_TARGET_SELECTED:
		target = (INB (nc_sdid) & 0xf);
		tp = &np->target[target];

		np->abrt_tbl.addr = cpu_to_scr(vtobus(np->abrt_msg));

		/*
		 * If the target is to be reset, prepare a
		 * M_RESET message and clear the to_reset flag
		 * since we do not expect this operation to fail.
		 */
		if (tp->to_reset) {
			np->abrt_msg[0] = M_RESET;
			np->abrt_tbl.size = 1;
			tp->to_reset = 0;
			break;
		}

		/*
		 * Otherwise, look for some logical unit to be cleared.
		 */
		if (tp->lun0p && tp->lun0p->to_clear)
			lun = 0;
		else if (tp->lunmp) {
			for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
				if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
					lun = k;
					break;
				}
			}
		}

		/*
		 * If a logical unit is to be cleared, prepare
		 * an IDENTIFY(lun) + ABORT MESSAGE.
		 */
		if (lun != -1) {
			lcb_p lp = sym_lp(np, tp, lun);
			lp->to_clear = 0; /* We do not expect to fail here */
			np->abrt_msg[0] = M_IDENTIFY | lun;
			np->abrt_msg[1] = M_ABORT;
			np->abrt_tbl.size = 2;
			break;
		}

		/*
		 * Otherwise, look for some disconnected job to
		 * abort for this target.
		 */
		i = 0;
		cp = 0;
		FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
			cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
			if (cp->host_status != HS_DISCONNECT)
				continue;
			if (cp->target != target)
				continue;
			if (!cp->to_abort)
				continue;
			i = 1;	/* Means we have some */
			break;
		}

		/*
		 * If we have none, probably since the device has
		 * completed the command before we won arbitration,
		 * send a M_ABORT message without IDENTIFY.
		 * According to the specs, the device must just
		 * disconnect the BUS and not abort any task.
		 */
		if (!i) {
			np->abrt_msg[0] = M_ABORT;
			np->abrt_tbl.size = 1;
			break;
		}

		/*
		 * We have some task to abort.
		 * Set the IDENTIFY(lun)
		 */
		np->abrt_msg[0] = M_IDENTIFY | cp->lun;

		/*
		 * If we want to abort an untagged command, we
		 * will send an IDENTIFY + M_ABORT.
		 * Otherwise (tagged command), we will send
		 * an IDENTIFY + task attributes + ABORT TAG.
		 */
		if (cp->tag == NO_TAG) {
			np->abrt_msg[1] = M_ABORT;
			np->abrt_tbl.size = 2;
		}
		else {
			/* scsi_smsg[1]/[2] hold the tag message sent at select */
			np->abrt_msg[1] = cp->scsi_smsg[1];
			np->abrt_msg[2] = cp->scsi_smsg[2];
			np->abrt_msg[3] = M_ABORT_TAG;
			np->abrt_tbl.size = 4;
		}
		/*
		 * Keep track of software timeout condition, since the
		 * peripheral driver may not count retries on abort
		 * conditions not due to timeout.
		 */
		if (cp->to_abort == 2)
			sym_set_cam_status(cp->cam_ccb, CAM_CMD_TIMEOUT);
		cp->to_abort = 0; /* We do not expect to fail here */
		break;

	/*
	 * The target has accepted our message and switched
	 * to BUS FREE phase as we expected.
	 */
	case SIR_ABORT_SENT:
		target = (INB (nc_sdid) & 0xf);
		tp = &np->target[target];

		/*
		 * If we didn't abort anything, leave here.
		 */
		if (np->abrt_msg[0] == M_ABORT)
			break;

		/*
		 * If we sent a M_RESET, then a hardware reset has
		 * been performed by the target.
		 * - Reset everything to async 8 bit
		 * - Tell ourself to negotiate next time :-)
		 * - Prepare to clear all disconnected CCBs for
		 *   this target from our task list (lun=task=-1)
		 */
		lun = -1;
		task = -1;
		if (np->abrt_msg[0] == M_RESET) {
			tp->head.sval = 0;
			tp->head.wval = np->rv_scntl3;
			tp->head.uval = 0;
			tp->tinfo.current.period = 0;
			tp->tinfo.current.offset = 0;
			tp->tinfo.current.width  = BUS_8_BIT;
			tp->tinfo.current.options = 0;
		}

		/*
		 * Otherwise, check for the LUN and TASK(s)
		 * concerned by the cancellation.
		 * If it is not ABORT_TAG then it is CLEAR_QUEUE
		 * or an ABORT message :-)
		 */
		else {
			lun = np->abrt_msg[0] & 0x3f;
			if (np->abrt_msg[1] == M_ABORT_TAG)
				task = np->abrt_msg[2];
		}

		/*
		 * Complete all the CCBs the device should have
		 * aborted due to our 'kiss of death' message.
		 */
		i = (INL (nc_scratcha) - np->squeue_ba) / 4;
		(void) sym_dequeue_from_squeue(np, i, target, lun, -1);
		(void) sym_clear_tasks(np, CAM_REQ_ABORTED, target, lun, task);
		sym_flush_comp_queue(np, 0);

		/*
		 * If we sent a BDR, make upper layer aware of that.
		 */
		if (np->abrt_msg[0] == M_RESET)
			xpt_async(AC_SENT_BDR, np->path, NULL);
		break;
	}

	/*
	 * Print to the log the message we intend to send.
	 */
	if (num == SIR_TARGET_SELECTED) {
		PRINT_TARGET(np, target);
		sym_printl_hex("control msgout:", np->abrt_msg,
			      np->abrt_tbl.size);
		np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size);
	}

	/*
	 * Let the SCRIPTS processor continue.
	 */
	OUTONB_STD ();
}

/*
 * Gerard's alchemy:) that deals with the data
 * pointer for both MDP and the residual calculation.
 *
 * I didn't want to bloat the code by more than 200
 * lines for the handling of both MDP and the residual.
 * This has been achieved by using a data pointer
 * representation consisting in an index in the data
 * array (dp_sg) and a negative offset (dp_ofs) that
 * have the following meaning:
 *
 * - dp_sg = SYM_CONF_MAX_SG
 *   we are at the end of the data script.
 * - dp_sg < SYM_CONF_MAX_SG
 *   dp_sg points to the next entry of the scatter array
 *   we want to transfer.
 * - dp_ofs < 0
 *   dp_ofs represents the residual of bytes of the
 *   previous scatter entry we will send first.
 * - dp_ofs = 0
 *   no residual to send first.
 *
 * The function sym_evaluate_dp() accepts an arbitrary
 * offset (basically from the MDP message) and returns
 * the corresponding values of dp_sg and dp_ofs.
 */

static int sym_evaluate_dp(hcb_p np, ccb_p cp, u32 scr, int *ofs)
{
	u32	dp_scr;
	int	dp_ofs, dp_sg, dp_sgmin;
	int	tmp;
	struct sym_pmc *pm;

	/*
	 * Compute the resulting data pointer in term of a script
	 * address within some DATA script and a signed byte offset.
	 */
	dp_scr = scr;
	dp_ofs = *ofs;
	if	(dp_scr == SCRIPTA_BA (np, pm0_data))
		pm = &cp->phys.pm0;
	else if (dp_scr == SCRIPTA_BA (np, pm1_data))
		pm = &cp->phys.pm1;
	else
		pm = 0;

	if (pm) {
		/* Fold the saved phase-mismatch context into scr/ofs */
		dp_scr  = scr_to_cpu(pm->ret);
		dp_ofs -= scr_to_cpu(pm->sg.size);
	}

	/*
	 * If we are auto-sensing, then we are done.
	 */
	if (cp->host_flags & HF_SENSE) {
		*ofs = dp_ofs;
		return 0;
	}

	/*
	 * Deduce the index of the sg entry.
	 * Keep track of the index of the first valid entry.
	 * If result is dp_sg = SYM_CONF_MAX_SG, then we are at the
	 * end of the data.
	 * (each sg entry is two 32-bit words in the script, hence 2*4)
	 */
	tmp = scr_to_cpu(cp->phys.head.goalp);
	dp_sg = SYM_CONF_MAX_SG;
	if (dp_scr != tmp)
		dp_sg -= (tmp - 8 - (int)dp_scr) / (2*4);
	dp_sgmin = SYM_CONF_MAX_SG - cp->segments;

	/*
	 * Move to the sg entry the data pointer belongs to.
	 *
	 * If we are inside the data area, we expect result to be:
	 *
	 * Either,
	 *     dp_ofs = 0 and dp_sg is the index of the sg entry
	 *     the data pointer belongs to (or the end of the data)
	 * Or,
	 *     dp_ofs < 0 and dp_sg is the index of the sg entry
	 *     the data pointer belongs to + 1.
	 */
	if (dp_ofs < 0) {
		int n;
		while (dp_sg > dp_sgmin) {
			--dp_sg;
			tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
			n = dp_ofs + (tmp & 0xffffff);
			if (n > 0) {
				++dp_sg;
				break;
			}
			dp_ofs = n;
		}
	}
	else if (dp_ofs > 0) {
		while (dp_sg < SYM_CONF_MAX_SG) {
			tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
			dp_ofs -= (tmp & 0xffffff);
			++dp_sg;
			if (dp_ofs <= 0)
				break;
		}
	}

	/*
	 * Make sure the data pointer is inside the data area.
	 * If not, return some error.
	 */
	if	(dp_sg < dp_sgmin || (dp_sg == dp_sgmin && dp_ofs < 0))
		goto out_err;
	else if	(dp_sg > SYM_CONF_MAX_SG ||
		 (dp_sg == SYM_CONF_MAX_SG && dp_ofs > 0))
		goto out_err;

	/*
	 * Save the extreme pointer if needed.
	 */
	if	(dp_sg > cp->ext_sg ||
		 (dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) {
		cp->ext_sg  = dp_sg;
		cp->ext_ofs = dp_ofs;
	}

	/*
	 * Return data.
	 */
	*ofs = dp_ofs;
	return dp_sg;

out_err:
	return -1;
}

/*
 * chip handler for MODIFY DATA POINTER MESSAGE
 *
 * We also call this function on IGNORE WIDE RESIDUE
 * messages that do not match a SWIDE full condition.
 * Btw, we assume in that situation that such a message
 * is equivalent to a MODIFY DATA POINTER (offset=-1).
 */

static void sym_modify_dp(hcb_p np, tcb_p tp, ccb_p cp, int ofs)
{
	int dp_ofs	= ofs;
	u32	dp_scr	= INL (nc_temp);
	u32	dp_ret;
	u32	tmp;
	u_char	hflags;
	int	dp_sg;
	struct	sym_pmc *pm;

	/*
	 * Not supported for auto-sense.
	 */
	if (cp->host_flags & HF_SENSE)
		goto out_reject;

	/*
	 * Apply our alchemy:) (see comments in sym_evaluate_dp()),
	 * to the resulting data pointer.
	 */
	dp_sg = sym_evaluate_dp(np, cp, dp_scr, &dp_ofs);
	if (dp_sg < 0)
		goto out_reject;

	/*
	 * And our alchemy:) allows to easily calculate the data
	 * script address we want to return for the next data phase.
	 */
	dp_ret = cpu_to_scr(cp->phys.head.goalp);
	dp_ret = dp_ret - 8 - (SYM_CONF_MAX_SG - dp_sg) * (2*4);

	/*
	 * If offset / scatter entry is zero we do not need
	 * a context for the new current data pointer.
	 */
	if (dp_ofs == 0) {
		dp_scr = dp_ret;
		goto out_ok;
	}

	/*
	 * Get a context for the new current data pointer.
	 * (HF_DP_SAVED means a save occurred since the last PM;
	 * toggle the active PM context so we don't clobber it)
	 */
	hflags = INB (HF_PRT);

	if (hflags & HF_DP_SAVED)
		hflags ^= HF_ACT_PM;

	if (!(hflags & HF_ACT_PM)) {
		pm  = &cp->phys.pm0;
		dp_scr = SCRIPTA_BA (np, pm0_data);
	}
	else {
		pm = &cp->phys.pm1;
		dp_scr = SCRIPTA_BA (np, pm1_data);
	}

	hflags &= ~(HF_DP_SAVED);

	OUTB (HF_PRT, hflags);

	/*
	 * Set up the new current data pointer.
	 * ofs < 0 there, and for the next data phase, we
	 * want to transfer part of the data of the sg entry
	 * corresponding to index dp_sg-1 prior to returning
	 * to the main data script.
	 */
	pm->ret = cpu_to_scr(dp_ret);
	tmp  = scr_to_cpu(cp->phys.data[dp_sg-1].addr);
	tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs;
	pm->sg.addr = cpu_to_scr(tmp);
	pm->sg.size = cpu_to_scr(-dp_ofs);

out_ok:
	OUTL (nc_temp, dp_scr);
	OUTL_DSP (SCRIPTA_BA (np, clrack));
	return;

out_reject:
	OUTL_DSP (SCRIPTB_BA (np, msg_bad));
}


/*
 * chip calculation of the data residual.
 *
 * As I used to say, the requirement of data residual
 * in SCSI is broken, useless and cannot be achieved
 * without huge complexity.
 * But most OSes and even the official CAM require it.
 * When stupidity happens to be so widely spread inside
 * a community, it gets hard to convince.
 *
 * Anyway, I don't care, since I am not going to use
 * any software that considers this data residual as
 * a relevant information. :)
 */

static int sym_compute_residual(hcb_p np, ccb_p cp)
{
	int dp_sg, dp_sgmin, resid = 0;
	int dp_ofs = 0;

	/*
	 * Check for some data lost or just thrown away.
	 * We are not required to be quite accurate in this
	 * situation. Btw, if we are odd for output and the
	 * device claims some more data, it may well happen
	 * that our residual be zero. :-)
	 */
	if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) {
		if (cp->xerr_status & XE_EXTRA_DATA)
			resid -= cp->extra_bytes;
		if (cp->xerr_status & XE_SODL_UNRUN)
			++resid;
		if (cp->xerr_status & XE_SWIDE_OVRUN)
			--resid;
	}

	/*
	 * If all data has been transferred,
	 * there is no residual.
	 */
	if (cp->phys.head.lastp == cp->phys.head.goalp)
		return resid;

	/*
	 * If no data transfer occurs, or if the data
	 * pointer is weird, return full residual.
	 */
	if (cp->startp == cp->phys.head.lastp ||
	    sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp),
			    &dp_ofs) < 0) {
		return cp->data_len;
	}

	/*
	 * If we were auto-sensing, then we are done.
	 */
	if (cp->host_flags & HF_SENSE) {
		return -dp_ofs;
	}

	/*
	 * We are now fully comfortable in the computation
	 * of the data residual (2's complement).
	 */
	dp_sgmin = SYM_CONF_MAX_SG - cp->segments;
	resid = -cp->ext_ofs;
	for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) {
		u_int tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
		resid += (tmp & 0xffffff);
	}

	/*
	 * Hopefully, the result is not too wrong.
	 */
	return resid;
}

/*
 * Print out the content of a SCSI message.
 * Returns the number of message bytes displayed.
 */

static int sym_show_msg (u_char * msg)
{
	u_char i;
	printf ("%x",*msg);
	if (*msg==M_EXTENDED) {
		/* Extended message: byte 1 holds the remaining length */
		for (i=1;i<8;i++) {
			if (i-1>msg[1]) break;
			printf ("-%x",msg[i]);
		};
		return (i+1);
	} else if ((*msg & 0xf0) == 0x20) {
		/* Two-byte message codes (0x20-0x2f) */
		printf ("-%x",msg[1]);
		return (2);
	};
	return (1);
}

/* Print a labelled SCSI message for the device addressed by cp. */
static void sym_print_msg (ccb_p cp, char *label, u_char *msg)
{
	PRINT_ADDR(cp);
	if (label)
		printf ("%s: ", label);

	(void) sym_show_msg (msg);
	printf (".\n");
}

/*
 * Negotiation for WIDE and SYNCHRONOUS DATA TRANSFER.
 *
 * When we try to negotiate, we append the negotiation message
 * to the identify and (maybe) simple tag message.
 * The host status field is set to HS_NEGOTIATE to mark this
 * situation.
 *
 * If the target doesn't answer this message immediately
 * (as required by the standard), the SIR_NEGO_FAILED interrupt
 * will be raised eventually.
 * The handler removes the HS_NEGOTIATE status, and sets the
 * negotiated value to the default (async / nowide).
 *
 * If we receive a matching answer immediately, we check it
 * for validity, and set the values.
 *
 * If we receive a Reject message immediately, we assume the
 * negotiation has failed, and fall back to standard values.
 *
 * If we receive a negotiation message while not in HS_NEGOTIATE
 * state, it's a target initiated negotiation. We prepare a
 * (hopefully) valid answer, set our parameters, and send back
 * this answer to the target.
 *
 * If the target doesn't fetch the answer (no message out phase),
 * we assume the negotiation has failed, and fall back to default
 * settings (SIR_NEGO_PROTO interrupt).
 *
 * When we set the values, we adjust them in all ccbs belonging
 * to this target, in the controller's register, and in the "phys"
 * field of the controller's struct sym_hcb.
 */

/*
 * chip handler for SYNCHRONOUS DATA TRANSFER REQUEST (SDTR) message.
 */
static void sym_sync_nego(hcb_p np, tcb_p tp, ccb_p cp)
{
	u_char	chg, ofs, per, fak, div;
	int	req = 1;

	/*
	 * Synchronous request message received.
	 */
	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_msg(cp, "sync msgin", np->msgin);
	};

	/*
	 * request or answer ?
	 * (HS_NEGOTIATE means we initiated; this is the answer)
	 */
	if (INB (HS_PRT) == HS_NEGOTIATE) {
		OUTB (HS_PRT, HS_BUSY);
		if (cp->nego_status && cp->nego_status != NS_SYNC)
			goto reject_it;
		req = 0;
	}

	/*
	 * get requested values.
	 */
	chg = 0;
	per = np->msgin[3];
	ofs = np->msgin[4];

	/*
	 * check values against our limits.
	 */
	if (ofs) {
		if (ofs > np->maxoffs)
			{chg = 1; ofs = np->maxoffs;}
		if (req) {
			if (ofs > tp->tinfo.user.offset)
				{chg = 1; ofs = tp->tinfo.user.offset;}
		}
	}

	if (ofs) {
		if (per < np->minsync)
			{chg = 1; per = np->minsync;}
		if (req) {
			if (per < tp->tinfo.user.period)
				{chg = 1; per = tp->tinfo.user.period;}
		}
	}

	div = fak = 0;
	if (ofs && sym_getsync(np, 0, per, &div, &fak) < 0)
		goto reject_it;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		PRINT_ADDR(cp);
		printf ("sdtr: ofs=%d per=%d div=%d fak=%d chg=%d.\n",
			ofs, per, div, fak, chg);
	}

	/*
	 * This was an answer message
	 */
	if (req == 0) {
		if (chg) 	/* Answer wasn't acceptable. */
			goto reject_it;
		sym_setsync (np, cp, ofs, per, div, fak);
		OUTL_DSP (SCRIPTA_BA (np, clrack));
		return;
	}

	/*
	 * It was a request. Set value and
	 * prepare an answer message
	 */
	sym_setsync (np, cp, ofs, per, div, fak);

	np->msgout[0] = M_EXTENDED;
	np->msgout[1] = 3;
	np->msgout[2] = M_X_SYNC_REQ;
	np->msgout[3] = per;
	np->msgout[4] = ofs;

	cp->nego_status = NS_SYNC;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_msg(cp, "sync msgout", np->msgout);
	}

	np->msgin [0] = M_NOOP;

	OUTL_DSP (SCRIPTB_BA (np, sdtr_resp));
	return;
reject_it:
	/* Fall back to async and reject the message */
	sym_setsync (np, cp, 0, 0, 0, 0);
	OUTL_DSP (SCRIPTB_BA (np, msg_bad));
}

/*
 * chip handler for PARALLEL PROTOCOL REQUEST (PPR) message.
 */
static void sym_ppr_nego(hcb_p np, tcb_p tp, ccb_p cp)
{
	u_char	chg, ofs, per, fak, dt, div, wide;
	int	req = 1;

	/*
	 * PPR request message received.
	 */
	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_msg(cp, "ppr msgin", np->msgin);
	};

	/*
	 * get requested values.
	 */
	chg  = 0;
	per  = np->msgin[3];
	ofs  = np->msgin[5];
	wide = np->msgin[6];
	dt   = np->msgin[7] & PPR_OPT_DT;

	/*
	 * request or answer ?
	 */
	if (INB (HS_PRT) == HS_NEGOTIATE) {
		OUTB (HS_PRT, HS_BUSY);
		if (cp->nego_status && cp->nego_status != NS_PPR)
			goto reject_it;
		req = 0;
	}

	/*
	 * check values against our limits.
	 */
	if (wide > np->maxwide)
		{chg = 1; wide = np->maxwide;}
	if (!wide || !(np->features & FE_ULTRA3))
		dt &= ~PPR_OPT_DT;
	if (req) {
		if (wide > tp->tinfo.user.width)
			{chg = 1; wide = tp->tinfo.user.width;}
	}

	if (!(np->features & FE_U3EN))	/* Broken U3EN bit not supported */
		dt &= ~PPR_OPT_DT;

	if (dt != (np->msgin[7] & PPR_OPT_MASK)) chg = 1;

	if (ofs) {
		if (dt) {
			if (ofs > np->maxoffs_dt)
				{chg = 1; ofs = np->maxoffs_dt;}
		}
		else if (ofs > np->maxoffs)
			{chg = 1; ofs = np->maxoffs;}
		if (req) {
			if (ofs > tp->tinfo.user.offset)
				{chg = 1; ofs = tp->tinfo.user.offset;}
		}
	}

	if (ofs) {
		if (dt) {
			if (per < np->minsync_dt)
				{chg = 1; per = np->minsync_dt;}
		}
		else if (per < np->minsync)
			{chg = 1; per = np->minsync;}
		if (req) {
			if (per < tp->tinfo.user.period)
				{chg = 1; per = tp->tinfo.user.period;}
		}
	}

	div = fak = 0;
	if (ofs && sym_getsync(np, dt, per, &div, &fak) < 0)
		goto reject_it;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		PRINT_ADDR(cp);
		printf ("ppr: "
			"dt=%x ofs=%d per=%d wide=%d div=%d fak=%d chg=%d.\n",
			dt, ofs, per, wide, div, fak, chg);
	}

	/*
	 * It was an answer.
	 */
	if (req == 0) {
		if (chg)	/* Answer wasn't acceptable */
			goto reject_it;
		sym_setpprot (np, cp, dt, ofs, per, wide, div, fak);
		OUTL_DSP (SCRIPTA_BA (np, clrack));
		return;
	}

	/*
	 * It was a request. Set value and
	 * prepare an answer message
	 */
	sym_setpprot (np, cp, dt, ofs, per, wide, div, fak);

	np->msgout[0] = M_EXTENDED;
	np->msgout[1] = 6;
	np->msgout[2] = M_X_PPR_REQ;
	np->msgout[3] = per;
	np->msgout[4] = 0;
	np->msgout[5] = ofs;
	np->msgout[6] = wide;
	np->msgout[7] = dt;

	cp->nego_status = NS_PPR;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_msg(cp, "ppr msgout", np->msgout);
	}

	np->msgin [0] = M_NOOP;

	OUTL_DSP (SCRIPTB_BA (np, ppr_resp));
	return;
reject_it:
	sym_setpprot (np, cp, 0, 0, 0, 0, 0, 0);
	OUTL_DSP (SCRIPTB_BA (np, msg_bad));
	/*
	 * If it was a device response that should result in
	 * ST, we may want to try a legacy negotiation later.
	 */
	if (!req && !dt) {
		tp->tinfo.goal.options = 0;
		tp->tinfo.goal.width   = wide;
		tp->tinfo.goal.period  = per;
		tp->tinfo.goal.offset  = ofs;
	}
	return;
}

/*
 * chip handler for WIDE DATA TRANSFER REQUEST (WDTR) message.
 */
static void sym_wide_nego(hcb_p np, tcb_p tp, ccb_p cp)
{
	u_char	chg, wide;
	int	req = 1;

	/*
	 * Wide request message received.
	 */
	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_msg(cp, "wide msgin", np->msgin);
	};

	/*
	 * Is it a request from the device?
	 */
	if (INB (HS_PRT) == HS_NEGOTIATE) {
		OUTB (HS_PRT, HS_BUSY);
		if (cp->nego_status && cp->nego_status != NS_WIDE)
			goto reject_it;
		req = 0;
	}

	/*
	 * get requested values.
	 */
	chg  = 0;
	wide = np->msgin[3];

	/*
	 * check values against driver limits.
5984 */ 5985 if (wide > np->maxwide) 5986 {chg = 1; wide = np->maxwide;} 5987 if (req) { 5988 if (wide > tp->tinfo.user.width) 5989 {chg = 1; wide = tp->tinfo.user.width;} 5990 } 5991 5992 if (DEBUG_FLAGS & DEBUG_NEGO) { 5993 PRINT_ADDR(cp); 5994 printf ("wdtr: wide=%d chg=%d.\n", wide, chg); 5995 } 5996 5997 /* 5998 * This was an answer message 5999 */ 6000 if (req == 0) { 6001 if (chg) /* Answer wasn't acceptable. */ 6002 goto reject_it; 6003 sym_setwide (np, cp, wide); 6004 6005 /* 6006 * Negotiate for SYNC immediately after WIDE response. 6007 * This allows to negotiate for both WIDE and SYNC on 6008 * a single SCSI command (Suggested by Justin Gibbs). 6009 */ 6010 if (tp->tinfo.goal.offset) { 6011 np->msgout[0] = M_EXTENDED; 6012 np->msgout[1] = 3; 6013 np->msgout[2] = M_X_SYNC_REQ; 6014 np->msgout[3] = tp->tinfo.goal.period; 6015 np->msgout[4] = tp->tinfo.goal.offset; 6016 6017 if (DEBUG_FLAGS & DEBUG_NEGO) { 6018 sym_print_msg(cp, "sync msgout", np->msgout); 6019 } 6020 6021 cp->nego_status = NS_SYNC; 6022 OUTB (HS_PRT, HS_NEGOTIATE); 6023 OUTL_DSP (SCRIPTB_BA (np, sdtr_resp)); 6024 return; 6025 } 6026 6027 OUTL_DSP (SCRIPTA_BA (np, clrack)); 6028 return; 6029 }; 6030 6031 /* 6032 * It was a request, set value and 6033 * prepare an answer message 6034 */ 6035 sym_setwide (np, cp, wide); 6036 6037 np->msgout[0] = M_EXTENDED; 6038 np->msgout[1] = 2; 6039 np->msgout[2] = M_X_WIDE_REQ; 6040 np->msgout[3] = wide; 6041 6042 np->msgin [0] = M_NOOP; 6043 6044 cp->nego_status = NS_WIDE; 6045 6046 if (DEBUG_FLAGS & DEBUG_NEGO) { 6047 sym_print_msg(cp, "wide msgout", np->msgout); 6048 } 6049 6050 OUTL_DSP (SCRIPTB_BA (np, wdtr_resp)); 6051 return; 6052 reject_it: 6053 OUTL_DSP (SCRIPTB_BA (np, msg_bad)); 6054 } 6055 6056 /* 6057 * Reset SYNC or WIDE to default settings. 6058 * 6059 * Called when a negotiation does not succeed either 6060 * on rejection or on protocol error. 
 *
 * If it was a PPR that made problems, we may want to
 * try a legacy negotiation later.
 */
static void sym_nego_default(hcb_p np, tcb_p tp, ccb_p cp)
{
	/*
	 * any error in negotiation:
	 * fall back to default mode.
	 */
	switch (cp->nego_status) {
	case NS_PPR:
#if 0
		sym_setpprot (np, cp, 0, 0, 0, 0, 0, 0);
#else
		/*
		 * Keep the WIDE/SYNC goals but drop the DT option and
		 * clip them to non-DT chip limits, so a later legacy
		 * WDTR/SDTR negotiation can still be attempted.
		 */
		tp->tinfo.goal.options = 0;
		if (tp->tinfo.goal.period < np->minsync)
			tp->tinfo.goal.period = np->minsync;
		if (tp->tinfo.goal.offset > np->maxoffs)
			tp->tinfo.goal.offset = np->maxoffs;
#endif
		break;
	case NS_SYNC:
		sym_setsync (np, cp, 0, 0, 0, 0);
		break;
	case NS_WIDE:
		sym_setwide (np, cp, 0);
		break;
	};
	np->msgin [0] = M_NOOP;
	np->msgout[0] = M_NOOP;
	cp->nego_status = 0;
}

/*
 * chip handler for MESSAGE REJECT received in response to
 * a WIDE or SYNCHRONOUS negotiation.
 *
 * Falls back to defaults and clears the negotiation host status.
 */
static void sym_nego_rejected(hcb_p np, tcb_p tp, ccb_p cp)
{
	sym_nego_default(np, tp, cp);
	OUTB (HS_PRT, HS_BUSY);
}

/*
 * chip exception handler for programmed interrupts.
 *
 * Dispatches on the SIR_* code the SCRIPTS processor left in the DSPS
 * register.  Common exits: 'out' resumes SCRIPTS where it stopped,
 * 'out_reject'/'out_clrack' restart it at a specific label, and
 * 'out_stuck' leaves the chip halted for recovery.
 */
static void sym_int_sir (hcb_p np)
{
	u_char	num	= INB (nc_dsps);
	u32	dsa	= INL (nc_dsa);
	ccb_p	cp	= sym_ccb_from_dsa(np, dsa);
	u_char	target	= INB (nc_sdid) & 0x0f;
	tcb_p	tp	= &np->target[target];
	int	tmp;

	if (DEBUG_FLAGS & DEBUG_TINY) printf ("I#%d", num);

	switch (num) {
	/*
	 * Command has been completed with error condition
	 * or has been auto-sensed.
	 */
	case SIR_COMPLETE_ERROR:
		sym_complete_error(np, cp);
		return;
	/*
	 * The C code is currently trying to recover from something.
	 * Typically, user want to abort some command.
	 */
	case SIR_SCRIPT_STOPPED:
	case SIR_TARGET_SELECTED:
	case SIR_ABORT_SENT:
		sym_sir_task_recovery(np, num);
		return;
	/*
	 * The device didn't go to MSG OUT phase after having
	 * been selected with ATN. We donnot want to handle
	 * that.
	 */
	case SIR_SEL_ATN_NO_MSG_OUT:
		printf ("%s:%d: No MSG OUT phase after selection with ATN.\n",
			sym_name (np), target);
		goto out_stuck;
	/*
	 * The device didn't switch to MSG IN phase after
	 * having reselected the initiator.
	 */
	case SIR_RESEL_NO_MSG_IN:
		printf ("%s:%d: No MSG IN phase after reselection.\n",
			sym_name (np), target);
		goto out_stuck;
	/*
	 * After reselection, the device sent a message that wasn't
	 * an IDENTIFY.
	 */
	case SIR_RESEL_NO_IDENTIFY:
		printf ("%s:%d: No IDENTIFY after reselection.\n",
			sym_name (np), target);
		goto out_stuck;
	/*
	 * The device reselected a LUN we donnot know about.
	 */
	case SIR_RESEL_BAD_LUN:
		np->msgout[0] = M_RESET;
		goto out;
	/*
	 * The device reselected for an untagged nexus and we
	 * haven't any.
	 */
	case SIR_RESEL_BAD_I_T_L:
		np->msgout[0] = M_ABORT;
		goto out;
	/*
	 * The device reselected for a tagged nexus that we donnot
	 * have.
	 */
	case SIR_RESEL_BAD_I_T_L_Q:
		np->msgout[0] = M_ABORT_TAG;
		goto out;
	/*
	 * The SCRIPTS let us know that the device has grabbed
	 * our message and will abort the job.
	 */
	case SIR_RESEL_ABORTED:
		np->lastmsg = np->msgout[0];
		np->msgout[0] = M_NOOP;
		printf ("%s:%d: message %x sent on bad reselection.\n",
			sym_name (np), target, np->lastmsg);
		goto out;
	/*
	 * The SCRIPTS let us know that a message has been
	 * successfully sent to the device.
	 */
	case SIR_MSG_OUT_DONE:
		np->lastmsg = np->msgout[0];
		np->msgout[0] = M_NOOP;
		/* Should we really care of that */
		if (np->lastmsg == M_PARITY || np->lastmsg == M_ID_ERROR) {
			if (cp) {
				cp->xerr_status &= ~XE_PARITY_ERR;
				if (!cp->xerr_status)
					OUTOFFB (HF_PRT, HF_EXT_ERR);
			}
		}
		goto out;
	/*
	 * The device didn't send a GOOD SCSI status.
	 * We may have some work to do prior to allow
	 * the SCRIPTS processor to continue.
	 */
	case SIR_BAD_SCSI_STATUS:
		if (!cp)
			goto out;
		sym_sir_bad_scsi_status(np, num, cp);
		return;
	/*
	 * We are asked by the SCRIPTS to prepare a
	 * REJECT message.
	 */
	case SIR_REJECT_TO_SEND:
		sym_print_msg(cp, "M_REJECT to send for ", np->msgin);
		np->msgout[0] = M_REJECT;
		goto out;
	/*
	 * We have been ODD at the end of a DATA IN
	 * transfer and the device didn't send a
	 * IGNORE WIDE RESIDUE message.
	 * It is a data overrun condition.
	 */
	case SIR_SWIDE_OVERRUN:
		if (cp) {
			OUTONB (HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_SWIDE_OVRUN;
		}
		goto out;
	/*
	 * We have been ODD at the end of a DATA OUT
	 * transfer.
	 * It is a data underrun condition.
	 */
	case SIR_SODL_UNDERRUN:
		if (cp) {
			OUTONB (HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_SODL_UNRUN;
		}
		goto out;
	/*
	 * The device wants us to transfer more data than
	 * expected or in the wrong direction.
	 * The number of extra bytes is in scratcha.
	 * It is a data overrun condition.
	 */
	case SIR_DATA_OVERRUN:
		if (cp) {
			OUTONB (HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_EXTRA_DATA;
			cp->extra_bytes += INL (nc_scratcha);
		}
		goto out;
	/*
	 * The device switched to an illegal phase (4/5).
	 */
	case SIR_BAD_PHASE:
		if (cp) {
			OUTONB (HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_BAD_PHASE;
		}
		goto out;
	/*
	 * We received a message.
	 */
	case SIR_MSG_RECEIVED:
		if (!cp)
			goto out_stuck;
		switch (np->msgin [0]) {
		/*
		 * We received an extended message.
		 * We handle MODIFY DATA POINTER, SDTR, WDTR
		 * and reject all other extended messages.
		 */
		case M_EXTENDED:
			switch (np->msgin [2]) {
			case M_X_MODIFY_DP:
				if (DEBUG_FLAGS & DEBUG_POINTER)
					sym_print_msg(cp,"modify DP",np->msgin);
				/* Big-endian 32-bit byte offset in msgin[3..6]. */
				tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) +
				      (np->msgin[5]<<8)  + (np->msgin[6]);
				sym_modify_dp(np, tp, cp, tmp);
				return;
			case M_X_SYNC_REQ:
				sym_sync_nego(np, tp, cp);
				return;
			case M_X_PPR_REQ:
				sym_ppr_nego(np, tp, cp);
				return;
			case M_X_WIDE_REQ:
				sym_wide_nego(np, tp, cp);
				return;
			default:
				goto out_reject;
			}
			break;
		/*
		 * We received a 1/2 byte message not handled from SCRIPTS.
		 * We are only expecting MESSAGE REJECT and IGNORE WIDE
		 * RESIDUE messages that haven't been anticipated by
		 * SCRIPTS on SWIDE full condition. Unanticipated IGNORE
		 * WIDE RESIDUE messages are aliased as MODIFY DP (-1).
		 */
		case M_IGN_RESIDUE:
			if (DEBUG_FLAGS & DEBUG_POINTER)
				sym_print_msg(cp,"ign wide residue", np->msgin);
			sym_modify_dp(np, tp, cp, -1);
			return;
		case M_REJECT:
			if (INB (HS_PRT) == HS_NEGOTIATE)
				sym_nego_rejected(np, tp, cp);
			else {
				PRINT_ADDR(cp);
				printf ("M_REJECT received (%x:%x).\n",
					scr_to_cpu(np->lastmsg), np->msgout[0]);
			}
			goto out_clrack;
			break;	/* NOTE(review): unreachable after goto; kept as-is. */
		default:
			goto out_reject;
		}
		break;
	/*
	 * We received an unknown message.
	 * Ignore all MSG IN phases and reject it.
	 */
	case SIR_MSG_WEIRD:
		sym_print_msg(cp, "WEIRD message received", np->msgin);
		OUTL_DSP (SCRIPTB_BA (np, msg_weird));
		return;
	/*
	 * Negotiation failed.
	 * Target does not send us the reply.
	 * Remove the HS_NEGOTIATE status.
	 */
	case SIR_NEGO_FAILED:
		OUTB (HS_PRT, HS_BUSY);
		/* FALLTHROUGH -- intentionally shares the default fallback. */
	/*
	 * Negotiation failed.
	 * Target does not want answer message.
	 */
	case SIR_NEGO_PROTO:
		sym_nego_default(np, tp, cp);
		goto out;
	};

out:
	OUTONB_STD ();
	return;
out_reject:
	OUTL_DSP (SCRIPTB_BA (np, msg_bad));
	return;
out_clrack:
	OUTL_DSP (SCRIPTA_BA (np, clrack));
	return;
out_stuck:
	return;
}

/*
 * Acquire a control block
 *
 * Pops a CCB from the free queue (allocating one if needed), allocates
 * and wires up LCB/tag resources for the nexus, and queues the CCB on
 * the busy queue.  Returns NULL when no CCB or nexus slot is available.
 * For an undiscovered LUN (no LCB), only one untagged IO is allowed,
 * tracked via the busy0_map bit.
 */
static ccb_p sym_get_ccb (hcb_p np, u_char tn, u_char ln, u_char tag_order)
{
	tcb_p tp = &np->target[tn];
	lcb_p lp = sym_lp(np, tp, ln);
	u_short tag = NO_TAG;
	SYM_QUEHEAD *qp;
	ccb_p cp = (ccb_p) 0;

	/*
	 * Look for a free CCB
	 */
	if (sym_que_empty(&np->free_ccbq))
		(void) sym_alloc_ccb(np);
	qp = sym_remque_head(&np->free_ccbq);
	if (!qp)
		goto out;
	cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);

	/*
	 * If the LCB is not yet available and the LUN
	 * has been probed ok, try to allocate the LCB.
	 */
	if (!lp && sym_is_bit(tp->lun_map, ln)) {
		lp = sym_alloc_lcb(np, tn, ln);
		if (!lp)
			goto out_free;
	}

	/*
	 * If the LCB is not available here, then the
	 * logical unit is not yet discovered. For those
	 * ones only accept 1 SCSI IO per logical unit,
	 * since we cannot allow disconnections.
	 */
	if (!lp) {
		if (!sym_is_bit(tp->busy0_map, ln))
			sym_set_bit(tp->busy0_map, ln);
		else
			goto out_free;
	} else {
		/*
		 * If we have been asked for a tagged command.
		 */
		if (tag_order) {
			/*
			 * Debugging purpose.
			 */
			assert(lp->busy_itl == 0);
			/*
			 * Allocate resources for tags if not yet.
			 */
			if (!lp->cb_tags) {
				sym_alloc_lcb_tags(np, tn, ln);
				if (!lp->cb_tags)
					goto out_free;
			}
			/*
			 * Get a tag for this SCSI IO and set up
			 * the CCB bus address for reselection, 
			 * and count it for this LUN.
			 * Toggle reselect path to tagged.
			 */
			if (lp->busy_itlq < SYM_CONF_MAX_TASK) {
				tag = lp->cb_tags[lp->ia_tag];
				if (++lp->ia_tag == SYM_CONF_MAX_TASK)
					lp->ia_tag = 0;
				lp->itlq_tbl[tag] = cpu_to_scr(cp->ccb_ba);
				++lp->busy_itlq;
				lp->head.resel_sa =
					cpu_to_scr(SCRIPTA_BA (np, resel_tag));
			}
			else
				goto out_free;
		}
		/*
		 * This command will not be tagged.
		 * If we already have either a tagged or untagged
		 * one, refuse to overlap this untagged one.
		 */
		else {
			/*
			 * Debugging purpose.
			 */
			assert(lp->busy_itl == 0 && lp->busy_itlq == 0);
			/*
			 * Count this nexus for this LUN.
			 * Set up the CCB bus address for reselection.
			 * Toggle reselect path to untagged.
			 */
			if (++lp->busy_itl == 1) {
				lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba);
				lp->head.resel_sa =
				      cpu_to_scr(SCRIPTA_BA (np, resel_no_tag));
			}
			else
				goto out_free;
		}
	}
	/*
	 * Put the CCB into the busy queue.
	 */
	sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);

	/*
	 * Remember all informations needed to free this CCB.
	 */
	cp->to_abort = 0;
	cp->tag	   = tag;
	cp->target = tn;
	cp->lun    = ln;

	if (DEBUG_FLAGS & DEBUG_TAGS) {
		PRINT_LUN(np, tn, ln);
		printf ("ccb @%p using tag %d.\n", cp, tag);
	}

out:
	return cp;
out_free:
	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
	return (ccb_p) 0;
}

/*
 * Release one control block
 *
 * Undoes everything sym_get_ccb() set up: returns the tag, invalidates
 * the reselection pointers, uncounts the nexus, unloads the DMA map and
 * puts the CCB back on the free queue.
 */
static void sym_free_ccb (hcb_p np, ccb_p cp)
{
	tcb_p tp = &np->target[cp->target];
	lcb_p lp = sym_lp(np, tp, cp->lun);

	if (DEBUG_FLAGS & DEBUG_TAGS) {
		PRINT_LUN(np, cp->target, cp->lun);
		printf ("ccb @%p freeing tag %d.\n", cp, cp->tag);
	}

	/*
	 * If LCB available,
	 */
	if (lp) {
		/*
		 * If tagged, release the tag, set the reselect path
		 */
		if (cp->tag != NO_TAG) {
			/*
			 * Free the tag value.
			 */
			lp->cb_tags[lp->if_tag] = cp->tag;
			if (++lp->if_tag == SYM_CONF_MAX_TASK)
				lp->if_tag = 0;
			/*
			 * Make the reselect path invalid, 
			 * and uncount this CCB.
			 */
			lp->itlq_tbl[cp->tag] = cpu_to_scr(np->bad_itlq_ba);
			--lp->busy_itlq;
		} else {	/* Untagged */
			/*
			 * Make the reselect path invalid, 
			 * and uncount this CCB.
			 */
			lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);
			--lp->busy_itl;
		}
		/*
		 * If no JOB active, make the LUN reselect path invalid.
		 */
		if (lp->busy_itlq == 0 && lp->busy_itl == 0)
			lp->head.resel_sa =
				cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun));
	}
	/*
	 * Otherwise, we only accept 1 IO per LUN.
	 * Clear the bit that keeps track of this IO.
	 */
	else
		sym_clr_bit(tp->busy0_map, cp->lun);

	/*
	 * We donnot queue more than 1 ccb per target 
	 * with negotiation at any time. If this ccb was 
	 * used for negotiation, clear this info in the tcb.
	 */
	if (cp == tp->nego_cp)
		tp->nego_cp = 0;

#ifdef SYM_CONF_IARB_SUPPORT
	/*
	 * If we just complete the last queued CCB,
	 * clear this info that is no longer relevant.
	 */
	if (cp == np->last_cp)
		np->last_cp = 0;
#endif

	/*
	 * Unmap user data from DMA map if needed.
	 */
	if (cp->dmamapped) {
		bus_dmamap_unload(np->data_dmat, cp->dmamap);
		cp->dmamapped = 0;
	}

	/*
	 * Make this CCB available.
	 */
	cp->cam_ccb = 0;
	cp->host_status = HS_IDLE;
	sym_remque(&cp->link_ccbq);
	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
}

/*
 * Allocate a CCB from memory and initialize its fixed part.
 *
 * Caps the population at SYM_CONF_MAX_START, allocates the CCB and its
 * sense bounce buffer from DMA-safe memory, creates the data DMA map,
 * hashes the CCB by its bus address for sym_ccb_from_dsa() lookup, and
 * chains it on the free queue.  Returns NULL on any failure.
 */
static ccb_p sym_alloc_ccb(hcb_p np)
{
	ccb_p cp = 0;
	int hcode;

	/*
	 * Prevent from allocating more CCBs than we can 
	 * queue to the controller.
	 */
	if (np->actccbs >= SYM_CONF_MAX_START)
		return 0;

	/*
	 * Allocate memory for this CCB.
	 */
	cp = sym_calloc_dma(sizeof(struct sym_ccb), "CCB");
	if (!cp)
		goto out_free;

	/*
	 * Allocate a bounce buffer for sense data.
	 */
	cp->sns_bbuf = sym_calloc_dma(SYM_SNS_BBUF_LEN, "SNS_BBUF");
	if (!cp->sns_bbuf)
		goto out_free;

	/*
	 * Allocate a map for the DMA of user data.
	 */
	if (bus_dmamap_create(np->data_dmat, 0, &cp->dmamap))
		goto out_free;
	/*
	 * Count it.
	 */
	np->actccbs++;

	/*
	 * Compute the bus address of this ccb.
	 */
	cp->ccb_ba = vtobus(cp);

	/*
	 * Insert this ccb into the hashed list.
	 */
	hcode = CCB_HASH_CODE(cp->ccb_ba);
	cp->link_ccbh = np->ccbh[hcode];
	np->ccbh[hcode] = cp;

	/*
	 * Initialize the start and restart actions.
	 */
	cp->phys.head.go.start   = cpu_to_scr(SCRIPTA_BA (np, idle));
	cp->phys.head.go.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l));

 	/*
	 * Initialize some other fields.
	 */
	cp->phys.smsg_ext.addr = cpu_to_scr(HCB_BA(np, msgin[2]));

	/*
	 * Chain into free ccb queue.
	 */
	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);

	return cp;
out_free:
	if (cp) {
		if (cp->sns_bbuf)
			sym_mfree_dma(cp->sns_bbuf,SYM_SNS_BBUF_LEN,"SNS_BBUF");
		sym_mfree_dma(cp, sizeof(*cp), "CCB");
	}
	return 0;
}

/*
 * Look up a CCB from a DSA value.
 *
 * Walks the hash chain selected by CCB_HASH_CODE(dsa) until the CCB
 * whose bus address equals the chip's DSA is found; NULL if none.
 */
static ccb_p sym_ccb_from_dsa(hcb_p np, u32 dsa)
{
	int hcode;
	ccb_p cp;

	hcode = CCB_HASH_CODE(dsa);
	cp = np->ccbh[hcode];
	while (cp) {
		if (cp->ccb_ba == dsa)
			break;
		cp = cp->link_ccbh;
	}

	return cp;
}

/*
 * Target control block initialisation.
 * Nothing important to do at the moment.
 */
static void sym_init_tcb (hcb_p np, u_char tn)
{
	/*
	 * Check some alignments required by the chip.
	 * SCRIPTS loads sval/wval into sxfer/scntl3 with word accesses,
	 * so the struct offsets must agree modulo 4.
	 */
	assert (((offsetof(struct sym_reg, nc_sxfer) ^
		  offsetof(struct sym_tcb, head.sval)) &3) == 0);
	assert (((offsetof(struct sym_reg, nc_scntl3) ^
		  offsetof(struct sym_tcb, head.wval)) &3) == 0);
}

/*
 * Lun control block allocation and initialization.
 *
 * Idempotent: returns the existing LCB if already allocated.  For
 * LUN > 0 it also lazily builds the per-target LUN bus-address table
 * and pointer map.  On partial allocation failure the LCB is simply
 * not created (returns NULL via the fail label).
 */
static lcb_p sym_alloc_lcb (hcb_p np, u_char tn, u_char ln)
{
	tcb_p tp = &np->target[tn];
	lcb_p lp = sym_lp(np, tp, ln);

	/*
	 * Already done, just return.
	 */
	if (lp)
		return lp;
	/*
	 * Check against some race.
	 */
	assert(!sym_is_bit(tp->busy0_map, ln));

	/*
	 * Initialize the target control block if not yet.
	 */
	sym_init_tcb (np, tn);

	/*
	 * Allocate the LCB bus address array.
	 * Compute the bus address of this table.
	 * 256 bytes == 64 u32 entries, all pointing at the bad-LUN
	 * handler until a real LCB is installed.
	 */
	if (ln && !tp->luntbl) {
		int i;

		tp->luntbl = sym_calloc_dma(256, "LUNTBL");
		if (!tp->luntbl)
			goto fail;
		for (i = 0 ; i < 64 ; i++)
			tp->luntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa));
		tp->head.luntbl_sa = cpu_to_scr(vtobus(tp->luntbl));
	}

	/*
	 * Allocate the table of pointers for LUN(s) > 0, if needed.
	 */
	if (ln && !tp->lunmp) {
		tp->lunmp = sym_calloc(SYM_CONF_MAX_LUN * sizeof(lcb_p),
				   "LUNMP");
		if (!tp->lunmp)
			goto fail;
	}

	/*
	 * Allocate the lcb.
	 * Make it available to the chip.
	 */
	lp = sym_calloc_dma(sizeof(struct sym_lcb), "LCB");
	if (!lp)
		goto fail;
	if (ln) {
		tp->lunmp[ln] = lp;
		tp->luntbl[ln] = cpu_to_scr(vtobus(lp));
	}
	else {
		tp->lun0p = lp;
		tp->head.lun0_sa = cpu_to_scr(vtobus(lp));
	}

	/*
	 * Let the itl task point to error handling.
	 */
	lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);

	/*
	 * Set the reselect pattern to our default. :)
	 */
	lp->head.resel_sa = cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun));

	/*
	 * Set user capabilities.
	 */
	lp->user_flags = tp->usrflags & (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);

fail:
	return lp;
}

/*
 * Allocate LCB resources for tagged command queuing.
 *
 * Allocates the ITLQ task table and the circular tag buffer together
 * (all-or-nothing), fills the task table with the no-task handler and
 * the tag buffer with 0..SYM_CONF_MAX_TASK-1, then publishes the table
 * to SCRIPTS.  Failure leaves lp->cb_tags NULL, which callers test.
 */
static void sym_alloc_lcb_tags (hcb_p np, u_char tn, u_char ln)
{
	tcb_p tp = &np->target[tn];
	lcb_p lp = sym_lp(np, tp, ln);
	int i;

	/*
	 * If LCB not available, try to allocate it.
	 */
	if (!lp && !(lp = sym_alloc_lcb(np, tn, ln)))
		goto fail;

	/*
	 * Allocate the task table and the tag allocation 
	 * circular buffer. We want both or none.
	 */
	lp->itlq_tbl = sym_calloc_dma(SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
	if (!lp->itlq_tbl)
		goto fail;
	lp->cb_tags = sym_calloc(SYM_CONF_MAX_TASK, "CB_TAGS");
	if (!lp->cb_tags) {
		sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
		lp->itlq_tbl = 0;
		goto fail;
	}

	/*
	 * Initialize the task table with invalid entries.
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
		lp->itlq_tbl[i] = cpu_to_scr(np->notask_ba);

	/*
	 * Fill up the tag buffer with tag numbers.
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
		lp->cb_tags[i] = i;

	/*
	 * Make the task table available to SCRIPTS, 
	 * And accept tagged commands now.
	 */
	lp->head.itlq_tbl_sa = cpu_to_scr(vtobus(lp->itlq_tbl));

	return;
fail:
	return;
}

/*
 * Test the pci bus snoop logic :-(
 *
 * Has to be called with interrupts disabled.
 */
#ifndef SYM_CONF_IOMAPPED
static int sym_regtest (hcb_p np)
{
	register volatile u32 data;
	/*
	 * chip registers may NOT be cached.
	 * write 0xffffffff to a read only register area,
	 * and try to read it back.
	 */
	data = 0xffffffff;
	OUTL_OFF(offsetof(struct sym_reg, nc_dstat), data);
	data = INL_OFF(offsetof(struct sym_reg, nc_dstat));
#if 1
	if (data == 0xffffffff) {
#else
	if ((data & 0xe2f0fffd) != 0x02000080) {
#endif
		printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n",
			(unsigned) data);
		return (0x10);
	};
	return (0);
}
#endif

/*
 * Run the SCRIPTS snoop test: the chip and the host exchange values
 * through memory and registers, proving that PCI snooping/cache
 * coherency works.  Returns 0 on success, or a bitmask describing
 * which leg of the exchange failed.  Disables master parity checking
 * and retries once if broken hardware raises MDPE.
 */
static int sym_snooptest (hcb_p np)
{
	u32	sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc, dstat;
	int	i, err=0;
#ifndef SYM_CONF_IOMAPPED
	err |= sym_regtest (np);
	if (err) return (err);
#endif
restart_test:
	/*
	 * Enable Master Parity Checking as we intend 
	 * to enable it for normal operations.
	 */
	OUTB (nc_ctest4, (np->rv_ctest4 & MPEE));
	/*
	 * init
	 */
	pc  = SCRIPTB0_BA (np, snooptest);
	host_wr = 1;
	sym_wr  = 2;
	/*
	 * Set memory and register.
	 */
	np->cache = cpu_to_scr(host_wr);
	OUTL (nc_temp, sym_wr);
	/*
	 * Start script (exchange values)
	 */
	OUTL (nc_dsa, np->hcb_ba);
	OUTL_DSP (pc);
	/*
	 * Wait 'til done (with timeout)
	 */
	for (i=0; i<SYM_SNOOP_TIMEOUT; i++)
		if (INB(nc_istat) & (INTF|SIP|DIP))
			break;
	if (i>=SYM_SNOOP_TIMEOUT) {
		printf ("CACHE TEST FAILED: timeout.\n");
		return (0x20);
	};
	/*
	 * Check for fatal DMA errors.
	 */
	dstat = INB (nc_dstat);
#if 1	/* Band aiding for broken hardwares that fail PCI parity */
	if ((dstat & MDPE) && (np->rv_ctest4 & MPEE)) {
		printf ("%s: PCI DATA PARITY ERROR DETECTED - "
			"DISABLING MASTER DATA PARITY CHECKING.\n",
			sym_name(np));
		np->rv_ctest4 &= ~MPEE;
		goto restart_test;
	}
#endif
	if (dstat & (MDPE|BF|IID)) {
		printf ("CACHE TEST FAILED: DMA error (dstat=0x%02x).", dstat);
		return (0x80);
	}
	/*
	 * Save termination position.
	 */
	pc = INL (nc_dsp);
	/*
	 * Read memory and register.
	 */
	host_rd = scr_to_cpu(np->cache);
	sym_rd  = INL (nc_scratcha);
	sym_bk  = INL (nc_temp);

	/*
	 * Check termination position.
	 */
	if (pc != SCRIPTB0_BA (np, snoopend)+8) {
		printf ("CACHE TEST FAILED: script execution failed.\n");
		printf ("start=%08lx, pc=%08lx, end=%08lx\n",
			(u_long) SCRIPTB0_BA (np, snooptest), (u_long) pc,
			(u_long) SCRIPTB0_BA (np, snoopend) +8);
		return (0x40);
	};
	/*
	 * Show results.
	 */
	if (host_wr != sym_rd) {
		printf ("CACHE TEST FAILED: host wrote %d, chip read %d.\n",
			(int) host_wr, (int) sym_rd);
		err |= 1;
	};
	if (host_rd != sym_wr) {
		printf ("CACHE TEST FAILED: chip wrote %d, host read %d.\n",
			(int) sym_wr, (int) host_rd);
		err |= 2;
	};
	if (sym_bk != sym_wr) {
		printf ("CACHE TEST FAILED: chip wrote %d, read back %d.\n",
			(int) sym_wr, (int) sym_bk);
		err |= 4;
	};

	return (err);
}

/*
 * Determine the chip's clock frequency.
 *
 * This is essential for the negotiation of the synchronous 
 * transfer rate.
 *
 * Note: we have to return the correct value.
 * THERE IS NO SAFE DEFAULT VALUE.
 *
 * Most NCR/SYMBIOS boards are delivered with a 40 Mhz clock.
 * 53C860 and 53C875 rev. 1 support fast20 transfers but 
 * do not have a clock doubler and so are provided with a 
 * 80 MHz clock. All other fast20 boards incorporate a doubler 
 * and so should be delivered with a 40 MHz clock.
 * The recent fast40 chips (895/896/895A/1010) use a 40 Mhz base 
 * clock and provide a clock quadrupler (160 Mhz).
 */

/*
 * Select SCSI clock frequency
 *
 * Programs scntl3, engaging the clock multiplier first when one is
 * present: enable doubler, wait for lock, halt the SCSI clock, select
 * the doubled clock, then restart.  The ordering of these register
 * writes is mandated by the chip and must not be changed.
 */
static void sym_selectclock(hcb_p np, u_char scntl3)
{
	/*
	 * If multiplier not present or not selected, leave here.
	 */
	if (np->multiplier <= 1) {
		OUTB(nc_scntl3,	scntl3);
		return;
	}

	if (sym_verbose >= 2)
		printf ("%s: enabling clock multiplier\n", sym_name(np));

	OUTB(nc_stest1, DBLEN);	   /* Enable clock multiplier		  */
	/*
	 * Wait for the LCKFRQ bit to be set if supported by the chip.
	 * Otherwise wait 20 micro-seconds.
	 */
	if (np->features & FE_LCKFRQ) {
		int i = 20;
		while (!(INB(nc_stest4) & LCKFRQ) && --i > 0)
			UDELAY (20);
		if (!i)
			printf("%s: the chip cannot lock the frequency\n",
				sym_name(np));
	} else
		UDELAY (20);
	OUTB(nc_stest3, HSC);		/* Halt the scsi clock		*/
	OUTB(nc_scntl3,	scntl3);
	OUTB(nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier	*/
	OUTB(nc_stest3, 0x00);		/* Restart scsi clock 		*/
}

/*
 * calculate SCSI clock frequency (in KHz)
 *
 * Measures how many milliseconds the chip's general-purpose timer
 * takes to expire for a nominal delay of 1<<gen * 125us, then derives
 * the clock from the known prescaler.  Returns 0 if the timer never
 * fired within the polling bound.
 */
static unsigned getfreq (hcb_p np, int gen)
{
	unsigned int ms = 0;
	unsigned int f;

	/*
	 * Measure GEN timer delay in order 
	 * to calculate SCSI clock frequency
	 *
	 * This code will never execute too 
	 * many loop iterations (if DELAY is 
	 * reasonably correct). It could get 
	 * too low a delay (too high a freq.) 
	 * if the CPU is slow executing the 
	 * loop for some reason (an NMI, for 
	 * example). For this reason we will 
	 * if multiple measurements are to be 
	 * performed trust the higher delay 
	 * (lower frequency returned).
	 */
	OUTW (nc_sien , 0);	/* mask all scsi interrupts */
	(void) INW (nc_sist);	/* clear pending scsi interrupt */
	OUTB (nc_dien , 0);	/* mask all dma interrupts */
	(void) INW (nc_sist);	/* another one, just to be sure :) */
	OUTB (nc_scntl3, 4);	/* set pre-scaler to divide by 3 */
	OUTB (nc_stime1, 0);	/* disable general purpose timer */
	OUTB (nc_stime1, gen);	/* set to nominal delay of 1<<gen * 125us */
	while (!(INW(nc_sist) & GEN) && ms++ < 100000)
		UDELAY (1000);	/* count ms */
	OUTB (nc_stime1, 0);	/* disable general purpose timer */
 	/*
 	 * set prescaler to divide by whatever 0 means
 	 * 0 ought to choose divide by 2, but appears
 	 * to set divide by 3.5 mode in my 53c810 ...
 	 */
 	OUTB (nc_scntl3, 0);

	/*
	 * adjust for prescaler, and convert into KHz 
	 */
	f = ms ? ((1 << gen) * 4340) / ms : 0;

	if (sym_verbose >= 2)
		printf ("%s: Delay (GEN=%d): %u msec, %u KHz\n",
			sym_name(np), gen, ms, f);

	return f;
}

/*
 * Measure the SCSI clock: take two samples (after discarding a warm-up
 * run) and keep the lower frequency, per the rationale in getfreq().
 */
static unsigned sym_getfreq (hcb_p np)
{
	u_int f1, f2;
	int gen = 11;

	(void) getfreq (np, gen);	/* throw away first result */
	f1 = getfreq (np, gen);
	f2 = getfreq (np, gen);
	if (f1 > f2) f1 = f2;		/* trust lower result	*/
	return f1;
}

/*
 * Get/probe chip SCSI clock frequency
 *
 * Sets np->multiplier and np->clock_khz.  C10-core chips are assumed
 * to run a 40 MHz base clock.  Otherwise the frequency is either
 * trusted from the BIOS scntl3 setting or measured with the GEN timer
 * (doubler forced off) and snapped to 40/50/80 MHz.
 */
static void sym_getclock (hcb_p np, int mult)
{
	unsigned char scntl3 = np->sv_scntl3;
	unsigned char stest1 = np->sv_stest1;
	unsigned f1;

	/*
	 * For the C10 core, assume 40 MHz.
	 */
	if (np->features & FE_C10) {
		np->multiplier = mult;
		np->clock_khz = 40000 * mult;
		return;
	}

	np->multiplier = 1;
	f1 = 40000;
	/*
	 * True with 875/895/896/895A with clock multiplier selected
	 */
	if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) {
		if (sym_verbose >= 2)
			printf ("%s: clock multiplier found\n", sym_name(np));
		np->multiplier = mult;
	}

	/*
	 * If multiplier not found or scntl3 not 7,5,3,
	 * reset chip and get frequency from general purpose timer.
	 * Otherwise trust scntl3 BIOS setting.
	 */
	if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) {
		OUTB (nc_stest1, 0);		/* make sure doubler is OFF */
		f1 = sym_getfreq (np);

		if (sym_verbose)
			printf ("%s: chip clock is %uKHz\n", sym_name(np), f1);

		if	(f1 <	45000)		f1 =  40000;
		else if (f1 <	55000)		f1 =  50000;
		else				f1 =  80000;

		if (f1 < 80000 && mult > 1) {
			if (sym_verbose >= 2)
				printf ("%s: clock multiplier assumed\n",
					sym_name(np));
			np->multiplier	= mult;
		}
	} else {
		if	((scntl3 & 7) == 3)	f1 =  40000;
		else if	((scntl3 & 7) == 5)	f1 =  80000;
		else 				f1 = 160000;

		f1 /= np->multiplier;
	}

	/*
	 * Compute controller synchronous parameters.
	 */
	f1		*= np->multiplier;
	np->clock_khz	= f1;
}

/*
 * Get/probe PCI clock frequency
 *
 * Temporarily routes the PCI clock onto the SCSI clock input and
 * measures it with sym_getfreq().  Not usable on C10-core chips,
 * for which 0 is stored and returned.
 */
static int sym_getpciclock (hcb_p np)
{
	int f = 0;

	/*
	 * For the C1010-33, this doesn't work.
	 * For the C1010-66, this will be tested when I'll have 
	 * such a beast to play with.
	 */
	if (!(np->features & FE_C10)) {
		OUTB (nc_stest1, SCLK);	/* Use the PCI clock as SCSI clock */
		f = (int) sym_getfreq (np);
		OUTB (nc_stest1, 0);
	}
	np->pciclk_khz = f;

	return f;
}

/*============= DRIVER ACTION/COMPLETION ====================*/

/*
 * Print something that tells about extended errors.
 */
static void sym_print_xerr(ccb_p cp, int x_status)
{
	/* One message per extended-error bit set in x_status. */
	if (x_status & XE_PARITY_ERR) {
		PRINT_ADDR(cp);
		printf ("unrecovered SCSI parity error.\n");
	}
	if (x_status & XE_EXTRA_DATA) {
		PRINT_ADDR(cp);
		printf ("extraneous data discarded.\n");
	}
	if (x_status & XE_BAD_PHASE) {
		PRINT_ADDR(cp);
		printf ("illegal scsi phase (4/5).\n");
	}
	if (x_status & XE_SODL_UNRUN) {
		PRINT_ADDR(cp);
		printf ("ODD transfer in DATA OUT phase.\n");
	}
	if (x_status & XE_SWIDE_OVRUN) {
		PRINT_ADDR(cp);
		printf ("ODD transfer in DATA IN phase.\n");
	}
}

/*
 * Choose the more appropriate CAM status if
 * the IO encountered an extended error.
 * Note that XE_BAD_PHASE and the catch-all branch both map to
 * CAM_REQ_CMP_ERR; the distinction is kept for readability only.
 */
static int sym_xerr_cam_status(int cam_status, int x_status)
{
	if (x_status) {
		if	(x_status & XE_PARITY_ERR)
			cam_status = CAM_UNCOR_PARITY;
		else if	(x_status &(XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN))
			cam_status = CAM_DATA_RUN_ERR;
		else if	(x_status & XE_BAD_PHASE)
			cam_status = CAM_REQ_CMP_ERR;
		else
			cam_status = CAM_REQ_CMP_ERR;
	}
	return cam_status;
}

/*
 * Complete execution of a SCSI command with extented
 * error, SCSI status error, or having been auto-sensed.
 *
 * The SCRIPTS processor is not running there, so we
 * can safely access IO registers and remove JOBs from
 * the START queue.
 * SCRATCHA is assumed to have been loaded with STARTPOS
 * before the SCRIPTS called the C code.
 */
static void sym_complete_error (hcb_p np, ccb_p cp)
{
	struct ccb_scsiio *csio;
	u_int cam_status;
	int i;

	/*
	 * Paranoid check. :)
	 */
	if (!cp || !cp->cam_ccb)
		return;

	if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_RESULT)) {
		printf ("CCB=%lx STAT=%x/%x/%x DEV=%d/%d\n", (unsigned long)cp,
			cp->host_status, cp->ssss_status, cp->host_flags,
			cp->target, cp->lun);
		MDELAY(100);
	}

	/*
	 * Get CAM command pointer.
	 */
	csio = &cp->cam_ccb->csio;

	/*
	 * Check for extended errors.
	 */
	if (cp->xerr_status) {
		if (sym_verbose)
			sym_print_xerr(cp, cp->xerr_status);
		if (cp->host_status == HS_COMPLETE)
			cp->host_status = HS_COMP_ERR;
	}

	/*
	 * Calculate the residual.
	 */
	csio->sense_resid = 0;
	csio->resid = sym_compute_residual(np, cp);

	if (!SYM_CONF_RESIDUAL_SUPPORT) {/* If user does not want residuals */
		csio->resid  = 0;	/* throw them away. :)		   */
		cp->sv_resid = 0;
	}

	if (cp->host_flags & HF_SENSE) {		/* Auto sense     */
		csio->scsi_status = cp->sv_scsi_status;	/* Restore status */
		csio->sense_resid = csio->resid;	/* Swap residuals */
		csio->resid       = cp->sv_resid;
		cp->sv_resid	  = 0;
		if (sym_verbose && cp->sv_xerr_status)
			sym_print_xerr(cp, cp->sv_xerr_status);
		if (cp->host_status == HS_COMPLETE &&
		    cp->ssss_status == S_GOOD &&
		    cp->xerr_status == 0) {
			cam_status = sym_xerr_cam_status(CAM_SCSI_STATUS_ERROR,
							 cp->sv_xerr_status);
			cam_status |= CAM_AUTOSNS_VALID;
			/*
			 * Bounce back the sense data to user and
			 * fix the residual.
			 */
			bzero(&csio->sense_data, csio->sense_len);
			bcopy(cp->sns_bbuf, &csio->sense_data,
			      MIN(csio->sense_len, SYM_SNS_BBUF_LEN));
			csio->sense_resid += csio->sense_len;
			csio->sense_resid -= SYM_SNS_BBUF_LEN;
#if 0
			/*
			 * If the device reports a UNIT ATTENTION condition
			 * due to a RESET condition, we should consider all
			 * disconnect CCBs for this unit as aborted.
			 */
			if (1) {
				u_char *p;
				p  = (u_char *) csio->sense_data;
				if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29)
					sym_clear_tasks(np, CAM_REQ_ABORTED,
							cp->target,cp->lun, -1);
			}
#endif
		}
		else
			cam_status = CAM_AUTOSENSE_FAIL;
	}
	else if (cp->host_status == HS_COMPLETE) {	/* Bad SCSI status */
		csio->scsi_status = cp->ssss_status;
		cam_status = CAM_SCSI_STATUS_ERROR;
	}
	else if (cp->host_status == HS_SEL_TIMEOUT)	/* Selection timeout */
		cam_status = CAM_SEL_TIMEOUT;
	else if (cp->host_status == HS_UNEXPECTED)	/* Unexpected BUS FREE*/
		cam_status = CAM_UNEXP_BUSFREE;
	else {						/* Extended error */
		if (sym_verbose) {
			PRINT_ADDR(cp);
			printf ("COMMAND FAILED (%x %x %x).\n",
				cp->host_status, cp->ssss_status,
				cp->xerr_status);
		}
		csio->scsi_status = cp->ssss_status;
		/*
		 * Set the most appropriate value for CAM status.
		 */
		cam_status = sym_xerr_cam_status(CAM_REQ_CMP_ERR,
						 cp->xerr_status);
	}

	/*
	 * Dequeue all queued CCBs for that device
	 * not yet started by SCRIPTS.
	 */
	i = (INL (nc_scratcha) - np->squeue_ba) / 4;
	(void) sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);

	/*
	 * Restart the SCRIPTS processor.
	 */
	OUTL_DSP (SCRIPTA_BA (np, start));

	/*
	 * Synchronize DMA map if needed.
	 */
	if (cp->dmamapped) {
		bus_dmamap_sync(np->data_dmat, cp->dmamap,
			(cp->dmamapped == SYM_DMA_READ ?
				BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
	}
	/*
	 * Add this one to the COMP queue.
	 * Complete all those commands with either error
	 * or requeue condition.
	 */
	sym_set_cam_status((union ccb *) csio, cam_status);
	sym_remque(&cp->link_ccbq);
	sym_insque_head(&cp->link_ccbq, &np->comp_ccbq);
	sym_flush_comp_queue(np, 0);
}

/*
 * Complete execution of a successful SCSI command.
 *
 * Only successful commands go to the DONE queue,
 * since we need to have the SCRIPTS processor
 * stopped on any error condition.
 * The SCRIPTS processor is running while we are
 * completing successful commands.
 */
static void sym_complete_ok (hcb_p np, ccb_p cp)
{
	struct ccb_scsiio *csio;
	tcb_p tp;
	lcb_p lp;

	/*
	 * Paranoid check. :)
	 */
	if (!cp || !cp->cam_ccb)
		return;
	assert (cp->host_status == HS_COMPLETE);

	/*
	 * Get command, target and lun pointers.
	 */
	csio = &cp->cam_ccb->csio;
	tp = &np->target[cp->target];
	lp = sym_lp(np, tp, cp->lun);

	/*
	 * Assume device discovered on first success.
	 */
	if (!lp)
		sym_set_bit(tp->lun_map, cp->lun);

	/*
	 * If all data have been transferred, given than no
	 * extended error did occur, there is no residual.
	 */
	csio->resid = 0;
	if (cp->phys.head.lastp != cp->phys.head.goalp)
		csio->resid = sym_compute_residual(np, cp);

	/*
	 * Wrong transfer residuals may be worse than just always
	 * returning zero. User can disable this feature from
	 * sym_conf.h. Residual support is enabled by default.
	 */
	if (!SYM_CONF_RESIDUAL_SUPPORT)
		csio->resid  = 0;

	/*
	 * Synchronize DMA map if needed.
	 */
	if (cp->dmamapped) {
		bus_dmamap_sync(np->data_dmat, cp->dmamap,
			(cp->dmamapped == SYM_DMA_READ ?
				BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
	}
	/*
	 * Set status and complete the command.
	 */
	csio->scsi_status = cp->ssss_status;
	sym_set_cam_status((union ccb *) csio, CAM_REQ_CMP);
	sym_free_ccb (np, cp);
	sym_xpt_done(np, (union ccb *) csio);
}

/*
 * Our timeout handler.
 * The argument is the timed-out CAM CCB; only XPT_SCSI_IO
 * requests are acted upon (via sym_abort_scsiio).
 */
static void sym_timeout1(void *arg)
{
	union ccb *ccb = (union ccb *) arg;
	hcb_p np = ccb->ccb_h.sym_hcb_ptr;

	/*
	 * Check that the CAM CCB is still queued.
	 */
	if (!np)
		return;

	switch(ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
		(void) sym_abort_scsiio(np, ccb, 1);
		break;
	default:
		break;
	}
}

/* spl-protected wrapper around sym_timeout1(). */
static void sym_timeout(void *arg)
{
	int s = splcam();
	sym_timeout1(arg);
	splx(s);
}

/*
 * Abort an SCSI IO.
 * Returns -1 if the CCB is not ours (or still being mapped),
 * 0 otherwise.
 */
static int sym_abort_scsiio(hcb_p np, union ccb *ccb, int timed_out)
{
	ccb_p cp;
	SYM_QUEHEAD *qp;

	/*
	 * Look up our CCB control block.
	 */
	cp = 0;
	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
		ccb_p cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		if (cp2->cam_ccb == ccb) {
			cp = cp2;
			break;
		}
	}
	if (!cp || cp->host_status == HS_WAIT)
		return -1;

	/*
	 * If a previous abort didn't succeed in time,
	 * perform a BUS reset.
	 */
	if (cp->to_abort) {
		sym_reset_scsi_bus(np, 1);
		return 0;
	}

	/*
	 * Mark the CCB for abort and allow time for.
	 * (2 = abort from timeout, 1 = user-requested abort)
	 */
	cp->to_abort = timed_out ? 2 : 1;
	ccb->ccb_h.timeout_ch = timeout(sym_timeout, (caddr_t) ccb, 10*hz);

	/*
	 * Tell the SCRIPTS processor to stop and synchronize with us.
	 */
	np->istat_sem = SEM;
	OUTB (nc_istat, SIGP|SEM);
	return 0;
}

/*
 * Reset a SCSI device (all LUNs of a target).
 */
static void sym_reset_dev(hcb_p np, union ccb *ccb)
{
	tcb_p tp;
	struct ccb_hdr *ccb_h = &ccb->ccb_h;

	/* Reject our own address and out-of-table target/lun numbers. */
	if (ccb_h->target_id   == np->myaddr ||
	    ccb_h->target_id   >= SYM_CONF_MAX_TARGET ||
	    ccb_h->target_lun  >= SYM_CONF_MAX_LUN) {
		sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE);
		return;
	}

	tp = &np->target[ccb_h->target_id];

	/* Flag the target; the reset is performed asynchronously. */
	tp->to_reset = 1;
	sym_xpt_done2(np, ccb, CAM_REQ_CMP);

	/* Signal the SCRIPTS processor to stop and synchronize with us. */
	np->istat_sem = SEM;
	OUTB (nc_istat, SIGP|SEM);
	return;
}

/*
 * SIM action entry point (spl-protected wrapper).
 */
static void sym_action(struct cam_sim *sim, union ccb *ccb)
{
	int s = splcam();
	sym_action1(sim, ccb);
	splx(s);
}

static void sym_action1(struct cam_sim *sim, union ccb *ccb)
{
	hcb_p	np;
	tcb_p	tp;
	lcb_p	lp;
	ccb_p	cp;
	int	tmp;
	u_char	idmsg, *msgptr;
	u_int	msglen;
	struct	ccb_scsiio *csio;
	struct	ccb_hdr  *ccb_h;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("sym_action\n"));

	/*
	 * Retrieve our controller data structure.
	 */
	np = (hcb_p) cam_sim_softc(sim);

	/*
	 * The common case is SCSI IO.
	 * We deal with other ones elsewhere.
	 */
	if (ccb->ccb_h.func_code != XPT_SCSI_IO) {
		sym_action2(sim, ccb);
		return;
	}
	csio  = &ccb->csio;
	ccb_h = &csio->ccb_h;

	/*
	 * Work around races.
	 */
	if ((ccb_h->status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		xpt_done(ccb);
		return;
	}

	/*
	 * Minimal checkings, so that we will not
	 * go outside our tables.
	 */
	if (ccb_h->target_id   == np->myaddr ||
	    ccb_h->target_id   >= SYM_CONF_MAX_TARGET ||
	    ccb_h->target_lun  >= SYM_CONF_MAX_LUN) {
		sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE);
		return;
	}

	/*
	 * Retreive the target and lun descriptors.
	 */
	tp = &np->target[ccb_h->target_id];
	lp = sym_lp(np, tp, ccb_h->target_lun);

	/*
	 * Complete the 1st INQUIRY command with error
	 * condition if the device is flagged NOSCAN
	 * at BOOT in the NVRAM. This may speed up
	 * the boot and maintain coherency with BIOS
	 * device numbering. Clearing the flag allows
	 * user to rescan skipped devices later.
	 * We also return error for devices not flagged
	 * for SCAN LUNS in the NVRAM since some mono-lun
	 * devices behave badly when asked for some non
	 * zero LUN. Btw, this is an absolute hack.:-)
	 * (0x12 is the INQUIRY opcode.)
	 */
	if (!(ccb_h->flags & CAM_CDB_PHYS) &&
	    (0x12 == ((ccb_h->flags & CAM_CDB_POINTER) ?
		  csio->cdb_io.cdb_ptr[0] : csio->cdb_io.cdb_bytes[0]))) {
		if ((tp->usrflags & SYM_SCAN_BOOT_DISABLED) ||
		    ((tp->usrflags & SYM_SCAN_LUNS_DISABLED) &&
		     ccb_h->target_lun != 0)) {
			tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED;
			sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE);
			return;
		}
	}

	/*
	 * Get a control block for this IO.
	 * 'tmp' is non-zero when the CCB carries a valid tag action.
	 */
	tmp = ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0);
	cp = sym_get_ccb(np, ccb_h->target_id, ccb_h->target_lun, tmp);
	if (!cp) {
		sym_xpt_done2(np, ccb, CAM_RESRC_UNAVAIL);
		return;
	}

	/*
	 * Keep track of the IO in our CCB.
	 */
	cp->cam_ccb = ccb;

	/*
	 * Build the IDENTIFY message.
	 * Bit 0x40 grants the disconnect privilege.
	 */
	idmsg = M_IDENTIFY | cp->lun;
	if (cp->tag != NO_TAG || (lp && (lp->current_flags & SYM_DISC_ENABLED)))
		idmsg |= 0x40;

	msgptr = cp->scsi_smsg;
	msglen = 0;
	msgptr[msglen++] = idmsg;

	/*
	 * Build the tag message if present.
	 */
	if (cp->tag != NO_TAG) {
		u_char order = csio->tag_action;

		switch(order) {
		case M_ORDERED_TAG:
			break;
		case M_HEAD_TAG:
			break;
		default:
			order = M_SIMPLE_TAG;
		}
		msgptr[msglen++] = order;

		/*
		 * For less than 128 tags, actual tags are numbered
		 * 1,3,5,..2*MAXTAGS+1,since we may have to deal
		 * with devices that have problems with #TAG 0 or too
		 * great #TAG numbers. For more tags (up to 256),
		 * we use directly our tag number.
		 */
#if SYM_CONF_MAX_TASK > (512/4)
		msgptr[msglen++] = cp->tag;
#else
		msgptr[msglen++] = (cp->tag << 1) + 1;
#endif
	}

	/*
	 * Build a negotiation message if needed.
	 * (nego_status is filled by sym_prepare_nego())
	 */
	cp->nego_status = 0;
	if (tp->tinfo.current.width   != tp->tinfo.goal.width  ||
	    tp->tinfo.current.period  != tp->tinfo.goal.period ||
	    tp->tinfo.current.offset  != tp->tinfo.goal.offset ||
	    tp->tinfo.current.options != tp->tinfo.goal.options) {
		if (!tp->nego_cp && lp)
			msglen += sym_prepare_nego(np, cp, 0, msgptr + msglen);
	}

	/*
	 * Fill in our ccb
	 */

	/*
	 * Startqueue
	 */
	cp->phys.head.go.start   = cpu_to_scr(SCRIPTA_BA (np, select));
	cp->phys.head.go.restart = cpu_to_scr(SCRIPTA_BA (np, resel_dsa));

	/*
	 * select
	 */
	cp->phys.select.sel_id		= cp->target;
	cp->phys.select.sel_scntl3	= tp->head.wval;
	cp->phys.select.sel_sxfer	= tp->head.sval;
	cp->phys.select.sel_scntl4	= tp->head.uval;

	/*
	 * message
	 */
	cp->phys.smsg.addr	= cpu_to_scr(CCB_BA (cp, scsi_smsg));
	cp->phys.smsg.size	= cpu_to_scr(msglen);

	/*
	 * command
	 */
	if (sym_setup_cdb(np, csio, cp) < 0) {
		sym_free_ccb(np, cp);
		sym_xpt_done(np, ccb);
		return;
	}

	/*
	 * status
	 */
#if	0	/* Provision */
	cp->actualquirks	= tp->quirks;
#endif
	cp->actualquirks	= SYM_QUIRK_AUTOSAVE;
	cp->host_status		= cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
	cp->ssss_status		= S_ILLEGAL;
	cp->xerr_status		= 0;
	cp->host_flags		= 0;
	cp->extra_bytes		= 0;

	/*
	 * extreme data pointer.
	 * shall be positive, so -1 is lower than lowest.:)
	 */
	cp->ext_sg  = -1;
	cp->ext_ofs = 0;

	/*
	 * Build the data descriptor block
	 * and start the IO.
	 */
	sym_setup_data_and_start(np, csio, cp);
}

/*
 * Setup buffers and pointers that address the CDB.
 * I bet, physical CDBs will never be used on the planet,
 * since they can be bounced without significant overhead.
 * Returns 0 on success, -1 (with CAM_REQ_INVALID set) on failure.
 */
static int sym_setup_cdb(hcb_p np, struct ccb_scsiio *csio, ccb_p cp)
{
	struct ccb_hdr *ccb_h;
	u32	cmd_ba;
	int	cmd_len;

	ccb_h = &csio->ccb_h;

	/*
	 * CDB is 16 bytes max.
	 */
	if (csio->cdb_len > sizeof(cp->cdb_buf)) {
		sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
		return -1;
	}
	cmd_len = csio->cdb_len;

	if (ccb_h->flags & CAM_CDB_POINTER) {
		/* CDB is a pointer */
		if (!(ccb_h->flags & CAM_CDB_PHYS)) {
			/* CDB pointer is virtual */
			bcopy(csio->cdb_io.cdb_ptr, cp->cdb_buf, cmd_len);
			cmd_ba = CCB_BA (cp, cdb_buf[0]);
		} else {
			/* CDB pointer is physical: not supported */
#if 0
			cmd_ba = ((u32)csio->cdb_io.cdb_ptr) & 0xffffffff;
#else
			sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
			return -1;
#endif
		}
	} else {
		/* CDB is in the CAM ccb (buffer) */
		bcopy(csio->cdb_io.cdb_bytes, cp->cdb_buf, cmd_len);
		cmd_ba = CCB_BA (cp, cdb_buf[0]);
	}

	cp->phys.cmd.addr	= cpu_to_scr(cmd_ba);
	cp->phys.cmd.size	= cpu_to_scr(cmd_len);

	return 0;
}

/*
 * Set up data pointers used by SCRIPTS.
 */
static void __inline
sym_setup_data_pointers(hcb_p np, ccb_p cp, int dir)
{
	u32 lastp, goalp;

	/*
	 * No segments means no data.
	 */
	if (!cp->segments)
		dir = CAM_DIR_NONE;

	/*
	 * Set the data pointer.
	 * 'dir' is one of the CAM_DIR_* values; each data table entry
	 * is two 32-bit words, hence the (2*4) per-segment stride.
	 */
	switch(dir) {
	case CAM_DIR_OUT:
		goalp = SCRIPTA_BA (np, data_out2) + 8;
		lastp = goalp - 8 - (cp->segments * (2*4));
		break;
	case CAM_DIR_IN:
		cp->host_flags |= HF_DATA_IN;
		goalp = SCRIPTA_BA (np, data_in2) + 8;
		lastp = goalp - 8 - (cp->segments * (2*4));
		break;
	case CAM_DIR_NONE:
	default:
		lastp = goalp = SCRIPTB_BA (np, no_data);
		break;
	}

	cp->phys.head.lastp = cpu_to_scr(lastp);
	cp->phys.head.goalp = cpu_to_scr(goalp);
	cp->phys.head.savep = cpu_to_scr(lastp);
	cp->startp	    = cp->phys.head.savep;
}


/*
 * Call back routine for the DMA map service.
 * If bounce buffers are used (why ?), we may sleep and then
 * be called there in another context.
 */
static void
sym_execute_ccb(void *arg, bus_dma_segment_t *psegs, int nsegs, int error)
{
	ccb_p	cp;
	hcb_p	np;
	union	ccb *ccb;
	int	s;

	s = splcam();

	cp  = (ccb_p) arg;
	ccb = cp->cam_ccb;
	np  = (hcb_p) cp->arg;

	/*
	 * Deal with weird races.
	 */
	if (sym_get_cam_status(ccb) != CAM_REQ_INPROG)
		goto out_abort;

	/*
	 * Deal with weird errors.
	 */
	if (error) {
		cp->dmamapped = 0;
		sym_set_cam_status(cp->cam_ccb, CAM_REQ_ABORTED);
		goto out_abort;
	}

	/*
	 * Build the data descriptor for the chip.
	 */
	if (nsegs) {
		int retv;
		/* 896 rev 1 requires to be careful about boundaries */
		if (np->device_id == PCI_ID_SYM53C896 && np->revision_id <= 1)
			retv = sym_scatter_sg_physical(np, cp, psegs, nsegs);
		else
			retv = sym_fast_scatter_sg_physical(np,cp, psegs,nsegs);
		if (retv < 0) {
			sym_set_cam_status(cp->cam_ccb, CAM_REQ_TOO_BIG);
			goto out_abort;
		}
	}

	/*
	 * Synchronize the DMA map only if we have
	 * actually mapped the data.
	 */
	if (cp->dmamapped) {
		bus_dmamap_sync(np->data_dmat, cp->dmamap,
			(cp->dmamapped == SYM_DMA_READ ?
				BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
	}

	/*
	 * Set host status to busy state.
	 * May have been set back to HS_WAIT to avoid a race.
	 */
	cp->host_status	= cp->nego_status ? HS_NEGOTIATE : HS_BUSY;

	/*
	 * Set data pointers.
	 */
	sym_setup_data_pointers(np, cp,  (ccb->ccb_h.flags & CAM_DIR_MASK));

	/*
	 * Enqueue this IO in our pending queue.
	 */
	sym_enqueue_cam_ccb(np, ccb);

	/*
	 * When `#ifed 1', the code below makes the driver
	 * panic on the first attempt to write to a SCSI device.
	 * It is the first test we want to do after a driver
	 * change that does not seem obviously safe. :)
	 */
#if 0
	switch (cp->cdb_buf[0]) {
	case 0x0A: case 0x2A: case 0xAA:
		panic("XXXXXXXXXXXXX WRITE NOT YET ALLOWED XXXXXXXXXXXXXX\n");
		MDELAY(10000);
		break;
	default:
		break;
	}
#endif
	/*
	 *	Activate this job.
	 */
	sym_put_start_queue(np, cp);
out:
	splx(s);
	return;
out_abort:
	/* Common failure path: release the CCB and complete the request. */
	sym_free_ccb(np, cp);
	sym_xpt_done(np, ccb);
	goto out;
}

/*
 * How complex it gets to deal with the data in CAM.
 * The Bus Dma stuff makes things still more complex.
7944 */ 7945 static void 7946 sym_setup_data_and_start(hcb_p np, struct ccb_scsiio *csio, ccb_p cp) 7947 { 7948 struct ccb_hdr *ccb_h; 7949 int dir, retv; 7950 7951 ccb_h = &csio->ccb_h; 7952 7953 /* 7954 * Now deal with the data. 7955 */ 7956 cp->data_len = csio->dxfer_len; 7957 cp->arg = np; 7958 7959 /* 7960 * No direction means no data. 7961 */ 7962 dir = (ccb_h->flags & CAM_DIR_MASK); 7963 if (dir == CAM_DIR_NONE) { 7964 sym_execute_ccb(cp, NULL, 0, 0); 7965 return; 7966 } 7967 7968 if (!(ccb_h->flags & CAM_SCATTER_VALID)) { 7969 /* Single buffer */ 7970 if (!(ccb_h->flags & CAM_DATA_PHYS)) { 7971 /* Buffer is virtual */ 7972 int s; 7973 7974 cp->dmamapped = (dir == CAM_DIR_IN) ? 7975 SYM_DMA_READ : SYM_DMA_WRITE; 7976 s = splsoftvm(); 7977 retv = bus_dmamap_load(np->data_dmat, cp->dmamap, 7978 csio->data_ptr, csio->dxfer_len, 7979 sym_execute_ccb, cp, 0); 7980 if (retv == EINPROGRESS) { 7981 cp->host_status = HS_WAIT; 7982 xpt_freeze_simq(np->sim, 1); 7983 csio->ccb_h.status |= CAM_RELEASE_SIMQ; 7984 } 7985 splx(s); 7986 } else { 7987 /* Buffer is physical */ 7988 struct bus_dma_segment seg; 7989 7990 seg.ds_addr = (bus_addr_t) csio->data_ptr; 7991 sym_execute_ccb(cp, &seg, 1, 0); 7992 } 7993 } else { 7994 /* Scatter/gather list */ 7995 struct bus_dma_segment *segs; 7996 7997 if ((ccb_h->flags & CAM_SG_LIST_PHYS) != 0) { 7998 /* The SG list pointer is physical */ 7999 sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID); 8000 goto out_abort; 8001 } 8002 8003 if (!(ccb_h->flags & CAM_DATA_PHYS)) { 8004 /* SG buffer pointers are virtual */ 8005 sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID); 8006 goto out_abort; 8007 } 8008 8009 /* SG buffer pointers are physical */ 8010 segs = (struct bus_dma_segment *)csio->data_ptr; 8011 sym_execute_ccb(cp, segs, csio->sglist_cnt, 0); 8012 } 8013 return; 8014 out_abort: 8015 sym_free_ccb(np, cp); 8016 sym_xpt_done(np, (union ccb *) csio); 8017 } 8018 8019 /* 8020 * Move the scatter list to our data block. 
8021 */ 8022 static int 8023 sym_fast_scatter_sg_physical(hcb_p np, ccb_p cp, 8024 bus_dma_segment_t *psegs, int nsegs) 8025 { 8026 struct sym_tblmove *data; 8027 bus_dma_segment_t *psegs2; 8028 8029 if (nsegs > SYM_CONF_MAX_SG) 8030 return -1; 8031 8032 data = &cp->phys.data[SYM_CONF_MAX_SG-1]; 8033 psegs2 = &psegs[nsegs-1]; 8034 cp->segments = nsegs; 8035 8036 while (1) { 8037 data->addr = cpu_to_scr(psegs2->ds_addr); 8038 data->size = cpu_to_scr(psegs2->ds_len); 8039 if (DEBUG_FLAGS & DEBUG_SCATTER) { 8040 printf ("%s scatter: paddr=%lx len=%ld\n", 8041 sym_name(np), (long) psegs2->ds_addr, 8042 (long) psegs2->ds_len); 8043 } 8044 if (psegs2 != psegs) { 8045 --data; 8046 --psegs2; 8047 continue; 8048 } 8049 break; 8050 } 8051 return 0; 8052 } 8053 8054 8055 /* 8056 * Scatter a SG list with physical addresses into bus addressable chunks. 8057 * We need to ensure 16MB boundaries not to be crossed during DMA of 8058 * each segment, due to some chips being flawed. 8059 */ 8060 #define BOUND_MASK ((1UL<<24)-1) 8061 static int 8062 sym_scatter_sg_physical(hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs) 8063 { 8064 u_long ps, pe, pn; 8065 u_long k; 8066 int s, t; 8067 8068 s = SYM_CONF_MAX_SG - 1; 8069 t = nsegs - 1; 8070 ps = psegs[t].ds_addr; 8071 pe = ps + psegs[t].ds_len; 8072 8073 while (s >= 0) { 8074 pn = (pe - 1) & ~BOUND_MASK; 8075 if (pn <= ps) 8076 pn = ps; 8077 k = pe - pn; 8078 if (DEBUG_FLAGS & DEBUG_SCATTER) { 8079 printf ("%s scatter: paddr=%lx len=%ld\n", 8080 sym_name(np), pn, k); 8081 } 8082 cp->phys.data[s].addr = cpu_to_scr(pn); 8083 cp->phys.data[s].size = cpu_to_scr(k); 8084 --s; 8085 if (pn == ps) { 8086 if (--t < 0) 8087 break; 8088 ps = psegs[t].ds_addr; 8089 pe = ps + psegs[t].ds_len; 8090 } 8091 else 8092 pe = pn; 8093 } 8094 8095 cp->segments = SYM_CONF_MAX_SG - 1 - s; 8096 8097 return t >= 0 ? -1 : 0; 8098 } 8099 #undef BOUND_MASK 8100 8101 /* 8102 * SIM action for non performance critical stuff. 
 */
static void sym_action2(struct cam_sim *sim, union ccb *ccb)
{
	hcb_p	np;
	tcb_p	tp;
	lcb_p	lp;
	struct	ccb_hdr  *ccb_h;

	/*
	 * Retrieve our controller data structure.
	 */
	np = (hcb_p) cam_sim_softc(sim);

	ccb_h = &ccb->ccb_h;

	switch (ccb_h->func_code) {
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts;

		cts  = &ccb->cts;
		tp = &np->target[ccb_h->target_id];

		/*
		 * Update SPI transport settings in TARGET control block.
		 * Update SCSI device settings in LUN control block.
		 */
		lp = sym_lp(np, tp, ccb_h->target_lun);
#ifdef	FreeBSD_New_Tran_Settings
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
#else
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
#endif
			sym_update_trans(np, tp, &tp->tinfo.goal, cts);
			if (lp)
				sym_update_dflags(np, &lp->current_flags, cts);
		}
#ifdef	FreeBSD_New_Tran_Settings
		if (cts->type == CTS_TYPE_USER_SETTINGS) {
#else
		if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
#endif
			sym_update_trans(np, tp, &tp->tinfo.user, cts);
			if (lp)
				sym_update_dflags(np, &lp->user_flags, cts);
		}

		sym_xpt_done2(np, ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts;
		struct sym_trans *tip;
		u_char dflags;

		cts = &ccb->cts;
		tp = &np->target[ccb_h->target_id];
		lp = sym_lp(np, tp, ccb_h->target_lun);

#ifdef	FreeBSD_New_Tran_Settings
#define	cts__scsi (&cts->proto_specific.scsi)
#define	cts__spi  (&cts->xport_specific.spi)
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			tip = &tp->tinfo.current;
			dflags = lp ? lp->current_flags : 0;
		}
		else {
			tip = &tp->tinfo.user;
			dflags = lp ? lp->user_flags : tp->usrflags;
		}

		cts->protocol  = PROTO_SCSI;
		cts->transport = XPORT_SPI;
		cts->protocol_version  = tip->scsi_version;
		cts->transport_version = tip->spi_version;

		cts__spi->sync_period = tip->period;
		cts__spi->sync_offset = tip->offset;
		cts__spi->bus_width   = tip->width;
		cts__spi->ppr_options = tip->options;

		cts__spi->valid = CTS_SPI_VALID_SYNC_RATE
				| CTS_SPI_VALID_SYNC_OFFSET
				| CTS_SPI_VALID_BUS_WIDTH
				| CTS_SPI_VALID_PPR_OPTIONS;

		cts__spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
		if (dflags & SYM_DISC_ENABLED)
			cts__spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
		cts__spi->valid |= CTS_SPI_VALID_DISC;

		cts__scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
		if (dflags & SYM_TAGS_ENABLED)
			cts__scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
		cts__scsi->valid |= CTS_SCSI_VALID_TQ;
#undef	cts__spi
#undef	cts__scsi
#else
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			tip = &tp->tinfo.current;
			dflags = lp ? lp->current_flags : 0;
		}
		else {
			tip = &tp->tinfo.user;
			dflags = lp ? lp->user_flags : tp->usrflags;
		}

		cts->sync_period = tip->period;
		cts->sync_offset = tip->offset;
		cts->bus_width   = tip->width;

		cts->valid = CCB_TRANS_SYNC_RATE_VALID
			   | CCB_TRANS_SYNC_OFFSET_VALID
			   | CCB_TRANS_BUS_WIDTH_VALID;

		cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);

		if (dflags & SYM_DISC_ENABLED)
			cts->flags |= CCB_TRANS_DISC_ENB;

		if (dflags & SYM_TAGS_ENABLED)
			cts->flags |= CCB_TRANS_TAG_ENB;

		cts->valid |= CCB_TRANS_DISC_VALID;
		cts->valid |= CCB_TRANS_TQ_VALID;
#endif
		sym_xpt_done2(np, ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		sym_xpt_done2(np, ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;
		cpi->version_num = 1;
		cpi->hba_inquiry = PI_MDP_ABLE|PI_SDTR_ABLE|PI_TAG_ABLE;
		if ((np->features & FE_WIDE) != 0)
			cpi->hba_inquiry |= PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		if (np->usrflags & SYM_SCAN_TARGETS_HILO)
			cpi->hba_misc |= PIM_SCANHILO;
		if (np->usrflags & SYM_AVOID_BUS_RESET)
			cpi->hba_misc |= PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = (np->features & FE_WIDE) ? 15 : 7;
		/* Semantic problem:)LUN number max = max number of LUNs - 1 */
		cpi->max_lun = SYM_CONF_MAX_LUN-1;
		if (SYM_SETUP_MAX_LUN < SYM_CONF_MAX_LUN)
			cpi->max_lun = SYM_SETUP_MAX_LUN-1;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->initiator_id = np->myaddr;
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Symbios", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);

#ifdef	FreeBSD_New_Tran_Settings
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST;
		if (np->features & FE_ULTRA3) {
			cpi->transport_version = 3;
			cpi->xport_specific.spi.ppr_options =
			    SID_SPI_CLOCK_DT_ST;
		}
#endif
		sym_xpt_done2(np, ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_ABORT:
	{
		union ccb *abort_ccb = ccb->cab.abort_ccb;
		switch(abort_ccb->ccb_h.func_code) {
		case XPT_SCSI_IO:
			if (sym_abort_scsiio(np, abort_ccb, 0) == 0) {
				sym_xpt_done2(np, ccb, CAM_REQ_CMP);
				break;
			}
			/* FALLTHROUGH: abort failed, report CAM_UA_ABORT */
		default:
			sym_xpt_done2(np, ccb, CAM_UA_ABORT);
			break;
		}
		break;
	}
	case XPT_RESET_DEV:
	{
		sym_reset_dev(np, ccb);
		break;
	}
	case XPT_RESET_BUS:
	{
		sym_reset_scsi_bus(np, 0);
		if (sym_verbose) {
			xpt_print_path(np->path);
			printf("SCSI BUS reset delivered.\n");
		}
		sym_init (np, 1);
		sym_xpt_done2(np, ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_ACCEPT_TARGET_IO:
	case XPT_CONT_TARGET_IO:
	case XPT_EN_LUN:
	case XPT_NOTIFY_ACK:
	case XPT_IMMED_NOTIFY:
	case XPT_TERM_IO:
	default:
		/* Target mode and anything else is unsupported. */
		sym_xpt_done2(np, ccb, CAM_REQ_INVALID);
		break;
	}
}

/*
 * Asynchronous notification handler.
 */
static void
sym_async(void *cb_arg, u32 code, struct cam_path *path, void *arg)
{
	hcb_p np;
	struct cam_sim *sim;
	u_int tn;
	tcb_p tp;
	int s;

	s = splcam();

	/* cb_arg is the SIM we registered the callback with (see sym_cam_attach). */
	sim = (struct cam_sim *) cb_arg;
	np = (hcb_p) cam_sim_softc(sim);

	switch (code) {
	case AC_LOST_DEVICE:
		tn = xpt_path_target_id(path);
		if (tn >= SYM_CONF_MAX_TARGET)
			break;

		tp = &np->target[tn];

		/*
		 * Forget everything negotiated with the departed device
		 * so a new device at the same ID starts from scratch.
		 * NOTE(review): sval/wval/uval appear to be per-target
		 * register images (wval gets the reset scntl3 value) —
		 * confirm against the HCB declaration.
		 */
		tp->to_reset = 0;
		tp->head.sval = 0;
		tp->head.wval = np->rv_scntl3;
		tp->head.uval = 0;

		/* Back to asynchronous, 8-bit, no PPR options. */
		tp->tinfo.current.period = tp->tinfo.goal.period = 0;
		tp->tinfo.current.offset = tp->tinfo.goal.offset = 0;
		tp->tinfo.current.width = tp->tinfo.goal.width = BUS_8_BIT;
		tp->tinfo.current.options = tp->tinfo.goal.options = 0;

		break;
	default:
		break;
	}

	splx(s);
}

/*
 * Update transfer settings of a target from a CCB_TRANS_SETTINGS,
 * then clamp the result against driver configuration limits and
 * the actual capabilities of the controller.
 */
static void sym_update_trans(hcb_p np, tcb_p tp, struct sym_trans *tip,
			    struct ccb_trans_settings *cts)
{
	/*
	 * Update the infos from the fields the caller marked valid.
	 */
#ifdef	FreeBSD_New_Tran_Settings
#define cts__spi (&cts->xport_specific.spi)
	if ((cts__spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
		tip->width = cts__spi->bus_width;
	if ((cts__spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)
		tip->offset = cts__spi->sync_offset;
	if ((cts__spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
		tip->period = cts__spi->sync_period;
	if ((cts__spi->valid & CTS_SPI_VALID_PPR_OPTIONS) != 0)
		tip->options = (cts__spi->ppr_options & PPR_OPT_DT);
	if (cts->protocol_version != PROTO_VERSION_UNSPECIFIED &&
	    cts->protocol_version != PROTO_VERSION_UNKNOWN)
		tip->scsi_version = cts->protocol_version;
	if (cts->transport_version != XPORT_VERSION_UNSPECIFIED &&
	    cts->transport_version != XPORT_VERSION_UNKNOWN)
		tip->spi_version = cts->transport_version;
#undef cts__spi
#else
	if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
		tip->width = cts->bus_width;
	if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)
		tip->offset = cts->sync_offset;
	if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
		tip->period = cts->sync_period;
#endif
	/*
	 * Scale against driver configuration limits.
	 */
	if (tip->width > SYM_SETUP_MAX_WIDE) tip->width = SYM_SETUP_MAX_WIDE;
	if (tip->offset > SYM_SETUP_MAX_OFFS) tip->offset = SYM_SETUP_MAX_OFFS;
	if (tip->period < SYM_SETUP_MIN_SYNC) tip->period = SYM_SETUP_MIN_SYNC;

	/*
	 * Scale against actual controller BUS width.
	 */
	if (tip->width > np->maxwide)
		tip->width = np->maxwide;

#ifdef	FreeBSD_New_Tran_Settings
	/*
	 * Only accept DT if controller supports (C10 + ULTRA3)
	 * and WIDE with a non-zero sync offset was asked for.
	 */
	if (!((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) ||
	    !(tip->width == BUS_16_BIT && tip->offset)) {
		tip->options &= ~PPR_OPT_DT;
	}
#else
	/*
	 * For now, only assume DT if period <= 9, BUS 16 and offset != 0.
	 */
	tip->options = 0;
	if ((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3) &&
	    tip->period <= 9 && tip->width == BUS_16_BIT && tip->offset) {
		tip->options |= PPR_OPT_DT;
	}
#endif

	/*
	 * Scale period factor and offset against controller limits.
	 * DT and ST transfers have distinct limits in the HCB.
	 */
	if (tip->options & PPR_OPT_DT) {
		if (tip->period < np->minsync_dt)
			tip->period = np->minsync_dt;
		if (tip->period > np->maxsync_dt)
			tip->period = np->maxsync_dt;
		if (tip->offset > np->maxoffs_dt)
			tip->offset = np->maxoffs_dt;
	}
	else {
		if (tip->period < np->minsync)
			tip->period = np->minsync;
		if (tip->period > np->maxsync)
			tip->period = np->maxsync;
		if (tip->offset > np->maxoffs)
			tip->offset = np->maxoffs;
	}
}

/*
 * Update disconnect/tagged-queuing flags for a device (logical unit)
 * from a CCB_TRANS_SETTINGS. Only the fields marked valid are applied.
 */
static void
sym_update_dflags(hcb_p np, u_char *flags, struct ccb_trans_settings *cts)
{
#ifdef	FreeBSD_New_Tran_Settings
#define cts__scsi (&cts->proto_specific.scsi)
#define cts__spi (&cts->xport_specific.spi)
	if ((cts__spi->valid & CTS_SPI_VALID_DISC) != 0) {
		if ((cts__spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
			*flags |= SYM_DISC_ENABLED;
		else
			*flags &= ~SYM_DISC_ENABLED;
	}

	if ((cts__scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
		if ((cts__scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
			*flags |= SYM_TAGS_ENABLED;
		else
			*flags &= ~SYM_TAGS_ENABLED;
	}
#undef cts__spi
#undef cts__scsi
#else
	if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
		if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
			*flags |= SYM_DISC_ENABLED;
		else
			*flags &= ~SYM_DISC_ENABLED;
	}

	if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
		if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
			*flags |= SYM_TAGS_ENABLED;
		else
			*flags &= ~SYM_TAGS_ENABLED;
	}
#endif
}


/*=============
 DRIVER INITIALISATION ==================*/


/* newbus glue: only probe and attach are implemented. */
static device_method_t sym_pci_methods[] = {
	DEVMETHOD(device_probe,	 sym_pci_probe),
	DEVMETHOD(device_attach, sym_pci_attach),
	{ 0, 0 }
};

static driver_t sym_pci_driver = {
	"sym",
	sym_pci_methods,
	sizeof(struct sym_hcb)
};

static devclass_t sym_devclass;

DRIVER_MODULE(sym, pci, sym_pci_driver, sym_devclass, 0, 0);
MODULE_DEPEND(sym, cam, 1, 1, 1);
MODULE_DEPEND(sym, pci, 1, 1, 1);


/*
 * Table of supported chips, looked up by sym_find_pci_chip().
 * NOTE(review): field order presumed to match struct sym_pci_chip:
 * device id, highest matching PCI revision, name, then four numeric
 * capabilities (attach copies them into nr_divisor/offset_max/burst_max
 * among others) and the feature flags — confirm against sym_conf.h.
 * Entries for the same device id must be ordered by ascending revision.
 */
static struct sym_pci_chip sym_pci_dev_table[] = {
 {PCI_ID_SYM53C810, 0x0f, "810", 4, 8, 4, 64,
 FE_ERL}
 ,
#ifdef SYM_DEBUG_GENERIC_SUPPORT
 {PCI_ID_SYM53C810, 0xff, "810a", 4,  8, 4, 1,
 FE_BOF}
 ,
#else
 {PCI_ID_SYM53C810, 0xff, "810a", 4,  8, 4, 1,
 FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF}
 ,
#endif
 {PCI_ID_SYM53C815, 0xff, "815", 4,  8, 4, 64,
 FE_BOF|FE_ERL}
 ,
 {PCI_ID_SYM53C825, 0x0f, "825", 6,  8, 4, 64,
 FE_WIDE|FE_BOF|FE_ERL|FE_DIFF}
 ,
 {PCI_ID_SYM53C825, 0xff, "825a", 6,  8, 4, 2,
 FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM|FE_DIFF}
 ,
 {PCI_ID_SYM53C860, 0xff, "860", 4,  8, 5, 1,
 FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN}
 ,
 {PCI_ID_SYM53C875, 0x01, "875", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_CLK80|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF}
 ,
 {PCI_ID_SYM53C875, 0xff, "875", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF}
 ,
 {PCI_ID_SYM53C875_2, 0xff, "875", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF}
 ,
 {PCI_ID_SYM53C885, 0xff, "885", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF}
 ,
#ifdef SYM_DEBUG_GENERIC_SUPPORT
 {PCI_ID_SYM53C895, 0xff, "895", 6, 31, 7, 2,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|
 FE_RAM|FE_LCKFRQ}
 ,
#else
 {PCI_ID_SYM53C895, 0xff, "895", 6, 31, 7, 2,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_LCKFRQ}
 ,
#endif
 {PCI_ID_SYM53C896, 0xff, "896", 6, 31, 7, 4,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
 ,
 {PCI_ID_SYM53C895A, 0xff, "895a", 6, 31, 7, 4,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
 ,
 {PCI_ID_LSI53C1010, 0x00, "1010-33", 6, 31, 7, 8,
 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC|
 FE_C10}
 ,
 {PCI_ID_LSI53C1010, 0xff, "1010-33", 6, 31, 7, 8,
 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC|
 FE_C10|FE_U3EN}
 ,
 {PCI_ID_LSI53C1010_2, 0xff, "1010-66", 6, 31, 7, 8,
 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_66MHZ|FE_CRC|
 FE_C10|FE_U3EN}
 ,
 {PCI_ID_LSI53C1510D, 0xff, "1510d", 6, 31, 7, 4,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_IO256|FE_LEDC}
};

/* Number of entries in the chip table. */
#define sym_pci_num_devs \
	(sizeof(sym_pci_dev_table) / sizeof(sym_pci_dev_table[0]))

/*
 * Look up the chip table.
 *
 * Return a pointer to the chip entry if found,
 * zero otherwise.
 */
static struct sym_pci_chip *
sym_find_pci_chip(device_t dev)
{
	struct sym_pci_chip *chip;
	int i;
	u_short device_id;
	u_char revision;

	if (pci_get_vendor(dev) != PCI_VENDOR_NCR)
		return 0;

	device_id = pci_get_device(dev);
	revision = pci_get_revid(dev);

	/*
	 * Entries for a given device id are ordered by ascending
	 * revision_id, so the first entry whose revision_id is >=
	 * the chip's revision is the best match.
	 */
	for (i = 0; i < sym_pci_num_devs; i++) {
		chip = &sym_pci_dev_table[i];
		if (device_id != chip->device_id)
			continue;
		if (revision > chip->revision_id)
			continue;
		return chip;
	}

	return 0;
}

/*
 * Tell upper layer if the chip is supported.
 * Returns 0 (or a low priority) on success, ENXIO otherwise.
 */
static int
sym_pci_probe(device_t dev)
{
	struct sym_pci_chip *chip;

	chip = sym_find_pci_chip(dev);
	if (chip && sym_find_firmware(chip)) {
		device_set_desc(dev, chip->name);
		/* Low priority (-2000) lets another driver claim the chip. */
		return (chip->lp_probe_bit & SYM_SETUP_LP_PROBE_MAP)? -2000 : 0;
	}
	return ENXIO;
}

/*
 * Attach a sym53c8xx device.
 *
 * Allocates the HCB and every DMA/bus resource the controller needs,
 * loads and binds the SCRIPTS firmware, then registers with CAM.
 * On any failure, everything recorded in the HCB so far is released
 * through sym_pci_free(). Returns 0 on success, ENXIO on failure.
 */
static int
sym_pci_attach(device_t dev)
{
	struct sym_pci_chip *chip;
	u_short command;
	u_char cachelnsz;
	struct sym_hcb *np = 0;
	struct sym_nvram nvram;
	struct sym_fw *fw = 0;
	int i;
	bus_dma_tag_t bus_dmat;

	/*
	 * I expected to be told about a parent
	 * DMA tag, but didn't find any.
	 */
	bus_dmat = NULL;

	/*
	 * Only probed devices should be attached.
	 * We just enjoy being paranoid. :)
	 */
	chip = sym_find_pci_chip(dev);
	if (chip == NULL || (fw = sym_find_firmware(chip)) == NULL)
		return (ENXIO);

	/*
	 * Allocate immediately the host control block,
	 * since we are only expecting to succeed. :)
	 * We keep track in the HCB of all the resources that
	 * are to be released on error.
	 */
	np = __sym_calloc_dma(bus_dmat, sizeof(*np), "HCB");
	if (np)
		np->bus_dmat = bus_dmat;
	else
		goto attach_failed;

	/*
	 * Copy some useful infos to the HCB.
	 */
	np->hcb_ba = vtobus(np);
	np->verbose = bootverbose;
	np->device = dev;
	np->unit = device_get_unit(dev);
	np->device_id = pci_get_device(dev);
	np->revision_id = pci_get_revid(dev);
	np->features = chip->features;
	np->clock_divn = chip->nr_divisor;
	np->maxoffs = chip->offset_max;
	np->maxburst = chip->burst_max;
	np->scripta_sz = fw->a_size;
	np->scriptb_sz = fw->b_size;
	np->fw_setup = fw->setup;
	np->fw_patch = fw->patch;
	np->fw_name = fw->name;

	/*
	 * Edit its name.
	 */
	snprintf(np->inst_name, sizeof(np->inst_name), "sym%d", np->unit);

	/*
	 * Initialize the CCB free and busy queues.
	 */
	sym_que_init(&np->free_ccbq);
	sym_que_init(&np->busy_ccbq);
	sym_que_init(&np->comp_ccbq);
	sym_que_init(&np->cam_ccbq);

	/*
	 * Allocate a tag for the DMA of user data.
	 * Boundary and max segment size are 16MB (1<<24).
	 */
	if (bus_dma_tag_create(np->bus_dmat, 1, (1<<24),
				BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				NULL, NULL,
				BUS_SPACE_MAXSIZE, SYM_CONF_MAX_SG,
				(1<<24), 0, busdma_lock_mutex, &Giant,
				&np->data_dmat)) {
		device_printf(dev, "failed to create DMA tag.\n");
		goto attach_failed;
	}
	/*
	 * Read and apply some fix-ups to the PCI COMMAND
	 * register. We want the chip to be enabled for:
	 * - BUS mastering
	 * - PCI parity checking (reporting would also be fine)
	 * - Write And Invalidate.
	 */
	command = pci_read_config(dev, PCIR_COMMAND, 2);
	command |= PCIM_CMD_BUSMASTEREN;
	command |= PCIM_CMD_PERRESPEN;
	command |= /* PCIM_CMD_MWIEN */ 0x0010;
	pci_write_config(dev, PCIR_COMMAND, command, 2);

	/*
	 * Let the device know about the cache line size,
	 * if it doesn't yet.
	 */
	cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (!cachelnsz) {
		cachelnsz = 8;
		pci_write_config(dev, PCIR_CACHELNSZ, cachelnsz, 1);
	}

	/*
	 * Alloc/get/map/retrieve everything that deals with MMIO.
	 */
	if ((command & PCIM_CMD_MEMEN) != 0) {
		int regs_id = SYM_PCI_MMIO;
		np->mmio_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						      &regs_id, RF_ACTIVE);
	}
	if (!np->mmio_res) {
		device_printf(dev, "failed to allocate MMIO resources\n");
		goto attach_failed;
	}
	np->mmio_bsh = rman_get_bushandle(np->mmio_res);
	np->mmio_tag = rman_get_bustag(np->mmio_res);
	np->mmio_pa = rman_get_start(np->mmio_res);
	np->mmio_va = (vm_offset_t) rman_get_virtual(np->mmio_res);
	np->mmio_ba = np->mmio_pa;

	/*
	 * Allocate the IRQ.
	 */
	i = 0;
	np->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i,
					     RF_ACTIVE | RF_SHAREABLE);
	if (!np->irq_res) {
		device_printf(dev, "failed to allocate IRQ resource\n");
		goto attach_failed;
	}

#ifdef	SYM_CONF_IOMAPPED
	/*
	 * User want us to use normal IO with PCI.
	 * Alloc/get/map/retrieve everything that deals with IO.
	 */
	if ((command & PCI_COMMAND_IO_ENABLE) != 0) {
		int regs_id = SYM_PCI_IO;
		np->io_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
						    &regs_id, RF_ACTIVE);
	}
	if (!np->io_res) {
		device_printf(dev, "failed to allocate IO resources\n");
		goto attach_failed;
	}
	np->io_bsh = rman_get_bushandle(np->io_res);
	np->io_tag = rman_get_bustag(np->io_res);
	np->io_port = rman_get_start(np->io_res);

#endif /* SYM_CONF_IOMAPPED */

	/*
	 * If the chip has RAM.
	 * Alloc/get/map/retrieve the corresponding resources.
	 */
	if ((np->features & (FE_RAM|FE_RAM8K)) &&
	    (command & PCIM_CMD_MEMEN) != 0) {
		int regs_id = SYM_PCI_RAM;
		if (np->features & FE_64BIT)
			regs_id = SYM_PCI_RAM64;
		np->ram_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						     &regs_id, RF_ACTIVE);
		if (!np->ram_res) {
			device_printf(dev,"failed to allocate RAM resources\n");
			goto attach_failed;
		}
		np->ram_id = regs_id;
		np->ram_bsh = rman_get_bushandle(np->ram_res);
		np->ram_tag = rman_get_bustag(np->ram_res);
		np->ram_pa = rman_get_start(np->ram_res);
		np->ram_va = (vm_offset_t) rman_get_virtual(np->ram_res);
		np->ram_ba = np->ram_pa;
	}

	/*
	 * Save setting of some IO registers, so we will
	 * be able to probe specific implementations.
	 */
	sym_save_initial_setting (np);

	/*
	 * Reset the chip now, since it has been reported
	 * that SCSI clock calibration may not work properly
	 * if the chip is currently active.
	 */
	sym_chip_reset (np);

	/*
	 * Try to read the user set-up.
	 */
	(void) sym_read_nvram(np, &nvram);

	/*
	 * Prepare controller and devices settings, according
	 * to chip features, user set-up and driver set-up.
	 */
	(void) sym_prepare_setting(np, &nvram);

	/*
	 * Check the PCI clock frequency.
	 * Must be performed after prepare_setting since it destroys
	 * STEST1 that is used to probe for the clock doubler.
	 */
	i = sym_getpciclock(np);
	if (i > 37000)
		device_printf(dev, "PCI BUS clock seems too high: %u KHz.\n",i);

	/*
	 * Allocate the start queue.
	 */
	np->squeue = (u32 *) sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"SQUEUE");
	if (!np->squeue)
		goto attach_failed;
	np->squeue_ba = vtobus(np->squeue);

	/*
	 * Allocate the done queue.
	 */
	np->dqueue = (u32 *) sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"DQUEUE");
	if (!np->dqueue)
		goto attach_failed;
	np->dqueue_ba = vtobus(np->dqueue);

	/*
	 * Allocate the target bus address array.
	 */
	np->targtbl = (u32 *) sym_calloc_dma(256, "TARGTBL");
	if (!np->targtbl)
		goto attach_failed;
	np->targtbl_ba = vtobus(np->targtbl);

	/*
	 * Allocate SCRIPTS areas.
	 */
	np->scripta0 = sym_calloc_dma(np->scripta_sz, "SCRIPTA0");
	np->scriptb0 = sym_calloc_dma(np->scriptb_sz, "SCRIPTB0");
	if (!np->scripta0 || !np->scriptb0)
		goto attach_failed;

	/*
	 * Allocate some CCB. We need at least ONE.
	 */
	if (!sym_alloc_ccb(np))
		goto attach_failed;

	/*
	 * Calculate BUS addresses where we are going
	 * to load the SCRIPTS.
	 */
	np->scripta_ba = vtobus(np->scripta0);
	np->scriptb_ba = vtobus(np->scriptb0);
	np->scriptb0_ba = np->scriptb_ba;

	/*
	 * If the chip has on-board RAM, run the SCRIPTS from there
	 * instead of from host memory.
	 */
	if (np->ram_ba) {
		np->scripta_ba = np->ram_ba;
		if (np->features & FE_RAM8K) {
			np->ram_ws = 8192;
			np->scriptb_ba = np->scripta_ba + 4096;
#if BITS_PER_LONG > 32
			np->scr_ram_seg = cpu_to_scr(np->scripta_ba >> 32);
#endif
		}
		else
			np->ram_ws = 4096;
	}

	/*
	 * Copy scripts to controller instance.
	 */
	bcopy(fw->a_base, np->scripta0, np->scripta_sz);
	bcopy(fw->b_base, np->scriptb0, np->scriptb_sz);

	/*
	 * Setup variable parts in scripts and compute
	 * scripts bus addresses used from the C code.
	 */
	np->fw_setup(np, fw);

	/*
	 * Bind SCRIPTS with physical addresses usable by the
	 * SCRIPTS processor (as seen from the BUS = BUS addresses).
	 */
	sym_fw_bind_script(np, (u32 *) np->scripta0, np->scripta_sz);
	sym_fw_bind_script(np, (u32 *) np->scriptb0, np->scriptb_sz);

#ifdef SYM_CONF_IARB_SUPPORT
	/*
	 * If user wants IARB to be set when we win arbitration
	 * and have other jobs, compute the max number of consecutive
	 * settings of IARB hints before we leave devices a chance to
	 * arbitrate for reselection.
	 */
#ifdef	SYM_SETUP_IARB_MAX
	np->iarb_max = SYM_SETUP_IARB_MAX;
#else
	np->iarb_max = 4;
#endif
#endif

	/*
	 * Prepare the idle and invalid task actions.
	 */
	np->idletask.start = cpu_to_scr(SCRIPTA_BA (np, idle));
	np->idletask.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l));
	np->idletask_ba = vtobus(&np->idletask);

	np->notask.start = cpu_to_scr(SCRIPTA_BA (np, idle));
	np->notask.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l));
	np->notask_ba = vtobus(&np->notask);

	np->bad_itl.start = cpu_to_scr(SCRIPTA_BA (np, idle));
	np->bad_itl.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l));
	np->bad_itl_ba = vtobus(&np->bad_itl);

	np->bad_itlq.start = cpu_to_scr(SCRIPTA_BA (np, idle));
	np->bad_itlq.restart = cpu_to_scr(SCRIPTB_BA (np,bad_i_t_l_q));
	np->bad_itlq_ba = vtobus(&np->bad_itlq);

	/*
	 * Allocate and prepare the lun JUMP table that is used
	 * for a target prior the probing of devices (bad lun table).
	 * A private table will be allocated for the target on the
	 * first INQUIRY response received.
	 */
	np->badluntbl = sym_calloc_dma(256, "BADLUNTBL");
	if (!np->badluntbl)
		goto attach_failed;

	np->badlun_sa = cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun));
	for (i = 0 ; i < 64 ; i++)	/* 64 luns/target, no less */
		np->badluntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa));

	/*
	 * Prepare the bus address array that contains the bus
	 * address of each target control block.
	 * For now, assume all logical units are wrong. :)
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
		np->targtbl[i] = cpu_to_scr(vtobus(&np->target[i]));
		np->target[i].head.luntbl_sa =
				cpu_to_scr(vtobus(np->badluntbl));
		np->target[i].head.lun0_sa =
				cpu_to_scr(vtobus(&np->badlun_sa));
	}

	/*
	 * Now check the cache handling of the pci chipset.
	 */
	if (sym_snooptest (np)) {
		device_printf(dev, "CACHE INCORRECTLY CONFIGURED.\n");
		goto attach_failed;
	};

	/*
	 * Now deal with CAM.
	 * Hopefully, we will succeed with that one.:)
	 */
	if (!sym_cam_attach(np))
		goto attach_failed;

	/*
	 * Sigh! we are done.
	 */
	return 0;

	/*
	 * We have failed.
	 * We will try to free all the resources we have
	 * allocated, but if we are a boot device, this
	 * will not help that much.;)
	 */
attach_failed:
	if (np)
		sym_pci_free(np);
	return ENXIO;
}

/*
 * Free everything that have been allocated for this device.
 * Counterpart of sym_pci_attach(); safe to call with a partially
 * initialized HCB since every release is guarded by a NULL check.
 */
static void sym_pci_free(hcb_p np)
{
	SYM_QUEHEAD *qp;
	ccb_p cp;
	tcb_p tp;
	lcb_p lp;
	int target, lun;
	int s;

	/*
	 * First free CAM resources.
	 */
	s = splcam();
	sym_cam_free(np);
	splx(s);

	/*
	 * Now everything should be quiet for us to
	 * free other resources.
	 */
	if (np->ram_res)
		bus_release_resource(np->device, SYS_RES_MEMORY,
				     np->ram_id, np->ram_res);
	if (np->mmio_res)
		bus_release_resource(np->device, SYS_RES_MEMORY,
				     SYM_PCI_MMIO, np->mmio_res);
	if (np->io_res)
		bus_release_resource(np->device, SYS_RES_IOPORT,
				     SYM_PCI_IO, np->io_res);
	if (np->irq_res)
		bus_release_resource(np->device, SYS_RES_IRQ,
				     0, np->irq_res);

	if (np->scriptb0)
		sym_mfree_dma(np->scriptb0, np->scriptb_sz, "SCRIPTB0");
	if (np->scripta0)
		sym_mfree_dma(np->scripta0, np->scripta_sz, "SCRIPTA0");
	if (np->squeue)
		sym_mfree_dma(np->squeue, sizeof(u32)*(MAX_QUEUE*2), "SQUEUE");
	if (np->dqueue)
		sym_mfree_dma(np->dqueue, sizeof(u32)*(MAX_QUEUE*2), "DQUEUE");

	while ((qp = sym_remque_head(&np->free_ccbq)) != 0) {
		cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		bus_dmamap_destroy(np->data_dmat, cp->dmamap);
		sym_mfree_dma(cp->sns_bbuf, SYM_SNS_BBUF_LEN, "SNS_BBUF");
		sym_mfree_dma(cp, sizeof(*cp), "CCB");
	}

	if (np->badluntbl)
		sym_mfree_dma(np->badluntbl, 256,"BADLUNTBL");

	for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) {
		tp = &np->target[target];
		for (lun = 0 ; lun < SYM_CONF_MAX_LUN ; lun++) {
			lp = sym_lp(np, tp, lun);
			if (!lp)
				continue;
			if (lp->itlq_tbl)
				sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4,
				       "ITLQ_TBL");
			if (lp->cb_tags)
				sym_mfree(lp->cb_tags, SYM_CONF_MAX_TASK,
				       "CB_TAGS");
			sym_mfree_dma(lp, sizeof(*lp), "LCB");
		}
#if SYM_CONF_MAX_LUN > 1
		if (tp->lunmp)
			sym_mfree(tp->lunmp, SYM_CONF_MAX_LUN*sizeof(lcb_p),
			       "LUNMP");
#endif
	}
	if (np->targtbl)
		sym_mfree_dma(np->targtbl, 256, "TARGTBL");
	if (np->data_dmat)
		bus_dma_tag_destroy(np->data_dmat);
	sym_mfree_dma(np, sizeof(*np), "HCB");
}

/*
 * Allocate CAM resources and register a bus to CAM.
 */
static int sym_cam_attach(hcb_p np)
{
	struct cam_devq *devq = 0;
	struct cam_sim *sim = 0;
	struct cam_path *path = 0;
	struct ccb_setasync csa;
	int err, s;

	s = splcam();

	/*
	 * Establish our interrupt handler.
	 */
	err = bus_setup_intr(np->device, np->irq_res,
			     INTR_TYPE_CAM | INTR_ENTROPY, sym_intr, np,
			     &np->intr);
	if (err) {
		device_printf(np->device, "bus_setup_intr() failed: %d\n",
			      err);
		goto fail;
	}

	/*
	 * Create the device queue for our sym SIM.
	 */
	devq = cam_simq_alloc(SYM_CONF_MAX_START);
	if (!devq)
		goto fail;

	/*
	 * Construct our SIM entry.
	 */
	sim = cam_sim_alloc(sym_action, sym_poll, "sym", np, np->unit,
			    1, SYM_SETUP_MAX_TAG, devq);
	if (!sim)
		goto fail;
	/* The SIM now owns the devq; clear so the fail path won't free it. */
	devq = 0;

	if (xpt_bus_register(sim, 0) != CAM_SUCCESS)
		goto fail;
	np->sim = sim;
	/* Ownership transferred to np->sim; sym_cam_free handles it now. */
	sim = 0;

	if (xpt_create_path(&path, 0,
			    cam_sim_path(np->sim), CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		goto fail;
	}
	np->path = path;

	/*
	 * Establish our async notification handler
	 * (AC_LOST_DEVICE -> sym_async).
	 */
	xpt_setup_ccb(&csa.ccb_h, np->path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = sym_async;
	csa.callback_arg = np->sim;
	xpt_action((union ccb *)&csa);

	/*
	 * Start the chip now, without resetting the BUS, since
	 * it seems that this must stay under control of CAM.
	 * With LVD/SE capable chips and BUS in SE mode, we may
	 * get a spurious SMBC interrupt.
	 */
	sym_init (np, 0);

	splx(s);
	/* Return 1 on success, 0 on failure (not an errno). */
	return 1;
fail:
	if (sim)
		cam_sim_free(sim, FALSE);
	if (devq)
		cam_simq_free(devq);

	sym_cam_free(np);

	splx(s);
	return 0;
}

/*
 * Free everything that deals with CAM: interrupt handler,
 * SIM registration (and its devq) and the wildcard path.
 */
static void sym_cam_free(hcb_p np)
{
	if (np->intr)
		bus_teardown_intr(np->device, np->irq_res, np->intr);

	if (np->sim) {
		xpt_bus_deregister(cam_sim_path(np->sim));
		cam_sim_free(np->sim, /*free_devq*/ TRUE);
	}
	if (np->path)
		xpt_free_path(np->path);
}

/*============ OPTIONAL NVRAM SUPPORT =================*/

/*
 * Get host setup from NVRAM.
 */
static void sym_nvram_setup_host (hcb_p np, struct sym_nvram *nvram)
{
#ifdef SYM_CONF_NVRAM_SUPPORT
	/*
	 * Get parity checking, host ID, verbose mode
	 * and miscellaneous host flags from NVRAM.
	 */
	switch(nvram->type) {
	case SYM_SYMBIOS_NVRAM:
		if (!(nvram->data.Symbios.flags & SYMBIOS_PARITY_ENABLE))
			np->rv_scntl0 &= ~0x0a;
		np->myaddr = nvram->data.Symbios.host_id & 0x0f;
		if (nvram->data.Symbios.flags & SYMBIOS_VERBOSE_MSGS)
			np->verbose += 1;
		if (nvram->data.Symbios.flags1 & SYMBIOS_SCAN_HI_LO)
			np->usrflags |= SYM_SCAN_TARGETS_HILO;
		if (nvram->data.Symbios.flags2 & SYMBIOS_AVOID_BUS_RESET)
			np->usrflags |= SYM_AVOID_BUS_RESET;
		break;
	case SYM_TEKRAM_NVRAM:
		np->myaddr = nvram->data.Tekram.host_id & 0x0f;
		break;
	default:
		break;
	}
#endif
}

/*
 * Get target setup from NVRAM.
 */
#ifdef SYM_CONF_NVRAM_SUPPORT
static void sym_Symbios_setup_target(hcb_p np,int target, Symbios_nvram *nvram);
static void sym_Tekram_setup_target(hcb_p np,int target, Tekram_nvram *nvram);
#endif

/*
 * Dispatch per-target NVRAM setup to the format-specific handler.
 * A no-op when NVRAM support is not compiled in or no NVRAM was found.
 */
static void
sym_nvram_setup_target (hcb_p np, int target, struct sym_nvram *nvp)
{
#ifdef SYM_CONF_NVRAM_SUPPORT
	switch(nvp->type) {
	case SYM_SYMBIOS_NVRAM:
		sym_Symbios_setup_target (np, target, &nvp->data.Symbios);
		break;
	case SYM_TEKRAM_NVRAM:
		sym_Tekram_setup_target (np, target, &nvp->data.Tekram);
		break;
	default:
		break;
	}
#endif
}

#ifdef SYM_CONF_NVRAM_SUPPORT
/*
 * Get target set-up from Symbios format NVRAM.
 */
static void
sym_Symbios_setup_target(hcb_p np, int target, Symbios_nvram *nvram)
{
	tcb_p tp = &np->target[target];
	Symbios_target *tn = &nvram->target[target];

	/* NVRAM stores the period in ns; convert to a period factor. */
	tp->tinfo.user.period = tn->sync_period ? (tn->sync_period + 3) / 4 : 0;
	tp->tinfo.user.width = tn->bus_width == 0x10 ? BUS_16_BIT : BUS_8_BIT;
	tp->usrtags =
		(tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? SYM_SETUP_MAX_TAG : 0;

	if (!(tn->flags & SYMBIOS_DISCONNECT_ENABLE))
		tp->usrflags &= ~SYM_DISC_ENABLED;
	if (!(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME))
		tp->usrflags |= SYM_SCAN_BOOT_DISABLED;
	if (!(tn->flags & SYMBIOS_SCAN_LUNS))
		tp->usrflags |= SYM_SCAN_LUNS_DISABLED;
}

/*
 * Get target set-up from Tekram format NVRAM.
 */
static void
sym_Tekram_setup_target(hcb_p np, int target, Tekram_nvram *nvram)
{
	tcb_p tp = &np->target[target];
	struct Tekram_target *tn = &nvram->target[target];
	int i;

	if (tn->flags & TEKRAM_SYNC_NEGO) {
		/* sync_index selects a period from the Tekram_sync table. */
		i = tn->sync_index & 0xf;
		tp->tinfo.user.period = Tekram_sync[i];
	}

	tp->tinfo.user.width =
		(tn->flags & TEKRAM_WIDE_NEGO) ? BUS_16_BIT : BUS_8_BIT;

	if (tn->flags & TEKRAM_TAGGED_COMMANDS) {
		tp->usrtags = 2 << nvram->max_tags_index;
	}

	if (tn->flags & TEKRAM_DISCONNECT_ENABLE)
		tp->usrflags |= SYM_DISC_ENABLED;

	/* If any device does not support parity, we will not use this option */
	if (!(tn->flags & TEKRAM_PARITY_CHECK))
		np->rv_scntl0 &= ~0x0a; /* SCSI parity checking disabled */
}

#ifdef	SYM_CONF_DEBUG_NVRAM
/*
 * Dump Symbios format NVRAM for debugging purpose.
 */
static void sym_display_Symbios_nvram(hcb_p np, Symbios_nvram *nvram)
{
	int i;

	/* display Symbios nvram host data */
	printf("%s: HOST ID=%d%s%s%s%s%s%s\n",
		sym_name(np), nvram->host_id & 0x0f,
		(nvram->flags & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"",
		(nvram->flags & SYMBIOS_PARITY_ENABLE) ? " PARITY" :"",
		(nvram->flags & SYMBIOS_VERBOSE_MSGS) ? " VERBOSE" :"",
		(nvram->flags & SYMBIOS_CHS_MAPPING) ? " CHS_ALT" :"",
		(nvram->flags2 & SYMBIOS_AVOID_BUS_RESET)?" NO_RESET" :"",
		(nvram->flags1 & SYMBIOS_SCAN_HI_LO) ? " HI_LO" :"");

	/* display Symbios nvram drive data */
	for (i = 0 ; i < 15 ; i++) {
		struct Symbios_target *tn = &nvram->target[i];
		printf("%s-%d:%s%s%s%s WIDTH=%d SYNC=%d TMO=%d\n",
		sym_name(np), i,
		(tn->flags & SYMBIOS_DISCONNECT_ENABLE) ? " DISC" : "",
		(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME) ? " SCAN_BOOT" : "",
		(tn->flags & SYMBIOS_SCAN_LUNS) ? " SCAN_LUNS" : "",
		(tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? " TCQ" : "",
		tn->bus_width,
		tn->sync_period / 4,
		tn->timeout);
	}
}

/*
 * Dump TEKRAM format NVRAM for debugging purpose.
 */
static u_char Tekram_boot_delay[7] = {3, 5, 10, 20, 30, 60, 120};
static void sym_display_Tekram_nvram(hcb_p np, Tekram_nvram *nvram)
{
	int i, tags, boot_delay;
	char *rem;

	/* display Tekram nvram host data */
	tags = 2 << nvram->max_tags_index;
	boot_delay = 0;
	/*
	 * NOTE(review): the table above has 7 entries (indices 0..6) but
	 * this test rejects index 6 (120s), leaving boot_delay at 0 for
	 * it — looks like an off-by-one, but verify against the Tekram
	 * NVRAM layout before changing (display-only code).
	 */
	if (nvram->boot_delay_index < 6)
		boot_delay = Tekram_boot_delay[nvram->boot_delay_index];
	switch((nvram->flags & TEKRAM_REMOVABLE_FLAGS) >> 6) {
	default:
	case 0:	rem = "";			break;
	case 1: rem = " REMOVABLE=boot device";	break;
	case 2: rem = " REMOVABLE=all";		break;
	}

	/*
	 * NOTE(review): SYMBIOS_SCAM_ENABLE is tested against the Tekram
	 * flags1 field below — presumably the bit position is shared
	 * between both formats; confirm against the NVRAM definitions.
	 */
	printf("%s: HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n",
		sym_name(np), nvram->host_id & 0x0f,
		(nvram->flags1 & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"",
		(nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES" :"",
		(nvram->flags & TEKRAM_DRIVES_SUP_1GB) ? " >1GB" :"",
		(nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET" :"",
		(nvram->flags & TEKRAM_ACTIVE_NEGATION) ? " ACT_NEG" :"",
		(nvram->flags & TEKRAM_IMMEDIATE_SEEK) ? " IMM_SEEK" :"",
		(nvram->flags & TEKRAM_SCAN_LUNS) ? " SCAN_LUNS" :"",
		(nvram->flags1 & TEKRAM_F2_F6_ENABLED) ? " F2_F6" :"",
		rem, boot_delay, tags);

	/* display Tekram nvram drive data */
	for (i = 0; i <= 15; i++) {
		int sync, j;
		struct Tekram_target *tn = &nvram->target[i];
		j = tn->sync_index & 0xf;
		sync = Tekram_sync[j];
		printf("%s-%d:%s%s%s%s%s%s PERIOD=%d\n",
		sym_name(np), i,
		(tn->flags & TEKRAM_PARITY_CHECK) ? " PARITY" : "",
		(tn->flags & TEKRAM_SYNC_NEGO) ? " SYNC" : "",
		(tn->flags & TEKRAM_DISCONNECT_ENABLE) ? " DISC" : "",
		(tn->flags & TEKRAM_START_CMD) ? " START" : "",
		(tn->flags & TEKRAM_TAGGED_COMMANDS) ? " TCQ" : "",
		(tn->flags & TEKRAM_WIDE_NEGO) ? " WIDE" : "",
		sync);
	}
}
#endif	/* SYM_CONF_DEBUG_NVRAM */
#endif	/* SYM_CONF_NVRAM_SUPPORT */


/*
 * Try reading Symbios or Tekram NVRAM
 */
#ifdef SYM_CONF_NVRAM_SUPPORT
static int sym_read_Symbios_nvram (hcb_p np, Symbios_nvram *nvram);
static int sym_read_Tekram_nvram (hcb_p np, Tekram_nvram *nvram);
#endif

/*
 * Probe for NVRAM and fill in *nvp.
 * Returns the detected NVRAM type (nvp->type), 0 when none found
 * or when NVRAM support is not compiled in.
 */
static int sym_read_nvram(hcb_p np, struct sym_nvram *nvp)
{
#ifdef SYM_CONF_NVRAM_SUPPORT
	/*
	 * Try to read SYMBIOS nvram.
	 * Try to read TEKRAM nvram if Symbios nvram not found.
	 */
	if (SYM_SETUP_SYMBIOS_NVRAM &&
	    !sym_read_Symbios_nvram (np, &nvp->data.Symbios)) {
		nvp->type = SYM_SYMBIOS_NVRAM;
#ifdef SYM_CONF_DEBUG_NVRAM
		sym_display_Symbios_nvram(np, &nvp->data.Symbios);
#endif
	}
	else if (SYM_SETUP_TEKRAM_NVRAM &&
		!sym_read_Tekram_nvram (np, &nvp->data.Tekram)) {
		nvp->type = SYM_TEKRAM_NVRAM;
#ifdef SYM_CONF_DEBUG_NVRAM
		sym_display_Tekram_nvram(np, &nvp->data.Tekram);
#endif
	}
	else
		nvp->type = 0;
#else
	nvp->type = 0;
#endif
	return nvp->type;
}


#ifdef SYM_CONF_NVRAM_SUPPORT

/*
 * 24C16 EEPROM reading.
 *
 * GPIO0 - data in/data out
 * GPIO1 - clock
 * Symbios NVRAM wiring now also used by Tekram.
 */

#define SET_BIT 0
#define CLR_BIT 1
#define SET_CLK 2
#define CLR_CLK 3

/*
 * Drive one GPIO line of the serial EEPROM interface.
 *
 * SET_BIT / CLR_BIT act on the data line (GPIO0): SET_BIT ORs in
 * 'write_bit' (callers pass 0 or 1), CLR_BIT unconditionally clears
 * bit 0 (mask 0xfe).  SET_CLK / CLR_CLK act on the clock line
 * (GPIO1, mask 0x02).  The cached '*gpreg' shadow is updated and
 * written back to the chip, with a 5 us settle delay on both sides.
 */
static void S24C16_set_bit(hcb_p np, u_char write_bit, u_char *gpreg,
			  int bit_mode)
{
	UDELAY (5);
	switch (bit_mode){
	case SET_BIT:
		*gpreg |= write_bit;
		break;
	case CLR_BIT:
		*gpreg &= 0xfe;
		break;
	case SET_CLK:
		*gpreg |= 0x02;
		break;
	case CLR_CLK:
		*gpreg &= 0xfd;
		break;
	}
	OUTB (nc_gpreg, *gpreg);
	UDELAY (5);
}

/*
 * Send START condition to NVRAM to wake it up
 * (data driven high, then pulled low while the clock is high).
 */
static void S24C16_start(hcb_p np, u_char *gpreg)
{
	S24C16_set_bit(np, 1, gpreg, SET_BIT);
	S24C16_set_bit(np, 0, gpreg, SET_CLK);
	S24C16_set_bit(np, 0, gpreg, CLR_BIT);
	S24C16_set_bit(np, 0, gpreg, CLR_CLK);
}

/*
 * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZzzzz!!
 * (data raised while the clock is high).
 */
static void S24C16_stop(hcb_p np, u_char *gpreg)
{
	S24C16_set_bit(np, 0, gpreg, SET_CLK);
	S24C16_set_bit(np, 1, gpreg, SET_BIT);
}

/*
 * Clock one bit to/from the NVRAM: place 'write_bit' on the data
 * line, raise the clock, and - when 'read_bit' is non-NULL - sample
 * GPREG while the clock is high (callers mask everything but bit 0).
 * Read if GPIO0 is configured as an input, write if it is an output.
 */
static void S24C16_do_bit(hcb_p np, u_char *read_bit, u_char write_bit,
			  u_char *gpreg)
{
	S24C16_set_bit(np, write_bit, gpreg, SET_BIT);
	S24C16_set_bit(np, 0, gpreg, SET_CLK);
	if (read_bit)
		*read_bit = INB (nc_gpreg);
	S24C16_set_bit(np, 0, gpreg, CLR_CLK);
	S24C16_set_bit(np, 0, gpreg, CLR_BIT);
}

/*
 * Output an ACK to the NVRAM after reading,
 * change GPIO0 to output and when done back to an input.
 */
static void S24C16_write_ack(hcb_p np, u_char write_bit, u_char *gpreg,
			     u_char *gpcntl)
{
	OUTB (nc_gpcntl, *gpcntl & 0xfe);	/* GPIO0 -> output */
	S24C16_do_bit(np, 0, write_bit, gpreg);
	OUTB (nc_gpcntl, *gpcntl);		/* restore direction */
}

/*
 * Input an ACK from NVRAM after writing,
 * change GPIO0 to input and when done back to an output.
 */
static void S24C16_read_ack(hcb_p np, u_char *read_bit, u_char *gpreg,
			    u_char *gpcntl)
{
	OUTB (nc_gpcntl, *gpcntl | 0x01);	/* GPIO0 -> input */
	S24C16_do_bit(np, read_bit, 1, gpreg);
	OUTB (nc_gpcntl, *gpcntl);		/* restore direction */
}

/*
 * WRITE a byte to the NVRAM (MSB first) and then get an ACK to see
 * it was accepted OK.  GPIO0 must already be set as an output.
 * '*ack_data' receives the raw GPREG sample; callers treat bit 0
 * set as "no acknowledge".
 */
static void S24C16_write_byte(hcb_p np, u_char *ack_data, u_char write_data,
			      u_char *gpreg, u_char *gpcntl)
{
	int x;

	for (x = 0; x < 8; x++)
		S24C16_do_bit(np, 0, (write_data >> (7 - x)) & 0x01, gpreg);

	S24C16_read_ack(np, ack_data, gpreg, gpcntl);
}

/*
 * READ a byte from the NVRAM (MSB first) and then send an ACK to
 * say we have got it.  GPIO0 must already be set as an input.
 * 'ack_data' is the ack bit sent back (callers pass 1 on the last
 * byte of a sequential read).
 */
static void S24C16_read_byte(hcb_p np, u_char *read_data, u_char ack_data,
			     u_char *gpreg, u_char *gpcntl)
{
	int x;
	u_char read_bit;

	*read_data = 0;
	for (x = 0; x < 8; x++) {
		S24C16_do_bit(np, &read_bit, 1, gpreg);
		*read_data |= ((read_bit & 0x01) << (7 - x));
	}

	S24C16_write_ack(np, ack_data, gpreg, gpcntl);
}

/*
 * Read 'len' bytes starting at 'offset'.
 */
static int sym_read_S24C16_nvram (hcb_p np, int offset, u_char *data, int len)
{
	u_char	gpcntl, gpreg;
	u_char	old_gpcntl, old_gpreg;
	u_char	ack_data;
	int	retv = 1;	/* assume failure until the read completes */
	int	x;

	/* save current state of GPCNTL and GPREG */
	old_gpreg	= INB (nc_gpreg);
	old_gpcntl	= INB (nc_gpcntl);
	gpcntl		= old_gpcntl & 0x1c;

	/* set up GPREG & GPCNTL to set GPIO0 and GPIO1 in to known state */
	OUTB (nc_gpreg,  old_gpreg);
	OUTB (nc_gpcntl, gpcntl);

	/* this is to set NVRAM into a known state with GPIO0/1 both low */
	gpreg = old_gpreg;
	S24C16_set_bit(np, 0, &gpreg, CLR_CLK);
	S24C16_set_bit(np, 0, &gpreg, CLR_BIT);

	/* now set NVRAM inactive with GPIO0/1 both high */
	S24C16_stop(np, &gpreg);

	/* activate NVRAM */
	S24C16_start(np, &gpreg);

	/*
	 * Write device code and random address MSB.
	 * 0xa0 is the 24C16 device select code; bits 3:1 carry the
	 * high-order block-address bits (offset >> 8, shifted into
	 * place), bit 0 clear = write (address phase).
	 */
	S24C16_write_byte(np, &ack_data,
		0xa0 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
	if (ack_data & 0x01)
		goto out;	/* no ACK - device absent or busy */

	/* write random address LSB */
	S24C16_write_byte(np, &ack_data,
		offset & 0xff, &gpreg, &gpcntl);
	if (ack_data & 0x01)
		goto out;

	/* regenerate START state to set up for reading */
	S24C16_start(np, &gpreg);

	/* rewrite device code and address MSB with read bit set (lsb = 0x01) */
	S24C16_write_byte(np, &ack_data,
		0xa1 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
	if (ack_data & 0x01)
		goto out;

	/* now set up GPIO0 for inputting data */
	gpcntl |= 0x01;
	OUTB (nc_gpcntl, gpcntl);

	/*
	 * Input all requested data - only part of total NVRAM.
	 * NAK (ack bit 1) the final byte to end the sequential read.
	 */
	for (x = 0; x < len; x++)
		S24C16_read_byte(np, &data[x], (x == (len-1)), &gpreg, &gpcntl);

	/* finally put NVRAM back in inactive mode */
	gpcntl &= 0xfe;
	OUTB (nc_gpcntl, gpcntl);
	S24C16_stop(np, &gpreg);
	retv = 0;
out:
	/* return GPIO0/1 to original states after having accessed NVRAM */
	OUTB (nc_gpcntl, old_gpcntl);
	OUTB (nc_gpreg,  old_gpreg);

	return retv;
}

#undef SET_BIT /* 0 */
#undef CLR_BIT /* 1 */
#undef SET_CLK /* 2 */
#undef CLR_CLK /* 3 */

/*
 * Try reading Symbios NVRAM.
 * Return 0 if OK (signature, byte count and checksum all verified).
 */
static int sym_read_Symbios_nvram (hcb_p np, Symbios_nvram *nvram)
{
	static u_char Symbios_trailer[6] = {0xfe, 0xfe, 0, 0, 0, 0};
	u_char *data = (u_char *) nvram;
	int len  = sizeof(*nvram);
	u_short	csum;
	int x;

	/* probe the 24c16 and read the SYMBIOS 24c16 area */
	if (sym_read_S24C16_nvram (np, SYMBIOS_NVRAM_ADDRESS, data, len))
		return 1;

	/* check valid NVRAM signature, verify byte count and checksum */
	if (nvram->type != 0 ||
	    bcmp(nvram->trailer, Symbios_trailer, 6) ||
	    nvram->byte_count != len - 12)
		return 1;

	/*
	 * Verify checksum: 16-bit sum of everything between the
	 * 6-byte header and the 6-byte trailer.
	 */
	for (x = 6, csum = 0; x < len - 6; x++)
		csum += data[x];
	if (csum != nvram->checksum)
		return 1;

	return 0;
}

/*
 * 93C46 EEPROM reading (Microwire-style bus, bit-banged through the
 * chip's general purpose I/O register).
 *
 * GPIO0 - data in
 * GPIO1 - data out
 * GPIO2 - clock
 * GPIO4 - chip select
 *
 * Used by Tekram.
 */

/*
 * Pulse the clock line (GPIO2, mask 0x04) high and back low.
 * Note: the original comment said GPIO0; the code drives bit 2.
 */
static void T93C46_Clk(hcb_p np, u_char *gpreg)
{
	OUTB (nc_gpreg, *gpreg | 0x04);
	UDELAY (2);
	OUTB (nc_gpreg, *gpreg);
}

/*
 * Read bit from NVRAM: clock once, then sample GPREG.
 * Callers mask all but bit 0 (GPIO0, the data-in line).
 */
static void T93C46_Read_Bit(hcb_p np, u_char *read_bit, u_char *gpreg)
{
	UDELAY (2);
	T93C46_Clk(np, gpreg);
	*read_bit = INB (nc_gpreg);
}

/*
 * Write one bit on the data-out line (GPIO1, mask 0x02) with chip
 * select (GPIO4, mask 0x10) asserted, then pulse the clock.
 */
static void T93C46_Write_Bit(hcb_p np, u_char write_bit, u_char *gpreg)
{
	if (write_bit & 0x01)
		*gpreg |= 0x02;
	else
		*gpreg &= 0xfd;

	*gpreg |= 0x10;		/* keep chip select asserted */

	OUTB (nc_gpreg, *gpreg);
	UDELAY (2);

	T93C46_Clk(np, gpreg);
}

/*
 * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZZzzz!!
 * (deassert chip select - GPIO4 - and clock once).
 */
static void T93C46_Stop(hcb_p np, u_char *gpreg)
{
	*gpreg &= 0xef;
	OUTB (nc_gpreg, *gpreg);
	UDELAY (2);

	T93C46_Clk(np, gpreg);
}

/*
 * Send read command and address to NVRAM, then sample GPREG into
 * '*read_bit'; the caller treats bit 0 set as an error (the part
 * did not answer).
 */
static void T93C46_Send_Command(hcb_p np, u_short write_data,
				u_char *read_bit, u_char *gpreg)
{
	int x;

	/* send 9 bits, start bit (1), command (2), address (6) */
	for (x = 0; x < 9; x++)
		T93C46_Write_Bit(np, (u_char) (write_data >> (8 - x)), gpreg);

	*read_bit = INB (nc_gpreg);
}

/*
 * READ 2 bytes from the NVRAM, MSB first.
 */
static void T93C46_Read_Word(hcb_p np, u_short *nvram_data, u_char *gpreg)
{
	int x;
	u_char read_bit;

	*nvram_data = 0;
	for (x = 0; x < 16; x++) {
		T93C46_Read_Bit(np, &read_bit, gpreg);

		if (read_bit & 0x01)
			*nvram_data |=  (0x01 << (15 - x));
		else
			*nvram_data &= ~(0x01 << (15 - x));
	}
}

/*
 * Read Tekram NvRAM data.
 */

/*
 * Read 'len' consecutive 16-bit words from the 93C46 into 'data'.
 * 0x180 encodes start bit (1) + READ opcode (10); the word address
 * goes in the low 6 bits.  Returns 0 on success, 1 if the part
 * fails to answer a command.
 */
static int T93C46_Read_Data(hcb_p np, u_short *data,int len,u_char *gpreg)
{
	u_char	read_bit;
	int	x;

	for (x = 0; x < len; x++)  {

		/* output read command and address */
		T93C46_Send_Command(np, 0x180 | x, &read_bit, gpreg);
		if (read_bit & 0x01)
			return 1; /* Bad */
		T93C46_Read_Word(np, &data[x], gpreg);
		T93C46_Stop(np, gpreg);
	}

	return 0;
}

/*
 * Try reading 93C46 Tekram NVRAM.
 * Returns 0 on success; GPIO state is restored either way.
 */
static int sym_read_T93C46_nvram (hcb_p np, Tekram_nvram *nvram)
{
	u_char gpcntl, gpreg;
	u_char old_gpcntl, old_gpreg;
	int retv = 1;

	/* save current state of GPCNTL and GPREG */
	old_gpreg	= INB (nc_gpreg);
	old_gpcntl	= INB (nc_gpcntl);

	/* set up GPREG & GPCNTL to set GPIO0/1/2/4 in to known state, 0 in,
	   1/2/4 out */
	gpreg = old_gpreg & 0xe9;
	OUTB (nc_gpreg, gpreg);
	gpcntl = (old_gpcntl & 0xe9) | 0x09;
	OUTB (nc_gpcntl, gpcntl);

	/* input all of NVRAM, 64 words */
	retv = T93C46_Read_Data(np, (u_short *) nvram,
				sizeof(*nvram) / sizeof(short), &gpreg);

	/* return GPIO0/1/2/4 to original states after having accessed NVRAM */
	OUTB (nc_gpcntl, old_gpcntl);
	OUTB (nc_gpreg,  old_gpreg);

	return retv;
}

/*
 * Try reading Tekram NVRAM.
 * Return 0 if OK.
9843 */ 9844 static int sym_read_Tekram_nvram (hcb_p np, Tekram_nvram *nvram) 9845 { 9846 u_char *data = (u_char *) nvram; 9847 int len = sizeof(*nvram); 9848 u_short csum; 9849 int x; 9850 9851 switch (np->device_id) { 9852 case PCI_ID_SYM53C885: 9853 case PCI_ID_SYM53C895: 9854 case PCI_ID_SYM53C896: 9855 x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS, 9856 data, len); 9857 break; 9858 case PCI_ID_SYM53C875: 9859 x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS, 9860 data, len); 9861 if (!x) 9862 break; 9863 default: 9864 x = sym_read_T93C46_nvram(np, nvram); 9865 break; 9866 } 9867 if (x) 9868 return 1; 9869 9870 /* verify checksum */ 9871 for (x = 0, csum = 0; x < len - 1; x += 2) 9872 csum += data[x] + (data[x+1] << 8); 9873 if (csum != 0x1234) 9874 return 1; 9875 9876 return 0; 9877 } 9878 9879 #endif /* SYM_CONF_NVRAM_SUPPORT */ 9880