1 /* 2 * Device driver optimized for the Symbios/LSI 53C896/53C895A/53C1010 3 * PCI-SCSI controllers. 4 * 5 * Copyright (C) 1999-2000 Gerard Roudier <groudier@club-internet.fr> 6 * 7 * This driver also supports the following Symbios/LSI PCI-SCSI chips: 8 * 53C810A, 53C825A, 53C860, 53C875, 53C876, 53C885, 53C895. 9 * 10 * but does not support earlier chips as the following ones: 11 * 53C810, 53C815, 53C825. 12 * 13 * This driver for FreeBSD-CAM is derived from the Linux sym53c8xx driver. 14 * Copyright (C) 1998-1999 Gerard Roudier 15 * 16 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been 17 * a port of the FreeBSD ncr driver to Linux-1.2.13. 18 * 19 * The original ncr driver has been written for 386bsd and FreeBSD by 20 * Wolfgang Stanglmeier <wolf@cologne.de> 21 * Stefan Esser <se@mi.Uni-Koeln.de> 22 * Copyright (C) 1994 Wolfgang Stanglmeier 23 * 24 * The initialisation code, and part of the code that addresses 25 * FreeBSD-CAM services is based on the aic7xxx driver for FreeBSD-CAM 26 * written by Justin T. Gibbs. 27 * 28 * Other major contributions: 29 * 30 * NVRAM detection and reading. 31 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk> 32 * 33 *----------------------------------------------------------------------------- 34 * 35 * Redistribution and use in source and binary forms, with or without 36 * modification, are permitted provided that the following conditions 37 * are met: 38 * 1. Redistributions of source code must retain the above copyright 39 * notice, this list of conditions and the following disclaimer. 40 * 2. Redistributions in binary form must reproduce the above copyright 41 * notice, this list of conditions and the following disclaimer in the 42 * documentation and/or other materials provided with the distribution. 43 * 3. The name of the author may not be used to endorse or promote products 44 * derived from this software without specific prior written permission. 
45 * 46 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 47 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 49 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 50 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 51 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 52 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 56 * SUCH DAMAGE. 57 */ 58 59 /* $FreeBSD$ */ 60 61 #define SYM_DRIVER_NAME "sym-1.3.2-20000206" 62 63 /* #define SYM_DEBUG_PM_WITH_WSR (current debugging) */ 64 65 #include <pci.h> 66 #include <stddef.h> /* For offsetof */ 67 68 #include <sys/param.h> 69 /* 70 * Only use the BUS stuff for PCI under FreeBSD 4 and later versions. 71 * Note that the old BUS stuff also works for FreeBSD 4 and spares 72 * about 1.5KB for the driver objet file. 
73 */ 74 #if __FreeBSD_version >= 400000 75 #define FreeBSD_4_Bus 76 #endif 77 78 #include <sys/systm.h> 79 #include <sys/malloc.h> 80 #include <sys/kernel.h> 81 #ifdef FreeBSD_4_Bus 82 #include <sys/module.h> 83 #include <sys/bus.h> 84 #endif 85 86 #include <sys/buf.h> 87 #include <sys/proc.h> 88 89 #include <pci/pcireg.h> 90 #include <pci/pcivar.h> 91 92 #include <machine/bus_memio.h> 93 #include <machine/bus_pio.h> 94 #include <machine/bus.h> 95 #ifdef FreeBSD_4_Bus 96 #include <machine/resource.h> 97 #include <sys/rman.h> 98 #endif 99 #include <machine/clock.h> 100 101 #include <cam/cam.h> 102 #include <cam/cam_ccb.h> 103 #include <cam/cam_sim.h> 104 #include <cam/cam_xpt_sim.h> 105 #include <cam/cam_debug.h> 106 107 #include <cam/scsi/scsi_all.h> 108 #include <cam/scsi/scsi_message.h> 109 110 #include <vm/vm.h> 111 #include <vm/vm_param.h> 112 #include <vm/pmap.h> 113 114 #if 0 115 #include <sys/kernel.h> 116 #include <sys/sysctl.h> 117 #include <vm/vm_extern.h> 118 #endif 119 120 /* Short and quite clear integer types */ 121 typedef int8_t s8; 122 typedef int16_t s16; 123 typedef int32_t s32; 124 typedef u_int8_t u8; 125 typedef u_int16_t u16; 126 typedef u_int32_t u32; 127 128 /* Driver configuration and definitions */ 129 #if 1 130 #include "opt_sym.h" 131 #include <dev/sym/sym_conf.h> 132 #include <dev/sym/sym_defs.h> 133 #else 134 #include "ncr.h" /* To know if the ncr has been configured */ 135 #include <pci/sym_conf.h> 136 #include <pci/sym_defs.h> 137 #endif 138 139 /* 140 * On x86 architecture, write buffers management does not 141 * reorder writes to memory. So, preventing compiler from 142 * optimizing the code is enough to guarantee some ordering 143 * when the CPU is writing data accessed by the PCI chip. 144 * On Alpha architecture, explicit barriers are to be used. 145 * By the way, the *BSD semantic associates the barrier 146 * with some window on the BUS and the corresponding verbs 147 * are for now unused. What a strangeness. 
The driver must
 * ensure that accesses from the CPU to the start and done
 * queues are not reordered by either the compiler or the
 * CPU and uses 'volatile' for this purpose.
 */

#ifdef __alpha__
#define MEMORY_BARRIER()	alpha_mb()
#else /*__i386__*/
/* x86 write buffers do not reorder stores; an empty statement suffices. */
#define MEMORY_BARRIER()	do { ; } while(0)
#endif

/*
 *  A la VMS/CAM-3 queue management.
 *
 *  Doubly-linked circular lists: an empty queue is a head whose
 *  'flink' and 'blink' both point back at the head itself.
 */

typedef struct sym_quehead {
	struct sym_quehead *flink;	/* Forward  pointer */
	struct sym_quehead *blink;	/* Backward pointer */
} SYM_QUEHEAD;

/* Make 'ptr' an empty queue head (both links self-referencing). */
#define sym_que_init(ptr) do { \
	(ptr)->flink = (ptr); (ptr)->blink = (ptr); \
} while (0)

/* Return the first element of the queue, or 0 if the queue is empty. */
static __inline struct sym_quehead *sym_que_first(struct sym_quehead *head)
{
	return (head->flink == head) ? 0 : head->flink;
}

/* Return the last element of the queue, or 0 if the queue is empty. */
static __inline struct sym_quehead *sym_que_last(struct sym_quehead *head)
{
	return (head->blink == head) ? 0 : head->blink;
}

/* Link 'new' between the two adjacent elements 'blink' and 'flink'. */
static __inline void __sym_que_add(struct sym_quehead * new,
	struct sym_quehead * blink,
	struct sym_quehead * flink)
{
	flink->blink	= new;
	new->flink	= flink;
	new->blink	= blink;
	blink->flink	= new;
}

/* Unlink the element located between 'blink' and 'flink'. */
static __inline void __sym_que_del(struct sym_quehead * blink,
	struct sym_quehead * flink)
{
	flink->blink = blink;
	blink->flink = flink;
}

/* Tell whether the queue is empty (head points back at itself). */
static __inline int sym_que_empty(struct sym_quehead *head)
{
	return head->flink == head;
}

/*
 *  Insert all elements of 'list' at the front of 'head'.
 *  NOTE: 'list' itself is NOT reinitialized by this operation;
 *  the caller must not use it as a queue afterwards without
 *  calling sym_que_init() on it first.
 */
static __inline void sym_que_splice(struct sym_quehead *list,
	struct sym_quehead *head)
{
	struct sym_quehead *first = list->flink;

	if (first != list) {
		struct sym_quehead *last = list->blink;
		struct sym_quehead *at   = head->flink;

		first->blink = head;
		head->flink  = first;

		last->flink = at;
		at->blink   = last;
	}
}

/* Get the address of the structure that embeds the queue member 'member'. */
#define sym_que_entry(ptr, type, member) \
	((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))

/* Insert 'new' right after 'pos'. */
#define sym_insque(new, pos)		__sym_que_add(new, pos, (pos)->flink)

/* Remove 'el' from whatever queue it is linked into. */
#define sym_remque(el)			__sym_que_del((el)->blink, (el)->flink)

/* Insert 'new' at the front of 'head'. */
#define sym_insque_head(new, head)	__sym_que_add(new, head, (head)->flink)

/* Unlink and return the first element of 'head', or 0 if empty. */
static __inline struct sym_quehead *sym_remque_head(struct sym_quehead *head)
{
	struct sym_quehead *elem = head->flink;

	if (elem != head)
		__sym_que_del(head, elem->flink);
	else
		elem = 0;
	return elem;
}

/* Insert 'new' at the tail of 'head'. */
#define sym_insque_tail(new, head)	__sym_que_add(new, (head)->blink, head)

/* Unlink and return the last element of 'head', or 0 if empty. */
static __inline struct sym_quehead *sym_remque_tail(struct sym_quehead *head)
{
	struct sym_quehead *elem = head->blink;

	if (elem != head)
		__sym_que_del(elem->blink, head);
	else
		elem = 0;
	return elem;
}

/*
 *  This one may be useful.
 */
#define FOR_EACH_QUEUED_ELEMENT(head, qp) \
	for (qp = (head)->flink; qp != (head); qp = qp->flink)
/*
 *  FreeBSD does not offer our kind of queue in the CAM CCB.
 *  So, we have to cast.
 */
#define sym_qptr(p)	((struct sym_quehead *) (p))

/*
 *  Simple bitmap operations on an array of u32 words.
 */
#define sym_set_bit(p, n)	(((u32 *)(p))[(n)>>5] |=  (1<<((n)&0x1f)))
#define sym_clr_bit(p, n)	(((u32 *)(p))[(n)>>5] &= ~(1<<((n)&0x1f)))
#define sym_is_bit(p, n)	(((u32 *)(p))[(n)>>5] &   (1<<((n)&0x1f)))

/*
 *  Number of tasks per device we want to handle.
 */
#if	SYM_CONF_MAX_TAG_ORDER > 8
#error	"more than 256 tags per logical unit not allowed."
#endif
#define	SYM_CONF_MAX_TASK	(1<<SYM_CONF_MAX_TAG_ORDER)

/*
 *  Do not use more tags than tasks we can handle.
 */
#ifndef	SYM_CONF_MAX_TAG
#define	SYM_CONF_MAX_TAG	SYM_CONF_MAX_TASK
#endif
#if	SYM_CONF_MAX_TAG > SYM_CONF_MAX_TASK
#undef	SYM_CONF_MAX_TAG
#define	SYM_CONF_MAX_TAG	SYM_CONF_MAX_TASK
#endif

/*
 *  This one means 'NO TAG for this job'
 *  (out of the 0..255 valid tag range).
 */
#define NO_TAG	(256)

/*
 *  Number of SCSI targets.
 */
#if	SYM_CONF_MAX_TARGET > 16
#error	"more than 16 targets not allowed."
#endif

/*
 *  Number of logical units per target.
 */
#if	SYM_CONF_MAX_LUN > 64
#error	"more than 64 logical units per target not allowed."
#endif

/*
 *  Asynchronous pre-scaler (ns). Shall be 40 for
 *  the SCSI timings to be compliant.
 */
#define	SYM_CONF_MIN_ASYNC (40)

/*
 *  Number of entries in the START and DONE queues.
 *
 *  We limit to 1 PAGE in order to succeed allocation of
 *  these queues. Each entry is 8 bytes long (2 DWORDS).
 */
#ifdef	SYM_CONF_MAX_START
#define	SYM_CONF_MAX_QUEUE (SYM_CONF_MAX_START+2)
#else
#define	SYM_CONF_MAX_QUEUE (7*SYM_CONF_MAX_TASK+2)
#define	SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
#endif

#if	SYM_CONF_MAX_QUEUE > PAGE_SIZE/8
#undef	SYM_CONF_MAX_QUEUE
#define	SYM_CONF_MAX_QUEUE PAGE_SIZE/8
#undef	SYM_CONF_MAX_START
#define	SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
#endif

/*
 *  For this one, we want a short name :-)
 */
#define MAX_QUEUE	SYM_CONF_MAX_QUEUE

/*
 *  This one should have been already defined.
 */
#ifndef offsetof
#define offsetof(t, m)	((size_t) (&((t *)0)->m))
#endif

/*
 *  Active debugging tags and verbosity.
 */
#define DEBUG_ALLOC	(0x0001)
#define DEBUG_PHASE	(0x0002)
#define DEBUG_POLL	(0x0004)
#define DEBUG_QUEUE	(0x0008)
#define DEBUG_RESULT	(0x0010)
#define DEBUG_SCATTER	(0x0020)
#define DEBUG_SCRIPT	(0x0040)
#define DEBUG_TINY	(0x0080)
#define DEBUG_TIMING	(0x0100)
#define DEBUG_NEGO	(0x0200)
#define DEBUG_TAGS	(0x0400)
#define DEBUG_POINTER	(0x0800)

#if 0
static int sym_debug = 0;
#define DEBUG_FLAGS sym_debug
#else
/* #define DEBUG_FLAGS (0x0631) */
#define DEBUG_FLAGS	(0x0000)
#endif
/* NOTE: relies on a local 'np' (hcb_p) being in scope at each use site. */
#define sym_verbose	(np->verbose)

/*
 *  Virtual to bus address translation.
 */
#ifdef	__alpha__
#define	vtobus(p)	alpha_XXX_dmamap((vm_offset_t)(p))
#else /*__i386__*/
#define vtobus(p)	vtophys(p)
#endif

/*
 *  Copy from main memory to PCI memory space.
 */
#ifdef	__alpha__
#define memcpy_to_pci(d, s, n)	memcpy_toio((u32)(d), (void *)(s), (n))
#else /*__i386__*/
#define memcpy_to_pci(d, s, n)	bcopy((s), (void *)(d), (n))
#endif

/*
 *  Insert a busy-wait delay in micro-seconds and milli-seconds.
 */
static void UDELAY(long us) { DELAY(us); }
static void MDELAY(long ms) { while (ms--) UDELAY(1000); }

/*
 *  Memory allocation/allocator.
 *  We assume allocations are naturally aligned and if it is
 *  not guaranteed, we may use our internal allocator.
 */
#ifdef	SYM_CONF_USE_INTERNAL_ALLOCATOR
/*
 *  Simple power of two buddy-like allocator.
 *
 *  This simple code is not intended to be fast, but to
 *  provide power of 2 aligned memory allocations.
 *  Since the SCRIPTS processor only supplies 8 bit arithmetic,
 *  this allocator allows simple and fast address calculations
 *  from the SCRIPTS code. In addition, cache line alignment
 *  is guaranteed for power of 2 cache line size.
 *
 *  This allocator has been developed for the Linux sym53c8xx
 *  driver, since this O/S does not provide naturally aligned
 *  allocations.
 *  It has the virtue to allow the driver to use private pages
 *  of memory that will be useful if we ever need to deal with
 *  IO MMU for PCI.
 */

#define MEMO_SHIFT	4	/* 16 bytes minimum memory chunk */
#define MEMO_PAGE_ORDER	0	/* 1 PAGE maximum (for now (ever?) */
typedef unsigned long addr;	/* Enough bits to bit-hack addresses */

#if 0
#define MEMO_FREE_UNUSED	/* Free unused pages immediately */
#endif

/* Free chunks are chained through their own first bytes. */
struct m_link {
	struct m_link *next;	/* Simple links are enough */
};

#ifndef M_DMA_32BIT
#define M_DMA_32BIT	0	/* Will this flag ever exist */
#endif

#define get_pages() \
	malloc(PAGE_SIZE<<MEMO_PAGE_ORDER, M_DEVBUF, M_NOWAIT)
#define free_pages(p) \
	free((p), M_DEVBUF)

/*
 *  Lists of available memory chunks.
 *  Starts with 16 bytes chunks until 1 PAGE chunks.
 *  h[i] holds free chunks of size (1 << (MEMO_SHIFT + i)).
 */
static struct m_link h[PAGE_SHIFT-MEMO_SHIFT+MEMO_PAGE_ORDER+1];

/*
 *  Allocate a memory area aligned on the lowest power of 2
 *  greater than the requested size.
 *  Returns 0 on failure or if 'size' exceeds one page.
 *  NOTE: the global free lists 'h[]' are not lock protected;
 *  callers are assumed to serialize (splcam or equivalent).
 */
static void *__sym_malloc(int size)
{
	int i = 0;
	int s = (1 << MEMO_SHIFT);
	int j;
	addr a ;

	if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
		return 0;

	/* Round the request up to the next power of 2 >= MEMO_SHIFT. */
	while (size > s) {
		s <<= 1;
		++i;
	}

	/* Find the smallest free list that can satisfy the request,
	 * pulling in a fresh page if everything up to page size is empty. */
	j = i;
	while (!h[j].next) {
		if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
			h[j].next = (struct m_link *)get_pages();
			if (h[j].next)
				h[j].next->next = 0;
			break;
		}
		++j;
		s <<= 1;
	}
	/* Take a chunk from list 'j' and split it down to size 'i',
	 * returning the upper buddy halves to the intermediate lists. */
	a = (addr) h[j].next;
	if (a) {
		h[j].next = h[j].next->next;
		while (j > i) {
			j -= 1;
			s >>= 1;
			h[j].next = (struct m_link *) (a+s);
			h[j].next->next = 0;
		}
	}
#ifdef DEBUG
	printf("__sym_malloc(%d) = %p\n", size, (void *) a);
#endif
	return (void *) a;
}

/*
 *  Free a memory area allocated using sym_malloc().
 *  Coalesce buddies.
 *  Free pages that become unused if MEMO_FREE_UNUSED is
 *  defined.
 *  'size' must match the size passed to the allocation.
 */
static void __sym_mfree(void *ptr, int size)
{
	int i = 0;
	int s = (1 << MEMO_SHIFT);
	struct m_link *q;
	addr a, b;

#ifdef DEBUG
	printf("sym_mfree(%p, %d)\n", ptr, size);
#endif

	if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
		return;

	/* Recover the power-of-2 list index the chunk belongs to. */
	while (size > s) {
		s <<= 1;
		++i;
	}

	a = (addr) ptr;

	while (1) {
#ifdef MEMO_FREE_UNUSED
		/* A full page has been reassembled: return it to the system. */
		if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
			free_pages(a);
			break;
		}
#endif
		/* The buddy of a chunk at 'a' of size 's' lives at 'a ^ s'.
		 * Scan the free list for it. */
		b = a ^ s;
		q = &h[i];
		while (q->next && q->next != (struct m_link *) b) {
			q = q->next;
		}
		if (!q->next) {
			/* Buddy is busy: just chain the chunk and stop. */
			((struct m_link *) a)->next = h[i].next;
			h[i].next = (struct m_link *) a;
			break;
		}
		/* Buddy is free: unlink it, merge (lower address wins)
		 * and retry one size up. */
		q->next = q->next->next;
		a = a & b;
		s <<= 1;
		++i;
	}
}

#else /* !defined SYM_CONF_USE_INTERNAL_ALLOCATOR */

/*
 *  Using directly the system memory allocator.
 */

#define __sym_mfree(ptr, size)		free((ptr), M_DEVBUF)
#define __sym_malloc(size)		malloc((size), M_DEVBUF, M_NOWAIT)

#endif /* SYM_CONF_USE_INTERNAL_ALLOCATOR */

#define MEMO_WARN	1

/*
 *  Allocate and zero-fill 'size' bytes.
 *  'name' is only used for debug/warning printouts.
 *  If MEMO_WARN is set in 'uflags', complain on allocation failure.
 *  Returns 0 on failure.
 */
static void *sym_calloc2(int size, char *name, int uflags)
{
	void *p;

	p = __sym_malloc(size);

	if (DEBUG_FLAGS & DEBUG_ALLOC)
		printf ("new %-10s[%4d] @%p.\n", name, size, p);

	if (p)
		bzero(p, size);
	else if (uflags & MEMO_WARN)
		printf ("sym_calloc: failed to allocate %s[%d]\n", name, size);

	return p;
}

#define sym_calloc(s, n)	sym_calloc2(s, n, MEMO_WARN)

/*
 *  Counterpart of sym_calloc(); 'size' must match the allocation.
 */
static void sym_mfree(void *ptr, int size, char *name)
{
	if (DEBUG_FLAGS & DEBUG_ALLOC)
		printf ("freeing %-10s[%4d] @%p.\n", name, size, ptr);

	__sym_mfree(ptr, size);
}

/*
 *  Print a buffer in hexadecimal format.
 */
static void sym_printb_hex (u_char *p, int n)
{
	while (n-- > 0)
		printf (" %x", *p++);
}

/*
 *  Same with a label at beginning and .\n at end.
 */
static void sym_printl_hex (char *label, u_char *p, int n)
{
	printf ("%s", label);
	sym_printb_hex (p, n);
	printf (".\n");
}

/*
 *  Return a string for SCSI BUS mode.
 */
static char *sym_scsi_bus_mode(int mode)
{
	switch(mode) {
	case SMODE_HVD:	return "HVD";
	case SMODE_SE:	return "SE";
	case SMODE_LVD:	return "LVD";
	}
	return "??";
}

/*
 *  Some poor sync table that refers to Tekram NVRAM layout.
 */
#ifdef SYM_CONF_NVRAM_SUPPORT
static u_char Tekram_sync[16] =
	{25,31,37,43, 50,62,75,125, 12,15,18,21, 6,7,9,10};
#endif

/*
 *  Union of supported NVRAM formats.
628 */ 629 struct sym_nvram { 630 int type; 631 #define SYM_SYMBIOS_NVRAM (1) 632 #define SYM_TEKRAM_NVRAM (2) 633 #ifdef SYM_CONF_NVRAM_SUPPORT 634 union { 635 Symbios_nvram Symbios; 636 Tekram_nvram Tekram; 637 } data; 638 #endif 639 }; 640 641 /* 642 * This one is hopefully useless, but actually useful. :-) 643 */ 644 #ifndef assert 645 #define assert(expression) { \ 646 if (!(expression)) { \ 647 (void)panic( \ 648 "assertion \"%s\" failed: file \"%s\", line %d\n", \ 649 #expression, \ 650 __FILE__, __LINE__); \ 651 } \ 652 } 653 #endif 654 655 /* 656 * Some provision for a possible big endian support. 657 * By the way some Symbios chips also may support some kind 658 * of big endian byte ordering. 659 * For now, this stuff does not deserve any comments. :) 660 */ 661 662 #define sym_offb(o) (o) 663 #define sym_offw(o) (o) 664 665 #define cpu_to_scr(dw) (dw) 666 #define scr_to_cpu(dw) (dw) 667 668 /* 669 * Access to the controller chip. 670 * 671 * If SYM_CONF_IOMAPPED is defined, the driver will use 672 * normal IOs instead of the MEMORY MAPPED IO method 673 * recommended by PCI specifications. 674 */ 675 676 /* 677 * Define some understable verbs so we will not suffer of 678 * having to deal with the stupid PC tokens for IO. 
 */
#define io_read8(p)	 scr_to_cpu(inb((p)))
#define io_read16(p)	 scr_to_cpu(inw((p)))
#define io_read32(p)	 scr_to_cpu(inl((p)))
#define io_write8(p, v)	 outb((p), cpu_to_scr(v))
#define io_write16(p, v) outw((p), cpu_to_scr(v))
#define io_write32(p, v) outl((p), cpu_to_scr(v))

#ifdef	__alpha__

#define mmio_read8(a)		readb(a)
#define mmio_read16(a)		readw(a)
#define mmio_read32(a)		readl(a)
#define mmio_write8(a, b)	writeb(a, b)
#define mmio_write16(a, b)	writew(a, b)
#define mmio_write32(a, b)	writel(a, b)

#else /*__i386__*/

/* Plain volatile accesses are enough on x86. */
#define mmio_read8(a)		scr_to_cpu((*(volatile unsigned char *) (a)))
#define mmio_read16(a)		scr_to_cpu((*(volatile unsigned short *) (a)))
#define mmio_read32(a)		scr_to_cpu((*(volatile unsigned int *) (a)))
#define mmio_write8(a, b)	(*(volatile unsigned char *) (a)) = cpu_to_scr(b)
#define mmio_write16(a, b)	(*(volatile unsigned short *) (a)) = cpu_to_scr(b)
#define mmio_write32(a, b)	(*(volatile unsigned int *) (a)) = cpu_to_scr(b)

#endif

/*
 *  Normal IO
 *  (these macros rely on a local 'np' (hcb_p) being in scope).
 */
#if defined(SYM_CONF_IOMAPPED)

#define	INB_OFF(o)	io_read8(np->io_port + sym_offb(o))
#define	OUTB_OFF(o, v)	io_write8(np->io_port + sym_offb(o), (v))

#define	INW_OFF(o)	io_read16(np->io_port + sym_offw(o))
#define	OUTW_OFF(o, v)	io_write16(np->io_port + sym_offw(o), (v))

#define	INL_OFF(o)	io_read32(np->io_port + (o))
#define	OUTL_OFF(o, v)	io_write32(np->io_port + (o), (v))

#else	/* Memory mapped IO */

#define	INB_OFF(o)	mmio_read8(np->mmio_va + sym_offb(o))
#define	OUTB_OFF(o, v)	mmio_write8(np->mmio_va + sym_offb(o), (v))

#define	INW_OFF(o)	mmio_read16(np->mmio_va + sym_offw(o))
#define	OUTW_OFF(o, v)	mmio_write16(np->mmio_va + sym_offw(o), (v))

#define	INL_OFF(o)	mmio_read32(np->mmio_va + (o))
#define	OUTL_OFF(o, v)	mmio_write32(np->mmio_va + (o), (v))

#endif

/*
 *
Common to both normal IO and MMIO.
 */
#define INB(r)		INB_OFF(offsetof(struct sym_reg,r))
#define INW(r)		INW_OFF(offsetof(struct sym_reg,r))
#define INL(r)		INL_OFF(offsetof(struct sym_reg,r))

#define OUTB(r, v)	OUTB_OFF(offsetof(struct sym_reg,r), (v))
#define OUTW(r, v)	OUTW_OFF(offsetof(struct sym_reg,r), (v))
#define OUTL(r, v)	OUTL_OFF(offsetof(struct sym_reg,r), (v))

/* Read-modify-write helpers to set/clear bits of a chip register. */
#define OUTONB(r, m)	OUTB(r, INB(r) | (m))
#define OUTOFFB(r, m)	OUTB(r, INB(r) & ~(m))
#define OUTONW(r, m)	OUTW(r, INW(r) | (m))
#define OUTOFFW(r, m)	OUTW(r, INW(r) & ~(m))
#define OUTONL(r, m)	OUTL(r, INL(r) | (m))
#define OUTOFFL(r, m)	OUTL(r, INL(r) & ~(m))

/*
 *  Command control block states.
 */
#define HS_IDLE		(0)
#define HS_BUSY		(1)
#define HS_NEGOTIATE	(2)	/* sync/wide data transfer*/
#define HS_DISCONNECT	(3)	/* Disconnected by target */

#define HS_DONEMASK	(0x80)
#define HS_COMPLETE	(4|HS_DONEMASK)
#define HS_SEL_TIMEOUT	(5|HS_DONEMASK)	/* Selection timeout */
#define HS_UNEXPECTED	(6|HS_DONEMASK)	/* Unexpected disconnect */
#define HS_COMP_ERR	(7|HS_DONEMASK)	/* Completed with error */

/*
 *  Software Interrupt Codes
 *  (raised by SCRIPTS to get the CPU involved).
 */
#define	SIR_BAD_SCSI_STATUS	(1)
#define	SIR_SEL_ATN_NO_MSG_OUT	(2)
#define	SIR_MSG_RECEIVED	(3)
#define	SIR_MSG_WEIRD		(4)
#define	SIR_NEGO_FAILED		(5)
#define	SIR_NEGO_PROTO		(6)
#define	SIR_SCRIPT_STOPPED	(7)
#define	SIR_REJECT_TO_SEND	(8)
#define	SIR_SWIDE_OVERRUN	(9)
#define	SIR_SODL_UNDERRUN	(10)
#define	SIR_RESEL_NO_MSG_IN	(11)
#define	SIR_RESEL_NO_IDENTIFY	(12)
#define	SIR_RESEL_BAD_LUN	(13)
#define	SIR_TARGET_SELECTED	(14)
#define	SIR_RESEL_BAD_I_T_L	(15)
#define	SIR_RESEL_BAD_I_T_L_Q	(16)
#define	SIR_ABORT_SENT		(17)
#define	SIR_RESEL_ABORTED	(18)
#define	SIR_MSG_OUT_DONE	(19)
#define	SIR_COMPLETE_ERROR	(20)
#ifdef	SYM_DEBUG_PM_WITH_WSR
#define	SIR_PM_WITH_WSR		(21)
#define	SIR_MAX			(21)
#else
#define	SIR_MAX			(20)
#endif

/*
 *  Extended error bit codes.
 *  xerr_status field of struct sym_ccb.
 */
#define	XE_EXTRA_DATA	(1)	/* unexpected data phase	 */
#define	XE_BAD_PHASE	(1<<1)	/* illegal phase (4/5)		 */
#define	XE_PARITY_ERR	(1<<2)	/* unrecovered SCSI parity error */
#define	XE_SODL_UNRUN	(1<<3)	/* ODD transfer in DATA OUT phase */
#define	XE_SWIDE_OVRUN	(1<<4)	/* ODD transfer in DATA IN phase */

/*
 *  Negotiation status.
 *  nego_status field of struct sym_ccb.
 */
#define NS_SYNC		(1)
#define NS_WIDE		(2)
#define NS_PPR		(3)

/*
 *  A CCB hashed table is used to retrieve CCB address
 *  from DSA value.
 */
#define CCB_HASH_SHIFT		8
#define CCB_HASH_SIZE		(1UL << CCB_HASH_SHIFT)
#define CCB_HASH_MASK		(CCB_HASH_SIZE-1)
#define CCB_HASH_CODE(dsa)	(((dsa) >> 9) & CCB_HASH_MASK)

/*
 *  Device flags.
 */
#define SYM_DISC_ENABLED	(1)
#define SYM_TAGS_ENABLED	(1<<1)
#define SYM_SCAN_BOOT_DISABLED	(1<<2)
#define SYM_SCAN_LUNS_DISABLED	(1<<3)

/*
 *  Host adapter miscellaneous flags.
 */
#define SYM_AVOID_BUS_RESET	(1)
#define SYM_SCAN_TARGETS_HILO	(1<<1)

/*
 *  Device quirks.
 *  Some devices, for example the CHEETAH 2 LVD, disconnects without
 *  saving the DATA POINTER then reconnect and terminates the IO.
 *  On reselection, the automatic RESTORE DATA POINTER makes the
 *  CURRENT DATA POINTER not point at the end of the IO.
 *  This behaviour just breaks our calculation of the residual.
 *  For now, we just force an AUTO SAVE on disconnection and will
 *  fix that in a further driver version.
 */
#define SYM_QUIRK_AUTOSAVE 1

/*
 *  Misc.
 */
#define SYM_SNOOP_TIMEOUT (10000000)
#define SYM_PCI_IO	PCIR_MAPS
#define SYM_PCI_MMIO	(PCIR_MAPS + 4)
#define SYM_PCI_RAM	(PCIR_MAPS + 8)
#define SYM_PCI_RAM64	(PCIR_MAPS + 12)

/*
 *  Back-pointer from the CAM CCB to our data structures.
 */
#define sym_hcb_ptr	spriv_ptr0
/* #define sym_ccb_ptr	spriv_ptr1 */

/*
 *  We mostly have to deal with pointers.
 *  Thus these typedef's.
 */
typedef struct sym_tcb *tcb_p;
typedef struct sym_lcb *lcb_p;
typedef struct sym_ccb *ccb_p;
typedef struct sym_hcb *hcb_p;
typedef struct sym_scr *script_p;
typedef struct sym_scrh *scripth_p;

/*
 *  Gather negotiable parameters value
 */
struct sym_trans {
	u8 period;	/* Transfer period factor */
	u8 offset;	/* Synchronous offset */
	u8 width;	/* Bus width (BUS_8_BIT/BUS_16_BIT) */
	u8 options;	/* PPR options */
};

/* Current, goal and user-requested transfer settings. */
struct sym_tinfo {
	struct sym_trans current;
	struct sym_trans goal;
	struct sym_trans user;
};

#define BUS_8_BIT	MSG_EXT_WDTR_BUS_8_BIT
#define BUS_16_BIT	MSG_EXT_WDTR_BUS_16_BIT

/*
 *  Target Control Block
 */
struct sym_tcb {
	/*
	 *  LUN table used by the SCRIPTS processor.
	 *  An array of bus addresses is used on reselection.
	 *  LUN #0 is a special case, since multi-lun devices are rare,
	 *  and we want to speed-up the general case and not waste
	 *  resources.
	 */
	u32	*luntbl;	/* LCBs bus address table */
	u32	luntbl_sa;	/* bus address of this table */
	u32	lun0_sa;	/* bus address of LCB #0 */

	/*
	 *  LUN table used by the C code.
	 */
	lcb_p	lun0p;		/* LCB of LUN #0 (usual case) */
#if SYM_CONF_MAX_LUN > 1
	lcb_p	*lunmp;		/* Other LCBs [1..MAX_LUN] */
#endif

	/*
	 *  Bitmap that tells about LUNs that succeeded at least
	 *  1 IO and therefore assumed to be a real device.
	 *  Avoid useless allocation of the LCB structure.
	 */
	u32	lun_map[(SYM_CONF_MAX_LUN+31)/32];

	/*
	 *  Bitmap that tells about LUNs that haven't yet an LCB
	 *  allocated (not discovered or LCB allocation failed).
	 */
	u32	busy0_map[(SYM_CONF_MAX_LUN+31)/32];

	/*
	 *  Actual SYNC/WIDE IO registers value for this target.
	 *  'sval', 'wval' and 'uval' are read from SCRIPTS and
	 *  so have alignment constraints (offsets are load-bearing).
	 */
/*0*/	u_char	uval;		/* -> SCNTL4 register */
/*1*/	u_char	sval;		/* -> SXFER  io register */
/*2*/	u_char	filler1;
/*3*/	u_char	wval;		/* -> SCNTL3 io register */

	/*
	 *  Transfer capabilities (SIP)
	 */
	struct sym_tinfo tinfo;

	/*
	 *  Keep track of the CCB used for the negotiation in order
	 *  to ensure that only 1 negotiation is queued at a time.
	 */
	ccb_p   nego_cp;	/* CCB used for the nego */

	/*
	 *  Set when we want to reset the device.
	 */
	u_char	to_reset;

	/*
	 *  Other user settable limits and options.
	 *  These limits are read from the NVRAM if present.
	 */
	u_char	usrflags;
	u_short	usrtags;
};

/*
 *  Logical Unit Control Block
 */
struct sym_lcb {
	/*
	 *  SCRIPTS address jumped by SCRIPTS on reselection.
	 *  For not probed logical units, this address points to
	 *  SCRIPTS that deal with bad LU handling (must be at
	 *  offset zero for that reason).
	 */
/*0*/	u32	resel_sa;

	/*
	 *  Task (bus address of a CCB) read from SCRIPTS that points
	 *  to the unique ITL nexus allowed to be disconnected.
	 */
	u32	itl_task_sa;

	/*
	 *  Task table read from SCRIPTS that contains pointers to
	 *  ITLQ nexuses (bus addresses read from SCRIPTS).
	 */
	u32	*itlq_tbl;	/* Kernel virtual address */
	u32	itlq_tbl_sa;	/* Bus address used by SCRIPTS */

	/*
	 *  Busy CCBs management.
991 */ 992 u_short busy_itlq; /* Number of busy tagged CCBs */ 993 u_short busy_itl; /* Number of busy untagged CCBs */ 994 995 /* 996 * Circular tag allocation buffer. 997 */ 998 u_short ia_tag; /* Tag allocation index */ 999 u_short if_tag; /* Tag release index */ 1000 u_char *cb_tags; /* Circular tags buffer */ 1001 1002 /* 1003 * Set when we want to clear all tasks. 1004 */ 1005 u_char to_clear; 1006 1007 /* 1008 * Capabilities. 1009 */ 1010 u_char user_flags; 1011 u_char current_flags; 1012 }; 1013 1014 /* 1015 * Action from SCRIPTS on a task. 1016 * Is part of the CCB, but is also used separately to plug 1017 * error handling action to perform from SCRIPTS. 1018 */ 1019 struct sym_actscr { 1020 u32 start; /* Jumped by SCRIPTS after selection */ 1021 u32 restart; /* Jumped by SCRIPTS on relection */ 1022 }; 1023 1024 /* 1025 * Phase mismatch context. 1026 * 1027 * It is part of the CCB and is used as parameters for the 1028 * DATA pointer. We need two contexts to handle correctly the 1029 * SAVED DATA POINTER. 1030 */ 1031 struct sym_pmc { 1032 struct sym_tblmove sg; /* Updated interrupted SG block */ 1033 u32 ret; /* SCRIPT return address */ 1034 }; 1035 1036 /* 1037 * LUN control block lookup. 1038 * We use a direct pointer for LUN #0, and a table of 1039 * pointers which is only allocated for devices that support 1040 * LUN(s) > 0. 1041 */ 1042 #if SYM_CONF_MAX_LUN <= 1 1043 #define sym_lp(np, tp, lun) (!lun) ? (tp)->lun0p : 0 1044 #else 1045 #define sym_lp(np, tp, lun) \ 1046 (!lun) ? (tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[(lun)] : 0 1047 #endif 1048 1049 /* 1050 * Status are used by the host and the script processor. 1051 * 1052 * The last four bytes (status[4]) are copied to the 1053 * scratchb register (declared as scr0..scr3) just after the 1054 * select/reselect, and copied back just after disconnecting. 1055 * Inside the script the XX_REG are used. 
 *
 *  The first four bytes (scr_st[4]) are used inside the
 *  script by "LOAD/STORE" commands.
 *  Because source and destination must have the same alignment
 *  in a DWORD, the fields HAVE to be at the chosen offsets.
 *  	xerr_st		0	(0x34)	scratcha
 *  	nego_st		2
 */

/*
 *  Last four bytes (script)
 */
#define  QU_REG	scr0
#define  HS_REG	scr1
#define  HS_PRT	nc_scr1
#define  SS_REG	scr2
#define  SS_PRT	nc_scr2
#define  HF_REG	scr3
#define  HF_PRT	nc_scr3

/*
 *  Last four bytes (host)
 */
#define  actualquirks  phys.status[0]
#define  host_status   phys.status[1]
#define  ssss_status   phys.status[2]
#define  host_flags    phys.status[3]

/*
 *  Host flags
 */
#define HF_IN_PM0	1u
#define HF_IN_PM1	(1u<<1)
#define HF_ACT_PM	(1u<<2)
#define HF_DP_SAVED	(1u<<3)
#define HF_SENSE	(1u<<4)
#define HF_EXT_ERR	(1u<<5)
#ifdef SYM_CONF_IARB_SUPPORT
#define HF_HINT_IARB	(1u<<7)
#endif

/*
 *  First four bytes (script)
 */
#define  xerr_st       scr_st[0]
#define  nego_st       scr_st[2]

/*
 *  First four bytes (host)
 */
#define  xerr_status   phys.xerr_st
#define  nego_status   phys.nego_st

/*
 *  Data Structure Block
 *
 *  During execution of a ccb by the script processor, the
 *  DSA (data structure address) register points to this
 *  substructure of the ccb.
 */
struct dsb {
	/*
	 *  Start and restart SCRIPTS addresses (must be at 0).
	 */
/*0*/	struct sym_actscr go;

	/*
	 *  SCRIPTS jump address that deal with data pointers.
	 *  'savep' points to the position in the script responsible
	 *  for the actual transfer of data.
	 *  It's written on reception of a SAVE_DATA_POINTER message.
	 */
	u32	savep;		/* Jump address to saved data pointer */
	u32	lastp;		/* SCRIPTS address at end of data */
	u32	goalp;		/* Not used for now */

	/*
	 *  Status fields.
	 */
	u8	scr_st[4];	/* script status */
	u8	status[4];	/* host status */

	/*
	 *  Table data for Script
	 */
	struct sym_tblsel  select;
	struct sym_tblmove smsg;
	struct sym_tblmove smsg_ext;
	struct sym_tblmove cmd;
	struct sym_tblmove sense;
	struct sym_tblmove wresid;
	struct sym_tblmove data [SYM_CONF_MAX_SG];

	/*
	 *  Phase mismatch contexts.
	 *  We need two to handle correctly the SAVED DATA POINTER.
	 */
	struct sym_pmc pm0;
	struct sym_pmc pm1;

	/*
	 *  Extra bytes count transferred in case of data overrun.
	 */
	u32	extra_bytes;
};

/*
 *  Our Command Control Block
 */
struct sym_ccb {
	/*
	 *  This is the data structure which is pointed by the DSA
	 *  register when it is executed by the script processor.
	 *  It must be the first entry.
	 */
	struct dsb phys;

	/*
	 *  Pointer to CAM ccb and related stuff.
	 */
	union ccb *cam_ccb;	/* CAM scsiio ccb */
	int	data_len;	/* Total data length */
	int	segments;	/* Number of SG segments */

	/*
	 *  Message areas.
	 *  We prepare a message to be sent after selection.
	 *  We may use a second one if the command is rescheduled
	 *  due to CHECK_CONDITION or COMMAND TERMINATED.
	 *  Contents are IDENTIFY and SIMPLE_TAG.
	 *  While negotiating sync or wide transfer,
	 *  a SDTR or WDTR message is appended.
	 */
	u_char	scsi_smsg [12];
	u_char	scsi_smsg2[12];

	/*
	 *  Auto request sense related fields.
1194 */ 1195 u_char sensecmd[6]; /* Request Sense command */ 1196 u_char sv_scsi_status; /* Saved SCSI status */ 1197 u_char sv_xerr_status; /* Saved extended status */ 1198 int sv_resid; /* Saved residual */ 1199 1200 /* 1201 * Other fields. 1202 */ 1203 u_long ccb_ba; /* BUS address of this CCB */ 1204 u_short tag; /* Tag for this transfer */ 1205 /* NO_TAG means no tag */ 1206 u_char target; 1207 u_char lun; 1208 ccb_p link_ccbh; /* Host adapter CCB hash chain */ 1209 SYM_QUEHEAD 1210 link_ccbq; /* Link to free/busy CCB queue */ 1211 u32 startp; /* Initial data pointer */ 1212 int ext_sg; /* Extreme data pointer, used */ 1213 int ext_ofs; /* to calculate the residual. */ 1214 u_char to_abort; /* Want this IO to be aborted */ 1215 }; 1216 1217 #define CCB_PHYS(cp,lbl) (cp->ccb_ba + offsetof(struct sym_ccb, lbl)) 1218 1219 /* 1220 * Host Control Block 1221 */ 1222 struct sym_hcb { 1223 /* 1224 * Idle task and invalid task actions and 1225 * their bus addresses. 1226 */ 1227 struct sym_actscr idletask, notask, bad_itl, bad_itlq; 1228 vm_offset_t idletask_ba, notask_ba, bad_itl_ba, bad_itlq_ba; 1229 1230 /* 1231 * Dummy lun table to protect us against target 1232 * returning bad lun number on reselection. 1233 */ 1234 u32 *badluntbl; /* Table physical address */ 1235 u32 badlun_sa; /* SCRIPT handler BUS address */ 1236 1237 /* 1238 * Bit 32-63 of the on-chip RAM bus address in LE format. 1239 * The START_RAM64 script loads the MMRS and MMWS from this 1240 * field. 1241 */ 1242 u32 scr_ram_seg; 1243 1244 /* 1245 * Chip and controller indentification. 1246 */ 1247 #ifdef FreeBSD_4_Bus 1248 device_t device; 1249 #else 1250 pcici_t pci_tag; 1251 #endif 1252 int unit; 1253 char inst_name[8]; 1254 1255 /* 1256 * Initial value of some IO register bits. 1257 * These values are assumed to have been set by BIOS, and may 1258 * be used to probe adapter implementation differences. 
1259 */ 1260 u_char sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest3, sv_ctest4, 1261 sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4, sv_scntl4, 1262 sv_stest1; 1263 1264 /* 1265 * Actual initial value of IO register bits used by the 1266 * driver. They are loaded at initialisation according to 1267 * features that are to be enabled/disabled. 1268 */ 1269 u_char rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest3, rv_ctest4, 1270 rv_ctest5, rv_stest2, rv_ccntl0, rv_ccntl1, rv_scntl4; 1271 1272 /* 1273 * Target data used by the CPU. 1274 */ 1275 struct sym_tcb target[SYM_CONF_MAX_TARGET]; 1276 1277 /* 1278 * Target control block bus address array used by the SCRIPT 1279 * on reselection. 1280 */ 1281 u32 *targtbl; 1282 1283 /* 1284 * CAM SIM information for this instance. 1285 */ 1286 struct cam_sim *sim; 1287 struct cam_path *path; 1288 1289 /* 1290 * Allocated hardware resources. 1291 */ 1292 #ifdef FreeBSD_4_Bus 1293 struct resource *irq_res; 1294 struct resource *io_res; 1295 struct resource *mmio_res; 1296 struct resource *ram_res; 1297 int ram_id; 1298 void *intr; 1299 #endif 1300 1301 /* 1302 * Bus stuff. 1303 * 1304 * My understanding of PCI is that all agents must share the 1305 * same addressing range and model. 1306 * But some hardware architecture guys provide complex and 1307 * brain-deaded stuff that makes shit. 1308 * This driver only support PCI compliant implementations and 1309 * deals with part of the BUS stuff complexity only to fit O/S 1310 * requirements. 1311 */ 1312 #ifdef FreeBSD_4_Bus 1313 bus_space_handle_t io_bsh; 1314 bus_space_tag_t io_tag; 1315 bus_space_handle_t mmio_bsh; 1316 bus_space_tag_t mmio_tag; 1317 bus_space_handle_t ram_bsh; 1318 bus_space_tag_t ram_tag; 1319 #endif 1320 1321 /* 1322 * Virtual and physical bus addresses of the chip. 
1323 */ 1324 vm_offset_t mmio_va; /* MMIO kernel virtual address */ 1325 vm_offset_t mmio_pa; /* MMIO CPU physical address */ 1326 vm_offset_t mmio_ba; /* MMIO BUS address */ 1327 int mmio_ws; /* MMIO Window size */ 1328 1329 vm_offset_t ram_va; /* RAM kernel virtual address */ 1330 vm_offset_t ram_pa; /* RAM CPU physical address */ 1331 vm_offset_t ram_ba; /* RAM BUS address */ 1332 int ram_ws; /* RAM window size */ 1333 u32 io_port; /* IO port address */ 1334 1335 /* 1336 * SCRIPTS virtual and physical bus addresses. 1337 * 'script' is loaded in the on-chip RAM if present. 1338 * 'scripth' stays in main memory for all chips except the 1339 * 53C895A, 53C896 and 53C1010 that provide 8K on-chip RAM. 1340 */ 1341 struct sym_scr *script0; /* Copies of script and scripth */ 1342 struct sym_scrh *scripth0; /* relocated for this host. */ 1343 vm_offset_t script_ba; /* Actual script and scripth */ 1344 vm_offset_t scripth_ba; /* bus addresses. */ 1345 vm_offset_t scripth0_ba; 1346 1347 /* 1348 * General controller parameters and configuration. 1349 */ 1350 u_short device_id; /* PCI device id */ 1351 u_char revision_id; /* PCI device revision id */ 1352 u_int features; /* Chip features map */ 1353 u_char myaddr; /* SCSI id of the adapter */ 1354 u_char maxburst; /* log base 2 of dwords burst */ 1355 u_char maxwide; /* Maximum transfer width */ 1356 u_char minsync; /* Min sync period factor (ST) */ 1357 u_char maxsync; /* Max sync period factor (ST) */ 1358 u_char minsync_dt; /* Min sync period factor (DT) */ 1359 u_char maxsync_dt; /* Max sync period factor (DT) */ 1360 u_char maxoffs; /* Max scsi offset */ 1361 u_char multiplier; /* Clock multiplier (1,2,4) */ 1362 u_char clock_divn; /* Number of clock divisors */ 1363 u_long clock_khz; /* SCSI clock frequency in KHz */ 1364 1365 /* 1366 * Start queue management. 1367 * It is filled up by the host processor and accessed by the 1368 * SCRIPTS processor in order to start SCSI commands. 
1369 */ 1370 volatile /* Prevent code optimizations */ 1371 u32 *squeue; /* Start queue */ 1372 u_short squeueput; /* Next free slot of the queue */ 1373 u_short actccbs; /* Number of allocated CCBs */ 1374 1375 /* 1376 * Command completion queue. 1377 * It is the same size as the start queue to avoid overflow. 1378 */ 1379 u_short dqueueget; /* Next position to scan */ 1380 volatile /* Prevent code optimizations */ 1381 u32 *dqueue; /* Completion (done) queue */ 1382 1383 /* 1384 * Miscellaneous buffers accessed by the scripts-processor. 1385 * They shall be DWORD aligned, because they may be read or 1386 * written with a script command. 1387 */ 1388 u_char msgout[8]; /* Buffer for MESSAGE OUT */ 1389 u_char msgin [8]; /* Buffer for MESSAGE IN */ 1390 u32 lastmsg; /* Last SCSI message sent */ 1391 u_char scratch; /* Scratch for SCSI receive */ 1392 1393 /* 1394 * Miscellaneous configuration and status parameters. 1395 */ 1396 u_char usrflags; /* Miscellaneous user flags */ 1397 u_char scsi_mode; /* Current SCSI BUS mode */ 1398 u_char verbose; /* Verbosity for this controller*/ 1399 u32 cache; /* Used for cache test at init. */ 1400 1401 /* 1402 * CCB lists and queue. 1403 */ 1404 ccb_p ccbh[CCB_HASH_SIZE]; /* CCB hashed by DSA value */ 1405 SYM_QUEHEAD free_ccbq; /* Queue of available CCBs */ 1406 SYM_QUEHEAD busy_ccbq; /* Queue of busy CCBs */ 1407 1408 /* 1409 * During error handling and/or recovery, 1410 * active CCBs that are to be completed with 1411 * error or requeued are moved from the busy_ccbq 1412 * to the comp_ccbq prior to completion. 1413 */ 1414 SYM_QUEHEAD comp_ccbq; 1415 1416 /* 1417 * CAM CCB pending queue. 1418 */ 1419 SYM_QUEHEAD cam_ccbq; 1420 1421 /* 1422 * IMMEDIATE ARBITRATION (IARB) control. 1423 * 1424 * We keep track in 'last_cp' of the last CCB that has been 1425 * queued to the SCRIPTS processor and clear 'last_cp' when 1426 * this CCB completes. 
If last_cp is not zero at the moment 1427 * we queue a new CCB, we set a flag in 'last_cp' that is 1428 * used by the SCRIPTS as a hint for setting IARB. 1429 * We donnot set more than 'iarb_max' consecutive hints for 1430 * IARB in order to leave devices a chance to reselect. 1431 * By the way, any non zero value of 'iarb_max' is unfair. :) 1432 */ 1433 #ifdef SYM_CONF_IARB_SUPPORT 1434 u_short iarb_max; /* Max. # consecutive IARB hints*/ 1435 u_short iarb_count; /* Actual # of these hints */ 1436 ccb_p last_cp; 1437 #endif 1438 1439 /* 1440 * Command abort handling. 1441 * We need to synchronize tightly with the SCRIPTS 1442 * processor in order to handle things correctly. 1443 */ 1444 u_char abrt_msg[4]; /* Message to send buffer */ 1445 struct sym_tblmove abrt_tbl; /* Table for the MOV of it */ 1446 struct sym_tblsel abrt_sel; /* Sync params for selection */ 1447 u_char istat_sem; /* Tells the chip to stop (SEM) */ 1448 }; 1449 1450 #define SCRIPT_BA(np,lbl) (np->script_ba + offsetof(struct sym_scr, lbl)) 1451 #define SCRIPTH_BA(np,lbl) (np->scripth_ba + offsetof(struct sym_scrh,lbl)) 1452 #define SCRIPTH0_BA(np,lbl) (np->scripth0_ba + offsetof(struct sym_scrh,lbl)) 1453 1454 /* 1455 * Scripts for SYMBIOS-Processor 1456 * 1457 * Use sym_fill_scripts() to create the variable parts. 1458 * Use sym_bind_script() to make a copy and bind to 1459 * physical bus addresses. 1460 * We have to know the offsets of all labels before we reach 1461 * them (for forward jumps). Therefore we declare a struct 1462 * here. If you make changes inside the script, 1463 * 1464 * DONT FORGET TO CHANGE THE LENGTHS HERE! 1465 */ 1466 1467 /* 1468 * Script fragments which are loaded into the on-chip RAM 1469 * of 825A, 875, 876, 895, 895A, 896 and 1010 chips. 1470 * Must not exceed 4K bytes. 
1471 */ 1472 struct sym_scr { 1473 u32 start [ 14]; 1474 u32 getjob_begin [ 4]; 1475 u32 getjob_end [ 4]; 1476 u32 select [ 8]; 1477 u32 wf_sel_done [ 2]; 1478 u32 send_ident [ 2]; 1479 #ifdef SYM_CONF_IARB_SUPPORT 1480 u32 select2 [ 8]; 1481 #else 1482 u32 select2 [ 2]; 1483 #endif 1484 u32 command [ 2]; 1485 u32 dispatch [ 30]; 1486 u32 sel_no_cmd [ 10]; 1487 u32 init [ 6]; 1488 u32 clrack [ 4]; 1489 u32 disp_status [ 4]; 1490 u32 datai_done [ 26]; 1491 u32 datao_done [ 12]; 1492 u32 dataphase [ 2]; 1493 u32 msg_in [ 2]; 1494 u32 msg_in2 [ 10]; 1495 #ifdef SYM_CONF_IARB_SUPPORT 1496 u32 status [ 14]; 1497 #else 1498 u32 status [ 10]; 1499 #endif 1500 u32 complete [ 8]; 1501 u32 complete2 [ 12]; 1502 u32 complete_error [ 4]; 1503 u32 done [ 14]; 1504 u32 done_end [ 2]; 1505 u32 save_dp [ 8]; 1506 u32 restore_dp [ 4]; 1507 u32 disconnect [ 20]; 1508 #ifdef SYM_CONF_IARB_SUPPORT 1509 u32 idle [ 4]; 1510 #else 1511 u32 idle [ 2]; 1512 #endif 1513 #ifdef SYM_CONF_IARB_SUPPORT 1514 u32 ungetjob [ 6]; 1515 #else 1516 u32 ungetjob [ 4]; 1517 #endif 1518 u32 reselect [ 4]; 1519 u32 reselected [ 20]; 1520 u32 resel_scntl4 [ 28]; 1521 #if SYM_CONF_MAX_TASK*4 > 512 1522 u32 resel_tag [ 26]; 1523 #elif SYM_CONF_MAX_TASK*4 > 256 1524 u32 resel_tag [ 20]; 1525 #else 1526 u32 resel_tag [ 16]; 1527 #endif 1528 u32 resel_dsa [ 2]; 1529 u32 resel_dsa1 [ 6]; 1530 u32 resel_no_tag [ 6]; 1531 u32 data_in [SYM_CONF_MAX_SG * 2]; 1532 u32 data_in2 [ 4]; 1533 u32 data_out [SYM_CONF_MAX_SG * 2]; 1534 u32 data_out2 [ 4]; 1535 u32 pm0_data [ 16]; 1536 u32 pm1_data [ 16]; 1537 }; 1538 1539 /* 1540 * Script fragments which stay in main memory for all chips 1541 * except for chips that support 8K on-chip RAM. 
1542 */ 1543 struct sym_scrh { 1544 u32 start64 [ 2]; 1545 u32 no_data [ 2]; 1546 u32 sel_for_abort [ 18]; 1547 u32 sel_for_abort_1 [ 2]; 1548 u32 select_no_atn [ 8]; 1549 u32 wf_sel_done_no_atn [ 4]; 1550 1551 u32 msg_in_etc [ 14]; 1552 u32 msg_received [ 4]; 1553 u32 msg_weird_seen [ 4]; 1554 u32 msg_extended [ 20]; 1555 u32 msg_bad [ 6]; 1556 u32 msg_weird [ 4]; 1557 u32 msg_weird1 [ 8]; 1558 1559 u32 wdtr_resp [ 6]; 1560 u32 send_wdtr [ 4]; 1561 u32 sdtr_resp [ 6]; 1562 u32 send_sdtr [ 4]; 1563 u32 ppr_resp [ 6]; 1564 u32 send_ppr [ 4]; 1565 u32 nego_bad_phase [ 4]; 1566 u32 msg_out [ 4]; 1567 u32 msg_out_done [ 4]; 1568 u32 data_ovrun [ 18]; 1569 u32 data_ovrun1 [ 20]; 1570 u32 abort_resel [ 16]; 1571 u32 resend_ident [ 4]; 1572 u32 ident_break [ 4]; 1573 u32 ident_break_atn [ 4]; 1574 u32 sdata_in [ 6]; 1575 u32 resel_bad_lun [ 4]; 1576 u32 bad_i_t_l [ 4]; 1577 u32 bad_i_t_l_q [ 4]; 1578 u32 bad_status [ 6]; 1579 u32 pm_handle [ 20]; 1580 u32 pm_handle1 [ 4]; 1581 u32 pm_save [ 4]; 1582 u32 pm0_save [ 14]; 1583 u32 pm1_save [ 14]; 1584 1585 /* WSR handling */ 1586 #ifdef SYM_DEBUG_PM_WITH_WSR 1587 u32 pm_wsr_handle [ 44]; 1588 #else 1589 u32 pm_wsr_handle [ 42]; 1590 #endif 1591 u32 wsr_ma_helper [ 4]; 1592 1593 /* Data area */ 1594 u32 zero [ 1]; 1595 u32 scratch [ 1]; 1596 u32 pm0_data_addr [ 1]; 1597 u32 pm1_data_addr [ 1]; 1598 u32 saved_dsa [ 1]; 1599 u32 saved_drs [ 1]; 1600 u32 done_pos [ 1]; 1601 u32 startpos [ 1]; 1602 u32 targtbl [ 1]; 1603 /* End of data area */ 1604 1605 u32 snooptest [ 6]; 1606 u32 snoopend [ 2]; 1607 }; 1608 1609 /* 1610 * Function prototypes. 
1611 */ 1612 static void sym_fill_scripts (script_p scr, scripth_p scrh); 1613 static void sym_bind_script (hcb_p np, u32 *src, u32 *dst, int len); 1614 static void sym_save_initial_setting (hcb_p np); 1615 static int sym_prepare_setting (hcb_p np, struct sym_nvram *nvram); 1616 static int sym_prepare_nego (hcb_p np, ccb_p cp, int nego, u_char *msgptr); 1617 static void sym_put_start_queue (hcb_p np, ccb_p cp); 1618 static void sym_chip_reset (hcb_p np); 1619 static void sym_soft_reset (hcb_p np); 1620 static void sym_start_reset (hcb_p np); 1621 static int sym_reset_scsi_bus (hcb_p np, int enab_int); 1622 static int sym_wakeup_done (hcb_p np); 1623 static void sym_flush_busy_queue (hcb_p np, int cam_status); 1624 static void sym_flush_comp_queue (hcb_p np, int cam_status); 1625 static void sym_init (hcb_p np, int reason); 1626 static int sym_getsync(hcb_p np, u_char dt, u_char sfac, u_char *divp, 1627 u_char *fakp); 1628 static void sym_setsync (hcb_p np, ccb_p cp, u_char ofs, u_char per, 1629 u_char div, u_char fak); 1630 static void sym_setwide (hcb_p np, ccb_p cp, u_char wide); 1631 static void sym_setpprot(hcb_p np, ccb_p cp, u_char dt, u_char ofs, 1632 u_char per, u_char wide, u_char div, u_char fak); 1633 static void sym_settrans(hcb_p np, ccb_p cp, u_char dt, u_char ofs, 1634 u_char per, u_char wide, u_char div, u_char fak); 1635 static void sym_log_hard_error (hcb_p np, u_short sist, u_char dstat); 1636 static void sym_intr (void *arg); 1637 static void sym_poll (struct cam_sim *sim); 1638 static void sym_recover_scsi_int (hcb_p np, u_char hsts); 1639 static void sym_int_sto (hcb_p np); 1640 static void sym_int_udc (hcb_p np); 1641 static void sym_int_sbmc (hcb_p np); 1642 static void sym_int_par (hcb_p np, u_short sist); 1643 static void sym_int_ma (hcb_p np); 1644 static int sym_dequeue_from_squeue(hcb_p np, int i, int target, int lun, 1645 int task); 1646 static void sym_sir_bad_scsi_status (hcb_p np, int num, ccb_p cp); 1647 static int sym_clear_tasks 
(hcb_p np, int status, int targ, int lun, int task); 1648 static void sym_sir_task_recovery (hcb_p np, int num); 1649 static int sym_evaluate_dp (hcb_p np, ccb_p cp, u32 scr, int *ofs); 1650 static void sym_modify_dp (hcb_p np, tcb_p tp, ccb_p cp, int ofs); 1651 static int sym_compute_residual (hcb_p np, ccb_p cp); 1652 static int sym_show_msg (u_char * msg); 1653 static void sym_print_msg (ccb_p cp, char *label, u_char *msg); 1654 static void sym_sync_nego (hcb_p np, tcb_p tp, ccb_p cp); 1655 static void sym_ppr_nego (hcb_p np, tcb_p tp, ccb_p cp); 1656 static void sym_wide_nego (hcb_p np, tcb_p tp, ccb_p cp); 1657 static void sym_nego_default (hcb_p np, tcb_p tp, ccb_p cp); 1658 static void sym_nego_rejected (hcb_p np, tcb_p tp, ccb_p cp); 1659 static void sym_int_sir (hcb_p np); 1660 static void sym_free_ccb (hcb_p np, ccb_p cp); 1661 static ccb_p sym_get_ccb (hcb_p np, u_char tn, u_char ln, u_char tag_order); 1662 static ccb_p sym_alloc_ccb (hcb_p np); 1663 static ccb_p sym_ccb_from_dsa (hcb_p np, u_long dsa); 1664 static lcb_p sym_alloc_lcb (hcb_p np, u_char tn, u_char ln); 1665 static void sym_alloc_lcb_tags (hcb_p np, u_char tn, u_char ln); 1666 static int sym_snooptest (hcb_p np); 1667 static void sym_selectclock(hcb_p np, u_char scntl3); 1668 static void sym_getclock (hcb_p np, int mult); 1669 static int sym_getpciclock (hcb_p np); 1670 static void sym_complete_ok (hcb_p np, ccb_p cp); 1671 static void sym_complete_error (hcb_p np, ccb_p cp); 1672 static void sym_timeout (void *arg); 1673 static int sym_abort_scsiio (hcb_p np, union ccb *ccb, int timed_out); 1674 static void sym_reset_dev (hcb_p np, union ccb *ccb); 1675 static void sym_action (struct cam_sim *sim, union ccb *ccb); 1676 static void sym_action1 (struct cam_sim *sim, union ccb *ccb); 1677 static int sym_setup_cdb (hcb_p np, struct ccb_scsiio *csio, ccb_p cp); 1678 static int sym_setup_data(hcb_p np, struct ccb_scsiio *csio, ccb_p cp); 1679 static int sym_scatter_virtual (hcb_p np, ccb_p cp, 
vm_offset_t vaddr, 1680 vm_size_t len); 1681 static int sym_scatter_physical (hcb_p np, ccb_p cp, vm_offset_t vaddr, 1682 vm_size_t len); 1683 static void sym_action2 (struct cam_sim *sim, union ccb *ccb); 1684 static void sym_update_trans (hcb_p np, tcb_p tp, struct sym_trans *tip, 1685 struct ccb_trans_settings *cts); 1686 static void sym_update_dflags(hcb_p np, u_char *flags, 1687 struct ccb_trans_settings *cts); 1688 1689 #ifdef FreeBSD_4_Bus 1690 static struct sym_pci_chip *sym_find_pci_chip (device_t dev); 1691 static int sym_pci_probe (device_t dev); 1692 static int sym_pci_attach (device_t dev); 1693 #else 1694 static struct sym_pci_chip *sym_find_pci_chip (pcici_t tag); 1695 static const char *sym_pci_probe (pcici_t tag, pcidi_t type); 1696 static void sym_pci_attach (pcici_t tag, int unit); 1697 static int sym_pci_attach2 (pcici_t tag, int unit); 1698 #endif 1699 1700 static void sym_pci_free (hcb_p np); 1701 static int sym_cam_attach (hcb_p np); 1702 static void sym_cam_free (hcb_p np); 1703 1704 static void sym_nvram_setup_host (hcb_p np, struct sym_nvram *nvram); 1705 static void sym_nvram_setup_target (hcb_p np, int targ, struct sym_nvram *nvp); 1706 static int sym_read_nvram (hcb_p np, struct sym_nvram *nvp); 1707 1708 /* 1709 * Return the name of the controller. 1710 */ 1711 static __inline char *sym_name(hcb_p np) 1712 { 1713 return np->inst_name; 1714 } 1715 1716 /* 1717 * Scripts for SYMBIOS-Processor 1718 * 1719 * Use sym_bind_script for binding to physical addresses. 1720 * 1721 * NADDR generates a reference to a field of the controller data. 1722 * PADDR generates a reference to another part of the script. 1723 * RADDR generates a reference to a script processor register. 1724 * FADDR generates a reference to a script processor register 1725 * with offset. 
1726 * 1727 */ 1728 #define RELOC_SOFTC 0x40000000 1729 #define RELOC_LABEL 0x50000000 1730 #define RELOC_REGISTER 0x60000000 1731 #if 0 1732 #define RELOC_KVAR 0x70000000 1733 #endif 1734 #define RELOC_LABELH 0x80000000 1735 #define RELOC_MASK 0xf0000000 1736 1737 #define NADDR(label) (RELOC_SOFTC | offsetof(struct sym_hcb, label)) 1738 #define PADDR(label) (RELOC_LABEL | offsetof(struct sym_scr, label)) 1739 #define PADDRH(label) (RELOC_LABELH | offsetof(struct sym_scrh, label)) 1740 #define RADDR(label) (RELOC_REGISTER | REG(label)) 1741 #define FADDR(label,ofs)(RELOC_REGISTER | ((REG(label))+(ofs))) 1742 #define KVAR(which) (RELOC_KVAR | (which)) 1743 1744 #define SCR_DATA_ZERO 0xf00ff00f 1745 1746 #ifdef RELOC_KVAR 1747 #define SCRIPT_KVAR_JIFFIES (0) 1748 #define SCRIPT_KVAR_FIRST SCRIPT_KVAR_XXXXXXX 1749 #define SCRIPT_KVAR_LAST SCRIPT_KVAR_XXXXXXX 1750 /* 1751 * Kernel variables referenced in the scripts. 1752 * THESE MUST ALL BE ALIGNED TO A 4-BYTE BOUNDARY. 1753 */ 1754 static void *script_kvars[] = 1755 { (void *)&xxxxxxx }; 1756 #endif 1757 1758 static struct sym_scr script0 = { 1759 /*--------------------------< START >-----------------------*/ { 1760 /* 1761 * This NOP will be patched with LED ON 1762 * SCR_REG_REG (gpreg, SCR_AND, 0xfe) 1763 */ 1764 SCR_NO_OP, 1765 0, 1766 /* 1767 * Clear SIGP. 1768 */ 1769 SCR_FROM_REG (ctest2), 1770 0, 1771 /* 1772 * Stop here if the C code wants to perform 1773 * some error recovery procedure manually. 1774 * (Indicate this by setting SEM in ISTAT) 1775 */ 1776 SCR_FROM_REG (istat), 1777 0, 1778 /* 1779 * Report to the C code the next position in 1780 * the start queue the SCRIPTS will schedule. 1781 * The C code must not change SCRATCHA. 1782 */ 1783 SCR_LOAD_ABS (scratcha, 4), 1784 PADDRH (startpos), 1785 SCR_INT ^ IFTRUE (MASK (SEM, SEM)), 1786 SIR_SCRIPT_STOPPED, 1787 /* 1788 * Start the next job. 1789 * 1790 * @DSA = start point for this job. 1791 * SCRATCHA = address of this job in the start queue. 
1792 * 1793 * We will restore startpos with SCRATCHA if we fails the 1794 * arbitration or if it is the idle job. 1795 * 1796 * The below GETJOB_BEGIN to GETJOB_END section of SCRIPTS 1797 * is a critical path. If it is partially executed, it then 1798 * may happen that the job address is not yet in the DSA 1799 * and the the next queue position points to the next JOB. 1800 */ 1801 SCR_LOAD_ABS (dsa, 4), 1802 PADDRH (startpos), 1803 SCR_LOAD_REL (temp, 4), 1804 4, 1805 }/*-------------------------< GETJOB_BEGIN >------------------*/,{ 1806 SCR_STORE_ABS (temp, 4), 1807 PADDRH (startpos), 1808 SCR_LOAD_REL (dsa, 4), 1809 0, 1810 }/*-------------------------< GETJOB_END >--------------------*/,{ 1811 SCR_LOAD_REL (temp, 4), 1812 0, 1813 SCR_RETURN, 1814 0, 1815 }/*-------------------------< SELECT >----------------------*/,{ 1816 /* 1817 * DSA contains the address of a scheduled 1818 * data structure. 1819 * 1820 * SCRATCHA contains the address of the start queue 1821 * entry which points to the next job. 1822 * 1823 * Set Initiator mode. 1824 * 1825 * (Target mode is left as an exercise for the reader) 1826 */ 1827 SCR_CLR (SCR_TRG), 1828 0, 1829 /* 1830 * And try to select this target. 1831 */ 1832 SCR_SEL_TBL_ATN ^ offsetof (struct dsb, select), 1833 PADDR (ungetjob), 1834 /* 1835 * Now there are 4 possibilities: 1836 * 1837 * (1) The chip looses arbitration. 1838 * This is ok, because it will try again, 1839 * when the bus becomes idle. 1840 * (But beware of the timeout function!) 1841 * 1842 * (2) The chip is reselected. 1843 * Then the script processor takes the jump 1844 * to the RESELECT label. 1845 * 1846 * (3) The chip wins arbitration. 1847 * Then it will execute SCRIPTS instruction until 1848 * the next instruction that checks SCSI phase. 1849 * Then will stop and wait for selection to be 1850 * complete or selection time-out to occur. 
1851 * 1852 * After having won arbitration, the SCRIPTS 1853 * processor is able to execute instructions while 1854 * the SCSI core is performing SCSI selection. 1855 */ 1856 /* 1857 * load the savep (saved data pointer) into 1858 * the actual data pointer. 1859 */ 1860 SCR_LOAD_REL (temp, 4), 1861 offsetof (struct sym_ccb, phys.savep), 1862 /* 1863 * Initialize the status registers 1864 */ 1865 SCR_LOAD_REL (scr0, 4), 1866 offsetof (struct sym_ccb, phys.status), 1867 }/*-------------------------< WF_SEL_DONE >----------------------*/,{ 1868 SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)), 1869 SIR_SEL_ATN_NO_MSG_OUT, 1870 }/*-------------------------< SEND_IDENT >----------------------*/,{ 1871 /* 1872 * Selection complete. 1873 * Send the IDENTIFY and possibly the TAG message 1874 * and negotiation message if present. 1875 */ 1876 SCR_MOVE_TBL ^ SCR_MSG_OUT, 1877 offsetof (struct dsb, smsg), 1878 }/*-------------------------< SELECT2 >----------------------*/,{ 1879 #ifdef SYM_CONF_IARB_SUPPORT 1880 /* 1881 * Set IMMEDIATE ARBITRATION if we have been given 1882 * a hint to do so. (Some job to do after this one). 1883 */ 1884 SCR_FROM_REG (HF_REG), 1885 0, 1886 SCR_JUMPR ^ IFFALSE (MASK (HF_HINT_IARB, HF_HINT_IARB)), 1887 8, 1888 SCR_REG_REG (scntl1, SCR_OR, IARB), 1889 0, 1890 #endif 1891 /* 1892 * Anticipate the COMMAND phase. 1893 * This is the PHASE we expect at this point. 1894 */ 1895 SCR_JUMP ^ IFFALSE (WHEN (SCR_COMMAND)), 1896 PADDR (sel_no_cmd), 1897 }/*-------------------------< COMMAND >--------------------*/,{ 1898 /* 1899 * ... and send the command 1900 */ 1901 SCR_MOVE_TBL ^ SCR_COMMAND, 1902 offsetof (struct dsb, cmd), 1903 }/*-----------------------< DISPATCH >----------------------*/,{ 1904 /* 1905 * MSG_IN is the only phase that shall be 1906 * entered at least once for each (re)selection. 1907 * So we test it first. 
1908 */ 1909 SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)), 1910 PADDR (msg_in), 1911 SCR_JUMP ^ IFTRUE (IF (SCR_DATA_OUT)), 1912 PADDR (dataphase), 1913 SCR_JUMP ^ IFTRUE (IF (SCR_DATA_IN)), 1914 PADDR (dataphase), 1915 SCR_JUMP ^ IFTRUE (IF (SCR_STATUS)), 1916 PADDR (status), 1917 SCR_JUMP ^ IFTRUE (IF (SCR_COMMAND)), 1918 PADDR (command), 1919 SCR_JUMP ^ IFTRUE (IF (SCR_MSG_OUT)), 1920 PADDRH (msg_out), 1921 1922 /* 1923 * Set the extended error flag. 1924 */ 1925 SCR_REG_REG (HF_REG, SCR_OR, HF_EXT_ERR), 1926 0, 1927 /* 1928 * Discard one illegal phase byte, if required. 1929 */ 1930 SCR_LOAD_REL (scratcha, 1), 1931 offsetof (struct sym_ccb, xerr_status), 1932 SCR_REG_REG (scratcha, SCR_OR, XE_BAD_PHASE), 1933 0, 1934 SCR_STORE_REL (scratcha, 1), 1935 offsetof (struct sym_ccb, xerr_status), 1936 SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_OUT)), 1937 8, 1938 SCR_MOVE_ABS (1) ^ SCR_ILG_OUT, 1939 NADDR (scratch), 1940 SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_IN)), 1941 8, 1942 SCR_MOVE_ABS (1) ^ SCR_ILG_IN, 1943 NADDR (scratch), 1944 1945 SCR_JUMP, 1946 PADDR (dispatch), 1947 }/*---------------------< SEL_NO_CMD >----------------------*/,{ 1948 /* 1949 * The target does not switch to command 1950 * phase after IDENTIFY has been sent. 1951 * 1952 * If it stays in MSG OUT phase send it 1953 * the IDENTIFY again. 1954 */ 1955 SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)), 1956 PADDRH (resend_ident), 1957 /* 1958 * If target does not switch to MSG IN phase 1959 * and we sent a negotiation, assert the 1960 * failure immediately. 1961 */ 1962 SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)), 1963 PADDR (dispatch), 1964 SCR_FROM_REG (HS_REG), 1965 0, 1966 SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)), 1967 SIR_NEGO_FAILED, 1968 /* 1969 * Jump to dispatcher. 
1970 */ 1971 SCR_JUMP, 1972 PADDR (dispatch), 1973 }/*-------------------------< INIT >------------------------*/,{ 1974 /* 1975 * Wait for the SCSI RESET signal to be 1976 * inactive before restarting operations, 1977 * since the chip may hang on SEL_ATN 1978 * if SCSI RESET is active. 1979 */ 1980 SCR_FROM_REG (sstat0), 1981 0, 1982 SCR_JUMPR ^ IFTRUE (MASK (IRST, IRST)), 1983 -16, 1984 SCR_JUMP, 1985 PADDR (start), 1986 }/*-------------------------< CLRACK >----------------------*/,{ 1987 /* 1988 * Terminate possible pending message phase. 1989 */ 1990 SCR_CLR (SCR_ACK), 1991 0, 1992 SCR_JUMP, 1993 PADDR (dispatch), 1994 }/*-------------------------< DISP_STATUS >----------------------*/,{ 1995 /* 1996 * Anticipate STATUS phase. 1997 * 1998 * Does spare 3 SCRIPTS instructions when we have 1999 * completed the INPUT of the data. 2000 */ 2001 SCR_JUMP ^ IFTRUE (WHEN (SCR_STATUS)), 2002 PADDR (status), 2003 SCR_JUMP, 2004 PADDR (dispatch), 2005 }/*-------------------------< DATAI_DONE >-------------------*/,{ 2006 /* 2007 * If the device still wants to send us data, 2008 * we must count the extra bytes. 2009 */ 2010 SCR_JUMP ^ IFTRUE (WHEN (SCR_DATA_IN)), 2011 PADDRH (data_ovrun), 2012 /* 2013 * If the SWIDE is not full, jump to dispatcher. 2014 * We anticipate a STATUS phase. 2015 */ 2016 SCR_FROM_REG (scntl2), 2017 0, 2018 SCR_JUMP ^ IFFALSE (MASK (WSR, WSR)), 2019 PADDR (disp_status), 2020 /* 2021 * The SWIDE is full. 2022 * Clear this condition. 2023 */ 2024 SCR_REG_REG (scntl2, SCR_OR, WSR), 2025 0, 2026 /* 2027 * We are expecting an IGNORE RESIDUE message 2028 * from the device, otherwise we are in data 2029 * overrun condition. Check against MSG_IN phase. 2030 */ 2031 SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)), 2032 SIR_SWIDE_OVERRUN, 2033 SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), 2034 PADDR (disp_status), 2035 /* 2036 * We are in MSG_IN phase, 2037 * Read the first byte of the message. 
2038 * If it is not an IGNORE RESIDUE message, 2039 * signal overrun and jump to message 2040 * processing. 2041 */ 2042 SCR_MOVE_ABS (1) ^ SCR_MSG_IN, 2043 NADDR (msgin[0]), 2044 SCR_INT ^ IFFALSE (DATA (M_IGN_RESIDUE)), 2045 SIR_SWIDE_OVERRUN, 2046 SCR_JUMP ^ IFFALSE (DATA (M_IGN_RESIDUE)), 2047 PADDR (msg_in2), 2048 /* 2049 * We got the message we expected. 2050 * Read the 2nd byte, and jump to dispatcher. 2051 */ 2052 SCR_CLR (SCR_ACK), 2053 0, 2054 SCR_MOVE_ABS (1) ^ SCR_MSG_IN, 2055 NADDR (msgin[1]), 2056 SCR_CLR (SCR_ACK), 2057 0, 2058 SCR_JUMP, 2059 PADDR (disp_status), 2060 }/*-------------------------< DATAO_DONE >-------------------*/,{ 2061 /* 2062 * If the device wants us to send more data, 2063 * we must count the extra bytes. 2064 */ 2065 SCR_JUMP ^ IFTRUE (WHEN (SCR_DATA_OUT)), 2066 PADDRH (data_ovrun), 2067 /* 2068 * If the SODL is not full jump to dispatcher. 2069 * We anticipate a STATUS phase. 2070 */ 2071 SCR_FROM_REG (scntl2), 2072 0, 2073 SCR_JUMP ^ IFFALSE (MASK (WSS, WSS)), 2074 PADDR (disp_status), 2075 /* 2076 * The SODL is full, clear this condition. 2077 */ 2078 SCR_REG_REG (scntl2, SCR_OR, WSS), 2079 0, 2080 /* 2081 * And signal a DATA UNDERRUN condition 2082 * to the C code. 2083 */ 2084 SCR_INT, 2085 SIR_SODL_UNDERRUN, 2086 SCR_JUMP, 2087 PADDR (dispatch), 2088 }/*-------------------------< DATAPHASE >------------------*/,{ 2089 SCR_RETURN, 2090 0, 2091 }/*-------------------------< MSG_IN >--------------------*/,{ 2092 /* 2093 * Get the first byte of the message. 2094 * 2095 * The script processor doesn't negate the 2096 * ACK signal after this transfer. 2097 */ 2098 SCR_MOVE_ABS (1) ^ SCR_MSG_IN, 2099 NADDR (msgin[0]), 2100 }/*-------------------------< MSG_IN2 >--------------------*/,{ 2101 /* 2102 * Check first against 1 byte messages 2103 * that we handle from SCRIPTS. 
2104 */ 2105 SCR_JUMP ^ IFTRUE (DATA (M_COMPLETE)), 2106 PADDR (complete), 2107 SCR_JUMP ^ IFTRUE (DATA (M_DISCONNECT)), 2108 PADDR (disconnect), 2109 SCR_JUMP ^ IFTRUE (DATA (M_SAVE_DP)), 2110 PADDR (save_dp), 2111 SCR_JUMP ^ IFTRUE (DATA (M_RESTORE_DP)), 2112 PADDR (restore_dp), 2113 /* 2114 * We handle all other messages from the 2115 * C code, so no need to waste on-chip RAM 2116 * for those ones. 2117 */ 2118 SCR_JUMP, 2119 PADDRH (msg_in_etc), 2120 }/*-------------------------< STATUS >--------------------*/,{ 2121 /* 2122 * get the status 2123 */ 2124 SCR_MOVE_ABS (1) ^ SCR_STATUS, 2125 NADDR (scratch), 2126 #ifdef SYM_CONF_IARB_SUPPORT 2127 /* 2128 * If STATUS is not GOOD, clear IMMEDIATE ARBITRATION, 2129 * since we may have to tamper the start queue from 2130 * the C code. 2131 */ 2132 SCR_JUMPR ^ IFTRUE (DATA (S_GOOD)), 2133 8, 2134 SCR_REG_REG (scntl1, SCR_AND, ~IARB), 2135 0, 2136 #endif 2137 /* 2138 * save status to scsi_status. 2139 * mark as complete. 2140 */ 2141 SCR_TO_REG (SS_REG), 2142 0, 2143 SCR_LOAD_REG (HS_REG, HS_COMPLETE), 2144 0, 2145 /* 2146 * Anticipate the MESSAGE PHASE for 2147 * the TASK COMPLETE message. 2148 */ 2149 SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)), 2150 PADDR (msg_in), 2151 SCR_JUMP, 2152 PADDR (dispatch), 2153 }/*-------------------------< COMPLETE >-----------------*/,{ 2154 /* 2155 * Complete message. 2156 * 2157 * Copy the data pointer to LASTP. 2158 */ 2159 SCR_STORE_REL (temp, 4), 2160 offsetof (struct sym_ccb, phys.lastp), 2161 /* 2162 * When we terminate the cycle by clearing ACK, 2163 * the target may disconnect immediately. 2164 * 2165 * We don't want to be told of an "unexpected disconnect", 2166 * so we disable this feature. 2167 */ 2168 SCR_REG_REG (scntl2, SCR_AND, 0x7f), 2169 0, 2170 /* 2171 * Terminate cycle ... 2172 */ 2173 SCR_CLR (SCR_ACK|SCR_ATN), 2174 0, 2175 /* 2176 * ... and wait for the disconnect. 
2177 */ 2178 SCR_WAIT_DISC, 2179 0, 2180 }/*-------------------------< COMPLETE2 >-----------------*/,{ 2181 /* 2182 * Save host status. 2183 */ 2184 SCR_STORE_REL (scr0, 4), 2185 offsetof (struct sym_ccb, phys.status), 2186 /* 2187 * Some bridges may reorder DMA writes to memory. 2188 * We donnot want the CPU to deal with completions 2189 * without all the posted write having been flushed 2190 * to memory. This DUMMY READ should flush posted 2191 * buffers prior to the CPU having to deal with 2192 * completions. 2193 */ 2194 SCR_LOAD_REL (scr0, 4), /* DUMMY READ */ 2195 offsetof (struct sym_ccb, phys.status), 2196 2197 /* 2198 * If command resulted in not GOOD status, 2199 * call the C code if needed. 2200 */ 2201 SCR_FROM_REG (SS_REG), 2202 0, 2203 SCR_CALL ^ IFFALSE (DATA (S_GOOD)), 2204 PADDRH (bad_status), 2205 /* 2206 * If we performed an auto-sense, call 2207 * the C code to synchronyze task aborts 2208 * with UNIT ATTENTION conditions. 2209 */ 2210 SCR_FROM_REG (HF_REG), 2211 0, 2212 SCR_JUMPR ^ IFTRUE (MASK (0 ,(HF_SENSE|HF_EXT_ERR))), 2213 16, 2214 }/*-------------------------< COMPLETE_ERROR >-----------------*/,{ 2215 SCR_LOAD_ABS (scratcha, 4), 2216 PADDRH (startpos), 2217 SCR_INT, 2218 SIR_COMPLETE_ERROR, 2219 }/*------------------------< DONE >-----------------*/,{ 2220 /* 2221 * Copy the DSA to the DONE QUEUE and 2222 * signal completion to the host. 2223 * If we are interrupted between DONE 2224 * and DONE_END, we must reset, otherwise 2225 * the completed CCB may be lost. 2226 */ 2227 SCR_STORE_ABS (dsa, 4), 2228 PADDRH (saved_dsa), 2229 SCR_LOAD_ABS (dsa, 4), 2230 PADDRH (done_pos), 2231 SCR_LOAD_ABS (scratcha, 4), 2232 PADDRH (saved_dsa), 2233 SCR_STORE_REL (scratcha, 4), 2234 0, 2235 /* 2236 * The instruction below reads the DONE QUEUE next 2237 * free position from memory. 2238 * In addition it ensures that all PCI posted writes 2239 * are flushed and so the DSA value of the done 2240 * CCB is visible by the CPU before INTFLY is raised. 
2241 */ 2242 SCR_LOAD_REL (temp, 4), 2243 4, 2244 SCR_INT_FLY, 2245 0, 2246 SCR_STORE_ABS (temp, 4), 2247 PADDRH (done_pos), 2248 }/*------------------------< DONE_END >-----------------*/,{ 2249 SCR_JUMP, 2250 PADDR (start), 2251 }/*-------------------------< SAVE_DP >------------------*/,{ 2252 /* 2253 * Clear ACK immediately. 2254 * No need to delay it. 2255 */ 2256 SCR_CLR (SCR_ACK), 2257 0, 2258 /* 2259 * Keep track we received a SAVE DP, so 2260 * we will switch to the other PM context 2261 * on the next PM since the DP may point 2262 * to the current PM context. 2263 */ 2264 SCR_REG_REG (HF_REG, SCR_OR, HF_DP_SAVED), 2265 0, 2266 /* 2267 * SAVE_DP message: 2268 * Copy the data pointer to SAVEP. 2269 */ 2270 SCR_STORE_REL (temp, 4), 2271 offsetof (struct sym_ccb, phys.savep), 2272 SCR_JUMP, 2273 PADDR (dispatch), 2274 }/*-------------------------< RESTORE_DP >---------------*/,{ 2275 /* 2276 * RESTORE_DP message: 2277 * Copy SAVEP to actual data pointer. 2278 */ 2279 SCR_LOAD_REL (temp, 4), 2280 offsetof (struct sym_ccb, phys.savep), 2281 SCR_JUMP, 2282 PADDR (clrack), 2283 }/*-------------------------< DISCONNECT >---------------*/,{ 2284 /* 2285 * DISCONNECTing ... 2286 * 2287 * disable the "unexpected disconnect" feature, 2288 * and remove the ACK signal. 2289 */ 2290 SCR_REG_REG (scntl2, SCR_AND, 0x7f), 2291 0, 2292 SCR_CLR (SCR_ACK|SCR_ATN), 2293 0, 2294 /* 2295 * Wait for the disconnect. 2296 */ 2297 SCR_WAIT_DISC, 2298 0, 2299 /* 2300 * Status is: DISCONNECTED. 2301 */ 2302 SCR_LOAD_REG (HS_REG, HS_DISCONNECT), 2303 0, 2304 /* 2305 * Save host status. 2306 */ 2307 SCR_STORE_REL (scr0, 4), 2308 offsetof (struct sym_ccb, phys.status), 2309 /* 2310 * If QUIRK_AUTOSAVE is set, 2311 * do an "save pointer" operation. 2312 */ 2313 SCR_FROM_REG (QU_REG), 2314 0, 2315 SCR_JUMP ^ IFFALSE (MASK (SYM_QUIRK_AUTOSAVE, SYM_QUIRK_AUTOSAVE)), 2316 PADDR (start), 2317 /* 2318 * like SAVE_DP message: 2319 * Remember we saved the data pointer. 
2320 * Copy data pointer to SAVEP. 2321 */ 2322 SCR_REG_REG (HF_REG, SCR_OR, HF_DP_SAVED), 2323 0, 2324 SCR_STORE_REL (temp, 4), 2325 offsetof (struct sym_ccb, phys.savep), 2326 SCR_JUMP, 2327 PADDR (start), 2328 }/*-------------------------< IDLE >------------------------*/,{ 2329 /* 2330 * Nothing to do? 2331 * Wait for reselect. 2332 * This NOP will be patched with LED OFF 2333 * SCR_REG_REG (gpreg, SCR_OR, 0x01) 2334 */ 2335 SCR_NO_OP, 2336 0, 2337 #ifdef SYM_CONF_IARB_SUPPORT 2338 SCR_JUMPR, 2339 8, 2340 #endif 2341 }/*-------------------------< UNGETJOB >-----------------*/,{ 2342 #ifdef SYM_CONF_IARB_SUPPORT 2343 /* 2344 * Set IMMEDIATE ARBITRATION, for the next time. 2345 * This will give us better chance to win arbitration 2346 * for the job we just wanted to do. 2347 */ 2348 SCR_REG_REG (scntl1, SCR_OR, IARB), 2349 0, 2350 #endif 2351 /* 2352 * We are not able to restart the SCRIPTS if we are 2353 * interrupted and these instruction haven't been 2354 * all executed. BTW, this is very unlikely to 2355 * happen, but we check that from the C code. 2356 */ 2357 SCR_LOAD_REG (dsa, 0xff), 2358 0, 2359 SCR_STORE_ABS (scratcha, 4), 2360 PADDRH (startpos), 2361 }/*-------------------------< RESELECT >--------------------*/,{ 2362 /* 2363 * Make sure we are in initiator mode. 2364 */ 2365 SCR_CLR (SCR_TRG), 2366 0, 2367 /* 2368 * Sleep waiting for a reselection. 
2369 */ 2370 SCR_WAIT_RESEL, 2371 PADDR(start), 2372 }/*-------------------------< RESELECTED >------------------*/,{ 2373 /* 2374 * This NOP will be patched with LED ON 2375 * SCR_REG_REG (gpreg, SCR_AND, 0xfe) 2376 */ 2377 SCR_NO_OP, 2378 0, 2379 /* 2380 * load the target id into the sdid 2381 */ 2382 SCR_REG_SFBR (ssid, SCR_AND, 0x8F), 2383 0, 2384 SCR_TO_REG (sdid), 2385 0, 2386 /* 2387 * Load the target control block address 2388 */ 2389 SCR_LOAD_ABS (dsa, 4), 2390 PADDRH (targtbl), 2391 SCR_SFBR_REG (dsa, SCR_SHL, 0), 2392 0, 2393 SCR_REG_REG (dsa, SCR_SHL, 0), 2394 0, 2395 SCR_REG_REG (dsa, SCR_AND, 0x3c), 2396 0, 2397 SCR_LOAD_REL (dsa, 4), 2398 0, 2399 /* 2400 * Load the legacy synchronous transfer registers. 2401 */ 2402 SCR_LOAD_REL (scntl3, 1), 2403 offsetof(struct sym_tcb, wval), 2404 SCR_LOAD_REL (sxfer, 1), 2405 offsetof(struct sym_tcb, sval), 2406 }/*-------------------------< RESEL_SCNTL4 >------------------*/,{ 2407 /* 2408 * If C1010, patched with the load of SCNTL4 that 2409 * allows a new synchronous timing scheme. 2410 * 2411 * SCR_LOAD_REL (scntl4, 1), 2412 * offsetof(struct tcb, uval), 2413 */ 2414 SCR_NO_OP, 2415 0, 2416 /* 2417 * We expect MESSAGE IN phase. 2418 * If not, get help from the C code. 2419 */ 2420 SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)), 2421 SIR_RESEL_NO_MSG_IN, 2422 SCR_MOVE_ABS (1) ^ SCR_MSG_IN, 2423 NADDR (msgin), 2424 /* 2425 * If IDENTIFY LUN #0, use a faster path 2426 * to find the LCB structure. 2427 */ 2428 SCR_JUMPR ^ IFTRUE (MASK (0x80, 0xbf)), 2429 56, 2430 /* 2431 * If message isn't an IDENTIFY, 2432 * tell the C code about. 2433 */ 2434 SCR_INT ^ IFFALSE (MASK (0x80, 0x80)), 2435 SIR_RESEL_NO_IDENTIFY, 2436 /* 2437 * It is an IDENTIFY message, 2438 * Load the LUN control block address. 
2439 */ 2440 SCR_LOAD_REL (dsa, 4), 2441 offsetof(struct sym_tcb, luntbl_sa), 2442 SCR_SFBR_REG (dsa, SCR_SHL, 0), 2443 0, 2444 SCR_REG_REG (dsa, SCR_SHL, 0), 2445 0, 2446 SCR_REG_REG (dsa, SCR_AND, 0xfc), 2447 0, 2448 SCR_LOAD_REL (dsa, 4), 2449 0, 2450 SCR_JUMPR, 2451 8, 2452 /* 2453 * LUN 0 special case (but usual one :)) 2454 */ 2455 SCR_LOAD_REL (dsa, 4), 2456 offsetof(struct sym_tcb, lun0_sa), 2457 /* 2458 * Jump indirectly to the reselect action for this LUN. 2459 */ 2460 SCR_LOAD_REL (temp, 4), 2461 offsetof(struct sym_lcb, resel_sa), 2462 SCR_RETURN, 2463 0, 2464 /* In normal situations, we jump to RESEL_TAG or RESEL_NO_TAG */ 2465 }/*-------------------------< RESEL_TAG >-------------------*/,{ 2466 /* 2467 * ACK the IDENTIFY or TAG previously received. 2468 */ 2469 SCR_CLR (SCR_ACK), 2470 0, 2471 /* 2472 * It shall be a tagged command. 2473 * Read SIMPLE+TAG. 2474 * The C code will deal with errors. 2475 * Agressive optimization, is'nt it? :) 2476 */ 2477 SCR_MOVE_ABS (2) ^ SCR_MSG_IN, 2478 NADDR (msgin), 2479 /* 2480 * Load the pointer to the tagged task 2481 * table for this LUN. 2482 */ 2483 SCR_LOAD_REL (dsa, 4), 2484 offsetof(struct sym_lcb, itlq_tbl_sa), 2485 /* 2486 * The SIDL still contains the TAG value. 2487 * Agressive optimization, isn't it? :):) 2488 */ 2489 SCR_REG_SFBR (sidl, SCR_SHL, 0), 2490 0, 2491 #if SYM_CONF_MAX_TASK*4 > 512 2492 SCR_JUMPR ^ IFFALSE (CARRYSET), 2493 8, 2494 SCR_REG_REG (dsa1, SCR_OR, 2), 2495 0, 2496 SCR_REG_REG (sfbr, SCR_SHL, 0), 2497 0, 2498 SCR_JUMPR ^ IFFALSE (CARRYSET), 2499 8, 2500 SCR_REG_REG (dsa1, SCR_OR, 1), 2501 0, 2502 #elif SYM_CONF_MAX_TASK*4 > 256 2503 SCR_JUMPR ^ IFFALSE (CARRYSET), 2504 8, 2505 SCR_REG_REG (dsa1, SCR_OR, 1), 2506 0, 2507 #endif 2508 /* 2509 * Retrieve the DSA of this task. 2510 * JUMP indirectly to the restart point of the CCB. 
2511 */ 2512 SCR_SFBR_REG (dsa, SCR_AND, 0xfc), 2513 0, 2514 SCR_LOAD_REL (dsa, 4), 2515 0, 2516 SCR_LOAD_REL (temp, 4), 2517 offsetof(struct sym_ccb, phys.go.restart), 2518 SCR_RETURN, 2519 0, 2520 /* In normal situations we branch to RESEL_DSA */ 2521 }/*-------------------------< RESEL_DSA >-------------------*/,{ 2522 /* 2523 * ACK the IDENTIFY or TAG previously received. 2524 */ 2525 SCR_CLR (SCR_ACK), 2526 0, 2527 }/*-------------------------< RESEL_DSA1 >------------------*/,{ 2528 /* 2529 * load the savep (saved pointer) into 2530 * the actual data pointer. 2531 */ 2532 SCR_LOAD_REL (temp, 4), 2533 offsetof (struct sym_ccb, phys.savep), 2534 /* 2535 * Initialize the status registers 2536 */ 2537 SCR_LOAD_REL (scr0, 4), 2538 offsetof (struct sym_ccb, phys.status), 2539 /* 2540 * Jump to dispatcher. 2541 */ 2542 SCR_JUMP, 2543 PADDR (dispatch), 2544 }/*-------------------------< RESEL_NO_TAG >-------------------*/,{ 2545 /* 2546 * Load the DSA with the unique ITL task. 2547 */ 2548 SCR_LOAD_REL (dsa, 4), 2549 offsetof(struct sym_lcb, itl_task_sa), 2550 /* 2551 * JUMP indirectly to the restart point of the CCB. 2552 */ 2553 SCR_LOAD_REL (temp, 4), 2554 offsetof(struct sym_ccb, phys.go.restart), 2555 SCR_RETURN, 2556 0, 2557 /* In normal situations we branch to RESEL_DSA */ 2558 }/*-------------------------< DATA_IN >--------------------*/,{ 2559 /* 2560 * Because the size depends on the 2561 * #define SYM_CONF_MAX_SG parameter, 2562 * it is filled in at runtime. 
2563 * 2564 * ##===========< i=0; i<SYM_CONF_MAX_SG >========= 2565 * || SCR_CHMOV_TBL ^ SCR_DATA_IN, 2566 * || offsetof (struct dsb, data[ i]), 2567 * ##========================================== 2568 */ 2569 0 2570 }/*-------------------------< DATA_IN2 >-------------------*/,{ 2571 SCR_CALL, 2572 PADDR (datai_done), 2573 SCR_JUMP, 2574 PADDRH (data_ovrun), 2575 }/*-------------------------< DATA_OUT >--------------------*/,{ 2576 /* 2577 * Because the size depends on the 2578 * #define SYM_CONF_MAX_SG parameter, 2579 * it is filled in at runtime. 2580 * 2581 * ##===========< i=0; i<SYM_CONF_MAX_SG >========= 2582 * || SCR_CHMOV_TBL ^ SCR_DATA_OUT, 2583 * || offsetof (struct dsb, data[ i]), 2584 * ##========================================== 2585 */ 2586 0 2587 }/*-------------------------< DATA_OUT2 >-------------------*/,{ 2588 SCR_CALL, 2589 PADDR (datao_done), 2590 SCR_JUMP, 2591 PADDRH (data_ovrun), 2592 }/*-------------------------< PM0_DATA >--------------------*/,{ 2593 /* 2594 * Keep track we are executing the PM0 DATA 2595 * mini-script. 2596 */ 2597 SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0), 2598 0, 2599 /* 2600 * MOVE the data according to the actual 2601 * DATA direction. 2602 */ 2603 SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_IN)), 2604 16, 2605 SCR_CHMOV_TBL ^ SCR_DATA_IN, 2606 offsetof (struct sym_ccb, phys.pm0.sg), 2607 SCR_JUMPR, 2608 8, 2609 SCR_CHMOV_TBL ^ SCR_DATA_OUT, 2610 offsetof (struct sym_ccb, phys.pm0.sg), 2611 /* 2612 * Clear the flag that told we were in 2613 * the PM0 DATA mini-script. 2614 */ 2615 SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM0)), 2616 0, 2617 /* 2618 * Return to the previous DATA script which 2619 * is guaranteed by design (if no bug) to be 2620 * the main DATA script for this transfer. 
	 */
	SCR_LOAD_REL (temp, 4),
		offsetof (struct sym_ccb, phys.pm0.ret),
	SCR_RETURN,
		0,
}/*-------------------------< PM1_DATA >--------------------*/,{
	/*
	 *  Keep track we are executing the PM1 DATA
	 *  mini-script.
	 */
	SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1),
		0,
	/*
	 *  MOVE the data according to the actual
	 *  DATA direction.
	 */
	SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_IN)),
		16,
	SCR_CHMOV_TBL ^ SCR_DATA_IN,
		offsetof (struct sym_ccb, phys.pm1.sg),
	SCR_JUMPR,
		8,
	SCR_CHMOV_TBL ^ SCR_DATA_OUT,
		offsetof (struct sym_ccb, phys.pm1.sg),
	/*
	 *  Clear the flag that told we were in
	 *  the PM1 DATA mini-script.
	 */
	SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM1)),
		0,
	/*
	 *  Return to the previous DATA script which
	 *  is guaranteed by design (if no bug) to be
	 *  the main DATA script for this transfer.
	 */
	SCR_LOAD_REL (temp, 4),
		offsetof (struct sym_ccb, phys.pm1.ret),
	SCR_RETURN,
		0,
}/*---------------------------------------------------------*/
};

/*
 *  Second SCRIPTS array: less frequently used fragments that are
 *  referenced through PADDRH() — abort/error recovery, extended
 *  message handling, negotiation answers, data overrun accounting,
 *  phase mismatch contexts and SCR_DATA_ZERO-reserved data areas.
 */
static struct sym_scrh scripth0 = {
/*------------------------< START64 >-----------------------*/{
	/*
	 *  SCRIPT entry point for the 895A, 896 and 1010.
	 *  For now, there is no specific stuff for those
	 *  chips at this point, but this may come.
	 */
	SCR_JUMP,
		PADDR (init),
}/*-------------------------< NO_DATA >-------------------*/,{
	SCR_JUMP,
		PADDRH (data_ovrun),
}/*-----------------------< SEL_FOR_ABORT >------------------*/,{
	/*
	 *  We are jumped here by the C code, if we have
	 *  some target to reset or some disconnected
	 *  job to abort. Since error recovery is a serious
	 *  business, we will really reset the SCSI BUS, if
	 *  case of a SCSI interrupt occurring in this path.
	 */

	/*
	 *  Set initiator mode.
	 */
	SCR_CLR (SCR_TRG),
		0,
	/*
	 *  And try to select this target.
	 */
	SCR_SEL_TBL_ATN ^ offsetof (struct sym_hcb, abrt_sel),
		PADDR (reselect),
	/*
	 *  Wait for the selection to complete or
	 *  the selection to time out.
	 */
	SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_OUT)),
		-8,
	/*
	 *  Call the C code.
	 */
	SCR_INT,
		SIR_TARGET_SELECTED,
	/*
	 *  The C code should let us continue here.
	 *  Send the 'kiss of death' message.
	 *  We expect an immediate disconnect once
	 *  the target has eaten the message.
	 */
	SCR_REG_REG (scntl2, SCR_AND, 0x7f),
		0,
	SCR_MOVE_TBL ^ SCR_MSG_OUT,
		offsetof (struct sym_hcb, abrt_tbl),
	SCR_CLR (SCR_ACK|SCR_ATN),
		0,
	SCR_WAIT_DISC,
		0,
	/*
	 *  Tell the C code that we are done.
	 */
	SCR_INT,
		SIR_ABORT_SENT,
}/*-----------------------< SEL_FOR_ABORT_1 >--------------*/,{
	/*
	 *  Jump at scheduler.
	 */
	SCR_JUMP,
		PADDR (start),

}/*------------------------< SELECT_NO_ATN >-----------------*/,{
	/*
	 *  Set Initiator mode.
	 *  And try to select this target without ATN.
	 */
	SCR_CLR (SCR_TRG),
		0,
	SCR_SEL_TBL ^ offsetof (struct dsb, select),
		PADDR (ungetjob),
	/*
	 *  load the savep (saved pointer) into
	 *  the actual data pointer.
	 */
	SCR_LOAD_REL (temp, 4),
		offsetof (struct sym_ccb, phys.savep),
	/*
	 *  Initialize the status registers
	 */
	SCR_LOAD_REL (scr0, 4),
		offsetof (struct sym_ccb, phys.status),
}/*------------------------< WF_SEL_DONE_NO_ATN >-----------------*/,{
	/*
	 *  Wait immediately for the next phase or
	 *  the selection to complete or time-out.
	 */
	SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_OUT)),
		0,
	SCR_JUMP,
		PADDR (select2),
}/*-------------------------< MSG_IN_ETC >--------------------*/,{
	/*
	 *  If it is an EXTENDED (variable size message)
	 *  Handle it.
	 */
	SCR_JUMP ^ IFTRUE (DATA (M_EXTENDED)),
		PADDRH (msg_extended),
	/*
	 *  Let the C code handle any other
	 *  1 byte message.
	 */
	SCR_INT ^ IFTRUE (MASK (0x00, 0xf0)),
		SIR_MSG_RECEIVED,
	SCR_INT ^ IFTRUE (MASK (0x10, 0xf0)),
		SIR_MSG_RECEIVED,
	/*
	 *  We do not handle 2 bytes messages from SCRIPTS.
	 *  So, let the C code deal with these ones too.
	 */
	SCR_INT ^ IFFALSE (MASK (0x20, 0xf0)),
		SIR_MSG_WEIRD,
	SCR_CLR (SCR_ACK),
		0,
	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
		NADDR (msgin[1]),
	SCR_INT,
		SIR_MSG_RECEIVED,

}/*-------------------------< MSG_RECEIVED >--------------------*/,{
	SCR_LOAD_REL (scratcha, 4),	/* DUMMY READ */
		0,
	SCR_INT,
		SIR_MSG_RECEIVED,

}/*-------------------------< MSG_WEIRD_SEEN >------------------*/,{
	SCR_LOAD_REL (scratcha, 4),	/* DUMMY READ */
		0,
	SCR_INT,
		SIR_MSG_WEIRD,

}/*-------------------------< MSG_EXTENDED >--------------------*/,{
	/*
	 *  Clear ACK and get the next byte
	 *  assumed to be the message length.
	 */
	SCR_CLR (SCR_ACK),
		0,
	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
		NADDR (msgin[1]),
	/*
	 *  Try to catch some unlikely situations as 0 length
	 *  or too large the length.
	 */
	SCR_JUMP ^ IFTRUE (DATA (0)),
		PADDRH (msg_weird_seen),
	SCR_TO_REG (scratcha),
		0,
	SCR_REG_REG (sfbr, SCR_ADD, (256-8)),
		0,
	SCR_JUMP ^ IFTRUE (CARRYSET),
		PADDRH (msg_weird_seen),
	/*
	 *  We do not handle extended messages from SCRIPTS.
	 *  Read the amount of data corresponding to the
	 *  message length and call the C code.
	 */
	SCR_STORE_REL (scratcha, 1),
		offsetof (struct dsb, smsg_ext.size),
	SCR_CLR (SCR_ACK),
		0,
	SCR_MOVE_TBL ^ SCR_MSG_IN,
		offsetof (struct dsb, smsg_ext),
	SCR_JUMP,
		PADDRH (msg_received),

}/*-------------------------< MSG_BAD >------------------*/,{
	/*
	 *  unimplemented message - reject it.
	 */
	SCR_INT,
		SIR_REJECT_TO_SEND,
	SCR_SET (SCR_ATN),
		0,
	SCR_JUMP,
		PADDR (clrack),
}/*-------------------------< MSG_WEIRD >--------------------*/,{
	/*
	 *  weird message received
	 *  ignore all MSG IN phases and reject it.
	 */
	SCR_INT,
		SIR_REJECT_TO_SEND,
	SCR_SET (SCR_ATN),
		0,
}/*-------------------------< MSG_WEIRD1 >--------------------*/,{
	SCR_CLR (SCR_ACK),
		0,
	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
		PADDR (dispatch),
	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
		NADDR (scratch),
	SCR_JUMP,
		PADDRH (msg_weird1),
}/*-------------------------< WDTR_RESP >----------------*/,{
	/*
	 *  let the target fetch our answer.
	 */
	SCR_SET (SCR_ATN),
		0,
	SCR_CLR (SCR_ACK),
		0,
	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
		PADDRH (nego_bad_phase),
}/*-------------------------< SEND_WDTR >----------------*/,{
	/*
	 *  Send the M_X_WIDE_REQ
	 */
	SCR_MOVE_ABS (4) ^ SCR_MSG_OUT,
		NADDR (msgout),
	SCR_JUMP,
		PADDRH (msg_out_done),
}/*-------------------------< SDTR_RESP >-------------*/,{
	/*
	 *  let the target fetch our answer.
	 */
	SCR_SET (SCR_ATN),
		0,
	SCR_CLR (SCR_ACK),
		0,
	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
		PADDRH (nego_bad_phase),
}/*-------------------------< SEND_SDTR >-------------*/,{
	/*
	 *  Send the M_X_SYNC_REQ
	 */
	SCR_MOVE_ABS (5) ^ SCR_MSG_OUT,
		NADDR (msgout),
	SCR_JUMP,
		PADDRH (msg_out_done),
}/*-------------------------< PPR_RESP >-------------*/,{
	/*
	 *  let the target fetch our answer.
	 */
	SCR_SET (SCR_ATN),
		0,
	SCR_CLR (SCR_ACK),
		0,
	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
		PADDRH (nego_bad_phase),
}/*-------------------------< SEND_PPR >-------------*/,{
	/*
	 *  Send the M_X_PPR_REQ
	 */
	SCR_MOVE_ABS (8) ^ SCR_MSG_OUT,
		NADDR (msgout),
	SCR_JUMP,
		PADDRH (msg_out_done),
}/*-------------------------< NEGO_BAD_PHASE >------------*/,{
	SCR_INT,
		SIR_NEGO_PROTO,
	SCR_JUMP,
		PADDR (dispatch),
}/*-------------------------< MSG_OUT >-------------------*/,{
	/*
	 *  The target requests a message.
	 *  We do not send messages that may
	 *  require the device to go to bus free.
	 */
	SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
		NADDR (msgout),
	/*
	 *  ... wait for the next phase
	 *  if it's a message out, send it again, ...
	 */
	SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)),
		PADDRH (msg_out),
}/*-------------------------< MSG_OUT_DONE >--------------*/,{
	/*
	 *  Let the C code be aware of the
	 *  sent message and clear the message.
	 */
	SCR_INT,
		SIR_MSG_OUT_DONE,
	/*
	 *  ... and process the next phase
	 */
	SCR_JUMP,
		PADDR (dispatch),

}/*-------------------------< NO_DATA >--------------------*/,{
	/*
	 *  The target may want to transfer too much data.
	 *
	 *  If phase is DATA OUT write 1 byte and count it.
	 */
	SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_OUT)),
		16,
	SCR_CHMOV_ABS (1) ^ SCR_DATA_OUT,
		NADDR (scratch),
	SCR_JUMP,
		PADDRH (data_ovrun1),
	/*
	 *  If WSR is set, clear this condition, and
	 *  count this byte.
	 */
	SCR_FROM_REG (scntl2),
		0,
	SCR_JUMPR ^ IFFALSE (MASK (WSR, WSR)),
		16,
	SCR_REG_REG (scntl2, SCR_OR, WSR),
		0,
	SCR_JUMP,
		PADDRH (data_ovrun1),
	/*
	 *  Finally check against DATA IN phase.
	 *  Jump to dispatcher if not so.
	 *  Read 1 byte otherwise and count it.
	 */
	SCR_JUMP ^ IFFALSE (IF (SCR_DATA_IN)),
		PADDR (dispatch),
	SCR_CHMOV_ABS (1) ^ SCR_DATA_IN,
		NADDR (scratch),
}/*-------------------------< NO_DATA1 >--------------------*/,{
	/*
	 *  Set the extended error flag.
	 */
	SCR_REG_REG (HF_REG, SCR_OR, HF_EXT_ERR),
		0,
	SCR_LOAD_REL (scratcha, 1),
		offsetof (struct sym_ccb, xerr_status),
	SCR_REG_REG (scratcha, SCR_OR, XE_EXTRA_DATA),
		0,
	SCR_STORE_REL (scratcha, 1),
		offsetof (struct sym_ccb, xerr_status),
	/*
	 *  Count this byte.
	 *  This will allow to return a negative
	 *  residual to user.
	 */
	SCR_LOAD_REL (scratcha, 4),
		offsetof (struct sym_ccb, phys.extra_bytes),
	SCR_REG_REG (scratcha, SCR_ADD, 0x01),
		0,
	SCR_REG_REG (scratcha1, SCR_ADDC, 0),
		0,
	SCR_REG_REG (scratcha2, SCR_ADDC, 0),
		0,
	SCR_STORE_REL (scratcha, 4),
		offsetof (struct sym_ccb, phys.extra_bytes),
	/*
	 *  .. and repeat as required.
	 */
	SCR_JUMP,
		PADDRH (data_ovrun),

}/*-------------------------< ABORT_RESEL >----------------*/,{
	SCR_SET (SCR_ATN),
		0,
	SCR_CLR (SCR_ACK),
		0,
	/*
	 *  send the abort/abortag/reset message
	 *  we expect an immediate disconnect
	 */
	SCR_REG_REG (scntl2, SCR_AND, 0x7f),
		0,
	SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
		NADDR (msgout),
	SCR_CLR (SCR_ACK|SCR_ATN),
		0,
	SCR_WAIT_DISC,
		0,
	SCR_INT,
		SIR_RESEL_ABORTED,
	SCR_JUMP,
		PADDR (start),
}/*-------------------------< RESEND_IDENT >-------------------*/,{
	/*
	 *  The target stays in MSG OUT phase after having acked
	 *  Identify [+ Tag [+ Extended message ]]. Targets shall
	 *  behave this way on parity error.
	 *  We must send it again all the messages.
	 */
	SCR_SET (SCR_ATN), /* Shall be asserted 2 deskew delays before the */
		0,         /* 1st ACK = 90 ns. Hope the chip isn't too fast */
	SCR_JUMP,
		PADDR (send_ident),
}/*-------------------------< IDENT_BREAK >-------------------*/,{
	SCR_CLR (SCR_ATN),
		0,
	SCR_JUMP,
		PADDR (select2),
}/*-------------------------< IDENT_BREAK_ATN >----------------*/,{
	SCR_SET (SCR_ATN),
		0,
	SCR_JUMP,
		PADDR (select2),
}/*-------------------------< SDATA_IN >-------------------*/,{
	SCR_CHMOV_TBL ^ SCR_DATA_IN,
		offsetof (struct dsb, sense),
	SCR_CALL,
		PADDR (datai_done),
	SCR_JUMP,
		PADDRH (data_ovrun),

}/*-------------------------< RESEL_BAD_LUN >---------------*/,{
	/*
	 *  Message is an IDENTIFY, but lun is unknown.
	 *  Signal problem to C code for logging the event.
	 *  Send a M_ABORT to clear all pending tasks.
	 */
	SCR_INT,
		SIR_RESEL_BAD_LUN,
	SCR_JUMP,
		PADDRH (abort_resel),
}/*-------------------------< BAD_I_T_L >------------------*/,{
	/*
	 *  We do not have a task for that I_T_L.
	 *  Signal problem to C code for logging the event.
	 *  Send a M_ABORT message.
	 */
	SCR_INT,
		SIR_RESEL_BAD_I_T_L,
	SCR_JUMP,
		PADDRH (abort_resel),
}/*-------------------------< BAD_I_T_L_Q >----------------*/,{
	/*
	 *  We do not have a task that matches the tag.
	 *  Signal problem to C code for logging the event.
	 *  Send a M_ABORTTAG message.
	 */
	SCR_INT,
		SIR_RESEL_BAD_I_T_L_Q,
	SCR_JUMP,
		PADDRH (abort_resel),
}/*-------------------------< BAD_STATUS >-----------------*/,{
	/*
	 *  Anything different from INTERMEDIATE
	 *  CONDITION MET should be a bad SCSI status,
	 *  given that GOOD status has already been tested.
	 *  Call the C code.
	 */
	SCR_LOAD_ABS (scratcha, 4),
		PADDRH (startpos),
	SCR_INT ^ IFFALSE (DATA (S_COND_MET)),
		SIR_BAD_SCSI_STATUS,
	SCR_RETURN,
		0,

}/*-------------------------< PM_HANDLE >------------------*/,{
	/*
	 *  Phase mismatch handling.
	 *
	 *  Since we have to deal with 2 SCSI data pointers
	 *  (current and saved), we need at least 2 contexts.
	 *  Each context (pm0 and pm1) has a saved area, a
	 *  SAVE mini-script and a DATA phase mini-script.
	 */
	/*
	 *  Get the PM handling flags.
	 */
	SCR_FROM_REG (HF_REG),
		0,
	/*
	 *  If no flags (1st PM for example), avoid
	 *  all the below heavy flags testing.
	 *  This makes the normal case a bit faster.
	 */
	SCR_JUMP ^ IFTRUE (MASK (0, (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED))),
		PADDRH (pm_handle1),
	/*
	 *  If we received a SAVE DP, switch to the
	 *  other PM context since the savep may point
	 *  to the current PM context.
	 */
	SCR_JUMPR ^ IFFALSE (MASK (HF_DP_SAVED, HF_DP_SAVED)),
		8,
	SCR_REG_REG (sfbr, SCR_XOR, HF_ACT_PM),
		0,
	/*
	 *  If we have been interrupt in a PM DATA mini-script,
	 *  we take the return address from the corresponding
	 *  saved area.
	 *  This ensure the return address always points to the
	 *  main DATA script for this transfer.
	 */
	SCR_JUMP ^ IFTRUE (MASK (0, (HF_IN_PM0 | HF_IN_PM1))),
		PADDRH (pm_handle1),
	SCR_JUMPR ^ IFFALSE (MASK (HF_IN_PM0, HF_IN_PM0)),
		16,
	SCR_LOAD_REL (ia, 4),
		offsetof(struct sym_ccb, phys.pm0.ret),
	SCR_JUMP,
		PADDRH (pm_save),
	SCR_LOAD_REL (ia, 4),
		offsetof(struct sym_ccb, phys.pm1.ret),
	SCR_JUMP,
		PADDRH (pm_save),
}/*-------------------------< PM_HANDLE1 >-----------------*/,{
	/*
	 *  Normal case.
	 *  Update the return address so that it
	 *  will point after the interrupted MOVE.
	 */
	SCR_REG_REG (ia, SCR_ADD, 8),
		0,
	SCR_REG_REG (ia1, SCR_ADDC, 0),
		0,
}/*-------------------------< PM_SAVE >--------------------*/,{
	/*
	 *  Clear all the flags that told us if we were
	 *  interrupted in a PM DATA mini-script and/or
	 *  we received a SAVE DP.
	 */
	SCR_SFBR_REG (HF_REG, SCR_AND, (~(HF_IN_PM0|HF_IN_PM1|HF_DP_SAVED))),
		0,
	/*
	 *  Choose the current PM context.
	 */
	SCR_JUMP ^ IFTRUE (MASK (HF_ACT_PM, HF_ACT_PM)),
		PADDRH (pm1_save),
}/*-------------------------< PM0_SAVE >-------------------*/,{
	SCR_STORE_REL (ia, 4),
		offsetof(struct sym_ccb, phys.pm0.ret),
	/*
	 *  If WSR bit is set, either UA and RBC may
	 *  have to be changed whether the device wants
	 *  to ignore this residue or not.
	 */
	SCR_FROM_REG (scntl2),
		0,
	SCR_CALL ^ IFTRUE (MASK (WSR, WSR)),
		PADDRH (pm_wsr_handle),
	/*
	 *  Save the remaining byte count, the updated
	 *  address and the return address.
	 */
	SCR_STORE_REL (rbc, 4),
		offsetof(struct sym_ccb, phys.pm0.sg.size),
	SCR_STORE_REL (ua, 4),
		offsetof(struct sym_ccb, phys.pm0.sg.addr),
	/*
	 *  Set the current pointer at the PM0 DATA mini-script.
	 */
	SCR_LOAD_ABS (temp, 4),
		PADDRH (pm0_data_addr),
	SCR_JUMP,
		PADDR (dispatch),
}/*-------------------------< PM1_SAVE >-------------------*/,{
	SCR_STORE_REL (ia, 4),
		offsetof(struct sym_ccb, phys.pm1.ret),
	/*
	 *  If WSR bit is set, either UA and RBC may
	 *  have to be changed whether the device wants
	 *  to ignore this residue or not.
	 */
	SCR_FROM_REG (scntl2),
		0,
	SCR_CALL ^ IFTRUE (MASK (WSR, WSR)),
		PADDRH (pm_wsr_handle),
	/*
	 *  Save the remaining byte count, the updated
	 *  address and the return address.
	 */
	SCR_STORE_REL (rbc, 4),
		offsetof(struct sym_ccb, phys.pm1.sg.size),
	SCR_STORE_REL (ua, 4),
		offsetof(struct sym_ccb, phys.pm1.sg.addr),
	/*
	 *  Set the current pointer at the PM1 DATA mini-script.
	 */
	SCR_LOAD_ABS (temp, 4),
		PADDRH (pm1_data_addr),
	SCR_JUMP,
		PADDR (dispatch),

}/*--------------------------< PM_WSR_HANDLE >-----------------------*/,{
	/*
	 *  Phase mismatch handling from SCRIPT with WSR set.
	 *  Such a condition can occur if the chip wants to
	 *  execute a CHMOV(size > 1) when the WSR bit is
	 *  set and the target changes PHASE.
	 */
#ifdef	SYM_DEBUG_PM_WITH_WSR
	/*
	 *  Some debugging may still be needed.:)
	 */
	SCR_INT,
		SIR_PM_WITH_WSR,
#endif
	/*
	 *  We must move the residual byte to memory.
	 *
	 *  UA contains bit 0..31 of the address to
	 *  move the residual byte.
	 *  Move it to the table indirect.
	 */
	SCR_STORE_REL (ua, 4),
		offsetof (struct sym_ccb, phys.wresid.addr),
	/*
	 *  Increment UA (move address to next position).
	 */
	SCR_REG_REG (ua, SCR_ADD, 1),
		0,
	SCR_REG_REG (ua1, SCR_ADDC, 0),
		0,
	SCR_REG_REG (ua2, SCR_ADDC, 0),
		0,
	SCR_REG_REG (ua3, SCR_ADDC, 0),
		0,
	/*
	 *  Compute SCRATCHA as:
	 *  - size to transfer = 1 byte.
	 *  - bit 24..31 = high address bit [32...39].
	 */
	SCR_LOAD_ABS (scratcha, 4),
		PADDRH (zero),
	SCR_REG_REG (scratcha, SCR_OR, 1),
		0,
	SCR_FROM_REG (rbc3),
		0,
	SCR_TO_REG (scratcha3),
		0,
	/*
	 *  Move this value to the table indirect.
	 */
	SCR_STORE_REL (scratcha, 4),
		offsetof (struct sym_ccb, phys.wresid.size),
	/*
	 *  Wait for a valid phase.
	 *  While testing with bogus QUANTUM drives, the C1010
	 *  sometimes raised a spurious phase mismatch with
	 *  WSR and the CHMOV(1) triggered another PM.
	 *  Waiting explicitly for the PHASE seemed to avoid
	 *  the nested phase mismatch. Btw, this didn't happen
	 *  using my IBM drives.
	 */
	SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_IN)),
		0,
	/*
	 *  Perform the move of the residual byte.
	 */
	SCR_CHMOV_TBL ^ SCR_DATA_IN,
		offsetof (struct sym_ccb, phys.wresid),
	/*
	 *  We can now handle the phase mismatch with UA fixed.
	 *  RBC[0..23]=0 is a special case that does not require
	 *  a PM context. The C code also checks against this.
	 */
	SCR_FROM_REG (rbc),
		0,
	SCR_RETURN ^ IFFALSE (DATA (0)),
		0,
	SCR_FROM_REG (rbc1),
		0,
	SCR_RETURN ^ IFFALSE (DATA (0)),
		0,
	SCR_FROM_REG (rbc2),
		0,
	SCR_RETURN ^ IFFALSE (DATA (0)),
		0,
	/*
	 *  RBC[0..23]=0.
	 *  Not only we do not need a PM context, but this would
	 *  lead to a bogus CHMOV(0). This condition means that
	 *  the residual was the last byte to move from this CHMOV.
	 *  So, we just have to move the current data script pointer
	 *  (i.e. TEMP) to the SCRIPTS address following the
	 *  interrupted CHMOV and jump to dispatcher.
	 */
	SCR_STORE_ABS (ia, 4),
		PADDRH (scratch),
	SCR_LOAD_ABS (temp, 4),
		PADDRH (scratch),
	SCR_JUMP,
		PADDR (dispatch),
}/*--------------------------< WSR_MA_HELPER >-----------------------*/,{
	/*
	 *  Helper for the C code when WSR bit is set.
	 *  Perform the move of the residual byte.
	 */
	SCR_CHMOV_TBL ^ SCR_DATA_IN,
		offsetof (struct sym_ccb, phys.wresid),
	SCR_JUMP,
		PADDR (dispatch),

}/*-------------------------< ZERO >------------------------*/,{
	SCR_DATA_ZERO,
}/*-------------------------< SCRATCH >---------------------*/,{
	SCR_DATA_ZERO,
}/*-------------------------< PM0_DATA_ADDR >---------------*/,{
	SCR_DATA_ZERO,
}/*-------------------------< PM1_DATA_ADDR >---------------*/,{
	SCR_DATA_ZERO,
}/*-------------------------< SAVED_DSA >-------------------*/,{
	SCR_DATA_ZERO,
}/*-------------------------< SAVED_DRS >-------------------*/,{
	SCR_DATA_ZERO,
}/*-------------------------< DONE_POS >--------------------*/,{
	SCR_DATA_ZERO,
}/*-------------------------< STARTPOS >--------------------*/,{
	SCR_DATA_ZERO,
}/*-------------------------< TARGTBL >---------------------*/,{
	SCR_DATA_ZERO,

}/*-------------------------< SNOOPTEST >-------------------*/,{
	/*
	 *  Read the variable.
	 */
	SCR_LOAD_REL (scratcha, 4),
		offsetof(struct sym_hcb, cache),
	SCR_STORE_REL (temp, 4),
		offsetof(struct sym_hcb, cache),
	SCR_LOAD_REL (temp, 4),
		offsetof(struct sym_hcb, cache),
}/*-------------------------< SNOOPEND >-------------------*/,{
	/*
	 *  And stop.
3378 */ 3379 SCR_INT, 3380 99, 3381 }/*--------------------------------------------------------*/ 3382 }; 3383 3384 /* 3385 * Fill in #define dependent parts of the scripts 3386 */ 3387 static void sym_fill_scripts (script_p scr, scripth_p scrh) 3388 { 3389 int i; 3390 u32 *p; 3391 3392 p = scr->data_in; 3393 for (i=0; i<SYM_CONF_MAX_SG; i++) { 3394 *p++ =SCR_CHMOV_TBL ^ SCR_DATA_IN; 3395 *p++ =offsetof (struct dsb, data[i]); 3396 }; 3397 assert ((u_long)p == (u_long)&scr->data_in + sizeof (scr->data_in)); 3398 3399 p = scr->data_out; 3400 for (i=0; i<SYM_CONF_MAX_SG; i++) { 3401 *p++ =SCR_CHMOV_TBL ^ SCR_DATA_OUT; 3402 *p++ =offsetof (struct dsb, data[i]); 3403 }; 3404 assert ((u_long)p == (u_long)&scr->data_out + sizeof (scr->data_out)); 3405 } 3406 3407 /* 3408 * Copy and bind a script. 3409 */ 3410 static void sym_bind_script (hcb_p np, u32 *src, u32 *dst, int len) 3411 { 3412 u32 opcode, new, old, tmp1, tmp2; 3413 u32 *start, *end; 3414 int relocs; 3415 int opchanged = 0; 3416 3417 start = src; 3418 end = src + len/4; 3419 3420 while (src < end) { 3421 3422 opcode = *src++; 3423 *dst++ = cpu_to_scr(opcode); 3424 3425 /* 3426 * If we forget to change the length 3427 * in scripts, a field will be 3428 * padded with 0. This is an illegal 3429 * command. 3430 */ 3431 if (opcode == 0) { 3432 printf ("%s: ERROR0 IN SCRIPT at %d.\n", 3433 sym_name(np), (int) (src-start-1)); 3434 MDELAY (10000); 3435 continue; 3436 }; 3437 3438 /* 3439 * We use the bogus value 0xf00ff00f ;-) 3440 * to reserve data area in SCRIPTS. 3441 */ 3442 if (opcode == SCR_DATA_ZERO) { 3443 dst[-1] = 0; 3444 continue; 3445 } 3446 3447 if (DEBUG_FLAGS & DEBUG_SCRIPT) 3448 printf ("%p: <%x>\n", (src-1), (unsigned)opcode); 3449 3450 /* 3451 * We don't have to decode ALL commands 3452 */ 3453 switch (opcode >> 28) { 3454 case 0xf: 3455 /* 3456 * LOAD / STORE DSA relative, don't relocate. 3457 */ 3458 relocs = 0; 3459 break; 3460 case 0xe: 3461 /* 3462 * LOAD / STORE absolute. 
3463 */ 3464 relocs = 1; 3465 break; 3466 case 0xc: 3467 /* 3468 * COPY has TWO arguments. 3469 */ 3470 relocs = 2; 3471 tmp1 = src[0]; 3472 tmp2 = src[1]; 3473 #ifdef RELOC_KVAR 3474 if ((tmp1 & RELOC_MASK) == RELOC_KVAR) 3475 tmp1 = 0; 3476 if ((tmp2 & RELOC_MASK) == RELOC_KVAR) 3477 tmp2 = 0; 3478 #endif 3479 if ((tmp1 ^ tmp2) & 3) { 3480 printf ("%s: ERROR1 IN SCRIPT at %d.\n", 3481 sym_name(np), (int) (src-start-1)); 3482 MDELAY (1000); 3483 } 3484 /* 3485 * If PREFETCH feature not enabled, remove 3486 * the NO FLUSH bit if present. 3487 */ 3488 if ((opcode & SCR_NO_FLUSH) && 3489 !(np->features & FE_PFEN)) { 3490 dst[-1] = cpu_to_scr(opcode & ~SCR_NO_FLUSH); 3491 ++opchanged; 3492 } 3493 break; 3494 case 0x0: 3495 /* 3496 * MOVE/CHMOV (absolute address) 3497 */ 3498 if (!(np->features & FE_WIDE)) 3499 dst[-1] = cpu_to_scr(opcode | OPC_MOVE); 3500 relocs = 1; 3501 break; 3502 case 0x1: 3503 /* 3504 * MOVE/CHMOV (table indirect) 3505 */ 3506 if (!(np->features & FE_WIDE)) 3507 dst[-1] = cpu_to_scr(opcode | OPC_MOVE); 3508 relocs = 0; 3509 break; 3510 case 0x8: 3511 /* 3512 * JUMP / CALL 3513 * dont't relocate if relative :-) 3514 */ 3515 if (opcode & 0x00800000) 3516 relocs = 0; 3517 else if ((opcode & 0xf8400000) == 0x80400000)/*JUMP64*/ 3518 relocs = 2; 3519 else 3520 relocs = 1; 3521 break; 3522 case 0x4: 3523 case 0x5: 3524 case 0x6: 3525 case 0x7: 3526 relocs = 1; 3527 break; 3528 default: 3529 relocs = 0; 3530 break; 3531 }; 3532 3533 if (!relocs) { 3534 *dst++ = cpu_to_scr(*src++); 3535 continue; 3536 } 3537 while (relocs--) { 3538 old = *src++; 3539 3540 switch (old & RELOC_MASK) { 3541 case RELOC_REGISTER: 3542 new = (old & ~RELOC_MASK) + np->mmio_ba; 3543 break; 3544 case RELOC_LABEL: 3545 new = (old & ~RELOC_MASK) + np->script_ba; 3546 break; 3547 case RELOC_LABELH: 3548 new = (old & ~RELOC_MASK) + np->scripth_ba; 3549 break; 3550 case RELOC_SOFTC: 3551 new = (old & ~RELOC_MASK) + vtobus(np); 3552 break; 3553 #ifdef RELOC_KVAR 3554 case RELOC_KVAR: 
3555 if (((old & ~RELOC_MASK) < SCRIPT_KVAR_FIRST) || 3556 ((old & ~RELOC_MASK) > SCRIPT_KVAR_LAST)) 3557 panic("KVAR out of range"); 3558 new = vtobus(script_kvars[old & ~RELOC_MASK]); 3559 #endif 3560 break; 3561 case 0: 3562 /* Don't relocate a 0 address. */ 3563 if (old == 0) { 3564 new = old; 3565 break; 3566 } 3567 /* fall through */ 3568 default: 3569 new = 0; /* For 'cc' not to complain */ 3570 panic("sym_bind_script: " 3571 "weird relocation %x\n", old); 3572 break; 3573 } 3574 3575 *dst++ = cpu_to_scr(new); 3576 } 3577 }; 3578 } 3579 3580 /* 3581 * Print something which allows to retrieve the controler type, 3582 * unit, target, lun concerned by a kernel message. 3583 */ 3584 static void PRINT_TARGET (hcb_p np, int target) 3585 { 3586 printf ("%s:%d:", sym_name(np), target); 3587 } 3588 3589 static void PRINT_LUN(hcb_p np, int target, int lun) 3590 { 3591 printf ("%s:%d:%d:", sym_name(np), target, lun); 3592 } 3593 3594 static void PRINT_ADDR (ccb_p cp) 3595 { 3596 if (cp && cp->cam_ccb) 3597 xpt_print_path(cp->cam_ccb->ccb_h.path); 3598 } 3599 3600 /* 3601 * Take into account this ccb in the freeze count. 3602 * The flag that tells user about avoids doing that 3603 * more than once for a ccb. 3604 */ 3605 static void sym_freeze_cam_ccb(union ccb *ccb) 3606 { 3607 if (!(ccb->ccb_h.flags & CAM_DEV_QFRZDIS)) { 3608 if (!(ccb->ccb_h.status & CAM_DEV_QFRZN)) { 3609 ccb->ccb_h.status |= CAM_DEV_QFRZN; 3610 xpt_freeze_devq(ccb->ccb_h.path, 1); 3611 } 3612 } 3613 } 3614 3615 /* 3616 * Set the status field of a CAM CCB. 3617 */ 3618 static __inline void sym_set_cam_status(union ccb *ccb, cam_status status) 3619 { 3620 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 3621 ccb->ccb_h.status |= status; 3622 } 3623 3624 /* 3625 * Get the status field of a CAM CCB. 3626 */ 3627 static __inline int sym_get_cam_status(union ccb *ccb) 3628 { 3629 return ccb->ccb_h.status & CAM_STATUS_MASK; 3630 } 3631 3632 /* 3633 * Enqueue a CAM CCB. 
3634 */ 3635 static void sym_enqueue_cam_ccb(hcb_p np, union ccb *ccb) 3636 { 3637 assert(!(ccb->ccb_h.status & CAM_SIM_QUEUED)); 3638 ccb->ccb_h.status = CAM_REQ_INPROG; 3639 3640 ccb->ccb_h.timeout_ch = timeout(sym_timeout, (caddr_t) ccb, 3641 ccb->ccb_h.timeout*hz/1000); 3642 ccb->ccb_h.status |= CAM_SIM_QUEUED; 3643 ccb->ccb_h.sym_hcb_ptr = np; 3644 3645 sym_insque_tail(sym_qptr(&ccb->ccb_h.sim_links), &np->cam_ccbq); 3646 } 3647 3648 /* 3649 * Complete a pending CAM CCB. 3650 */ 3651 static void sym_xpt_done(hcb_p np, union ccb *ccb) 3652 { 3653 if (ccb->ccb_h.status & CAM_SIM_QUEUED) { 3654 untimeout(sym_timeout, (caddr_t) ccb, ccb->ccb_h.timeout_ch); 3655 sym_remque(sym_qptr(&ccb->ccb_h.sim_links)); 3656 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3657 ccb->ccb_h.sym_hcb_ptr = 0; 3658 } 3659 if (ccb->ccb_h.flags & CAM_DEV_QFREEZE) 3660 sym_freeze_cam_ccb(ccb); 3661 xpt_done(ccb); 3662 } 3663 3664 static void sym_xpt_done2(hcb_p np, union ccb *ccb, int cam_status) 3665 { 3666 sym_set_cam_status(ccb, cam_status); 3667 sym_xpt_done(np, ccb); 3668 } 3669 3670 /* 3671 * SYMBIOS chip clock divisor table. 3672 * 3673 * Divisors are multiplied by 10,000,000 in order to make 3674 * calculations more simple. 3675 */ 3676 #define _5M 5000000 3677 static u_long div_10M[] = 3678 {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M}; 3679 3680 /* 3681 * SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64, 3682 * 128 transfers. All chips support at least 16 transfers 3683 * bursts. The 825A, 875 and 895 chips support bursts of up 3684 * to 128 transfers and the 895A and 896 support bursts of up 3685 * to 64 transfers. All other chips support up to 16 3686 * transfers bursts. 3687 * 3688 * For PCI 32 bit data transfers each transfer is a DWORD. 3689 * It is a QUADWORD (8 bytes) for PCI 64 bit data transfers. 3690 * Only the 896 is able to perform 64 bit data transfers. 
3691 * 3692 * We use log base 2 (burst length) as internal code, with 3693 * value 0 meaning "burst disabled". 3694 */ 3695 3696 /* 3697 * Burst length from burst code. 3698 */ 3699 #define burst_length(bc) (!(bc))? 0 : 1 << (bc) 3700 3701 /* 3702 * Burst code from io register bits. 3703 */ 3704 #define burst_code(dmode, ctest4, ctest5) \ 3705 (ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1 3706 3707 /* 3708 * Set initial io register bits from burst code. 3709 */ 3710 static __inline void sym_init_burst(hcb_p np, u_char bc) 3711 { 3712 np->rv_ctest4 &= ~0x80; 3713 np->rv_dmode &= ~(0x3 << 6); 3714 np->rv_ctest5 &= ~0x4; 3715 3716 if (!bc) { 3717 np->rv_ctest4 |= 0x80; 3718 } 3719 else { 3720 --bc; 3721 np->rv_dmode |= ((bc & 0x3) << 6); 3722 np->rv_ctest5 |= (bc & 0x4); 3723 } 3724 } 3725 3726 3727 /* 3728 * Print out the list of targets that have some flag disabled by user. 3729 */ 3730 static void sym_print_targets_flag(hcb_p np, int mask, char *msg) 3731 { 3732 int cnt; 3733 int i; 3734 3735 for (cnt = 0, i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { 3736 if (i == np->myaddr) 3737 continue; 3738 if (np->target[i].usrflags & mask) { 3739 if (!cnt++) 3740 printf("%s: %s disabled for targets", 3741 sym_name(np), msg); 3742 printf(" %d", i); 3743 } 3744 } 3745 if (cnt) 3746 printf(".\n"); 3747 } 3748 3749 /* 3750 * Save initial settings of some IO registers. 3751 * Assumed to have been set by BIOS. 3752 * We cannot reset the chip prior to reading the 3753 * IO registers, since informations will be lost. 3754 * Since the SCRIPTS processor may be running, this 3755 * is not safe on paper, but it seems to work quite 3756 * well. 
:) 3757 */ 3758 static void sym_save_initial_setting (hcb_p np) 3759 { 3760 np->sv_scntl0 = INB(nc_scntl0) & 0x0a; 3761 np->sv_scntl3 = INB(nc_scntl3) & 0x07; 3762 np->sv_dmode = INB(nc_dmode) & 0xce; 3763 np->sv_dcntl = INB(nc_dcntl) & 0xa8; 3764 np->sv_ctest3 = INB(nc_ctest3) & 0x01; 3765 np->sv_ctest4 = INB(nc_ctest4) & 0x80; 3766 np->sv_gpcntl = INB(nc_gpcntl); 3767 np->sv_stest1 = INB(nc_stest1); 3768 np->sv_stest2 = INB(nc_stest2) & 0x20; 3769 np->sv_stest4 = INB(nc_stest4); 3770 if (np->features & FE_C10) { /* Always large DMA fifo + ultra3 */ 3771 np->sv_scntl4 = INB(nc_scntl4); 3772 np->sv_ctest5 = INB(nc_ctest5) & 0x04; 3773 } 3774 else 3775 np->sv_ctest5 = INB(nc_ctest5) & 0x24; 3776 } 3777 3778 /* 3779 * Prepare io register values used by sym_init() according 3780 * to selected and supported features. 3781 */ 3782 static int sym_prepare_setting(hcb_p np, struct sym_nvram *nvram) 3783 { 3784 u_char burst_max; 3785 u_long period; 3786 int i; 3787 3788 /* 3789 * Wide ? 3790 */ 3791 np->maxwide = (np->features & FE_WIDE)? 1 : 0; 3792 3793 /* 3794 * Get the frequency of the chip's clock. 3795 */ 3796 if (np->features & FE_QUAD) 3797 np->multiplier = 4; 3798 else if (np->features & FE_DBLR) 3799 np->multiplier = 2; 3800 else 3801 np->multiplier = 1; 3802 3803 np->clock_khz = (np->features & FE_CLK80)? 80000 : 40000; 3804 np->clock_khz *= np->multiplier; 3805 3806 if (np->clock_khz != 40000) 3807 sym_getclock(np, np->multiplier); 3808 3809 /* 3810 * Divisor to be used for async (timer pre-scaler). 3811 */ 3812 i = np->clock_divn - 1; 3813 while (--i >= 0) { 3814 if (10ul * SYM_CONF_MIN_ASYNC * np->clock_khz > div_10M[i]) { 3815 ++i; 3816 break; 3817 } 3818 } 3819 np->rv_scntl3 = i+1; 3820 3821 /* 3822 * The C1010 uses hardwired divisors for async. 3823 * So, we just throw away, the async. divisor.:-) 3824 */ 3825 if (np->features & FE_C10) 3826 np->rv_scntl3 = 0; 3827 3828 /* 3829 * Minimum synchronous period factor supported by the chip. 
3830 * Btw, 'period' is in tenths of nanoseconds. 3831 */ 3832 period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz; 3833 if (period <= 250) np->minsync = 10; 3834 else if (period <= 303) np->minsync = 11; 3835 else if (period <= 500) np->minsync = 12; 3836 else np->minsync = (period + 40 - 1) / 40; 3837 3838 /* 3839 * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2). 3840 */ 3841 if (np->minsync < 25 && 3842 !(np->features & (FE_ULTRA|FE_ULTRA2|FE_ULTRA3))) 3843 np->minsync = 25; 3844 else if (np->minsync < 12 && 3845 !(np->features & (FE_ULTRA2|FE_ULTRA3))) 3846 np->minsync = 12; 3847 3848 /* 3849 * Maximum synchronous period factor supported by the chip. 3850 */ 3851 period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz); 3852 np->maxsync = period > 2540 ? 254 : period / 10; 3853 3854 /* 3855 * If chip is a C1010, guess the sync limits in DT mode. 3856 */ 3857 if ((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) { 3858 if (np->clock_khz == 160000) { 3859 np->minsync_dt = 9; 3860 np->maxsync_dt = 50; 3861 } 3862 } 3863 3864 /* 3865 * 64 bit (53C895A or 53C896) ? 3866 */ 3867 if (np->features & FE_64BIT) 3868 #if BITS_PER_LONG > 32 3869 np->rv_ccntl1 |= (XTIMOD | EXTIBMV); 3870 #else 3871 np->rv_ccntl1 |= (DDAC); 3872 #endif 3873 3874 /* 3875 * Phase mismatch handled by SCRIPTS (895A/896/1010) ? 3876 */ 3877 if (np->features & FE_NOPM) 3878 np->rv_ccntl0 |= (ENPMJ); 3879 3880 /* 3881 * C1010 Errata. 3882 * In dual channel mode, contention occurs if internal cycles 3883 * are used. Disable internal cycles. 
3884 */ 3885 if (np->device_id == PCI_ID_LSI53C1010 && np->revision_id < 0x45) 3886 np->rv_ccntl0 |= DILS; 3887 3888 /* 3889 * Select burst length (dwords) 3890 */ 3891 burst_max = SYM_SETUP_BURST_ORDER; 3892 if (burst_max == 255) 3893 burst_max = burst_code(np->sv_dmode, np->sv_ctest4, 3894 np->sv_ctest5); 3895 if (burst_max > 7) 3896 burst_max = 7; 3897 if (burst_max > np->maxburst) 3898 burst_max = np->maxburst; 3899 3900 /* 3901 * DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2. 3902 * This chip and the 860 Rev 1 may wrongly use PCI cache line 3903 * based transactions on LOAD/STORE instructions. So we have 3904 * to prevent these chips from using such PCI transactions in 3905 * this driver. The generic ncr driver that does not use 3906 * LOAD/STORE instructions does not need this work-around. 3907 */ 3908 if ((np->device_id == PCI_ID_SYM53C810 && 3909 np->revision_id >= 0x10 && np->revision_id <= 0x11) || 3910 (np->device_id == PCI_ID_SYM53C860 && 3911 np->revision_id <= 0x1)) 3912 np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP); 3913 3914 /* 3915 * Select all supported special features. 3916 * If we are using on-board RAM for scripts, prefetch (PFEN) 3917 * does not help, but burst op fetch (BOF) does. 3918 * Disabling PFEN makes sure BOF will be used. 
3919 */ 3920 if (np->features & FE_ERL) 3921 np->rv_dmode |= ERL; /* Enable Read Line */ 3922 if (np->features & FE_BOF) 3923 np->rv_dmode |= BOF; /* Burst Opcode Fetch */ 3924 if (np->features & FE_ERMP) 3925 np->rv_dmode |= ERMP; /* Enable Read Multiple */ 3926 #if 1 3927 if ((np->features & FE_PFEN) && !np->ram_ba) 3928 #else 3929 if (np->features & FE_PFEN) 3930 #endif 3931 np->rv_dcntl |= PFEN; /* Prefetch Enable */ 3932 if (np->features & FE_CLSE) 3933 np->rv_dcntl |= CLSE; /* Cache Line Size Enable */ 3934 if (np->features & FE_WRIE) 3935 np->rv_ctest3 |= WRIE; /* Write and Invalidate */ 3936 if (np->features & FE_DFS) 3937 np->rv_ctest5 |= DFS; /* Dma Fifo Size */ 3938 3939 /* 3940 * Select some other 3941 */ 3942 if (SYM_SETUP_PCI_PARITY) 3943 np->rv_ctest4 |= MPEE; /* Master parity checking */ 3944 if (SYM_SETUP_SCSI_PARITY) 3945 np->rv_scntl0 |= 0x0a; /* full arb., ena parity, par->ATN */ 3946 3947 /* 3948 * Get parity checking, host ID and verbose mode from NVRAM 3949 */ 3950 np->myaddr = 255; 3951 sym_nvram_setup_host (np, nvram); 3952 3953 /* 3954 * Get SCSI addr of host adapter (set by bios?). 3955 */ 3956 if (np->myaddr == 255) { 3957 np->myaddr = INB(nc_scid) & 0x07; 3958 if (!np->myaddr) 3959 np->myaddr = SYM_SETUP_HOST_ID; 3960 } 3961 3962 /* 3963 * Prepare initial io register bits for burst length 3964 */ 3965 sym_init_burst(np, burst_max); 3966 3967 /* 3968 * Set SCSI BUS mode. 3969 * - LVD capable chips (895/895A/896/1010) report the 3970 * current BUS mode through the STEST4 IO register. 3971 * - For previous generation chips (825/825A/875), 3972 * user has to tell us how to check against HVD, 3973 * since a 100% safe algorithm is not possible. 
3974 */ 3975 np->scsi_mode = SMODE_SE; 3976 if (np->features & (FE_ULTRA2|FE_ULTRA3)) 3977 np->scsi_mode = (np->sv_stest4 & SMODE); 3978 else if (np->features & FE_DIFF) { 3979 if (SYM_SETUP_SCSI_DIFF == 1) { 3980 if (np->sv_scntl3) { 3981 if (np->sv_stest2 & 0x20) 3982 np->scsi_mode = SMODE_HVD; 3983 } 3984 else if (nvram->type == SYM_SYMBIOS_NVRAM) { 3985 if (INB(nc_gpreg) & 0x08) 3986 np->scsi_mode = SMODE_HVD; 3987 } 3988 } 3989 else if (SYM_SETUP_SCSI_DIFF == 2) 3990 np->scsi_mode = SMODE_HVD; 3991 } 3992 if (np->scsi_mode == SMODE_HVD) 3993 np->rv_stest2 |= 0x20; 3994 3995 /* 3996 * Set LED support from SCRIPTS. 3997 * Ignore this feature for boards known to use a 3998 * specific GPIO wiring and for the 895A or 896 3999 * that drive the LED directly. 4000 */ 4001 if ((SYM_SETUP_SCSI_LED || nvram->type == SYM_SYMBIOS_NVRAM) && 4002 !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01)) 4003 np->features |= FE_LED0; 4004 4005 /* 4006 * Set irq mode. 4007 */ 4008 switch(SYM_SETUP_IRQ_MODE & 3) { 4009 case 2: 4010 np->rv_dcntl |= IRQM; 4011 break; 4012 case 1: 4013 np->rv_dcntl |= (np->sv_dcntl & IRQM); 4014 break; 4015 default: 4016 break; 4017 } 4018 4019 /* 4020 * Configure targets according to driver setup. 4021 * If NVRAM present get targets setup from NVRAM. 4022 */ 4023 for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { 4024 tcb_p tp = &np->target[i]; 4025 4026 tp->tinfo.user.period = np->minsync; 4027 tp->tinfo.user.offset = np->maxoffs; 4028 tp->tinfo.user.width = np->maxwide ? BUS_16_BIT : BUS_8_BIT; 4029 tp->usrflags |= (SYM_DISC_ENABLED | SYM_TAGS_ENABLED); 4030 tp->usrtags = SYM_SETUP_MAX_TAG; 4031 4032 sym_nvram_setup_target (np, i, nvram); 4033 4034 if (!tp->usrtags) 4035 tp->usrflags &= ~SYM_TAGS_ENABLED; 4036 } 4037 4038 /* 4039 * Let user know about the settings. 4040 */ 4041 i = nvram->type; 4042 printf("%s: %s NVRAM, ID %d, Fast-%d, %s, %s\n", sym_name(np), 4043 i == SYM_SYMBIOS_NVRAM ? "Symbios" : 4044 (i == SYM_TEKRAM_NVRAM ? 
"Tekram" : "No"), 4045 np->myaddr, 4046 (np->features & FE_ULTRA3) ? 80 : 4047 (np->features & FE_ULTRA2) ? 40 : 4048 (np->features & FE_ULTRA) ? 20 : 10, 4049 sym_scsi_bus_mode(np->scsi_mode), 4050 (np->rv_scntl0 & 0xa) ? "parity checking" : "NO parity"); 4051 /* 4052 * Tell him more on demand. 4053 */ 4054 if (sym_verbose) { 4055 printf("%s: %s IRQ line driver%s\n", 4056 sym_name(np), 4057 np->rv_dcntl & IRQM ? "totem pole" : "open drain", 4058 np->ram_ba ? ", using on-chip SRAM" : ""); 4059 if (np->features & FE_NOPM) 4060 printf("%s: handling phase mismatch from SCRIPTS.\n", 4061 sym_name(np)); 4062 } 4063 /* 4064 * And still more. 4065 */ 4066 if (sym_verbose > 1) { 4067 printf ("%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " 4068 "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", 4069 sym_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl, 4070 np->sv_ctest3, np->sv_ctest4, np->sv_ctest5); 4071 4072 printf ("%s: final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " 4073 "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", 4074 sym_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl, 4075 np->rv_ctest3, np->rv_ctest4, np->rv_ctest5); 4076 } 4077 /* 4078 * Let user be aware of targets that have some disable flags set. 4079 */ 4080 sym_print_targets_flag(np, SYM_SCAN_BOOT_DISABLED, "SCAN AT BOOT"); 4081 if (sym_verbose) 4082 sym_print_targets_flag(np, SYM_SCAN_LUNS_DISABLED, 4083 "SCAN FOR LUNS"); 4084 4085 return 0; 4086 } 4087 4088 /* 4089 * Prepare the next negotiation message if needed. 4090 * 4091 * Fill in the part of message buffer that contains the 4092 * negotiation and the nego_status field of the CCB. 4093 * Returns the size of the message in bytes. 4094 */ 4095 4096 static int sym_prepare_nego(hcb_p np, ccb_p cp, int nego, u_char *msgptr) 4097 { 4098 tcb_p tp = &np->target[cp->target]; 4099 int msglen = 0; 4100 4101 #if 1 4102 /* 4103 * For now, only use PPR with DT option if period factor = 9. 
4104 */ 4105 if (tp->tinfo.goal.period == 9) { 4106 tp->tinfo.goal.width = BUS_16_BIT; 4107 tp->tinfo.goal.options |= PPR_OPT_DT; 4108 } 4109 else 4110 tp->tinfo.goal.options &= ~PPR_OPT_DT; 4111 #endif 4112 /* 4113 * Early C1010 chips need a work-around for DT 4114 * data transfer to work. 4115 */ 4116 if (!(np->features & FE_U3EN)) 4117 tp->tinfo.goal.options = 0; 4118 /* 4119 * negotiate using PPR ? 4120 */ 4121 if (tp->tinfo.goal.options & PPR_OPT_MASK) 4122 nego = NS_PPR; 4123 /* 4124 * negotiate wide transfers ? 4125 */ 4126 else if (tp->tinfo.current.width != tp->tinfo.goal.width) 4127 nego = NS_WIDE; 4128 /* 4129 * negotiate synchronous transfers? 4130 */ 4131 else if (tp->tinfo.current.period != tp->tinfo.goal.period || 4132 tp->tinfo.current.offset != tp->tinfo.goal.offset) 4133 nego = NS_SYNC; 4134 4135 switch (nego) { 4136 case NS_SYNC: 4137 msgptr[msglen++] = M_EXTENDED; 4138 msgptr[msglen++] = 3; 4139 msgptr[msglen++] = M_X_SYNC_REQ; 4140 msgptr[msglen++] = tp->tinfo.goal.period; 4141 msgptr[msglen++] = tp->tinfo.goal.offset; 4142 break; 4143 case NS_WIDE: 4144 msgptr[msglen++] = M_EXTENDED; 4145 msgptr[msglen++] = 2; 4146 msgptr[msglen++] = M_X_WIDE_REQ; 4147 msgptr[msglen++] = tp->tinfo.goal.width; 4148 break; 4149 case NS_PPR: 4150 msgptr[msglen++] = M_EXTENDED; 4151 msgptr[msglen++] = 6; 4152 msgptr[msglen++] = M_X_PPR_REQ; 4153 msgptr[msglen++] = tp->tinfo.goal.period; 4154 msgptr[msglen++] = 0; 4155 msgptr[msglen++] = tp->tinfo.goal.offset; 4156 msgptr[msglen++] = tp->tinfo.goal.width; 4157 msgptr[msglen++] = tp->tinfo.goal.options & PPR_OPT_DT; 4158 break; 4159 }; 4160 4161 cp->nego_status = nego; 4162 4163 if (nego) { 4164 tp->nego_cp = cp; /* Keep track a nego will be performed */ 4165 if (DEBUG_FLAGS & DEBUG_NEGO) { 4166 sym_print_msg(cp, nego == NS_SYNC ? "sync msgout" : 4167 nego == NS_WIDE ? "wide msgout" : 4168 "ppr msgout", msgptr); 4169 }; 4170 }; 4171 4172 return msglen; 4173 } 4174 4175 /* 4176 * Insert a job into the start queue. 
4177 */ 4178 static void sym_put_start_queue(hcb_p np, ccb_p cp) 4179 { 4180 u_short qidx; 4181 4182 #ifdef SYM_CONF_IARB_SUPPORT 4183 /* 4184 * If the previously queued CCB is not yet done, 4185 * set the IARB hint. The SCRIPTS will go with IARB 4186 * for this job when starting the previous one. 4187 * We leave devices a chance to win arbitration by 4188 * not using more than 'iarb_max' consecutive 4189 * immediate arbitrations. 4190 */ 4191 if (np->last_cp && np->iarb_count < np->iarb_max) { 4192 np->last_cp->host_flags |= HF_HINT_IARB; 4193 ++np->iarb_count; 4194 } 4195 else 4196 np->iarb_count = 0; 4197 np->last_cp = cp; 4198 #endif 4199 4200 /* 4201 * Insert first the idle task and then our job. 4202 * The MB should ensure proper ordering. 4203 */ 4204 qidx = np->squeueput + 2; 4205 if (qidx >= MAX_QUEUE*2) qidx = 0; 4206 4207 np->squeue [qidx] = cpu_to_scr(np->idletask_ba); 4208 MEMORY_BARRIER(); 4209 np->squeue [np->squeueput] = cpu_to_scr(cp->ccb_ba); 4210 4211 np->squeueput = qidx; 4212 4213 if (DEBUG_FLAGS & DEBUG_QUEUE) 4214 printf ("%s: queuepos=%d.\n", sym_name (np), np->squeueput); 4215 4216 /* 4217 * Script processor may be waiting for reselect. 4218 * Wake it up. 4219 */ 4220 MEMORY_BARRIER(); 4221 OUTB (nc_istat, SIGP|np->istat_sem); 4222 } 4223 4224 4225 /* 4226 * Soft reset the chip. 4227 * 4228 * Raising SRST when the chip is running may cause 4229 * problems on dual function chips (see below). 4230 * On the other hand, LVD devices need some delay 4231 * to settle and report actual BUS mode in STEST4. 4232 */ 4233 static void sym_chip_reset (hcb_p np) 4234 { 4235 OUTB (nc_istat, SRST); 4236 UDELAY (10); 4237 OUTB (nc_istat, 0); 4238 UDELAY(2000); /* For BUS MODE to settle */ 4239 } 4240 4241 /* 4242 * Soft reset the chip. 4243 * 4244 * Some 896 and 876 chip revisions may hang-up if we set 4245 * the SRST (soft reset) bit at the wrong time when SCRIPTS 4246 * are running. 
4247 * So, we need to abort the current operation prior to 4248 * soft resetting the chip. 4249 */ 4250 static void sym_soft_reset (hcb_p np) 4251 { 4252 u_char istat; 4253 int i; 4254 4255 OUTB (nc_istat, CABRT); 4256 for (i = 1000000 ; i ; --i) { 4257 istat = INB (nc_istat); 4258 if (istat & SIP) { 4259 INW (nc_sist); 4260 continue; 4261 } 4262 if (istat & DIP) { 4263 OUTB (nc_istat, 0); 4264 INB (nc_dstat); 4265 break; 4266 } 4267 } 4268 if (!i) 4269 printf("%s: unable to abort current chip operation.\n", 4270 sym_name(np)); 4271 sym_chip_reset (np); 4272 } 4273 4274 /* 4275 * Start reset process. 4276 * 4277 * The interrupt handler will reinitialize the chip. 4278 */ 4279 static void sym_start_reset(hcb_p np) 4280 { 4281 (void) sym_reset_scsi_bus(np, 1); 4282 } 4283 4284 static int sym_reset_scsi_bus(hcb_p np, int enab_int) 4285 { 4286 u32 term; 4287 int retv = 0; 4288 4289 sym_soft_reset(np); /* Soft reset the chip */ 4290 if (enab_int) 4291 OUTW (nc_sien, RST); 4292 /* 4293 * Enable Tolerant, reset IRQD if present and 4294 * properly set IRQ mode, prior to resetting the bus. 4295 */ 4296 OUTB (nc_stest3, TE); 4297 OUTB (nc_dcntl, (np->rv_dcntl & IRQM)); 4298 OUTB (nc_scntl1, CRST); 4299 UDELAY (200); 4300 4301 if (!SYM_SETUP_SCSI_BUS_CHECK) 4302 goto out; 4303 /* 4304 * Check for no terminators or SCSI bus shorts to ground. 4305 * Read SCSI data bus, data parity bits and control signals. 4306 * We are expecting RESET to be TRUE and other signals to be 4307 * FALSE. 
4308 */ 4309 term = INB(nc_sstat0); 4310 term = ((term & 2) << 7) + ((term & 1) << 17); /* rst sdp0 */ 4311 term |= ((INB(nc_sstat2) & 0x01) << 26) | /* sdp1 */ 4312 ((INW(nc_sbdl) & 0xff) << 9) | /* d7-0 */ 4313 ((INW(nc_sbdl) & 0xff00) << 10) | /* d15-8 */ 4314 INB(nc_sbcl); /* req ack bsy sel atn msg cd io */ 4315 4316 if (!(np->features & FE_WIDE)) 4317 term &= 0x3ffff; 4318 4319 if (term != (2<<7)) { 4320 printf("%s: suspicious SCSI data while resetting the BUS.\n", 4321 sym_name(np)); 4322 printf("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = " 4323 "0x%lx, expecting 0x%lx\n", 4324 sym_name(np), 4325 (np->features & FE_WIDE) ? "dp1,d15-8," : "", 4326 (u_long)term, (u_long)(2<<7)); 4327 if (SYM_SETUP_SCSI_BUS_CHECK == 1) 4328 retv = 1; 4329 } 4330 out: 4331 OUTB (nc_scntl1, 0); 4332 /* MDELAY(100); */ 4333 return retv; 4334 } 4335 4336 /* 4337 * The chip may have completed jobs. Look at the DONE QUEUE. 4338 */ 4339 static int sym_wakeup_done (hcb_p np) 4340 { 4341 ccb_p cp; 4342 int i, n; 4343 u_long dsa; 4344 4345 n = 0; 4346 i = np->dqueueget; 4347 while (1) { 4348 dsa = scr_to_cpu(np->dqueue[i]); 4349 if (!dsa) 4350 break; 4351 np->dqueue[i] = 0; 4352 if ((i = i+2) >= MAX_QUEUE*2) 4353 i = 0; 4354 4355 cp = sym_ccb_from_dsa(np, dsa); 4356 if (cp) { 4357 sym_complete_ok (np, cp); 4358 ++n; 4359 } 4360 else 4361 printf ("%s: bad DSA (%lx) in done queue.\n", 4362 sym_name(np), dsa); 4363 } 4364 np->dqueueget = i; 4365 4366 return n; 4367 } 4368 4369 /* 4370 * Complete all active CCBs with error. 4371 * Used on CHIP/SCSI RESET. 4372 */ 4373 static void sym_flush_busy_queue (hcb_p np, int cam_status) 4374 { 4375 /* 4376 * Move all active CCBs to the COMP queue 4377 * and flush this queue. 4378 */ 4379 sym_que_splice(&np->busy_ccbq, &np->comp_ccbq); 4380 sym_que_init(&np->busy_ccbq); 4381 sym_flush_comp_queue(np, cam_status); 4382 } 4383 4384 /* 4385 * Start chip. 4386 * 4387 * 'reason' means: 4388 * 0: initialisation. 
4389 * 1: SCSI BUS RESET delivered or received. 4390 * 2: SCSI BUS MODE changed. 4391 */ 4392 static void sym_init (hcb_p np, int reason) 4393 { 4394 int i; 4395 u_long phys; 4396 4397 /* 4398 * Reset chip if asked, otherwise just clear fifos. 4399 */ 4400 if (reason == 1) 4401 sym_soft_reset(np); 4402 else { 4403 OUTB (nc_stest3, TE|CSF); 4404 OUTONB (nc_ctest3, CLF); 4405 } 4406 4407 /* 4408 * Clear Start Queue 4409 */ 4410 phys = vtobus(np->squeue); 4411 for (i = 0; i < MAX_QUEUE*2; i += 2) { 4412 np->squeue[i] = cpu_to_scr(np->idletask_ba); 4413 np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4); 4414 } 4415 np->squeue[MAX_QUEUE*2-1] = cpu_to_scr(phys); 4416 4417 /* 4418 * Start at first entry. 4419 */ 4420 np->squeueput = 0; 4421 np->scripth0->startpos[0] = cpu_to_scr(phys); 4422 4423 /* 4424 * Clear Done Queue 4425 */ 4426 phys = vtobus(np->dqueue); 4427 for (i = 0; i < MAX_QUEUE*2; i += 2) { 4428 np->dqueue[i] = 0; 4429 np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4); 4430 } 4431 np->dqueue[MAX_QUEUE*2-1] = cpu_to_scr(phys); 4432 4433 /* 4434 * Start at first entry. 4435 */ 4436 np->scripth0->done_pos[0] = cpu_to_scr(phys); 4437 np->dqueueget = 0; 4438 4439 /* 4440 * Wakeup all pending jobs. 4441 */ 4442 sym_flush_busy_queue(np, CAM_SCSI_BUS_RESET); 4443 4444 /* 4445 * Init chip. 4446 */ 4447 OUTB (nc_istat, 0x00 ); /* Remove Reset, abort */ 4448 UDELAY (2000); /* The 895 needs time for the bus mode to settle */ 4449 4450 OUTB (nc_scntl0, np->rv_scntl0 | 0xc0); 4451 /* full arb., ena parity, par->ATN */ 4452 OUTB (nc_scntl1, 0x00); /* odd parity, and remove CRST!! 
*/ 4453 4454 sym_selectclock(np, np->rv_scntl3); /* Select SCSI clock */ 4455 4456 OUTB (nc_scid , RRE|np->myaddr); /* Adapter SCSI address */ 4457 OUTW (nc_respid, 1ul<<np->myaddr); /* Id to respond to */ 4458 OUTB (nc_istat , SIGP ); /* Signal Process */ 4459 OUTB (nc_dmode , np->rv_dmode); /* Burst length, dma mode */ 4460 OUTB (nc_ctest5, np->rv_ctest5); /* Large fifo + large burst */ 4461 4462 OUTB (nc_dcntl , NOCOM|np->rv_dcntl); /* Protect SFBR */ 4463 OUTB (nc_ctest3, np->rv_ctest3); /* Write and invalidate */ 4464 OUTB (nc_ctest4, np->rv_ctest4); /* Master parity checking */ 4465 4466 /* Extended Sreq/Sack filtering not supported on the C10 */ 4467 if (np->features & FE_C10) 4468 OUTB (nc_stest2, np->rv_stest2); 4469 else 4470 OUTB (nc_stest2, EXT|np->rv_stest2); 4471 4472 OUTB (nc_stest3, TE); /* TolerANT enable */ 4473 OUTB (nc_stime0, 0x0c); /* HTH disabled STO 0.25 sec */ 4474 4475 /* 4476 * C10101 Errata. 4477 * Errant SGE's when in narrow. Write bits 4 & 5 of 4478 * STEST1 register to disable SGE. We probably should do 4479 * that from SCRIPTS for each selection/reselection, but 4480 * I just don't want. :) 4481 */ 4482 if (np->device_id == PCI_ID_LSI53C1010 && np->revision_id < 0x45) 4483 OUTB (nc_stest1, INB(nc_stest1) | 0x30); 4484 4485 /* 4486 * DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2. 4487 * Disable overlapped arbitration for some dual function devices, 4488 * regardless revision id (kind of post-chip-design feature. ;-)) 4489 */ 4490 if (np->device_id == PCI_ID_SYM53C875) 4491 OUTB (nc_ctest0, (1<<5)); 4492 else if (np->device_id == PCI_ID_SYM53C896) 4493 np->rv_ccntl0 |= DPR; 4494 4495 /* 4496 * If 64 bit (895A/896/1010) write CCNTL1 to enable 40 bit 4497 * address table indirect addressing for MOVE. 4498 * Also write CCNTL0 if 64 bit chip, since this register seems 4499 * to only be used by 64 bit cores. 
4500 */ 4501 if (np->features & FE_64BIT) { 4502 OUTB (nc_ccntl0, np->rv_ccntl0); 4503 OUTB (nc_ccntl1, np->rv_ccntl1); 4504 } 4505 4506 /* 4507 * If phase mismatch handled by scripts (895A/896/1010), 4508 * set PM jump addresses. 4509 */ 4510 if (np->features & FE_NOPM) { 4511 OUTL (nc_pmjad1, SCRIPTH_BA (np, pm_handle)); 4512 OUTL (nc_pmjad2, SCRIPTH_BA (np, pm_handle)); 4513 } 4514 4515 /* 4516 * Enable GPIO0 pin for writing if LED support from SCRIPTS. 4517 * Also set GPIO5 and clear GPIO6 if hardware LED control. 4518 */ 4519 if (np->features & FE_LED0) 4520 OUTB(nc_gpcntl, INB(nc_gpcntl) & ~0x01); 4521 else if (np->features & FE_LEDC) 4522 OUTB(nc_gpcntl, (INB(nc_gpcntl) & ~0x41) | 0x20); 4523 4524 /* 4525 * enable ints 4526 */ 4527 OUTW (nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR); 4528 OUTB (nc_dien , MDPE|BF|SSI|SIR|IID); 4529 4530 /* 4531 * For 895/6 enable SBMC interrupt and save current SCSI bus mode. 4532 * Try to eat the spurious SBMC interrupt that may occur when 4533 * we reset the chip but not the SCSI BUS (at initialization). 4534 */ 4535 if (np->features & (FE_ULTRA2|FE_ULTRA3)) { 4536 OUTONW (nc_sien, SBMC); 4537 if (reason == 0) { 4538 MDELAY(100); 4539 INW (nc_sist); 4540 } 4541 np->scsi_mode = INB (nc_stest4) & SMODE; 4542 } 4543 4544 /* 4545 * Fill in target structure. 4546 * Reinitialize usrsync. 4547 * Reinitialize usrwide. 4548 * Prepare sync negotiation according to actual SCSI bus mode. 4549 */ 4550 for (i=0;i<SYM_CONF_MAX_TARGET;i++) { 4551 tcb_p tp = &np->target[i]; 4552 4553 tp->to_reset = 0; 4554 tp->sval = 0; 4555 tp->wval = np->rv_scntl3; 4556 tp->uval = 0; 4557 4558 tp->tinfo.current.period = 0; 4559 tp->tinfo.current.offset = 0; 4560 tp->tinfo.current.width = BUS_8_BIT; 4561 tp->tinfo.current.options = 0; 4562 } 4563 4564 /* 4565 * Download SCSI SCRIPTS to on-chip RAM if present, 4566 * and start script processor. 
4567 */ 4568 if (np->ram_ba) { 4569 if (sym_verbose > 1) 4570 printf ("%s: Downloading SCSI SCRIPTS.\n", 4571 sym_name(np)); 4572 if (np->ram_ws == 8192) { 4573 memcpy_to_pci(np->ram_va + 4096, 4574 np->scripth0, sizeof(struct sym_scrh)); 4575 OUTL (nc_mmws, np->scr_ram_seg); 4576 OUTL (nc_mmrs, np->scr_ram_seg); 4577 OUTL (nc_sfs, np->scr_ram_seg); 4578 phys = SCRIPTH_BA (np, start64); 4579 } 4580 else 4581 phys = SCRIPT_BA (np, init); 4582 memcpy_to_pci(np->ram_va,np->script0,sizeof(struct sym_scr)); 4583 } 4584 else 4585 phys = SCRIPT_BA (np, init); 4586 4587 np->istat_sem = 0; 4588 4589 MEMORY_BARRIER(); 4590 OUTL (nc_dsa, vtobus(np)); 4591 OUTL (nc_dsp, phys); 4592 4593 /* 4594 * Notify the XPT about the RESET condition. 4595 */ 4596 if (reason != 0) 4597 xpt_async(AC_BUS_RESET, np->path, NULL); 4598 } 4599 4600 /* 4601 * Get clock factor and sync divisor for a given 4602 * synchronous factor period. 4603 */ 4604 static int 4605 sym_getsync(hcb_p np, u_char dt, u_char sfac, u_char *divp, u_char *fakp) 4606 { 4607 u32 clk = np->clock_khz; /* SCSI clock frequency in kHz */ 4608 int div = np->clock_divn; /* Number of divisors supported */ 4609 u32 fak; /* Sync factor in sxfer */ 4610 u32 per; /* Period in tenths of ns */ 4611 u32 kpc; /* (per * clk) */ 4612 int ret; 4613 4614 /* 4615 * Compute the synchronous period in tenths of nano-seconds 4616 */ 4617 if (dt && sfac <= 9) per = 125; 4618 else if (sfac <= 10) per = 250; 4619 else if (sfac == 11) per = 303; 4620 else if (sfac == 12) per = 500; 4621 else per = 40 * sfac; 4622 ret = per; 4623 4624 kpc = per * clk; 4625 if (dt) 4626 kpc <<= 1; 4627 4628 /* 4629 * For earliest C10, the extra clocks does not apply 4630 * to CRC cycles, so it may be safe not to use them. 4631 * Note that this limits the lowest sync data transfer 4632 * to 5 Mega-transfers per second and may result in 4633 * using higher clock divisors. 
4634 */ 4635 #if 1 4636 if ((np->features & (FE_C10|FE_U3EN)) == FE_C10) { 4637 /* 4638 * Look for the lowest clock divisor that allows an 4639 * output speed not faster than the period. 4640 */ 4641 while (div > 0) { 4642 --div; 4643 if (kpc > (div_10M[div] << 2)) { 4644 ++div; 4645 break; 4646 } 4647 } 4648 fak = 0; /* No extra clocks */ 4649 if (div == np->clock_divn) { /* Are we too fast ? */ 4650 ret = -1; 4651 } 4652 *divp = div; 4653 *fakp = fak; 4654 return ret; 4655 } 4656 #endif 4657 4658 /* 4659 * Look for the greatest clock divisor that allows an 4660 * input speed faster than the period. 4661 */ 4662 while (div-- > 0) 4663 if (kpc >= (div_10M[div] << 2)) break; 4664 4665 /* 4666 * Calculate the lowest clock factor that allows an output 4667 * speed not faster than the period, and the max output speed. 4668 * If fak >= 1 we will set both XCLKH_ST and XCLKH_DT. 4669 * If fak >= 2 we will also set XCLKS_ST and XCLKS_DT. 4670 */ 4671 if (dt) { 4672 fak = (kpc - 1) / (div_10M[div] << 1) + 1 - 2; 4673 /* ret = ((2+fak)*div_10M[div])/np->clock_khz; */ 4674 } 4675 else { 4676 fak = (kpc - 1) / div_10M[div] + 1 - 4; 4677 /* ret = ((4+fak)*div_10M[div])/np->clock_khz; */ 4678 } 4679 4680 /* 4681 * Check against our hardware limits, or bugs :). 4682 */ 4683 if (fak < 0) {fak = 0; ret = -1;} 4684 if (fak > 2) {fak = 2; ret = -1;} 4685 4686 /* 4687 * Compute and return sync parameters. 4688 */ 4689 *divp = div; 4690 *fakp = fak; 4691 4692 return ret; 4693 } 4694 4695 /* 4696 * We received a WDTR. 4697 * Let everything be aware of the changes. 4698 */ 4699 static void sym_setwide(hcb_p np, ccb_p cp, u_char wide) 4700 { 4701 struct ccb_trans_settings neg; 4702 union ccb *ccb = cp->cam_ccb; 4703 tcb_p tp = &np->target[cp->target]; 4704 4705 sym_settrans(np, cp, 0, 0, 0, wide, 0, 0); 4706 4707 /* 4708 * Tell the SCSI layer about the new transfer parameters. 
	 */
	tp->tinfo.goal.width = tp->tinfo.current.width = wide;
	tp->tinfo.current.offset = 0;
	tp->tinfo.current.period = 0;
	tp->tinfo.current.options = 0;
	neg.bus_width = wide ? BUS_16_BIT : BUS_8_BIT;
	neg.sync_period = tp->tinfo.current.period;
	neg.sync_offset = tp->tinfo.current.offset;
	neg.valid = CCB_TRANS_BUS_WIDTH_VALID
		  | CCB_TRANS_SYNC_RATE_VALID
		  | CCB_TRANS_SYNC_OFFSET_VALID;
	xpt_setup_ccb(&neg.ccb_h, ccb->ccb_h.path, /*priority*/1);
	xpt_async(AC_TRANSFER_NEG, ccb->ccb_h.path, &neg);
}

/*
 * We received a SDTR.
 * Let everything be aware of the changes.
 */
static void
sym_setsync(hcb_p np, ccb_p cp, u_char ofs, u_char per, u_char div, u_char fak)
{
	struct	ccb_trans_settings neg;
	union ccb *ccb = cp->cam_ccb;
	tcb_p tp = &np->target[cp->target];
	/* Current bus width is taken from the CCB's selection context. */
	u_char wide = (cp->phys.select.sel_scntl3 & EWS) ? 1 : 0;

	sym_settrans(np, cp, 0, ofs, per, wide, div, fak);

	/*
	 * Tell the SCSI layer about the new transfer parameters.
	 */
	tp->tinfo.goal.period	= tp->tinfo.current.period  = per;
	tp->tinfo.goal.offset	= tp->tinfo.current.offset  = ofs;
	tp->tinfo.goal.options	= tp->tinfo.current.options = 0;
	neg.sync_period = tp->tinfo.current.period;
	neg.sync_offset = tp->tinfo.current.offset;
	neg.valid = CCB_TRANS_SYNC_RATE_VALID
		  | CCB_TRANS_SYNC_OFFSET_VALID;
	xpt_setup_ccb(&neg.ccb_h, ccb->ccb_h.path, /*priority*/1);
	xpt_async(AC_TRANSFER_NEG, ccb->ccb_h.path, &neg);
}

/*
 * We received a PPR.
 * Let everything be aware of the changes.
 */
static void sym_setpprot(hcb_p np, ccb_p cp, u_char dt, u_char ofs,
			 u_char per, u_char wide, u_char div, u_char fak)
{
	struct	ccb_trans_settings neg;
	union ccb *ccb = cp->cam_ccb;
	tcb_p tp = &np->target[cp->target];

	sym_settrans(np, cp, dt, ofs, per, wide, div, fak);

	/*
	 * Tell the SCSI layer about the new transfer parameters.
	 */
	tp->tinfo.goal.width	= tp->tinfo.current.width  = wide;
	tp->tinfo.goal.period	= tp->tinfo.current.period = per;
	tp->tinfo.goal.offset	= tp->tinfo.current.offset = ofs;
	tp->tinfo.goal.options	= tp->tinfo.current.options = dt;
	neg.sync_period = tp->tinfo.current.period;
	neg.sync_offset = tp->tinfo.current.offset;
	neg.bus_width = wide ? BUS_16_BIT : BUS_8_BIT;
	neg.valid = CCB_TRANS_BUS_WIDTH_VALID
		  | CCB_TRANS_SYNC_RATE_VALID
		  | CCB_TRANS_SYNC_OFFSET_VALID;
	xpt_setup_ccb(&neg.ccb_h, ccb->ccb_h.path, /*priority*/1);
	xpt_async(AC_TRANSFER_NEG, ccb->ccb_h.path, &neg);
}

/*
 * Switch trans mode for current job and its target.
 *
 * Computes the new SXFER (sval), SCNTL3 (wval) and SCNTL4 (uval)
 * register values for the target, writes them to the chip and
 * patches every busy CCB of that target accordingly.
 */
static void sym_settrans(hcb_p np, ccb_p cp, u_char dt, u_char ofs,
			 u_char per, u_char wide, u_char div, u_char fak)
{
	SYM_QUEHEAD *qp;
	union	ccb *ccb;
	tcb_p tp;
	u_char target = INB (nc_sdid) & 0x0f;
	u_char sval, wval, uval;

	assert (cp);
	if (!cp) return;
	ccb = cp->cam_ccb;
	assert (ccb);
	if (!ccb) return;
	assert (target == (cp->target & 0xf));
	tp = &np->target[target];

	sval = tp->sval;
	wval = tp->wval;
	uval = tp->uval;

#if 0
	printf("XXXX sval=%x wval=%x uval=%x (%x)\n",
		sval, wval, uval, np->rv_scntl3);
#endif
	/*
	 * Set the offset (C10 chips have a wider offset field).
	 */
	if (!(np->features & FE_C10))
		sval = (sval & ~0x1f) | ofs;
	else
		sval = (sval & ~0x3f) | ofs;

	/*
	 * Set the sync divisor and extra clock factor.
	 */
	if (ofs != 0) {
		wval = (wval & ~0x70) | ((div+1) << 4);
		if (!(np->features & FE_C10))
			sval = (sval & ~0xe0) | (fak << 5);
		else {
			uval = uval & ~(XCLKH_ST|XCLKH_DT|XCLKS_ST|XCLKS_DT);
			if (fak >= 1) uval |= (XCLKH_ST|XCLKH_DT);
			if (fak >= 2) uval |= (XCLKS_ST|XCLKS_DT);
		}
	}

	/*
	 * Set the bus width.
	 */
	wval = wval & ~EWS;
	if (wide != 0)
		wval |= EWS;

	/*
	 * Set misc. ultra enable bits.
	 */
	if (np->features & FE_C10) {
		uval = uval & ~U3EN;
		if (dt)	{
			assert(np->features & FE_U3EN);
			uval |= U3EN;
		}
	}
	else {
		wval = wval & ~ULTRA;
		if (per <= 12)	wval |= ULTRA;
	}

	/*
	 * Stop there if sync parameters are unchanged.
	 */
	if (tp->sval == sval && tp->wval == wval && tp->uval == uval) return;
	tp->sval = sval;
	tp->wval = wval;
	tp->uval = uval;

	/*
	 * Disable extended Sreq/Sack filtering if per < 50.
	 * Not supported on the C1010.
	 */
	if (per < 50 && !(np->features & FE_C10))
		OUTOFFB (nc_stest2, EXT);

	/*
	 * set actual value and sync_status
	 */
	OUTB (nc_sxfer, tp->sval);
	OUTB (nc_scntl3, tp->wval);

	if (np->features & FE_C10) {
		OUTB (nc_scntl4, tp->uval);
	}

	/*
	 * patch ALL busy ccbs of this target.
	 */
	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
		cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		if (cp->target != target)
			continue;
		cp->phys.select.sel_scntl3 = tp->wval;
		cp->phys.select.sel_sxfer = tp->sval;
		if (np->features & FE_C10) {
			cp->phys.select.sel_scntl4 = tp->uval;
		}
	}
}

/*
 * log message for real hard errors
 *
 * sym0 targ 0?: ERROR (ds:si) (so-si-sd) (sxfer/scntl3) @ name (dsp:dbc).
 *	      reg: r0 r1 r2 r3 r4 r5 r6 ..... rf.
 *
 * exception register:
 *	ds:	dstat
 *	si:	sist
 *
 * SCSI bus lines:
 *	so:	control lines as driven by chip.
 *	si:	control lines as seen by chip.
 *	sd:	scsi data lines as seen by chip.
 *
 * wide/fastmode:
 *	sxfer:	(see the manual)
 *	scntl3:	(see the manual)
 *
 * current script command:
 *	dsp:	script address (relative to start of script).
 *	dbc:	first word of script command.
 *
 * First 24 register of the chip:
 *	r0..rf
 */
static void sym_log_hard_error(hcb_p np, u_short sist, u_char dstat)
{
	u32	dsp;
	int	script_ofs;
	int	script_size;
	char	*script_name;
	u_char	*script_base;
	int	i;

	dsp	= INL (nc_dsp);

	/* Figure out which SCRIPTS area (if any) the DSP points into. */
	if (dsp > np->script_ba &&
	    dsp <= np->script_ba + sizeof(struct sym_scr)) {
		script_ofs	= dsp - np->script_ba;
		script_size	= sizeof(struct sym_scr);
		script_base	= (u_char *) np->script0;
		script_name	= "script";
	}
	else if (np->scripth_ba < dsp &&
		 dsp <= np->scripth_ba + sizeof(struct sym_scrh)) {
		script_ofs	= dsp - np->scripth_ba;
		script_size	= sizeof(struct sym_scrh);
		script_base	= (u_char *) np->scripth0;
		script_name	= "scripth";
	} else {
		script_ofs	= dsp;
		script_size	= 0;
		script_base	= 0;
		script_name	= "mem";
	}

	printf ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x) @ (%s %x:%08x).\n",
		sym_name (np), (unsigned)INB (nc_sdid)&0x0f, dstat, sist,
		(unsigned)INB (nc_socl), (unsigned)INB (nc_sbcl),
		(unsigned)INB (nc_sbdl), (unsigned)INB (nc_sxfer),
		(unsigned)INB (nc_scntl3), script_name, script_ofs,
		(unsigned)INL (nc_dbc));

	/* Dump the offending SCRIPTS word if the offset looks sane. */
	if (((script_ofs & 3) == 0) &&
	    (unsigned)script_ofs < script_size) {
		printf ("%s: script cmd = %08x\n", sym_name(np),
			scr_to_cpu((int) *(u32 *)(script_base + script_ofs)));
	}

	printf ("%s: regdump:", sym_name(np));
	for (i=0; i<24;i++)
		printf (" %02x", (unsigned)INB_OFF(i));
	printf (".\n");

	/*
	 * PCI BUS error, read the PCI status register.
	 */
	if (dstat & (MDPE|BF)) {
		u_short pci_sts;
#ifdef FreeBSD_4_Bus
		pci_sts = pci_read_config(np->device, PCIR_STATUS, 2);
#else
		pci_sts = pci_cfgread(np->pci_tag, PCIR_STATUS, 2);
#endif
		if (pci_sts & 0xf900) {
#ifdef FreeBSD_4_Bus
			pci_write_config(np->device, PCIR_STATUS, pci_sts, 2);
#else
			pci_cfgwrite(np->pci_tag, PCIR_STATUS, pci_sts, 2);
#endif
			printf("%s: PCI STATUS = 0x%04x\n",
				sym_name(np), pci_sts & 0xf900);
		}
	}
}

/*
 * chip interrupt handler
 *
 * In normal situations, interrupt conditions occur one at
 * a time. But when something bad happens on the SCSI BUS,
 * the chip may raise several interrupt flags before
 * stopping and interrupting the CPU. The additional
 * interrupt flags are stacked in some extra registers
 * after the SIP and/or DIP flag has been raised in the
 * ISTAT. After the CPU has read the interrupt condition
 * flag from SIST or DSTAT, the chip unstacks the other
 * interrupt flags and sets the corresponding bits in
 * SIST or DSTAT. Since the chip starts stacking once the
 * SIP or DIP flag is set, there is a small window of time
 * where the stacking does not occur.
 *
 * Typically, multiple interrupt conditions may happen in
 * the following situations:
 *
 * - SCSI parity error + Phase mismatch  (PAR|MA)
 *   When a parity error is detected in input phase
 *   and the device switches to msg-in phase inside a
 *   block MOV.
 * - SCSI parity error + Unexpected disconnect  (PAR|UDC)
 *   When a stupid device does not want to handle the
 *   recovery of an SCSI parity error.
 * - Some combinations of STO, PAR, UDC, ...
 *   When using non compliant SCSI stuff, when user is
 *   doing non compliant hot tampering on the BUS, when
 *   something really bad happens to a device, etc ...
 *
 * The heuristic suggested by SYMBIOS to handle
 * multiple interrupts is to try unstacking all
 * interrupt conditions and to handle them on some
 * priority based on error severity.
 * This will work when the unstacking has been
 * successful, but we cannot be 100 % sure of that,
 * since the CPU may have been faster to unstack than
 * the chip is able to stack. Hmmm ... But it seems that
 * such a situation is very unlikely to happen.
 *
 * If this happens, for example STO caught by the CPU
 * then UDC happening before the CPU has restarted
 * the SCRIPTS, the driver may wrongly complete the
 * same command on UDC, since the SCRIPTS didn't restart
 * and the DSA still points to the same command.
 * We avoid this situation by setting the DSA to an
 * invalid value when the CCB is completed and before
 * restarting the SCRIPTS.
 *
 * Another issue is that we need some section of our
 * recovery procedures to be somehow uninterruptible but
 * the SCRIPTS processor does not provide such a
 * feature. For this reason, we handle recovery preferably
 * from the C code and check against some SCRIPTS critical
 * sections from the C code.
 *
 * Hopefully, the interrupt handling of the driver is now
 * able to resist to weird BUS error conditions, but do not
 * ask me for any guarantee that it will never fail. :-)
 * Use at your own decision and risk.
 */

static void sym_intr1 (hcb_p np)
{
	u_char	istat, istatc;
	u_char	dstat;
	u_short	sist;

	/*
	 * interrupt on the fly ?
	 */
	istat = INB (nc_istat);
	if (istat & INTF) {
		OUTB (nc_istat, (istat & SIGP) | INTF | np->istat_sem);
#if 1
		istat = INB (nc_istat);		/* DUMMY READ */
#endif
		if (DEBUG_FLAGS & DEBUG_TINY) printf ("F ");
		(void)sym_wakeup_done (np);
	};

	if (!(istat & (SIP|DIP)))
		return;

#if 0	/* We should never get this one */
	if (istat & CABRT)
		OUTB (nc_istat, CABRT);
#endif

	/*
	 * PAR and MA interrupts may occur at the same time,
	 * and we need to know of both in order to handle
	 * this situation properly. We try to unstack SCSI
	 * interrupts for that reason. BTW, I dislike a LOT
	 * such a loop inside the interrupt routine.
	 * Even if DMA interrupt stacking is very unlikely to
	 * happen, we also try unstacking these ones, since
	 * this has no performance impact.
	 */
	sist	= 0;
	dstat	= 0;
	istatc	= istat;
	do {
		if (istatc & SIP)
			sist  |= INW (nc_sist);
		if (istatc & DIP)
			dstat |= INB (nc_dstat);
		istatc = INB (nc_istat);
		istat |= istatc;
	} while (istatc & (SIP|DIP));

	if (DEBUG_FLAGS & DEBUG_TINY)
		printf ("<%d|%x:%x|%x:%x>",
			(int)INB(nc_scr0),
			dstat,sist,
			(unsigned)INL(nc_dsp),
			(unsigned)INL(nc_dbc));
	/*
	 * First, interrupts we want to service cleanly.
	 *
	 * Phase mismatch (MA) is the most frequent interrupt
	 * for chip earlier than the 896 and so we have to service
	 * it as quickly as possible.
	 * A SCSI parity error (PAR) may be combined with a phase
	 * mismatch condition (MA).
	 * Programmed interrupts (SIR) are used to call the C code
	 * from SCRIPTS.
	 * The single step interrupt (SSI) is not used in this
	 * driver.
	 */
	if (!(sist  & (STO|GEN|HTH|SGE|UDC|SBMC|RST)) &&
	    !(dstat & (MDPE|BF|ABRT|IID))) {
		if	(sist & PAR)	sym_int_par (np, sist);
		else if (sist & MA)	sym_int_ma (np);
		else if (dstat & SIR)	sym_int_sir (np);
		else if (dstat & SSI)	OUTONB (nc_dcntl, (STD|NOCOM));
		else			goto unknown_int;
		return;
	};

	/*
	 * Now, interrupts that do not happen in normal
	 * situations and that we may need to recover from.
	 *
	 * On SCSI RESET (RST), we reset everything.
	 * On SCSI BUS MODE CHANGE (SBMC), we complete all
	 * active CCBs with RESET status, prepare all devices
	 * for negotiating again and restart the SCRIPTS.
	 * On STO and UDC, we complete the CCB with the corres-
	 * ponding status and restart the SCRIPTS.
	 */
	if (sist & RST) {
		xpt_print_path(np->path);
		printf("SCSI BUS reset detected.\n");
		sym_init (np, 1);
		return;
	};

	OUTB (nc_ctest3, np->rv_ctest3 | CLF);	/* clear dma fifo  */
	OUTB (nc_stest3, TE|CSF);		/* clear scsi fifo */

	if (!(sist  & (GEN|HTH|SGE)) &&
	    !(dstat & (MDPE|BF|ABRT|IID))) {
		if	(sist & SBMC)	sym_int_sbmc (np);
		else if (sist & STO)	sym_int_sto (np);
		else if (sist & UDC)	sym_int_udc (np);
		else			goto unknown_int;
		return;
	};

	/*
	 * Now, interrupts we are not able to recover cleanly.
	 *
	 * Log message for hard errors.
	 * Reset everything.
	 */

	sym_log_hard_error(np, sist, dstat);

	if ((sist & (GEN|HTH|SGE)) ||
	    (dstat & (MDPE|BF|ABRT|IID))) {
		sym_start_reset(np);
		return;
	};

unknown_int:
	/*
	 * We just miss the cause of the interrupt. :(
	 * Print a message. The timeout will do the real work.
	 */
	printf(	"%s: unknown interrupt(s) ignored, "
		"ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n",
		sym_name(np), istat, dstat, sist);
}

/*
 * Interrupt entry point: bracket the real work with debug markers.
 */
static void sym_intr(void *arg)
{
	if (DEBUG_FLAGS & DEBUG_TINY) printf ("[");
	sym_intr1((hcb_p) arg);
	if (DEBUG_FLAGS & DEBUG_TINY) printf ("]");
	return;
}

/*
 * Polled entry point, called with CAM interrupts blocked.
 */
static void sym_poll(struct cam_sim *sim)
{
	int s = splcam();
	sym_intr(cam_sim_softc(sim));
	splx(s);
}


/*
 * generic recovery from scsi interrupt
 *
 * The doc says that when the chip gets an SCSI interrupt,
 * it tries to stop in an orderly fashion, by completing
 * an instruction fetch that had started or by flushing
 * the DMA fifo for a write to memory that was executing.
 * Such a fashion is not enough to know if the instruction
 * that was just before the current DSP value has been
 * executed or not.
 *
 * There are some small SCRIPTS sections that deal with
 * the start queue and the done queue that may break any
 * assumption from the C code if we are interrupted
 * inside, so we reset if this happens. Btw, since these
 * SCRIPTS sections are executed while the SCRIPTS hasn't
 * started SCSI operations, it is very unlikely to happen.
 *
 * All the driver data structures are supposed to be
 * allocated from the same 4 GB memory window, so there
 * is a 1 to 1 relationship between DSA and driver data
 * structures. Since we are careful :) to invalidate the
 * DSA when we complete a command or when the SCRIPTS
 * pushes a DSA into a queue, we can trust it when it
 * points to a CCB.
 */
static void sym_recover_scsi_int (hcb_p np, u_char hsts)
{
	u32	dsp	= INL (nc_dsp);
	u32	dsa	= INL (nc_dsa);
	ccb_p cp	= sym_ccb_from_dsa(np, dsa);

	/*
	 * If we haven't been interrupted inside the SCRIPTS
	 * critical paths, we can safely restart the SCRIPTS
	 * and trust the DSA value if it matches a CCB.
	 */
	if ((!(dsp > SCRIPT_BA (np, getjob_begin) &&
	       dsp < SCRIPT_BA (np, getjob_end) + 1)) &&
	    (!(dsp > SCRIPT_BA (np, ungetjob) &&
	       dsp < SCRIPT_BA (np, reselect) + 1)) &&
	    (!(dsp > SCRIPTH_BA (np, sel_for_abort) &&
	       dsp < SCRIPTH_BA (np, sel_for_abort_1) + 1)) &&
	    (!(dsp > SCRIPT_BA (np, done) &&
	       dsp < SCRIPT_BA (np, done_end) + 1))) {
		OUTB (nc_ctest3, np->rv_ctest3 | CLF);	/* clear dma fifo  */
		OUTB (nc_stest3, TE|CSF);		/* clear scsi fifo */
		/*
		 * If we have a CCB, let the SCRIPTS call us back for
		 * the handling of the error with SCRATCHA filled with
		 * STARTPOS. This way, we will be able to freeze the
		 * device queue and requeue awaiting IOs.
		 */
		if (cp) {
			cp->host_status = hsts;
			OUTL (nc_dsp, SCRIPT_BA (np, complete_error));
		}
		/*
		 * Otherwise just restart the SCRIPTS.
		 */
		else {
			OUTL (nc_dsa, 0xffffff);
			OUTL (nc_dsp, SCRIPT_BA (np, start));
		}
	}
	else
		goto reset_all;

	return;

reset_all:
	sym_start_reset(np);
}

/*
 * chip exception handler for selection timeout
 */
void sym_int_sto (hcb_p np)
{
	u32 dsp	= INL (nc_dsp);

	if (DEBUG_FLAGS & DEBUG_TINY) printf ("T");

	/* Only recover if the timeout hit right after wf_sel_done. */
	if (dsp == SCRIPT_BA (np, wf_sel_done) + 8)
		sym_recover_scsi_int(np, HS_SEL_TIMEOUT);
	else
		sym_start_reset(np);
}

/*
 * chip exception handler for unexpected disconnect
 */
void sym_int_udc (hcb_p np)
{
	printf ("%s: unexpected disconnect\n", sym_name(np));
	sym_recover_scsi_int(np, HS_UNEXPECTED);
}

/*
 * chip exception handler for SCSI bus mode change
 *
 * spi2-r12 11.2.3 says a transceiver mode change must
 * generate a reset event and a device that detects a reset
 * event shall initiate a hard reset. It says also that a
 * device that detects a mode change shall set data transfer
 * mode to eight bit asynchronous, etc...
 * So, just reinitializing all except chip should be enough.
 */
static void sym_int_sbmc (hcb_p np)
{
	u_char scsi_mode = INB (nc_stest4) & SMODE;

	/*
	 * Notify user.
	 */
	xpt_print_path(np->path);
	printf("SCSI BUS mode change from %s to %s.\n",
		sym_scsi_bus_mode(np->scsi_mode), sym_scsi_bus_mode(scsi_mode));

	/*
	 * Should suspend command processing for a few seconds and
	 * reinitialize all except the chip.
	 */
	sym_init (np, 2);
}

/*
 * chip exception handler for SCSI parity error.
 *
 * When the chip detects a SCSI parity error and is
 * currently executing a (CH)MOV instruction, it does
 * not interrupt immediately, but tries to finish the
 * transfer of the current scatter entry before
 * interrupting. The following situations may occur:
 *
 * - The complete scatter entry has been transferred
 *   without the device having changed phase.
 *   The chip will then interrupt with the DSP pointing
 *   to the instruction that follows the MOV.
 *
 * - A phase mismatch occurs before the MOV finished
 *   and phase errors are to be handled by the C code.
 *   The chip will then interrupt with both PAR and MA
 *   conditions set.
 *
 * - A phase mismatch occurs before the MOV finished and
 *   phase errors are to be handled by SCRIPTS.
 *   The chip will load the DSP with the phase mismatch
 *   JUMP address and interrupt the host processor.
 */
static void sym_int_par (hcb_p np, u_short sist)
{
	u_char	hsts	= INB (HS_PRT);
	u32	dsp	= INL (nc_dsp);
	u32	dbc	= INL (nc_dbc);
	u32	dsa	= INL (nc_dsa);
	u_char	sbcl	= INB (nc_sbcl);
	u_char	cmd	= dbc >> 24;
	int phase	= cmd & 7;
	ccb_p cp	= sym_ccb_from_dsa(np, dsa);

	printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n",
		sym_name(np), hsts, dbc, sbcl);

	/*
	 * Check that the chip is connected to the SCSI BUS.
	 */
	if (!(INB (nc_scntl1) & ISCON)) {
		sym_recover_scsi_int(np, HS_UNEXPECTED);
		return;
	}

	/*
	 * If the nexus is not clearly identified, reset the bus.
	 * We will try to do better later.
	 */
	if (!cp)
		goto reset_all;

	/*
	 * Check instruction was a MOV, direction was INPUT and
	 * ATN is asserted.
	 */
	if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8))
		goto reset_all;

	/*
	 * Keep track of the parity error.
	 */
	OUTONB (HF_PRT, HF_EXT_ERR);
	cp->xerr_status |= XE_PARITY_ERR;

	/*
	 * Prepare the message to send to the device.
	 * MSG IN phase (7) gets MESSAGE PARITY, others INITIATOR
	 * DETECTED ERROR.
	 */
	np->msgout[0] = (phase == 7) ? M_PARITY : M_ID_ERROR;

	/*
	 * If the old phase was DATA IN phase, we have to deal with
	 * the 3 situations described above.
	 * For other input phases (MSG IN and STATUS), the device
	 * must resend the whole thing that failed parity checking
	 * or signal error. So, jumping to dispatcher should be OK.
	 */
	if (phase == 1) {
		/* Phase mismatch handled by SCRIPTS */
		if (dsp == SCRIPTH_BA (np, pm_handle))
			OUTL (nc_dsp, dsp);
		/* Phase mismatch handled by the C code */
		else if (sist & MA)
			sym_int_ma (np);
		/* No phase mismatch occurred */
		else {
			OUTL (nc_temp, dsp);
			OUTL (nc_dsp, SCRIPT_BA (np, dispatch));
		}
	}
	else
		OUTL (nc_dsp, SCRIPT_BA (np, clrack));
	return;

reset_all:
	sym_start_reset(np);
	return;
}

/*
 * chip exception handler for phase errors.
 *
 * We have to construct a new transfer descriptor,
 * to transfer the rest of the current block.
 */
static void sym_int_ma (hcb_p np)
{
	u32	dbc;
	u32	rest;
	u32	dsp;
	u32	dsa;
	u32	nxtdsp;
	u32	*vdsp;
	u32	oadr, olen;
	u32	*tblp;
	u32	newcmd;
	u_int	delta;
	u_char	cmd;
	u_char	hflags, hflags0;
	struct	sym_pmc *pm;
	ccb_p	cp;

	dsp	= INL (nc_dsp);
	dbc	= INL (nc_dbc);
	dsa	= INL (nc_dsa);

	cmd	= dbc >> 24;
	rest	= dbc & 0xffffff;
	delta	= 0;

	/*
	 * locate matching cp if any.
	 */
	cp = sym_ccb_from_dsa(np, dsa);

	/*
	 * Do not take into account dma fifo and various buffers in
	 * INPUT phase since the chip flushes everything before
	 * raising the MA interrupt for interrupted INPUT phases.
	 * For DATA IN phase, we will check for the SWIDE later.
5470 */ 5471 if ((cmd & 7) != 1) { 5472 u_char ss0, ss2; 5473 5474 if (np->features & FE_DFBC) 5475 delta = INW (nc_dfbc); 5476 else { 5477 u32 dfifo; 5478 5479 /* 5480 * Read DFIFO, CTEST[4-6] using 1 PCI bus ownership. 5481 */ 5482 dfifo = INL(nc_dfifo); 5483 5484 /* 5485 * Calculate remaining bytes in DMA fifo. 5486 * (CTEST5 = dfifo >> 16) 5487 */ 5488 if (dfifo & (DFS << 16)) 5489 delta = ((((dfifo >> 8) & 0x300) | 5490 (dfifo & 0xff)) - rest) & 0x3ff; 5491 else 5492 delta = ((dfifo & 0xff) - rest) & 0x7f; 5493 } 5494 5495 /* 5496 * The data in the dma fifo has not been transfered to 5497 * the target -> add the amount to the rest 5498 * and clear the data. 5499 * Check the sstat2 register in case of wide transfer. 5500 */ 5501 rest += delta; 5502 ss0 = INB (nc_sstat0); 5503 if (ss0 & OLF) rest++; 5504 if (!(np->features & FE_C10)) 5505 if (ss0 & ORF) rest++; 5506 if (cp && (cp->phys.select.sel_scntl3 & EWS)) { 5507 ss2 = INB (nc_sstat2); 5508 if (ss2 & OLF1) rest++; 5509 if (!(np->features & FE_C10)) 5510 if (ss2 & ORF1) rest++; 5511 }; 5512 5513 /* 5514 * Clear fifos. 5515 */ 5516 OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* dma fifo */ 5517 OUTB (nc_stest3, TE|CSF); /* scsi fifo */ 5518 } 5519 5520 /* 5521 * log the information 5522 */ 5523 if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE)) 5524 printf ("P%x%x RL=%d D=%d ", cmd&7, INB(nc_sbcl)&7, 5525 (unsigned) rest, (unsigned) delta); 5526 5527 /* 5528 * try to find the interrupted script command, 5529 * and the address at which to continue. 
5530 */ 5531 vdsp = 0; 5532 nxtdsp = 0; 5533 if (dsp > np->script_ba && 5534 dsp <= np->script_ba + sizeof(struct sym_scr)) { 5535 vdsp = (u32 *)((char*)np->script0 + (dsp-np->script_ba-8)); 5536 nxtdsp = dsp; 5537 } 5538 else if (dsp > np->scripth_ba && 5539 dsp <= np->scripth_ba + sizeof(struct sym_scrh)) { 5540 vdsp = (u32 *)((char*)np->scripth0 + (dsp-np->scripth_ba-8)); 5541 nxtdsp = dsp; 5542 } 5543 5544 /* 5545 * log the information 5546 */ 5547 if (DEBUG_FLAGS & DEBUG_PHASE) { 5548 printf ("\nCP=%p DSP=%x NXT=%x VDSP=%p CMD=%x ", 5549 cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd); 5550 }; 5551 5552 if (!vdsp) { 5553 printf ("%s: interrupted SCRIPT address not found.\n", 5554 sym_name (np)); 5555 goto reset_all; 5556 } 5557 5558 if (!cp) { 5559 printf ("%s: SCSI phase error fixup: CCB already dequeued.\n", 5560 sym_name (np)); 5561 goto reset_all; 5562 } 5563 5564 /* 5565 * get old startaddress and old length. 5566 */ 5567 oadr = scr_to_cpu(vdsp[1]); 5568 5569 if (cmd & 0x10) { /* Table indirect */ 5570 tblp = (u32 *) ((char*) &cp->phys + oadr); 5571 olen = scr_to_cpu(tblp[0]); 5572 oadr = scr_to_cpu(tblp[1]); 5573 } else { 5574 tblp = (u32 *) 0; 5575 olen = scr_to_cpu(vdsp[0]) & 0xffffff; 5576 }; 5577 5578 if (DEBUG_FLAGS & DEBUG_PHASE) { 5579 printf ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n", 5580 (unsigned) (scr_to_cpu(vdsp[0]) >> 24), 5581 tblp, 5582 (unsigned) olen, 5583 (unsigned) oadr); 5584 }; 5585 5586 /* 5587 * check cmd against assumed interrupted script command. 5588 */ 5589 if (cmd != (scr_to_cpu(vdsp[0]) >> 24)) { 5590 PRINT_ADDR(cp); 5591 printf ("internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n", 5592 (unsigned)cmd, (unsigned)scr_to_cpu(vdsp[0]) >> 24); 5593 5594 goto reset_all; 5595 }; 5596 5597 /* 5598 * if old phase not dataphase, leave here. 
5599 */ 5600 if ((cmd & 5) != (cmd & 7)) { 5601 PRINT_ADDR(cp); 5602 printf ("phase change %x-%x %d@%08x resid=%d.\n", 5603 cmd&7, INB(nc_sbcl)&7, (unsigned)olen, 5604 (unsigned)oadr, (unsigned)rest); 5605 goto unexpected_phase; 5606 }; 5607 5608 /* 5609 * Choose the correct PM save area. 5610 * 5611 * Look at the PM_SAVE SCRIPT if you want to understand 5612 * this stuff. The equivalent code is implemented in 5613 * SCRIPTS for the 895A and 896 that are able to handle 5614 * PM from the SCRIPTS processor. 5615 */ 5616 hflags0 = INB (HF_PRT); 5617 hflags = hflags0; 5618 5619 if (hflags & (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)) { 5620 if (hflags & HF_IN_PM0) 5621 nxtdsp = scr_to_cpu(cp->phys.pm0.ret); 5622 else if (hflags & HF_IN_PM1) 5623 nxtdsp = scr_to_cpu(cp->phys.pm1.ret); 5624 5625 if (hflags & HF_DP_SAVED) 5626 hflags ^= HF_ACT_PM; 5627 } 5628 5629 if (!(hflags & HF_ACT_PM)) { 5630 pm = &cp->phys.pm0; 5631 newcmd = SCRIPT_BA(np, pm0_data); 5632 } 5633 else { 5634 pm = &cp->phys.pm1; 5635 newcmd = SCRIPT_BA(np, pm1_data); 5636 } 5637 5638 hflags &= ~(HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED); 5639 if (hflags != hflags0) 5640 OUTB (HF_PRT, hflags); 5641 5642 /* 5643 * fillin the phase mismatch context 5644 */ 5645 pm->sg.addr = cpu_to_scr(oadr + olen - rest); 5646 pm->sg.size = cpu_to_scr(rest); 5647 pm->ret = cpu_to_scr(nxtdsp); 5648 5649 /* 5650 * If we have a SWIDE, 5651 * - prepare the address to write the SWIDE from SCRIPTS, 5652 * - compute the SCRIPTS address to restart from, 5653 * - move current data pointer context by one byte. 
5654 */ 5655 nxtdsp = SCRIPT_BA (np, dispatch); 5656 if ((cmd & 7) == 1 && cp && (cp->phys.select.sel_scntl3 & EWS) && 5657 (INB (nc_scntl2) & WSR)) { 5658 u32 tmp; 5659 #ifdef SYM_DEBUG_PM_WITH_WSR 5660 PRINT_ADDR(cp); 5661 printf ("MA interrupt with WSR set - " 5662 "pm->sg.addr=%x - pm->sg.size=%d\n", 5663 pm->sg.addr, pm->sg.size); 5664 #endif 5665 /* 5666 * Set up the table indirect for the MOVE 5667 * of the residual byte and adjust the data 5668 * pointer context. 5669 */ 5670 tmp = scr_to_cpu(pm->sg.addr); 5671 cp->phys.wresid.addr = cpu_to_scr(tmp); 5672 pm->sg.addr = cpu_to_scr(tmp + 1); 5673 tmp = scr_to_cpu(pm->sg.size); 5674 cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1); 5675 pm->sg.size = cpu_to_scr(tmp - 1); 5676 5677 /* 5678 * If only the residual byte is to be moved, 5679 * no PM context is needed. 5680 */ 5681 if ((tmp&0xffffff) == 1) 5682 newcmd = pm->ret; 5683 5684 /* 5685 * Prepare the address of SCRIPTS that will 5686 * move the residual byte to memory. 5687 */ 5688 nxtdsp = SCRIPTH_BA (np, wsr_ma_helper); 5689 } 5690 5691 if (DEBUG_FLAGS & DEBUG_PHASE) { 5692 PRINT_ADDR(cp); 5693 printf ("PM %x %x %x / %x %x %x.\n", 5694 hflags0, hflags, newcmd, 5695 (unsigned)scr_to_cpu(pm->sg.addr), 5696 (unsigned)scr_to_cpu(pm->sg.size), 5697 (unsigned)scr_to_cpu(pm->ret)); 5698 } 5699 5700 /* 5701 * Restart the SCRIPTS processor. 5702 */ 5703 OUTL (nc_temp, newcmd); 5704 OUTL (nc_dsp, nxtdsp); 5705 return; 5706 5707 /* 5708 * Unexpected phase changes that occurs when the current phase 5709 * is not a DATA IN or DATA OUT phase are due to error conditions. 5710 * Such event may only happen when the SCRIPTS is using a 5711 * multibyte SCSI MOVE. 5712 * 5713 * Phase change Some possible cause 5714 * 5715 * COMMAND --> MSG IN SCSI parity error detected by target. 5716 * COMMAND --> STATUS Bad command or refused by target. 5717 * MSG OUT --> MSG IN Message rejected by target. 
5718 * MSG OUT --> COMMAND Bogus target that discards extended 5719 * negotiation messages. 5720 * 5721 * The code below does not care of the new phase and so 5722 * trusts the target. Why to annoy it ? 5723 * If the interrupted phase is COMMAND phase, we restart at 5724 * dispatcher. 5725 * If a target does not get all the messages after selection, 5726 * the code assumes blindly that the target discards extended 5727 * messages and clears the negotiation status. 5728 * If the target does not want all our response to negotiation, 5729 * we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids 5730 * bloat for such a should_not_happen situation). 5731 * In all other situation, we reset the BUS. 5732 * Are these assumptions reasonnable ? (Wait and see ...) 5733 */ 5734 unexpected_phase: 5735 dsp -= 8; 5736 nxtdsp = 0; 5737 5738 switch (cmd & 7) { 5739 case 2: /* COMMAND phase */ 5740 nxtdsp = SCRIPT_BA (np, dispatch); 5741 break; 5742 #if 0 5743 case 3: /* STATUS phase */ 5744 nxtdsp = SCRIPT_BA (np, dispatch); 5745 break; 5746 #endif 5747 case 6: /* MSG OUT phase */ 5748 /* 5749 * If the device may want to use untagged when we want 5750 * tagged, we prepare an IDENTIFY without disc. granted, 5751 * since we will not be able to handle reselect. 5752 * Otherwise, we just don't care. 
	 */
	if (dsp == SCRIPT_BA (np, send_ident)) {
		if (cp->tag != NO_TAG && olen - rest <= 3) {
			cp->host_status = HS_BUSY;
			np->msgout[0] = M_IDENTIFY | cp->lun;
			/* Identify again, but with disc. privilege withheld */
			nxtdsp = SCRIPTH_BA (np, ident_break_atn);
		}
		else
			nxtdsp = SCRIPTH_BA (np, ident_break);
	}
	else if (dsp == SCRIPTH_BA (np, send_wdtr) ||
		 dsp == SCRIPTH_BA (np, send_sdtr) ||
		 dsp == SCRIPTH_BA (np, send_ppr)) {
		/* Target changed phase during a negotiation message out */
		nxtdsp = SCRIPTH_BA (np, nego_bad_phase);
	}
	break;
#if 0
	case 7:	/* MSG IN  phase */
		nxtdsp = SCRIPT_BA (np, clrack);
		break;
#endif
	}

	if (nxtdsp) {
		OUTL (nc_dsp, nxtdsp);
		return;
	}

reset_all:
	/* No way to fix up the phase error: reset everything */
	sym_start_reset(np);
}

/*
 * Dequeue from the START queue all CCBs that match 
 * a given target/lun/task condition (-1 means all),
 * and move them from the BUSY queue to the COMP queue 
 * with CAM_REQUEUE_REQ status condition.
 * This function is used during error handling/recovery.
 * It is called with SCRIPTS not running.
 *
 * 'i' is the index of the first start queue slot the SCRIPTS
 * has not started yet. Returns the number of CCBs dequeued
 * (each queue entry spans 2 32-bit words, hence the /2).
 */
static int 
sym_dequeue_from_squeue(hcb_p np, int i, int target, int lun, int task)
{
	int j;
	ccb_p cp;

	/*
	 * Make sure the starting index is within range.
	 */
	assert((i >= 0) && (i < 2*MAX_QUEUE));

	/*
	 * Walk until end of START queue and dequeue every job 
	 * that matches the target/lun/task condition.
	 * 'i' reads the queue, 'j' writes back the entries kept,
	 * compacting the queue in place.
	 */
	j = i;
	while (i != np->squeueput) {
		/* Each entry holds the DSA of a queued CCB */
		cp = sym_ccb_from_dsa(np, scr_to_cpu(np->squeue[i]));
		assert(cp);
#ifdef	SYM_CONF_IARB_SUPPORT
		/* Forget hints for IARB, they may be no longer relevant */
		cp->host_flags &= ~HF_HINT_IARB;
#endif
		if ((target == -1 || cp->target == target) &&
		    (lun    == -1 || cp->lun    == lun)    &&
		    (task   == -1 || cp->tag    == task)) {
			sym_set_cam_status(cp->cam_ccb, CAM_REQUEUE_REQ);
			sym_remque(&cp->link_ccbq);
			sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
		}
		else {
			if (i != j)
				np->squeue[j] = np->squeue[i];
			if ((j += 2) >= MAX_QUEUE*2) j = 0;
		}
		if ((i += 2) >= MAX_QUEUE*2) i = 0;
	}
	if (i != j)		/* Copy back the idle task if needed */
		np->squeue[j] = np->squeue[i];
	np->squeueput = j;	/* Update our current start queue pointer */

	return (i - j) / 2;
}

/*
 * Complete all CCBs queued to the COMP queue.
 *
 * These CCBs are assumed:
 * - Not to be referenced either by devices or 
 *   SCRIPTS-related queues and datas.
 * - To have to be completed with an error condition 
 *   or requeued.
 *
 * The device queue freeze count is incremented 
 * for each CCB that does not prevent this.
 * This function is called when all CCBs involved 
 * in error handling/recovery have been reaped.
5850 */ 5851 static void 5852 sym_flush_comp_queue(hcb_p np, int cam_status) 5853 { 5854 SYM_QUEHEAD *qp; 5855 ccb_p cp; 5856 5857 while ((qp = sym_remque_head(&np->comp_ccbq)) != 0) { 5858 union ccb *ccb; 5859 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); 5860 sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); 5861 ccb = cp->cam_ccb; 5862 if (cam_status) 5863 sym_set_cam_status(ccb, cam_status); 5864 sym_free_ccb(np, cp); 5865 sym_freeze_cam_ccb(ccb); 5866 sym_xpt_done(np, ccb); 5867 } 5868 } 5869 5870 /* 5871 * chip handler for bad SCSI status condition 5872 * 5873 * In case of bad SCSI status, we unqueue all the tasks 5874 * currently queued to the controller but not yet started 5875 * and then restart the SCRIPTS processor immediately. 5876 * 5877 * QUEUE FULL and BUSY conditions are handled the same way. 5878 * Basically all the not yet started tasks are requeued in 5879 * device queue and the queue is frozen until a completion. 5880 * 5881 * For CHECK CONDITION and COMMAND TERMINATED status, we use 5882 * the CCB of the failed command to prepare a REQUEST SENSE 5883 * SCSI command and queue it to the controller queue. 5884 * 5885 * SCRATCHA is assumed to have been loaded with STARTPOS 5886 * before the SCRIPTS called the C code. 5887 */ 5888 static void sym_sir_bad_scsi_status(hcb_p np, int num, ccb_p cp) 5889 { 5890 tcb_p tp = &np->target[cp->target]; 5891 u32 startp; 5892 u_char s_status = cp->ssss_status; 5893 u_char h_flags = cp->host_flags; 5894 int msglen; 5895 int nego; 5896 int i; 5897 5898 /* 5899 * Compute the index of the next job to start from SCRIPTS. 5900 */ 5901 i = (INL (nc_scratcha) - vtobus(np->squeue)) / 4; 5902 5903 /* 5904 * The last CCB queued used for IARB hint may be 5905 * no longer relevant. Forget it. 5906 */ 5907 #ifdef SYM_CONF_IARB_SUPPORT 5908 if (np->last_cp) 5909 np->last_cp = 0; 5910 #endif 5911 5912 /* 5913 * Now deal with the SCSI status. 
5914 */ 5915 switch(s_status) { 5916 case S_BUSY: 5917 case S_QUEUE_FULL: 5918 if (sym_verbose >= 2) { 5919 PRINT_ADDR(cp); 5920 printf (s_status == S_BUSY ? "BUSY" : "QUEUE FULL\n"); 5921 } 5922 default: /* S_INT, S_INT_COND_MET, S_CONFLICT */ 5923 sym_complete_error (np, cp); 5924 break; 5925 case S_TERMINATED: 5926 case S_CHECK_COND: 5927 /* 5928 * If we get an SCSI error when requesting sense, give up. 5929 */ 5930 if (h_flags & HF_SENSE) { 5931 sym_complete_error (np, cp); 5932 break; 5933 } 5934 5935 /* 5936 * Dequeue all queued CCBs for that device not yet started, 5937 * and restart the SCRIPTS processor immediately. 5938 */ 5939 (void) sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1); 5940 OUTL (nc_dsp, SCRIPT_BA (np, start)); 5941 5942 /* 5943 * Save some info of the actual IO. 5944 * Compute the data residual. 5945 */ 5946 cp->sv_scsi_status = cp->ssss_status; 5947 cp->sv_xerr_status = cp->xerr_status; 5948 cp->sv_resid = sym_compute_residual(np, cp); 5949 5950 /* 5951 * Prepare all needed data structures for 5952 * requesting sense data. 5953 */ 5954 5955 /* 5956 * identify message 5957 */ 5958 cp->scsi_smsg2[0] = M_IDENTIFY | cp->lun; 5959 msglen = 1; 5960 5961 /* 5962 * If we are currently using anything different from 5963 * async. 8 bit data transfers with that target, 5964 * start a negotiation, since the device may want 5965 * to report us a UNIT ATTENTION condition due to 5966 * a cause we currently ignore, and we donnot want 5967 * to be stuck with WIDE and/or SYNC data transfer. 5968 * 5969 * cp->nego_status is filled by sym_prepare_nego(). 5970 */ 5971 cp->nego_status = 0; 5972 nego = 0; 5973 if (tp->tinfo.current.options & PPR_OPT_MASK) 5974 nego = NS_PPR; 5975 else if (tp->tinfo.current.width != BUS_8_BIT) 5976 nego = NS_WIDE; 5977 else if (tp->tinfo.current.offset != 0) 5978 nego = NS_SYNC; 5979 if (nego) 5980 msglen += 5981 sym_prepare_nego (np,cp, nego, &cp->scsi_smsg2[msglen]); 5982 /* 5983 * Message table indirect structure. 
5984 */ 5985 cp->phys.smsg.addr = cpu_to_scr(CCB_PHYS (cp, scsi_smsg2)); 5986 cp->phys.smsg.size = cpu_to_scr(msglen); 5987 5988 /* 5989 * sense command 5990 */ 5991 cp->phys.cmd.addr = cpu_to_scr(CCB_PHYS (cp, sensecmd)); 5992 cp->phys.cmd.size = cpu_to_scr(6); 5993 5994 /* 5995 * patch requested size into sense command 5996 */ 5997 cp->sensecmd[0] = 0x03; 5998 cp->sensecmd[1] = cp->lun << 5; 5999 cp->sensecmd[4] = cp->cam_ccb->csio.sense_len; 6000 cp->data_len = cp->cam_ccb->csio.sense_len; 6001 6002 /* 6003 * sense data 6004 */ 6005 cp->phys.sense.addr = 6006 cpu_to_scr(vtobus(&cp->cam_ccb->csio.sense_data)); 6007 cp->phys.sense.size = 6008 cpu_to_scr(cp->cam_ccb->csio.sense_len); 6009 6010 /* 6011 * requeue the command. 6012 */ 6013 startp = SCRIPTH_BA (np, sdata_in); 6014 6015 cp->phys.savep = cpu_to_scr(startp); 6016 cp->phys.goalp = cpu_to_scr(startp + 16); 6017 cp->phys.lastp = cpu_to_scr(startp); 6018 cp->startp = cpu_to_scr(startp); 6019 6020 cp->actualquirks = SYM_QUIRK_AUTOSAVE; 6021 cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; 6022 cp->ssss_status = S_ILLEGAL; 6023 cp->host_flags = HF_SENSE; 6024 cp->xerr_status = 0; 6025 cp->phys.extra_bytes = 0; 6026 6027 cp->phys.go.start = 6028 cpu_to_scr(SCRIPT_BA (np, select)); 6029 6030 /* 6031 * Requeue the command. 6032 */ 6033 sym_put_start_queue(np, cp); 6034 6035 /* 6036 * Give back to upper layer everything we have dequeued. 6037 */ 6038 sym_flush_comp_queue(np, 0); 6039 break; 6040 } 6041 } 6042 6043 /* 6044 * After a device has accepted some management message 6045 * as BUS DEVICE RESET, ABORT TASK, etc ..., or when 6046 * a device signals a UNIT ATTENTION condition, some 6047 * tasks are thrown away by the device. We are required 6048 * to reflect that on our tasks list since the device 6049 * will never complete these tasks. 
6050 * 6051 * This function move from the BUSY queue to the COMP 6052 * queue all disconnected CCBs for a given target that 6053 * match the following criteria: 6054 * - lun=-1 means any logical UNIT otherwise a given one. 6055 * - task=-1 means any task, otherwise a given one. 6056 */ 6057 static int 6058 sym_clear_tasks(hcb_p np, int cam_status, int target, int lun, int task) 6059 { 6060 SYM_QUEHEAD qtmp, *qp; 6061 int i = 0; 6062 ccb_p cp; 6063 6064 /* 6065 * Move the entire BUSY queue to our temporary queue. 6066 */ 6067 sym_que_init(&qtmp); 6068 sym_que_splice(&np->busy_ccbq, &qtmp); 6069 sym_que_init(&np->busy_ccbq); 6070 6071 /* 6072 * Put all CCBs that matches our criteria into 6073 * the COMP queue and put back other ones into 6074 * the BUSY queue. 6075 */ 6076 while ((qp = sym_remque_head(&qtmp)) != 0) { 6077 union ccb *ccb; 6078 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); 6079 ccb = cp->cam_ccb; 6080 if (cp->host_status != HS_DISCONNECT || 6081 cp->target != target || 6082 (lun != -1 && cp->lun != lun) || 6083 (task != -1 && 6084 (cp->tag != NO_TAG && cp->scsi_smsg[2] != task))) { 6085 sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); 6086 continue; 6087 } 6088 sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); 6089 6090 /* Preserve the software timeout condition */ 6091 if (sym_get_cam_status(ccb) != CAM_CMD_TIMEOUT) 6092 sym_set_cam_status(ccb, cam_status); 6093 ++i; 6094 #if 0 6095 printf("XXXX TASK @%p CLEARED\n", cp); 6096 #endif 6097 } 6098 return i; 6099 } 6100 6101 /* 6102 * chip handler for TASKS recovery 6103 * 6104 * We cannot safely abort a command, while the SCRIPTS 6105 * processor is running, since we just would be in race 6106 * with it. 6107 * 6108 * As long as we have tasks to abort, we keep the SEM 6109 * bit set in the ISTAT. When this bit is set, the 6110 * SCRIPTS processor interrupts (SIR_SCRIPT_STOPPED) 6111 * each time it enters the scheduler. 
 *
 * If we have to reset a target, clear tasks of a unit, 
 * or to perform the abort of a disconnected job, we 
 * restart the SCRIPTS for selecting the target. Once 
 * selected, the SCRIPTS interrupts (SIR_TARGET_SELECTED).
 * If it loses arbitration, the SCRIPTS will interrupt again 
 * the next time it will enter its scheduler, and so on ...
 *
 * On SIR_TARGET_SELECTED, we scan for the more 
 * appropriate thing to do:
 *
 * - If nothing, we just sent a M_ABORT message to the 
 *   target to get rid of the useless SCSI bus ownership.
 *   According to the specs, no tasks shall be affected.
 * - If the target is to be reset, we send it a M_RESET 
 *   message.
 * - If a logical UNIT is to be cleared , we send the 
 *   IDENTIFY(lun) + M_ABORT.
 * - If an untagged task is to be aborted, we send the 
 *   IDENTIFY(lun) + M_ABORT.
 * - If a tagged task is to be aborted, we send the 
 *   IDENTIFY(lun) + task attributes + M_ABORT_TAG.
 *
 * Once our 'kiss of death' :) message has been accepted 
 * by the target, the SCRIPTS interrupts again 
 * (SIR_ABORT_SENT). On this interrupt, we complete 
 * all the CCBs that should have been aborted by the 
 * target according to our message.
 */
static void sym_sir_task_recovery(hcb_p np, int num)
{
	SYM_QUEHEAD *qp;
	ccb_p cp;
	tcb_p tp;
	int target=-1, lun=-1, task;
	int i, k;

	switch(num) {
	/*
	 * The SCRIPTS processor stopped before starting
	 * the next command in order to allow us to perform
	 * some task recovery.
	 */
	case SIR_SCRIPT_STOPPED:
		/*
		 * Do we have any target to reset or unit to clear ?
		 */
		for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
			tp = &np->target[i];
			if (tp->to_reset || 
			    (tp->lun0p && tp->lun0p->to_clear)) {
				target = i;
				break;
			}
			if (!tp->lunmp)
				continue;
			/* lun 0 handled above through lun0p; scan the rest */
			for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
				if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
					target	= i;
					break;
				}
			}
			if (target != -1)
				break;
		}

		/*
		 * If not, walk the busy queue for any 
		 * disconnected CCB to be aborted.
		 */
		if (target == -1) {
			FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
				cp = sym_que_entry(qp,struct sym_ccb,link_ccbq);
				if (cp->host_status != HS_DISCONNECT)
					continue;
				if (cp->to_abort) {
					target = cp->target;
					break;
				}
			}
		}

		/*
		 * If some target is to be selected, 
		 * prepare and start the selection.
		 */
		if (target != -1) {
			tp = &np->target[target];
			np->abrt_sel.sel_id	= target;
			np->abrt_sel.sel_scntl3 = tp->wval;
			np->abrt_sel.sel_sxfer  = tp->sval;
			OUTL(nc_dsa, vtobus(np));
			OUTL (nc_dsp, SCRIPTH_BA (np, sel_for_abort));
			return;
		}

		/*
		 * Now look for a CCB to abort that haven't started yet.
		 * Btw, the SCRIPTS processor is still stopped, so 
		 * we are not in race.
		 */
		i = 0;
		cp = 0;
		FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
			cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
			if (cp->host_status != HS_BUSY &&
			    cp->host_status != HS_NEGOTIATE)
				continue;
			if (!cp->to_abort)
				continue;
#ifdef SYM_CONF_IARB_SUPPORT
			/*
			 * If we are using IMMEDIATE ARBITRATION, we donnot 
			 * want to cancel the last queued CCB, since the 
			 * SCRIPTS may have anticipated the selection.
			 */
			if (cp == np->last_cp) {
				cp->to_abort = 0;
				continue;
			}
#endif
			i = 1;	/* Means we have found some */
			break;
		}
		if (!i) {
			/*
			 * We are done, so we donnot need 
			 * to synchronize with the SCRIPTS anylonger.
			 * Remove the SEM flag from the ISTAT.
			 */
			np->istat_sem = 0;
			OUTB (nc_istat, SIGP);
			break;
		}
		/*
		 * Compute index of next position in the start 
		 * queue the SCRIPTS intends to start and dequeue 
		 * all CCBs for that device that haven't been started.
		 * NOTE(review): same SCRATCHA/STARTPOS convention as
		 * sym_sir_bad_scsi_status() — each queue slot is 4 bytes.
		 */
		i = (INL (nc_scratcha) - vtobus(np->squeue)) / 4;
		i = sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);

		/*
		 * Make sure at least our IO to abort has been dequeued.
		 */
		assert(i && sym_get_cam_status(cp->cam_ccb) == CAM_REQUEUE_REQ);

		/*
		 * Keep track in cam status of the reason of the abort.
		 * (to_abort == 2 flags an abort caused by a software timeout)
		 */
		if (cp->to_abort == 2)
			sym_set_cam_status(cp->cam_ccb, CAM_CMD_TIMEOUT);
		else
			sym_set_cam_status(cp->cam_ccb, CAM_REQ_ABORTED);

		/*
		 * Complete with error everything that we have dequeued.
		 */
		sym_flush_comp_queue(np, 0);
		break;
	/*
	 * The SCRIPTS processor has selected a target 
	 * we may have some manual recovery to perform for.
	 */
	case SIR_TARGET_SELECTED:
		target = (INB (nc_sdid) & 0xf);
		tp = &np->target[target];

		np->abrt_tbl.addr = vtobus(np->abrt_msg);

		/*
		 * If the target is to be reset, prepare a 
		 * M_RESET message and clear the to_reset flag 
		 * since we donnot expect this operation to fail.
		 */
		if (tp->to_reset) {
			np->abrt_msg[0] = M_RESET;
			np->abrt_tbl.size = 1;
			tp->to_reset = 0;
			break;
		}

		/*
		 * Otherwise, look for some logical unit to be cleared.
		 */
		if (tp->lun0p && tp->lun0p->to_clear)
			lun = 0;
		else if (tp->lunmp) {
			for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
				if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
					lun = k;
					break;
				}
			}
		}

		/*
		 * If a logical unit is to be cleared, prepare 
		 * an IDENTIFY(lun) + ABORT MESSAGE.
		 */
		if (lun != -1) {
			lcb_p lp = sym_lp(np, tp, lun);
			lp->to_clear = 0; /* We donnot expect to fail here */
			np->abrt_msg[0] = M_IDENTIFY | lun;
			np->abrt_msg[1] = M_ABORT;
			np->abrt_tbl.size = 2;
			break;
		}

		/*
		 * Otherwise, look for some disconnected job to 
		 * abort for this target.
		 */
		i = 0;
		cp = 0;
		FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
			cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
			if (cp->host_status != HS_DISCONNECT)
				continue;
			if (cp->target != target)
				continue;
			if (!cp->to_abort)
				continue;
			i = 1;	/* Means we have some */
			break;
		}

		/*
		 * If we have none, probably since the device has 
		 * completed the command before we won abitration,
		 * send a M_ABORT message without IDENTIFY.
		 * According to the specs, the device must just 
		 * disconnect the BUS and not abort any task.
		 */
		if (!i) {
			np->abrt_msg[0] = M_ABORT;
			np->abrt_tbl.size = 1;
			break;
		}

		/*
		 * We have some task to abort.
		 * Set the IDENTIFY(lun)
		 */
		np->abrt_msg[0] = M_IDENTIFY | cp->lun;

		/*
		 * If we want to abort an untagged command, we 
		 * will send a IDENTIFY + M_ABORT.
		 * Otherwise (tagged command), we will send 
		 * a IDENTITFY + task attributes + ABORT TAG.
		 */
		if (cp->tag == NO_TAG) {
			np->abrt_msg[1] = M_ABORT;
			np->abrt_tbl.size = 2;
		}
		else {
			/* scsi_smsg[1]/[2] hold the queue tag message bytes */
			np->abrt_msg[1] = cp->scsi_smsg[1];
			np->abrt_msg[2] = cp->scsi_smsg[2];
			np->abrt_msg[3] = M_ABORT_TAG;
			np->abrt_tbl.size = 4;
		}
		/*
		 * Keep track of software timeout condition, since the 
		 * peripheral driver may not count retries on abort 
		 * conditions not due to timeout.
		 */
		if (cp->to_abort == 2)
			sym_set_cam_status(cp->cam_ccb, CAM_CMD_TIMEOUT);
		cp->to_abort = 0; /* We donnot expect to fail here */
		break;

	/*
	 * The target has accepted our message and switched 
	 * to BUS FREE phase as we expected.
	 */
	case SIR_ABORT_SENT:
		target = (INB (nc_sdid) & 0xf);
		tp = &np->target[target];

		/*
		** If we didn't abort anything, leave here.
		*/
		if (np->abrt_msg[0] == M_ABORT)
			break;

		/*
		 * If we sent a M_RESET, then a hardware reset has 
		 * been performed by the target.
		 * - Reset everything to async 8 bit
		 * - Tell ourself to negotiate next time :-)
		 * - Prepare to clear all disconnected CCBs for 
		 *   this target from our task list (lun=task=-1)
		 */
		lun = -1;
		task = -1;
		if (np->abrt_msg[0] == M_RESET) {
			tp->sval = 0;
			tp->wval = np->rv_scntl3;
			tp->uval = 0;
			tp->tinfo.current.period = 0;
			tp->tinfo.current.offset = 0;
			tp->tinfo.current.width  = BUS_8_BIT;
			tp->tinfo.current.options = 0;
		}

		/*
		 * Otherwise, check for the LUN and TASK(s) 
		 * concerned by the cancelation.
		 * If it is not ABORT_TAG then it is CLEAR_QUEUE 
		 * or an ABORT message :-)
		 */
		else {
			lun = np->abrt_msg[0] & 0x3f;
			if (np->abrt_msg[1] == M_ABORT_TAG)
				task = np->abrt_msg[2];
		}

		/*
		 * Complete all the CCBs the device should have 
		 * aborted due to our 'kiss of death' message.
		 */
		i = (INL (nc_scratcha) - vtobus(np->squeue)) / 4;
		(void) sym_dequeue_from_squeue(np, i, target, lun, -1);
		(void) sym_clear_tasks(np, CAM_REQ_ABORTED, target, lun, task);
		sym_flush_comp_queue(np, 0);

		/*
		 * If we sent a BDR, make uper layer aware of that.
		 */
		if (np->abrt_msg[0] == M_RESET)
			xpt_async(AC_SENT_BDR, np->path, NULL);
		break;
	}

	/*
	 * Print to the log the message we intend to send.
	 */
	if (num == SIR_TARGET_SELECTED) {
		PRINT_TARGET(np, target);
		sym_printl_hex("control msgout:", np->abrt_msg,
			      np->abrt_tbl.size);
		np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size);
	}

	/*
	 * Let the SCRIPTS processor continue.
	 */
	OUTONB (nc_dcntl, (STD|NOCOM));
}

/*
 * Gerard's alchemy:) that deals with with the data 
 * pointer for both MDP and the residual calculation.
 *
 * I didn't want to bloat the code by more than 200 
 * lignes for the handling of both MDP and the residual.
 * This has been achieved by using a data pointer 
 * representation consisting in an index in the data 
 * array (dp_sg) and a negative offset (dp_ofs) that 
 * have the following meaning:
 *
 * - dp_sg = SYM_CONF_MAX_SG
 *   we are at the end of the data script.
 * - dp_sg < SYM_CONF_MAX_SG
 *   dp_sg points to the next entry of the scatter array 
 *   we want to transfer.
 * - dp_ofs < 0
 *   dp_ofs represents the residual of bytes of the 
 *   previous entry scatter entry we will send first.
 * - dp_ofs = 0
 *   no residual to send first.
 *
 * The function sym_evaluate_dp() accepts an arbitray 
 * offset (basically from the MDP message) and returns 
 * the corresponding values of dp_sg and dp_ofs.
 */

/*
 * Translate a SCRIPTS data pointer (script address + signed byte
 * offset) into the (dp_sg, dp_ofs) representation described above.
 * Returns dp_sg (and stores dp_ofs through *ofs), or -1 when the
 * resulting pointer falls outside the data area.
 */
static int sym_evaluate_dp(hcb_p np, ccb_p cp, u32 scr, int *ofs)
{
	u32	dp_scr;
	int	dp_ofs, dp_sg, dp_sgmin;
	int	tmp;
	struct sym_pmc *pm;

	/*
	 * Compute the resulted data pointer in term of a script 
	 * address within some DATA script and a signed byte offset.
	 */
	dp_scr = scr;
	dp_ofs = *ofs;
	if	(dp_scr == SCRIPT_BA (np, pm0_data))
		pm = &cp->phys.pm0;
	else if (dp_scr == SCRIPT_BA (np, pm1_data))
		pm = &cp->phys.pm1;
	else
		pm = 0;

	if (pm) {
		/* Fold the phase-mismatch context into (address, offset) */
		dp_scr  = scr_to_cpu(pm->ret);
		dp_ofs -= scr_to_cpu(pm->sg.size);
	}

	/*
	 * If we are auto-sensing, then we are done.
	 */
	if (cp->host_flags & HF_SENSE) {
		*ofs = dp_ofs;
		return 0;
	}

	/*
	 * Deduce the index of the sg entry.
	 * Keep track of the index of the first valid entry.
	 * If result is dp_sg = SYM_CONF_MAX_SG, then we are at the 
	 * end of the data.
	 * (Each sg move takes 2 32-bit script words, hence 2*4 bytes.)
	 */
	tmp = scr_to_cpu(cp->phys.goalp);
	dp_sg = SYM_CONF_MAX_SG;
	if (dp_scr != tmp)
		dp_sg -= (tmp - 8 - (int)dp_scr) / (2*4);
	dp_sgmin = SYM_CONF_MAX_SG - cp->segments;

	/*
	 * Move to the sg entry the data pointer belongs to.
	 *
	 * If we are inside the data area, we expect result to be:
	 *
	 * Either,
	 *	dp_ofs = 0 and dp_sg is the index of the sg entry
	 *	the data pointer belongs to (or the end of the data)
	 * Or,
	 *	dp_ofs < 0 and dp_sg is the index of the sg entry 
	 *	the data pointer belongs to + 1.
	 */
	if (dp_ofs < 0) {
		int n;
		while (dp_sg > dp_sgmin) {
			--dp_sg;
			tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
			/* Only the low 24 bits hold the byte count */
			n = dp_ofs + (tmp & 0xffffff);
			if (n > 0) {
				++dp_sg;
				break;
			}
			dp_ofs = n;
		}
	}
	else if (dp_ofs > 0) {
		while (dp_sg < SYM_CONF_MAX_SG) {
			tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
			dp_ofs -= (tmp & 0xffffff);
			++dp_sg;
			if (dp_ofs <= 0)
				break;
		}
	}

	/*
	 * Make sure the data pointer is inside the data area.
	 * If not, return some error.
	 */
	if	(dp_sg < dp_sgmin || (dp_sg == dp_sgmin && dp_ofs < 0))
		goto out_err;
	else if (dp_sg > SYM_CONF_MAX_SG ||
		 (dp_sg == SYM_CONF_MAX_SG && dp_ofs > 0))
		goto out_err;

	/*
	 * Save the extreme pointer if needed.
	 */
	if	(dp_sg > cp->ext_sg ||
		 (dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) {
		cp->ext_sg  = dp_sg;
		cp->ext_ofs = dp_ofs;
	}

	/*
	 * Return data.
	 */
	*ofs = dp_ofs;
	return dp_sg;

out_err:
#ifdef SYM_DEBUG_PM_WITH_WSR
	printf("XXXX dp_sg=%d dp_sgmin=%d dp_ofs=%d, SYM_CONF_MAX_SG=%d\n",
		dp_sg, dp_sgmin, dp_ofs, SYM_CONF_MAX_SG);
#endif
	return -1;
}

/*
 * chip handler for MODIFY DATA POINTER MESSAGE
 *
 * We also call this function on IGNORE WIDE RESIDUE 
 * messages that do not match a SWIDE full condition.
 * Btw, we assume in that situation that such a message 
 * is equivalent to a MODIFY DATA POINTER (offset=-1).
 */
static void sym_modify_dp(hcb_p np, tcb_p tp, ccb_p cp, int ofs)
{
	int dp_ofs	= ofs;
	u32	dp_scr	= INL (nc_temp);
	u32	dp_ret;
	u32	tmp;
	u_char	hflags;
	int	dp_sg;
	struct	sym_pmc *pm;

	/*
	 * Not supported for auto-sense.
	 */
	if (cp->host_flags & HF_SENSE)
		goto out_reject;

	/*
	 * Apply our alchemy:) (see comments in sym_evaluate_dp()), 
	 * to the resulted data pointer.
	 */
	dp_sg = sym_evaluate_dp(np, cp, dp_scr, &dp_ofs);
	if (dp_sg < 0)
		goto out_reject;

	/*
	 * And our alchemy:) allows to easily calculate the data 
	 * script address we want to return for the next data phase.
	 */
	dp_ret = cpu_to_scr(cp->phys.goalp);
	dp_ret = dp_ret - 8 - (SYM_CONF_MAX_SG - dp_sg) * (2*4);

	/*
	 * If offset / scatter entry is zero we donnot need 
	 * a context for the new current data pointer.
	 */
	if (dp_ofs == 0) {
		dp_scr = dp_ret;
		goto out_ok;
	}

	/*
	 * Get a context for the new current data pointer.
	 */
	hflags = INB (HF_PRT);

	if (hflags & HF_DP_SAVED)
		hflags ^= HF_ACT_PM;

	if (!(hflags & HF_ACT_PM)) {
		pm = &cp->phys.pm0;
		dp_scr = SCRIPT_BA (np, pm0_data);
	}
	else {
		pm = &cp->phys.pm1;
		dp_scr = SCRIPT_BA (np, pm1_data);
	}

	hflags &= ~(HF_DP_SAVED);

	OUTB (HF_PRT, hflags);

	/*
	 * Set up the new current data pointer.
	 * ofs < 0 there, and for the next data phase, we 
	 * want to transfer part of the data of the sg entry 
	 * corresponding to index dp_sg-1 prior to returning 
	 * to the main data script.
	 */
	pm->ret = cpu_to_scr(dp_ret);
	tmp  = scr_to_cpu(cp->phys.data[dp_sg-1].addr);
	tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs;
	pm->sg.addr = cpu_to_scr(tmp);
	pm->sg.size = cpu_to_scr(-dp_ofs);

out_ok:
	OUTL (nc_temp, dp_scr);
	OUTL (nc_dsp, SCRIPT_BA (np, clrack));
	return;

out_reject:
	OUTL (nc_dsp, SCRIPTH_BA (np, msg_bad));
}


/*
 * chip calculation of the data residual.
6699 * 6700 * As I used to say, the requirement of data residual 6701 * in SCSI is broken, useless and cannot be achieved 6702 * without huge complexity. 6703 * But most OSes and even the official CAM require it. 6704 * When stupidity happens to be so widely spread inside 6705 * a community, it gets hard to convince. 6706 * 6707 * Anyway, I don't care, since I am not going to use 6708 * any software that considers this data residual as 6709 * a relevant information. :) 6710 */ 6711 6712 static int sym_compute_residual(hcb_p np, ccb_p cp) 6713 { 6714 int dp_sg, dp_sgmin, resid = 0; 6715 int dp_ofs = 0; 6716 6717 /* 6718 * Check for some data lost or just thrown away. 6719 * We are not required to be quite accurate in this 6720 * situation. Btw, if we are odd for output and the 6721 * device claims some more data, it may well happen 6722 * than our residual be zero. :-) 6723 */ 6724 if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) { 6725 if (cp->xerr_status & XE_EXTRA_DATA) 6726 resid -= scr_to_cpu(cp->phys.extra_bytes); 6727 if (cp->xerr_status & XE_SODL_UNRUN) 6728 ++resid; 6729 if (cp->xerr_status & XE_SWIDE_OVRUN) 6730 --resid; 6731 } 6732 6733 /* 6734 * If all data has been transferred, 6735 * there is no residual. 6736 */ 6737 if (cp->phys.lastp == cp->phys.goalp) 6738 return resid; 6739 6740 /* 6741 * If no data transfer occurs, or if the data 6742 * pointer is weird, return full residual. 6743 */ 6744 if (cp->startp == cp->phys.lastp || 6745 sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.lastp), &dp_ofs) < 0) { 6746 return cp->data_len; 6747 } 6748 6749 /* 6750 * If we were auto-sensing, then we are done. 6751 */ 6752 if (cp->host_flags & HF_SENSE) { 6753 return -dp_ofs; 6754 } 6755 6756 /* 6757 * We are now full comfortable in the computation 6758 * of the data residual (2's complement). 
6759 */ 6760 dp_sgmin = SYM_CONF_MAX_SG - cp->segments; 6761 resid = -cp->ext_ofs; 6762 for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) { 6763 u_long tmp = scr_to_cpu(cp->phys.data[dp_sg].size); 6764 resid += (tmp & 0xffffff); 6765 } 6766 6767 /* 6768 * Hopefully, the result is not too wrong. 6769 */ 6770 return resid; 6771 } 6772 6773 /* 6774 * Print out the containt of a SCSI message. 6775 */ 6776 6777 static int sym_show_msg (u_char * msg) 6778 { 6779 u_char i; 6780 printf ("%x",*msg); 6781 if (*msg==M_EXTENDED) { 6782 for (i=1;i<8;i++) { 6783 if (i-1>msg[1]) break; 6784 printf ("-%x",msg[i]); 6785 }; 6786 return (i+1); 6787 } else if ((*msg & 0xf0) == 0x20) { 6788 printf ("-%x",msg[1]); 6789 return (2); 6790 }; 6791 return (1); 6792 } 6793 6794 static void sym_print_msg (ccb_p cp, char *label, u_char *msg) 6795 { 6796 PRINT_ADDR(cp); 6797 if (label) 6798 printf ("%s: ", label); 6799 6800 (void) sym_show_msg (msg); 6801 printf (".\n"); 6802 } 6803 6804 /* 6805 * Negotiation for WIDE and SYNCHRONOUS DATA TRANSFER. 6806 * 6807 * We try to negotiate sync and wide transfer only after 6808 * a successfull inquire command. We look at byte 7 of the 6809 * inquire data to determine the capabilities of the target. 6810 * 6811 * When we try to negotiate, we append the negotiation message 6812 * to the identify and (maybe) simple tag message. 6813 * The host status field is set to HS_NEGOTIATE to mark this 6814 * situation. 6815 * 6816 * If the target doesn't answer this message immediately 6817 * (as required by the standard), the SIR_NEGO_FAILED interrupt 6818 * will be raised eventually. 6819 * The handler removes the HS_NEGOTIATE status, and sets the 6820 * negotiated value to the default (async / nowide). 6821 * 6822 * If we receive a matching answer immediately, we check it 6823 * for validity, and set the values. 6824 * 6825 * If we receive a Reject message immediately, we assume the 6826 * negotiation has failed, and fall back to standard values. 
6827 * 6828 * If we receive a negotiation message while not in HS_NEGOTIATE 6829 * state, it's a target initiated negotiation. We prepare a 6830 * (hopefully) valid answer, set our parameters, and send back 6831 * this answer to the target. 6832 * 6833 * If the target doesn't fetch the answer (no message out phase), 6834 * we assume the negotiation has failed, and fall back to default 6835 * settings (SIR_NEGO_PROTO interrupt). 6836 * 6837 * When we set the values, we adjust them in all ccbs belonging 6838 * to this target, in the controller's register, and in the "phys" 6839 * field of the controller's struct sym_hcb. 6840 */ 6841 6842 /* 6843 * chip handler for SYNCHRONOUS DATA TRANSFER REQUEST (SDTR) message. 6844 */ 6845 static void sym_sync_nego(hcb_p np, tcb_p tp, ccb_p cp) 6846 { 6847 u_char chg, ofs, per, fak, div; 6848 int req = 1; 6849 6850 /* 6851 * Synchronous request message received. 6852 */ 6853 if (DEBUG_FLAGS & DEBUG_NEGO) { 6854 sym_print_msg(cp, "sync msgin", np->msgin); 6855 }; 6856 6857 /* 6858 * request or answer ? 6859 */ 6860 if (INB (HS_PRT) == HS_NEGOTIATE) { 6861 OUTB (HS_PRT, HS_BUSY); 6862 if (cp->nego_status && cp->nego_status != NS_SYNC) 6863 goto reject_it; 6864 req = 0; 6865 } 6866 6867 /* 6868 * get requested values. 6869 */ 6870 chg = 0; 6871 per = np->msgin[3]; 6872 ofs = np->msgin[4]; 6873 6874 /* 6875 * check values against our limits. 
6876 */ 6877 if (ofs) { 6878 if (ofs > np->maxoffs) 6879 {chg = 1; ofs = np->maxoffs;} 6880 if (req) { 6881 if (ofs > tp->tinfo.user.offset) 6882 {chg = 1; ofs = tp->tinfo.user.offset;} 6883 } 6884 } 6885 6886 if (ofs) { 6887 if (per < np->minsync) 6888 {chg = 1; per = np->minsync;} 6889 if (req) { 6890 if (per < tp->tinfo.user.period) 6891 {chg = 1; per = tp->tinfo.user.period;} 6892 } 6893 } 6894 6895 div = fak = 0; 6896 if (ofs && sym_getsync(np, 0, per, &div, &fak) < 0) 6897 goto reject_it; 6898 6899 if (DEBUG_FLAGS & DEBUG_NEGO) { 6900 PRINT_ADDR(cp); 6901 printf ("sdtr: ofs=%d per=%d div=%d fak=%d chg=%d.\n", 6902 ofs, per, div, fak, chg); 6903 } 6904 6905 /* 6906 * This was an answer message 6907 */ 6908 if (req == 0) { 6909 if (chg) /* Answer wasn't acceptable. */ 6910 goto reject_it; 6911 sym_setsync (np, cp, ofs, per, div, fak); 6912 OUTL (nc_dsp, SCRIPT_BA (np, clrack)); 6913 return; 6914 } 6915 6916 /* 6917 * It was a request. Set value and 6918 * prepare an answer message 6919 */ 6920 sym_setsync (np, cp, ofs, per, div, fak); 6921 6922 np->msgout[0] = M_EXTENDED; 6923 np->msgout[1] = 3; 6924 np->msgout[2] = M_X_SYNC_REQ; 6925 np->msgout[3] = per; 6926 np->msgout[4] = ofs; 6927 6928 cp->nego_status = NS_SYNC; 6929 6930 if (DEBUG_FLAGS & DEBUG_NEGO) { 6931 sym_print_msg(cp, "sync msgout", np->msgout); 6932 } 6933 6934 np->msgin [0] = M_NOOP; 6935 6936 OUTL (nc_dsp, SCRIPTH_BA (np, sdtr_resp)); 6937 return; 6938 reject_it: 6939 sym_setsync (np, cp, 0, 0, 0, 0); 6940 OUTL (nc_dsp, SCRIPTH_BA (np, msg_bad)); 6941 } 6942 6943 /* 6944 * chip handler for PARALLEL PROTOCOL REQUEST (PPR) message. 6945 */ 6946 static void sym_ppr_nego(hcb_p np, tcb_p tp, ccb_p cp) 6947 { 6948 u_char chg, ofs, per, fak, dt, div, wide; 6949 int req = 1; 6950 6951 /* 6952 * Synchronous request message received. 6953 */ 6954 if (DEBUG_FLAGS & DEBUG_NEGO) { 6955 sym_print_msg(cp, "ppr msgin", np->msgin); 6956 }; 6957 6958 /* 6959 * request or answer ? 
6960 */ 6961 if (INB (HS_PRT) == HS_NEGOTIATE) { 6962 OUTB (HS_PRT, HS_BUSY); 6963 if (cp->nego_status && cp->nego_status != NS_PPR) 6964 goto reject_it; 6965 req = 0; 6966 } 6967 6968 /* 6969 * get requested values. 6970 */ 6971 chg = 0; 6972 per = np->msgin[3]; 6973 ofs = np->msgin[5]; 6974 wide = np->msgin[6]; 6975 dt = np->msgin[7] & PPR_OPT_DT; 6976 6977 /* 6978 * check values against our limits. 6979 */ 6980 if (wide > np->maxwide) 6981 {chg = 1; wide = np->maxwide;} 6982 if (!wide || !(np->features & FE_ULTRA3)) 6983 dt &= ~PPR_OPT_DT; 6984 if (req) { 6985 if (wide > tp->tinfo.user.width) 6986 {chg = 1; wide = tp->tinfo.user.width;} 6987 } 6988 6989 if (!(np->features & FE_U3EN)) /* Broken U3EN bit not supported */ 6990 dt &= ~PPR_OPT_DT; 6991 6992 if (dt != (np->msgin[7] & PPR_OPT_MASK)) chg = 1; 6993 6994 if (ofs) { 6995 if (ofs > np->maxoffs) 6996 {chg = 1; ofs = np->maxoffs;} 6997 if (req) { 6998 if (ofs > tp->tinfo.user.offset) 6999 {chg = 1; ofs = tp->tinfo.user.offset;} 7000 } 7001 } 7002 7003 if (ofs) { 7004 if (dt) { 7005 if (per < np->minsync_dt) 7006 {chg = 1; per = np->minsync_dt;} 7007 } 7008 else if (per < np->minsync) 7009 {chg = 1; per = np->minsync;} 7010 if (req) { 7011 if (per < tp->tinfo.user.period) 7012 {chg = 1; per = tp->tinfo.user.period;} 7013 } 7014 } 7015 7016 div = fak = 0; 7017 if (ofs && sym_getsync(np, dt, per, &div, &fak) < 0) 7018 goto reject_it; 7019 7020 if (DEBUG_FLAGS & DEBUG_NEGO) { 7021 PRINT_ADDR(cp); 7022 printf ("ppr: " 7023 "dt=%x ofs=%d per=%d wide=%d div=%d fak=%d chg=%d.\n", 7024 dt, ofs, per, wide, div, fak, chg); 7025 } 7026 7027 /* 7028 * It was an answer. 7029 */ 7030 if (req == 0) { 7031 if (chg) /* Answer wasn't acceptable */ 7032 goto reject_it; 7033 sym_setpprot (np, cp, dt, ofs, per, wide, div, fak); 7034 OUTL (nc_dsp, SCRIPT_BA (np, clrack)); 7035 return; 7036 } 7037 7038 /* 7039 * It was a request. 
Set value and 7040 * prepare an answer message 7041 */ 7042 sym_setpprot (np, cp, dt, ofs, per, wide, div, fak); 7043 7044 np->msgout[0] = M_EXTENDED; 7045 np->msgout[1] = 6; 7046 np->msgout[2] = M_X_PPR_REQ; 7047 np->msgout[3] = per; 7048 np->msgout[4] = 0; 7049 np->msgout[5] = ofs; 7050 np->msgout[6] = wide; 7051 np->msgout[7] = dt; 7052 7053 cp->nego_status = NS_PPR; 7054 7055 if (DEBUG_FLAGS & DEBUG_NEGO) { 7056 sym_print_msg(cp, "ppr msgout", np->msgout); 7057 } 7058 7059 np->msgin [0] = M_NOOP; 7060 7061 OUTL (nc_dsp, SCRIPTH_BA (np, ppr_resp)); 7062 return; 7063 reject_it: 7064 sym_setpprot (np, cp, 0, 0, 0, 0, 0, 0); 7065 OUTL (nc_dsp, SCRIPTH_BA (np, msg_bad)); 7066 } 7067 7068 /* 7069 * chip handler for WIDE DATA TRANSFER REQUEST (WDTR) message. 7070 */ 7071 static void sym_wide_nego(hcb_p np, tcb_p tp, ccb_p cp) 7072 { 7073 u_char chg, wide; 7074 int req = 1; 7075 7076 /* 7077 * Wide request message received. 7078 */ 7079 if (DEBUG_FLAGS & DEBUG_NEGO) { 7080 sym_print_msg(cp, "wide msgin", np->msgin); 7081 }; 7082 7083 /* 7084 * Is it an request from the device? 7085 */ 7086 if (INB (HS_PRT) == HS_NEGOTIATE) { 7087 OUTB (HS_PRT, HS_BUSY); 7088 if (cp->nego_status && cp->nego_status != NS_WIDE) 7089 goto reject_it; 7090 req = 0; 7091 } 7092 7093 /* 7094 * get requested values. 7095 */ 7096 chg = 0; 7097 wide = np->msgin[3]; 7098 7099 /* 7100 * check values against driver limits. 7101 */ 7102 if (wide > np->maxoffs) 7103 {chg = 1; wide = np->maxoffs;} 7104 if (req) { 7105 if (wide > tp->tinfo.user.width) 7106 {chg = 1; wide = tp->tinfo.user.width;} 7107 } 7108 7109 if (DEBUG_FLAGS & DEBUG_NEGO) { 7110 PRINT_ADDR(cp); 7111 printf ("wdtr: wide=%d chg=%d.\n", wide, chg); 7112 } 7113 7114 /* 7115 * This was an answer message 7116 */ 7117 if (req == 0) { 7118 if (chg) /* Answer wasn't acceptable. */ 7119 goto reject_it; 7120 sym_setwide (np, cp, wide); 7121 #if 1 7122 /* 7123 * Negotiate for SYNC immediately after WIDE response. 
7124 * This allows to negotiate for both WIDE and SYNC on 7125 * a single SCSI command (Suggested by Justin Gibbs). 7126 */ 7127 if (tp->tinfo.goal.offset) { 7128 np->msgout[0] = M_EXTENDED; 7129 np->msgout[1] = 3; 7130 np->msgout[2] = M_X_SYNC_REQ; 7131 np->msgout[3] = tp->tinfo.goal.period; 7132 np->msgout[4] = tp->tinfo.goal.offset; 7133 7134 if (DEBUG_FLAGS & DEBUG_NEGO) { 7135 sym_print_msg(cp, "sync msgout", np->msgout); 7136 } 7137 7138 cp->nego_status = NS_SYNC; 7139 OUTB (HS_PRT, HS_NEGOTIATE); 7140 OUTL (nc_dsp, SCRIPTH_BA (np, sdtr_resp)); 7141 return; 7142 } 7143 #endif 7144 OUTL (nc_dsp, SCRIPT_BA (np, clrack)); 7145 return; 7146 }; 7147 7148 /* 7149 * It was a request, set value and 7150 * prepare an answer message 7151 */ 7152 sym_setwide (np, cp, wide); 7153 7154 np->msgout[0] = M_EXTENDED; 7155 np->msgout[1] = 2; 7156 np->msgout[2] = M_X_WIDE_REQ; 7157 np->msgout[3] = wide; 7158 7159 np->msgin [0] = M_NOOP; 7160 7161 cp->nego_status = NS_WIDE; 7162 7163 if (DEBUG_FLAGS & DEBUG_NEGO) { 7164 sym_print_msg(cp, "wide msgout", np->msgout); 7165 } 7166 7167 OUTL (nc_dsp, SCRIPTH_BA (np, wdtr_resp)); 7168 return; 7169 reject_it: 7170 OUTL (nc_dsp, SCRIPTH_BA (np, msg_bad)); 7171 } 7172 7173 /* 7174 * Reset SYNC or WIDE to default settings. 7175 * 7176 * Called when a negotiation does not succeed either 7177 * on rejection or on protocol error. 7178 */ 7179 static void sym_nego_default(hcb_p np, tcb_p tp, ccb_p cp) 7180 { 7181 /* 7182 * any error in negotiation: 7183 * fall back to default mode. 7184 */ 7185 switch (cp->nego_status) { 7186 case NS_PPR: 7187 sym_setpprot (np, cp, 0, 0, 0, 0, 0, 0); 7188 break; 7189 case NS_SYNC: 7190 sym_setsync (np, cp, 0, 0, 0, 0); 7191 break; 7192 case NS_WIDE: 7193 sym_setwide (np, cp, 0); 7194 break; 7195 }; 7196 np->msgin [0] = M_NOOP; 7197 np->msgout[0] = M_NOOP; 7198 cp->nego_status = 0; 7199 } 7200 7201 /* 7202 * chip handler for MESSAGE REJECT received in response to 7203 * a WIDE or SYNCHRONOUS negotiation. 
7204 */ 7205 static void sym_nego_rejected(hcb_p np, tcb_p tp, ccb_p cp) 7206 { 7207 sym_nego_default(np, tp, cp); 7208 OUTB (HS_PRT, HS_BUSY); 7209 } 7210 7211 /* 7212 * chip exception handler for programmed interrupts. 7213 */ 7214 void sym_int_sir (hcb_p np) 7215 { 7216 u_char num = INB (nc_dsps); 7217 u_long dsa = INL (nc_dsa); 7218 ccb_p cp = sym_ccb_from_dsa(np, dsa); 7219 u_char target = INB (nc_sdid) & 0x0f; 7220 tcb_p tp = &np->target[target]; 7221 int tmp; 7222 7223 if (DEBUG_FLAGS & DEBUG_TINY) printf ("I#%d", num); 7224 7225 switch (num) { 7226 #ifdef SYM_DEBUG_PM_WITH_WSR 7227 case SIR_PM_WITH_WSR: 7228 printf ("%s:%d: HW PM with WSR bit set - ", 7229 sym_name (np), target); 7230 tmp = 7231 (vtobus(&cp->phys.data[SYM_CONF_MAX_SG]) - INL (nc_esa))/8; 7232 printf("RBC=%d - SEG=%d - SIZE=%d - OFFS=%d\n", 7233 INL (nc_rbc), cp->segments - tmp, 7234 cp->phys.data[SYM_CONF_MAX_SG - tmp].size, 7235 INL (nc_ua) - cp->phys.data[SYM_CONF_MAX_SG - tmp].addr); 7236 goto out; 7237 #endif 7238 /* 7239 * Command has been completed with error condition 7240 * or has been auto-sensed. 7241 */ 7242 case SIR_COMPLETE_ERROR: 7243 sym_complete_error(np, cp); 7244 return; 7245 /* 7246 * The C code is currently trying to recover from something. 7247 * Typically, user want to abort some command. 7248 */ 7249 case SIR_SCRIPT_STOPPED: 7250 case SIR_TARGET_SELECTED: 7251 case SIR_ABORT_SENT: 7252 sym_sir_task_recovery(np, num); 7253 return; 7254 /* 7255 * The device didn't go to MSG OUT phase after having 7256 * been selected with ATN. We donnot want to handle 7257 * that. 7258 */ 7259 case SIR_SEL_ATN_NO_MSG_OUT: 7260 printf ("%s:%d: No MSG OUT phase after selection with ATN.\n", 7261 sym_name (np), target); 7262 goto out_stuck; 7263 /* 7264 * The device didn't switch to MSG IN phase after 7265 * having reseleted the initiator. 
7266 */ 7267 case SIR_RESEL_NO_MSG_IN: 7268 printf ("%s:%d: No MSG IN phase after reselection.\n", 7269 sym_name (np), target); 7270 goto out_stuck; 7271 /* 7272 * After reselection, the device sent a message that wasn't 7273 * an IDENTIFY. 7274 */ 7275 case SIR_RESEL_NO_IDENTIFY: 7276 printf ("%s:%d: No IDENTIFY after reselection.\n", 7277 sym_name (np), target); 7278 goto out_stuck; 7279 /* 7280 * The device reselected a LUN we donnot know about. 7281 */ 7282 case SIR_RESEL_BAD_LUN: 7283 np->msgout[0] = M_RESET; 7284 goto out; 7285 /* 7286 * The device reselected for an untagged nexus and we 7287 * haven't any. 7288 */ 7289 case SIR_RESEL_BAD_I_T_L: 7290 np->msgout[0] = M_ABORT; 7291 goto out; 7292 /* 7293 * The device reselected for a tagged nexus that we donnot 7294 * have. 7295 */ 7296 case SIR_RESEL_BAD_I_T_L_Q: 7297 np->msgout[0] = M_ABORT_TAG; 7298 goto out; 7299 /* 7300 * The SCRIPTS let us know that the device has grabbed 7301 * our message and will abort the job. 7302 */ 7303 case SIR_RESEL_ABORTED: 7304 np->lastmsg = np->msgout[0]; 7305 np->msgout[0] = M_NOOP; 7306 printf ("%s:%d: message %x sent on bad reselection.\n", 7307 sym_name (np), target, np->lastmsg); 7308 goto out; 7309 /* 7310 * The SCRIPTS let us know that a message has been 7311 * successfully sent to the device. 7312 */ 7313 case SIR_MSG_OUT_DONE: 7314 np->lastmsg = np->msgout[0]; 7315 np->msgout[0] = M_NOOP; 7316 /* Should we really care of that */ 7317 if (np->lastmsg == M_PARITY || np->lastmsg == M_ID_ERROR) { 7318 if (cp) { 7319 cp->xerr_status &= ~XE_PARITY_ERR; 7320 if (!cp->xerr_status) 7321 OUTOFFB (HF_PRT, HF_EXT_ERR); 7322 } 7323 } 7324 goto out; 7325 /* 7326 * The device didn't send a GOOD SCSI status. 7327 * We may have some work to do prior to allow 7328 * the SCRIPTS processor to continue. 
7329 */ 7330 case SIR_BAD_SCSI_STATUS: 7331 if (!cp) 7332 goto out; 7333 sym_sir_bad_scsi_status(np, num, cp); 7334 return; 7335 /* 7336 * We are asked by the SCRIPTS to prepare a 7337 * REJECT message. 7338 */ 7339 case SIR_REJECT_TO_SEND: 7340 sym_print_msg(cp, "M_REJECT to send for ", np->msgin); 7341 np->msgout[0] = M_REJECT; 7342 goto out; 7343 /* 7344 * We have been ODD at the end of a DATA IN 7345 * transfer and the device didn't send a 7346 * IGNORE WIDE RESIDUE message. 7347 * It is a data overrun condition. 7348 */ 7349 case SIR_SWIDE_OVERRUN: 7350 if (cp) { 7351 OUTONB (HF_PRT, HF_EXT_ERR); 7352 cp->xerr_status |= XE_SWIDE_OVRUN; 7353 } 7354 goto out; 7355 /* 7356 * We have been ODD at the end of a DATA OUT 7357 * transfer. 7358 * It is a data underrun condition. 7359 */ 7360 case SIR_SODL_UNDERRUN: 7361 if (cp) { 7362 OUTONB (HF_PRT, HF_EXT_ERR); 7363 cp->xerr_status |= XE_SODL_UNRUN; 7364 } 7365 goto out; 7366 /* 7367 * We received a message. 7368 */ 7369 case SIR_MSG_RECEIVED: 7370 if (!cp) 7371 goto out_stuck; 7372 switch (np->msgin [0]) { 7373 /* 7374 * We received an extended message. 7375 * We handle MODIFY DATA POINTER, SDTR, WDTR 7376 * and reject all other extended messages. 7377 */ 7378 case M_EXTENDED: 7379 switch (np->msgin [2]) { 7380 case M_X_MODIFY_DP: 7381 if (DEBUG_FLAGS & DEBUG_POINTER) 7382 sym_print_msg(cp,"modify DP",np->msgin); 7383 tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) + 7384 (np->msgin[5]<<8) + (np->msgin[6]); 7385 sym_modify_dp(np, tp, cp, tmp); 7386 return; 7387 case M_X_SYNC_REQ: 7388 sym_sync_nego(np, tp, cp); 7389 return; 7390 case M_X_PPR_REQ: 7391 sym_ppr_nego(np, tp, cp); 7392 return; 7393 case M_X_WIDE_REQ: 7394 sym_wide_nego(np, tp, cp); 7395 return; 7396 default: 7397 goto out_reject; 7398 } 7399 break; 7400 /* 7401 * We received a 1/2 byte message not handled from SCRIPTS. 
7402 * We are only expecting MESSAGE REJECT and IGNORE WIDE 7403 * RESIDUE messages that haven't been anticipated by 7404 * SCRIPTS on SWIDE full condition. Unanticipated IGNORE 7405 * WIDE RESIDUE messages are aliased as MODIFY DP (-1). 7406 */ 7407 case M_IGN_RESIDUE: 7408 if (DEBUG_FLAGS & DEBUG_POINTER) 7409 sym_print_msg(cp,"ign wide residue", np->msgin); 7410 sym_modify_dp(np, tp, cp, -1); 7411 return; 7412 case M_REJECT: 7413 if (INB (HS_PRT) == HS_NEGOTIATE) 7414 sym_nego_rejected(np, tp, cp); 7415 else { 7416 PRINT_ADDR(cp); 7417 printf ("M_REJECT received (%x:%x).\n", 7418 scr_to_cpu(np->lastmsg), np->msgout[0]); 7419 } 7420 goto out_clrack; 7421 break; 7422 default: 7423 goto out_reject; 7424 } 7425 break; 7426 /* 7427 * We received an unknown message. 7428 * Ignore all MSG IN phases and reject it. 7429 */ 7430 case SIR_MSG_WEIRD: 7431 sym_print_msg(cp, "WEIRD message received", np->msgin); 7432 OUTL (nc_dsp, SCRIPTH_BA (np, msg_weird)); 7433 return; 7434 /* 7435 * Negotiation failed. 7436 * Target does not send us the reply. 7437 * Remove the HS_NEGOTIATE status. 7438 */ 7439 case SIR_NEGO_FAILED: 7440 OUTB (HS_PRT, HS_BUSY); 7441 /* 7442 * Negotiation failed. 7443 * Target does not want answer message. 
7444 */ 7445 case SIR_NEGO_PROTO: 7446 sym_nego_default(np, tp, cp); 7447 goto out; 7448 }; 7449 7450 out: 7451 OUTONB (nc_dcntl, (STD|NOCOM)); 7452 return; 7453 out_reject: 7454 OUTL (nc_dsp, SCRIPTH_BA (np, msg_bad)); 7455 return; 7456 out_clrack: 7457 OUTL (nc_dsp, SCRIPT_BA (np, clrack)); 7458 return; 7459 out_stuck: 7460 } 7461 7462 /* 7463 * Acquire a control block 7464 */ 7465 static ccb_p sym_get_ccb (hcb_p np, u_char tn, u_char ln, u_char tag_order) 7466 { 7467 tcb_p tp = &np->target[tn]; 7468 lcb_p lp = sym_lp(np, tp, ln); 7469 u_short tag = NO_TAG; 7470 SYM_QUEHEAD *qp; 7471 ccb_p cp = (ccb_p) 0; 7472 7473 /* 7474 * Look for a free CCB 7475 */ 7476 if (sym_que_empty(&np->free_ccbq)) 7477 (void) sym_alloc_ccb(np); 7478 qp = sym_remque_head(&np->free_ccbq); 7479 if (!qp) 7480 goto out; 7481 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); 7482 7483 /* 7484 * If the LCB is not yet available and the LUN 7485 * has been probed ok, try to allocate the LCB. 7486 */ 7487 if (!lp && sym_is_bit(tp->lun_map, ln)) { 7488 lp = sym_alloc_lcb(np, tn, ln); 7489 if (!lp) 7490 goto out_free; 7491 } 7492 7493 /* 7494 * If the LCB is not available here, then the 7495 * logical unit is not yet discovered. For those 7496 * ones only accept 1 SCSI IO per logical unit, 7497 * since we cannot allow disconnections. 7498 */ 7499 if (!lp) { 7500 if (!sym_is_bit(tp->busy0_map, ln)) 7501 sym_set_bit(tp->busy0_map, ln); 7502 else 7503 goto out_free; 7504 } else { 7505 /* 7506 * If we have been asked for a tagged command. 7507 */ 7508 if (tag_order) { 7509 /* 7510 * Debugging purpose. 7511 */ 7512 assert(lp->busy_itl == 0); 7513 /* 7514 * Allocate resources for tags if not yet. 7515 */ 7516 if (!lp->cb_tags) { 7517 sym_alloc_lcb_tags(np, tn, ln); 7518 if (!lp->cb_tags) 7519 goto out_free; 7520 } 7521 /* 7522 * Get a tag for this SCSI IO and set up 7523 * the CCB bus address for reselection, 7524 * and count it for this LUN. 7525 * Toggle reselect path to tagged. 
7526 */ 7527 if (lp->busy_itlq < SYM_CONF_MAX_TASK) { 7528 tag = lp->cb_tags[lp->ia_tag]; 7529 if (++lp->ia_tag == SYM_CONF_MAX_TASK) 7530 lp->ia_tag = 0; 7531 lp->itlq_tbl[tag] = cpu_to_scr(cp->ccb_ba); 7532 ++lp->busy_itlq; 7533 lp->resel_sa = 7534 cpu_to_scr(SCRIPT_BA (np, resel_tag)); 7535 } 7536 else 7537 goto out_free; 7538 } 7539 /* 7540 * This command will not be tagged. 7541 * If we already have either a tagged or untagged 7542 * one, refuse to overlap this untagged one. 7543 */ 7544 else { 7545 /* 7546 * Debugging purpose. 7547 */ 7548 assert(lp->busy_itl == 0 && lp->busy_itlq == 0); 7549 /* 7550 * Count this nexus for this LUN. 7551 * Set up the CCB bus address for reselection. 7552 * Toggle reselect path to untagged. 7553 */ 7554 if (++lp->busy_itl == 1) { 7555 lp->itl_task_sa = cpu_to_scr(cp->ccb_ba); 7556 lp->resel_sa = 7557 cpu_to_scr(SCRIPT_BA (np,resel_no_tag)); 7558 } 7559 else 7560 goto out_free; 7561 } 7562 } 7563 /* 7564 * Put the CCB into the busy queue. 7565 */ 7566 sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); 7567 7568 /* 7569 * Remember all informations needed to free this CCB. 
7570 */ 7571 cp->to_abort = 0; 7572 cp->tag = tag; 7573 cp->target = tn; 7574 cp->lun = ln; 7575 7576 if (DEBUG_FLAGS & DEBUG_TAGS) { 7577 PRINT_LUN(np, tn, ln); 7578 printf ("ccb @%p using tag %d.\n", cp, tag); 7579 } 7580 7581 out: 7582 return cp; 7583 out_free: 7584 sym_insque_head(&cp->link_ccbq, &np->free_ccbq); 7585 return (ccb_p) 0; 7586 } 7587 7588 /* 7589 * Release one control block 7590 */ 7591 static void sym_free_ccb (hcb_p np, ccb_p cp) 7592 { 7593 tcb_p tp = &np->target[cp->target]; 7594 lcb_p lp = sym_lp(np, tp, cp->lun); 7595 7596 if (DEBUG_FLAGS & DEBUG_TAGS) { 7597 PRINT_LUN(np, cp->target, cp->lun); 7598 printf ("ccb @%p freeing tag %d.\n", cp, cp->tag); 7599 } 7600 7601 /* 7602 * If LCB available, 7603 */ 7604 if (lp) { 7605 /* 7606 * If tagged, release the tag, set the relect path 7607 */ 7608 if (cp->tag != NO_TAG) { 7609 /* 7610 * Free the tag value. 7611 */ 7612 lp->cb_tags[lp->if_tag] = cp->tag; 7613 if (++lp->if_tag == SYM_CONF_MAX_TASK) 7614 lp->if_tag = 0; 7615 /* 7616 * Make the reselect path invalid, 7617 * and uncount this CCB. 7618 */ 7619 lp->itlq_tbl[cp->tag] = cpu_to_scr(np->bad_itlq_ba); 7620 --lp->busy_itlq; 7621 } else { /* Untagged */ 7622 /* 7623 * Make the reselect path invalid, 7624 * and uncount this CCB. 7625 */ 7626 lp->itl_task_sa = cpu_to_scr(np->bad_itl_ba); 7627 --lp->busy_itl; 7628 } 7629 /* 7630 * If no JOB active, make the LUN reselect path invalid. 7631 */ 7632 if (lp->busy_itlq == 0 && lp->busy_itl == 0) 7633 lp->resel_sa = cpu_to_scr(SCRIPTH_BA(np,resel_bad_lun)); 7634 } 7635 /* 7636 * Otherwise, we only accept 1 IO per LUN. 7637 * Clear the bit that keeps track of this IO. 7638 */ 7639 else 7640 sym_clr_bit(tp->busy0_map, cp->lun); 7641 7642 /* 7643 * We donnot queue more than 1 ccb per target 7644 * with negotiation at any time. If this ccb was 7645 * used for negotiation, clear this info in the tcb. 
7646 */ 7647 if (cp == tp->nego_cp) 7648 tp->nego_cp = 0; 7649 7650 #ifdef SYM_CONF_IARB_SUPPORT 7651 /* 7652 * If we just complete the last queued CCB, 7653 * clear this info that is no longer relevant. 7654 */ 7655 if (cp == np->last_cp) 7656 np->last_cp = 0; 7657 #endif 7658 /* 7659 * Make this CCB available. 7660 */ 7661 cp->cam_ccb = 0; 7662 cp->host_status = HS_IDLE; 7663 sym_remque(&cp->link_ccbq); 7664 sym_insque_head(&cp->link_ccbq, &np->free_ccbq); 7665 } 7666 7667 /* 7668 * Allocate a CCB from memory and initialize its fixed part. 7669 */ 7670 static ccb_p sym_alloc_ccb(hcb_p np) 7671 { 7672 ccb_p cp = 0; 7673 int hcode; 7674 7675 /* 7676 * Prevent from allocating more CCBs than we can 7677 * queue to the controller. 7678 */ 7679 if (np->actccbs >= SYM_CONF_MAX_START) 7680 return 0; 7681 7682 /* 7683 * Allocate memory for this CCB. 7684 */ 7685 cp = sym_calloc(sizeof(struct sym_ccb), "CCB"); 7686 if (!cp) 7687 return 0; 7688 7689 /* 7690 * Count it. 7691 */ 7692 np->actccbs++; 7693 7694 /* 7695 * Compute the bus address of this ccb. 7696 */ 7697 cp->ccb_ba = vtobus(cp); 7698 7699 /* 7700 * Insert this ccb into the hashed list. 7701 */ 7702 hcode = CCB_HASH_CODE(cp->ccb_ba); 7703 cp->link_ccbh = np->ccbh[hcode]; 7704 np->ccbh[hcode] = cp; 7705 7706 /* 7707 * Initialyze the start and restart actions. 7708 */ 7709 cp->phys.go.start = cpu_to_scr(SCRIPT_BA (np, idle)); 7710 cp->phys.go.restart = cpu_to_scr(SCRIPTH_BA(np, bad_i_t_l)); 7711 7712 /* 7713 * Initilialyze some other fields. 7714 */ 7715 cp->phys.smsg_ext.addr = cpu_to_scr(vtobus(&np->msgin[2])); 7716 7717 /* 7718 * Chain into free ccb queue. 7719 */ 7720 sym_insque_head(&cp->link_ccbq, &np->free_ccbq); 7721 7722 return cp; 7723 } 7724 7725 /* 7726 * Look up a CCB from a DSA value. 
7727 */ 7728 static ccb_p sym_ccb_from_dsa(hcb_p np, u_long dsa) 7729 { 7730 int hcode; 7731 ccb_p cp; 7732 7733 hcode = CCB_HASH_CODE(dsa); 7734 cp = np->ccbh[hcode]; 7735 while (cp) { 7736 if (cp->ccb_ba == dsa) 7737 break; 7738 cp = cp->link_ccbh; 7739 } 7740 7741 return cp; 7742 } 7743 7744 /* 7745 * Target control block initialisation. 7746 * Nothing important to do at the moment. 7747 */ 7748 static void sym_init_tcb (hcb_p np, u_char tn) 7749 { 7750 /* 7751 * Check some alignments required by the chip. 7752 */ 7753 assert (((offsetof(struct sym_reg, nc_sxfer) ^ 7754 offsetof(struct sym_tcb, sval)) &3) == 0); 7755 assert (((offsetof(struct sym_reg, nc_scntl3) ^ 7756 offsetof(struct sym_tcb, wval)) &3) == 0); 7757 } 7758 7759 /* 7760 * Lun control block allocation and initialization. 7761 */ 7762 static lcb_p sym_alloc_lcb (hcb_p np, u_char tn, u_char ln) 7763 { 7764 tcb_p tp = &np->target[tn]; 7765 lcb_p lp = sym_lp(np, tp, ln); 7766 7767 /* 7768 * Already done, just return. 7769 */ 7770 if (lp) 7771 return lp; 7772 /* 7773 * Check against some race. 7774 */ 7775 assert(!sym_is_bit(tp->busy0_map, ln)); 7776 7777 /* 7778 * Initialize the target control block if not yet. 7779 */ 7780 sym_init_tcb (np, tn); 7781 7782 /* 7783 * Allocate the LCB bus address array. 7784 * Compute the bus address of this table. 7785 */ 7786 if (ln && !tp->luntbl) { 7787 int i; 7788 7789 tp->luntbl = sym_calloc(256, "LUNTBL"); 7790 if (!tp->luntbl) 7791 goto fail; 7792 for (i = 0 ; i < 64 ; i++) 7793 tp->luntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa)); 7794 tp->luntbl_sa = cpu_to_scr(vtobus(tp->luntbl)); 7795 } 7796 7797 /* 7798 * Allocate the table of pointers for LUN(s) > 0, if needed. 7799 */ 7800 if (ln && !tp->lunmp) { 7801 tp->lunmp = sym_calloc(SYM_CONF_MAX_LUN * sizeof(lcb_p), 7802 "LUNMP"); 7803 if (!tp->lunmp) 7804 goto fail; 7805 } 7806 7807 /* 7808 * Allocate the lcb. 7809 * Make it available to the chip. 
7810 */ 7811 lp = sym_calloc(sizeof(struct sym_lcb), "LCB"); 7812 if (!lp) 7813 goto fail; 7814 if (ln) { 7815 tp->lunmp[ln] = lp; 7816 tp->luntbl[ln] = cpu_to_scr(vtobus(lp)); 7817 } 7818 else { 7819 tp->lun0p = lp; 7820 tp->lun0_sa = cpu_to_scr(vtobus(lp)); 7821 } 7822 7823 /* 7824 * Let the itl task point to error handling. 7825 */ 7826 lp->itl_task_sa = cpu_to_scr(np->bad_itl_ba); 7827 7828 /* 7829 * Set the reselect pattern to our default. :) 7830 */ 7831 lp->resel_sa = cpu_to_scr(SCRIPTH_BA(np, resel_bad_lun)); 7832 7833 /* 7834 * Set user capabilities. 7835 */ 7836 lp->user_flags = tp->usrflags & (SYM_DISC_ENABLED | SYM_TAGS_ENABLED); 7837 7838 fail: 7839 return lp; 7840 } 7841 7842 /* 7843 * Allocate LCB resources for tagged command queuing. 7844 */ 7845 static void sym_alloc_lcb_tags (hcb_p np, u_char tn, u_char ln) 7846 { 7847 tcb_p tp = &np->target[tn]; 7848 lcb_p lp = sym_lp(np, tp, ln); 7849 int i; 7850 7851 /* 7852 * If LCB not available, try to allocate it. 7853 */ 7854 if (!lp && !(lp = sym_alloc_lcb(np, tn, ln))) 7855 goto fail; 7856 7857 /* 7858 * Allocate the task table and and the tag allocation 7859 * circular buffer. We want both or none. 7860 */ 7861 lp->itlq_tbl = sym_calloc(SYM_CONF_MAX_TASK*4, "ITLQ_TBL"); 7862 if (!lp->itlq_tbl) 7863 goto fail; 7864 lp->cb_tags = sym_calloc(SYM_CONF_MAX_TASK, "CB_TAGS"); 7865 if (!lp->cb_tags) { 7866 sym_mfree(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL"); 7867 lp->itlq_tbl = 0; 7868 goto fail; 7869 } 7870 7871 /* 7872 * Initialize the task table with invalid entries. 7873 */ 7874 for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++) 7875 lp->itlq_tbl[i] = cpu_to_scr(np->notask_ba); 7876 7877 /* 7878 * Fill up the tag buffer with tag numbers. 7879 */ 7880 for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++) 7881 lp->cb_tags[i] = i; 7882 7883 /* 7884 * Make the task table available to SCRIPTS, 7885 * And accept tagged commands now. 
7886 */ 7887 lp->itlq_tbl_sa = cpu_to_scr(vtobus(lp->itlq_tbl)); 7888 7889 return; 7890 fail: 7891 } 7892 7893 /* 7894 * Test the pci bus snoop logic :-( 7895 * 7896 * Has to be called with interrupts disabled. 7897 */ 7898 #ifndef SYM_CONF_IOMAPPED 7899 static int sym_regtest (hcb_p np) 7900 { 7901 register volatile u32 data; 7902 /* 7903 * chip registers may NOT be cached. 7904 * write 0xffffffff to a read only register area, 7905 * and try to read it back. 7906 */ 7907 data = 0xffffffff; 7908 OUTL_OFF(offsetof(struct sym_reg, nc_dstat), data); 7909 data = INL_OFF(offsetof(struct sym_reg, nc_dstat)); 7910 #if 1 7911 if (data == 0xffffffff) { 7912 #else 7913 if ((data & 0xe2f0fffd) != 0x02000080) { 7914 #endif 7915 printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n", 7916 (unsigned) data); 7917 return (0x10); 7918 }; 7919 return (0); 7920 } 7921 #endif 7922 7923 static int sym_snooptest (hcb_p np) 7924 { 7925 u32 sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc; 7926 int i, err=0; 7927 #ifndef SYM_CONF_IOMAPPED 7928 err |= sym_regtest (np); 7929 if (err) return (err); 7930 #endif 7931 /* 7932 * init 7933 */ 7934 pc = SCRIPTH0_BA (np, snooptest); 7935 host_wr = 1; 7936 sym_wr = 2; 7937 /* 7938 * Set memory and register. 7939 */ 7940 np->cache = cpu_to_scr(host_wr); 7941 OUTL (nc_temp, sym_wr); 7942 /* 7943 * Start script (exchange values) 7944 */ 7945 OUTL (nc_dsa, vtobus(np)); 7946 OUTL (nc_dsp, pc); 7947 /* 7948 * Wait 'til done (with timeout) 7949 */ 7950 for (i=0; i<SYM_SNOOP_TIMEOUT; i++) 7951 if (INB(nc_istat) & (INTF|SIP|DIP)) 7952 break; 7953 /* 7954 * Save termination position. 7955 */ 7956 pc = INL (nc_dsp); 7957 /* 7958 * Read memory and register. 7959 */ 7960 host_rd = scr_to_cpu(np->cache); 7961 sym_rd = INL (nc_scratcha); 7962 sym_bk = INL (nc_temp); 7963 7964 /* 7965 * check for timeout 7966 */ 7967 if (i>=SYM_SNOOP_TIMEOUT) { 7968 printf ("CACHE TEST FAILED: timeout.\n"); 7969 return (0x20); 7970 }; 7971 /* 7972 * Check termination position. 
7973 */ 7974 if (pc != SCRIPTH0_BA (np, snoopend)+8) { 7975 printf ("CACHE TEST FAILED: script execution failed.\n"); 7976 printf ("start=%08lx, pc=%08lx, end=%08lx\n", 7977 (u_long) SCRIPTH0_BA (np, snooptest), (u_long) pc, 7978 (u_long) SCRIPTH0_BA (np, snoopend) +8); 7979 return (0x40); 7980 }; 7981 /* 7982 * Show results. 7983 */ 7984 if (host_wr != sym_rd) { 7985 printf ("CACHE TEST FAILED: host wrote %d, chip read %d.\n", 7986 (int) host_wr, (int) sym_rd); 7987 err |= 1; 7988 }; 7989 if (host_rd != sym_wr) { 7990 printf ("CACHE TEST FAILED: chip wrote %d, host read %d.\n", 7991 (int) sym_wr, (int) host_rd); 7992 err |= 2; 7993 }; 7994 if (sym_bk != sym_wr) { 7995 printf ("CACHE TEST FAILED: chip wrote %d, read back %d.\n", 7996 (int) sym_wr, (int) sym_bk); 7997 err |= 4; 7998 }; 7999 return (err); 8000 } 8001 8002 /* 8003 * Determine the chip's clock frequency. 8004 * 8005 * This is essential for the negotiation of the synchronous 8006 * transfer rate. 8007 * 8008 * Note: we have to return the correct value. 8009 * THERE IS NO SAFE DEFAULT VALUE. 8010 * 8011 * Most NCR/SYMBIOS boards are delivered with a 40 Mhz clock. 8012 * 53C860 and 53C875 rev. 1 support fast20 transfers but 8013 * do not have a clock doubler and so are provided with a 8014 * 80 MHz clock. All other fast20 boards incorporate a doubler 8015 * and so should be delivered with a 40 MHz clock. 8016 * The recent fast40 chips (895/896/895A/1010) use a 40 Mhz base 8017 * clock and provide a clock quadrupler (160 Mhz). 8018 */ 8019 8020 /* 8021 * Select SCSI clock frequency 8022 */ 8023 static void sym_selectclock(hcb_p np, u_char scntl3) 8024 { 8025 /* 8026 * If multiplier not present or not selected, leave here. 
8027 */ 8028 if (np->multiplier <= 1) { 8029 OUTB(nc_scntl3, scntl3); 8030 return; 8031 } 8032 8033 if (sym_verbose >= 2) 8034 printf ("%s: enabling clock multiplier\n", sym_name(np)); 8035 8036 OUTB(nc_stest1, DBLEN); /* Enable clock multiplier */ 8037 /* 8038 * Wait for the LCKFRQ bit to be set if supported by the chip. 8039 * Otherwise wait 20 micro-seconds. 8040 */ 8041 if (np->features & FE_LCKFRQ) { 8042 int i = 20; 8043 while (!(INB(nc_stest4) & LCKFRQ) && --i > 0) 8044 UDELAY (20); 8045 if (!i) 8046 printf("%s: the chip cannot lock the frequency\n", 8047 sym_name(np)); 8048 } else 8049 UDELAY (20); 8050 OUTB(nc_stest3, HSC); /* Halt the scsi clock */ 8051 OUTB(nc_scntl3, scntl3); 8052 OUTB(nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */ 8053 OUTB(nc_stest3, 0x00); /* Restart scsi clock */ 8054 } 8055 8056 /* 8057 * calculate SCSI clock frequency (in KHz) 8058 */ 8059 static unsigned getfreq (hcb_p np, int gen) 8060 { 8061 unsigned int ms = 0; 8062 unsigned int f; 8063 8064 /* 8065 * Measure GEN timer delay in order 8066 * to calculate SCSI clock frequency 8067 * 8068 * This code will never execute too 8069 * many loop iterations (if DELAY is 8070 * reasonably correct). It could get 8071 * too low a delay (too high a freq.) 8072 * if the CPU is slow executing the 8073 * loop for some reason (an NMI, for 8074 * example). For this reason we will 8075 * if multiple measurements are to be 8076 * performed trust the higher delay 8077 * (lower frequency returned). 
8078 */ 8079 OUTW (nc_sien , 0); /* mask all scsi interrupts */ 8080 (void) INW (nc_sist); /* clear pending scsi interrupt */ 8081 OUTB (nc_dien , 0); /* mask all dma interrupts */ 8082 (void) INW (nc_sist); /* another one, just to be sure :) */ 8083 OUTB (nc_scntl3, 4); /* set pre-scaler to divide by 3 */ 8084 OUTB (nc_stime1, 0); /* disable general purpose timer */ 8085 OUTB (nc_stime1, gen); /* set to nominal delay of 1<<gen * 125us */ 8086 while (!(INW(nc_sist) & GEN) && ms++ < 100000) 8087 UDELAY (1000); /* count ms */ 8088 OUTB (nc_stime1, 0); /* disable general purpose timer */ 8089 /* 8090 * set prescaler to divide by whatever 0 means 8091 * 0 ought to choose divide by 2, but appears 8092 * to set divide by 3.5 mode in my 53c810 ... 8093 */ 8094 OUTB (nc_scntl3, 0); 8095 8096 /* 8097 * adjust for prescaler, and convert into KHz 8098 */ 8099 f = ms ? ((1 << gen) * 4340) / ms : 0; 8100 8101 if (sym_verbose >= 2) 8102 printf ("%s: Delay (GEN=%d): %u msec, %u KHz\n", 8103 sym_name(np), gen, ms, f); 8104 8105 return f; 8106 } 8107 8108 static unsigned sym_getfreq (hcb_p np) 8109 { 8110 u_int f1, f2; 8111 int gen = 11; 8112 8113 (void) getfreq (np, gen); /* throw away first result */ 8114 f1 = getfreq (np, gen); 8115 f2 = getfreq (np, gen); 8116 if (f1 > f2) f1 = f2; /* trust lower result */ 8117 return f1; 8118 } 8119 8120 /* 8121 * Get/probe chip SCSI clock frequency 8122 */ 8123 static void sym_getclock (hcb_p np, int mult) 8124 { 8125 unsigned char scntl3 = np->sv_scntl3; 8126 unsigned char stest1 = np->sv_stest1; 8127 unsigned f1; 8128 8129 /* 8130 * For the C10 core, assume 40 MHz. 
8131 */ 8132 if (np->features & FE_C10) { 8133 np->multiplier = mult; 8134 np->clock_khz = 40000 * mult; 8135 return; 8136 } 8137 8138 np->multiplier = 1; 8139 f1 = 40000; 8140 /* 8141 * True with 875/895/896/895A with clock multiplier selected 8142 */ 8143 if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) { 8144 if (sym_verbose >= 2) 8145 printf ("%s: clock multiplier found\n", sym_name(np)); 8146 np->multiplier = mult; 8147 } 8148 8149 /* 8150 * If multiplier not found or scntl3 not 7,5,3, 8151 * reset chip and get frequency from general purpose timer. 8152 * Otherwise trust scntl3 BIOS setting. 8153 */ 8154 if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) { 8155 OUTB (nc_stest1, 0); /* make sure doubler is OFF */ 8156 f1 = sym_getfreq (np); 8157 8158 if (sym_verbose) 8159 printf ("%s: chip clock is %uKHz\n", sym_name(np), f1); 8160 8161 if (f1 < 45000) f1 = 40000; 8162 else if (f1 < 55000) f1 = 50000; 8163 else f1 = 80000; 8164 8165 if (f1 < 80000 && mult > 1) { 8166 if (sym_verbose >= 2) 8167 printf ("%s: clock multiplier assumed\n", 8168 sym_name(np)); 8169 np->multiplier = mult; 8170 } 8171 } else { 8172 if ((scntl3 & 7) == 3) f1 = 40000; 8173 else if ((scntl3 & 7) == 5) f1 = 80000; 8174 else f1 = 160000; 8175 8176 f1 /= np->multiplier; 8177 } 8178 8179 /* 8180 * Compute controller synchronous parameters. 8181 */ 8182 f1 *= np->multiplier; 8183 np->clock_khz = f1; 8184 } 8185 8186 /* 8187 * Get/probe PCI clock frequency 8188 */ 8189 static int sym_getpciclock (hcb_p np) 8190 { 8191 static int f = 0; 8192 8193 /* For the C10, this will not work */ 8194 if (!f && !(np->features & FE_C10)) { 8195 OUTB (nc_stest1, SCLK); /* Use the PCI clock as SCSI clock */ 8196 f = (int) sym_getfreq (np); 8197 OUTB (nc_stest1, 0); 8198 } 8199 return f; 8200 } 8201 8202 /*============= DRIVER ACTION/COMPLETION ====================*/ 8203 8204 /* 8205 * Print something that tells about extended errors. 
 */
static void sym_print_xerr(ccb_p cp, int x_status)
{
	/* One line per extended error bit set in x_status. */
	if (x_status & XE_PARITY_ERR) {
		PRINT_ADDR(cp);
		printf ("unrecovered SCSI parity error.\n");
	}
	if (x_status & XE_EXTRA_DATA) {
		PRINT_ADDR(cp);
		printf ("extraneous data discarded.\n");
	}
	if (x_status & XE_BAD_PHASE) {
		PRINT_ADDR(cp);
		printf ("illegal scsi phase (4/5).\n");
	}
	if (x_status & XE_SODL_UNRUN) {
		PRINT_ADDR(cp);
		printf ("ODD transfer in DATA OUT phase.\n");
	}
	if (x_status & XE_SWIDE_OVRUN) {
		PRINT_ADDR(cp);
		printf ("ODD transfer in DATA IN phase.\n");
	}
}

/*
 * Choose the more appropriate CAM status if
 * the IO encountered an extended error.
 *
 * 'cam_status' is the caller's default, returned unchanged
 * when no extended error bit is set.
 */
static int sym_xerr_cam_status(int cam_status, int x_status)
{
	if (x_status) {
		if (x_status & XE_PARITY_ERR)
			cam_status = CAM_UNCOR_PARITY;
		else if (x_status &(XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN))
			cam_status = CAM_DATA_RUN_ERR;
		else if (x_status & XE_BAD_PHASE)
			cam_status = CAM_REQ_CMP_ERR;
		else
			cam_status = CAM_REQ_CMP_ERR;
	}
	return cam_status;
}

/*
 * Complete execution of a SCSI command with extended
 * error, SCSI status error, or having been auto-sensed.
 *
 * The SCRIPTS processor is not running there, so we
 * can safely access IO registers and remove JOBs from
 * the START queue.
 * SCRATCHA is assumed to have been loaded with STARTPOS
 * before the SCRIPTS called the C code.
 */
static void sym_complete_error (hcb_p np, ccb_p cp)
{
	struct ccb_scsiio *csio;
	u_int cam_status;
	int i;

	/*
	 * Paranoid check. :)
	 */
	if (!cp || !cp->cam_ccb)
		return;

	if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_RESULT)) {
		printf ("CCB=%lx STAT=%x/%x/%x DEV=%d/%d\n", (unsigned long)cp,
			cp->host_status, cp->ssss_status, cp->host_flags,
			cp->target, cp->lun);
		MDELAY(100);
	}

	/*
	 * Get command, target and lun pointers.
	 */
	csio = &cp->cam_ccb->csio;

	/*
	 * Check for extended errors.
	 */
	if (cp->xerr_status) {
		if (sym_verbose)
			sym_print_xerr(cp, cp->xerr_status);
		if (cp->host_status == HS_COMPLETE)
			cp->host_status = HS_COMP_ERR;
	}

	/*
	 * Calculate the residual.
	 */
	csio->sense_resid = 0;
	csio->resid = sym_compute_residual(np, cp);

	if (!SYM_CONF_RESIDUAL_SUPPORT) {/* If user does not want residuals */
		csio->resid = 0;	/* throw them away. :) */
		cp->sv_resid = 0;
	}

	if (cp->host_flags & HF_SENSE) {		/* Auto sense     */
		csio->scsi_status = cp->sv_scsi_status;	/* Restore status */
		csio->sense_resid = csio->resid;	/* Swap residuals */
		csio->resid = cp->sv_resid;
		cp->sv_resid = 0;
		if (sym_verbose && cp->sv_xerr_status)
			sym_print_xerr(cp, cp->sv_xerr_status);
		/*
		 * Sense data is valid only if the auto sense command
		 * itself completed cleanly.
		 */
		if (cp->host_status == HS_COMPLETE &&
		    cp->ssss_status == S_GOOD &&
		    cp->xerr_status == 0) {
			cam_status = sym_xerr_cam_status(CAM_SCSI_STATUS_ERROR,
							 cp->sv_xerr_status);
			cam_status |= CAM_AUTOSNS_VALID;
#if 0
			/*
			 * If the device reports a UNIT ATTENTION condition
			 * due to a RESET condition, we should consider all
			 * disconnect CCBs for this unit as aborted.
			 */
			if (1) {
				u_char *p;
				p = (u_char *) &cp->cam_ccb->csio.sense_data;
				if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29)
					sym_clear_tasks(np, CAM_REQ_ABORTED,
							cp->target,cp->lun, -1);
			}
#endif
		}
		else
			cam_status = CAM_AUTOSENSE_FAIL;
	}
	else if (cp->host_status == HS_COMPLETE) {	/* Bad SCSI status */
		csio->scsi_status = cp->ssss_status;
		cam_status = CAM_SCSI_STATUS_ERROR;
	}
	else if (cp->host_status == HS_SEL_TIMEOUT)	/* Selection timeout */
		cam_status = CAM_SEL_TIMEOUT;
	else if (cp->host_status == HS_UNEXPECTED)	/* Unexpected BUS FREE*/
		cam_status = CAM_UNEXP_BUSFREE;
	else {						/* Extended error */
		if (sym_verbose) {
			PRINT_ADDR(cp);
			printf ("COMMAND FAILED (%x %x %x).\n",
				cp->host_status, cp->ssss_status,
				cp->xerr_status);
		}
		csio->scsi_status = cp->ssss_status;
		/*
		 * Set the most appropriate value for CAM status.
		 */
		cam_status = sym_xerr_cam_status(CAM_REQ_CMP_ERR,
						 cp->xerr_status);
	}

	/*
	 * Dequeue all queued CCBs for that device
	 * not yet started by SCRIPTS.
	 * SCRATCHA holds STARTPOS (see function header comment);
	 * the division by 4 converts a byte offset into a
	 * start queue index.
	 */
	i = (INL (nc_scratcha) - vtobus(np->squeue)) / 4;
	(void) sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);

	/*
	 * Restart the SCRIPTS processor.
	 */
	OUTL (nc_dsp, SCRIPT_BA (np, start));

	/*
	 * Add this one to the COMP queue.
	 * Complete all those commands with either error
	 * or requeue condition.
	 */
	sym_set_cam_status((union ccb *) csio, cam_status);
	sym_remque(&cp->link_ccbq);
	sym_insque_head(&cp->link_ccbq, &np->comp_ccbq);
	sym_flush_comp_queue(np, 0);
}

/*
 * Complete execution of a successful SCSI command.
 *
 * Only successful commands go to the DONE queue,
 * since we need to have the SCRIPTS processor
 * stopped on any error condition.
 * The SCRIPTS processor is running while we are
 * completing successful commands.
 */
static void sym_complete_ok (hcb_p np, ccb_p cp)
{
	struct ccb_scsiio *csio;
	tcb_p tp;
	lcb_p lp;

	/*
	 * Paranoid check. :)
	 */
	if (!cp || !cp->cam_ccb)
		return;
	assert (cp->host_status == HS_COMPLETE);

	/*
	 * Get command, target and lun pointers.
	 */
	csio = &cp->cam_ccb->csio;
	tp = &np->target[cp->target];
	lp = sym_lp(np, tp, cp->lun);

	/*
	 * Assume device discovered on first success.
	 */
	if (!lp)
		sym_set_bit(tp->lun_map, cp->lun);

	/*
	 * If all data have been transferred, given that no
	 * extended error did occur, there is no residual.
	 */
	csio->resid = 0;
	if (cp->phys.lastp != cp->phys.goalp)
		csio->resid = sym_compute_residual(np, cp);

	/*
	 * Wrong transfer residuals may be worse than just always
	 * returning zero. User can disable this feature from
	 * sym_conf.h. Residual support is enabled by default.
	 */
	if (!SYM_CONF_RESIDUAL_SUPPORT)
		csio->resid = 0;
#ifdef SYM_DEBUG_PM_WITH_WSR
	if (csio->resid) {
		printf("XXXX %d %d %d\n", csio->dxfer_len, csio->resid,
			csio->dxfer_len - csio->resid);
		csio->resid = 0;
	}
#endif

	/*
	 * Set status and complete the command.
	 */
	csio->scsi_status = cp->ssss_status;
	sym_set_cam_status((union ccb *) csio, CAM_REQ_CMP);
	sym_free_ccb (np, cp);
	sym_xpt_done(np, (union ccb *) csio);
}

/*
 * Our timeout handler.
 */
static void sym_timeout1(void *arg)
{
	union ccb *ccb = (union ccb *) arg;
	hcb_p np = ccb->ccb_h.sym_hcb_ptr;

	/*
	 * Check that the CAM CCB is still queued.
	 * NOTE(review): a NULL sym_hcb_ptr presumably means the CCB
	 * already completed before the timeout fired — confirm where
	 * the pointer is cleared.
	 */
	if (!np)
		return;

	switch(ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
		(void) sym_abort_scsiio(np, ccb, 1);
		break;
	default:
		break;
	}
}

/*
 * Timeout entry point: just run sym_timeout1() at splcam.
 */
static void sym_timeout(void *arg)
{
	int s = splcam();
	sym_timeout1(arg);
	splx(s);
}

/*
 * Abort an SCSI IO.
 *
 * 'timed_out' is non-zero when called from the timeout handler.
 * Returns 0 if an abort (or bus reset) was initiated, -1 if the
 * CCB is not (or no longer) one of ours.
 */
static int sym_abort_scsiio(hcb_p np, union ccb *ccb, int timed_out)
{
	ccb_p cp;
	SYM_QUEHEAD *qp;

	/*
	 * Look up our CCB control block.
	 */
	cp = 0;
	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
		ccb_p cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		if (cp2->cam_ccb == ccb) {
			cp = cp2;
			break;
		}
	}
	if (!cp)
		return -1;

	/*
	 * If a previous abort didn't succeed in time,
	 * perform a BUS reset.
	 */
	if (cp->to_abort) {
		sym_reset_scsi_bus(np, 1);
		return 0;
	}

	/*
	 * Mark the CCB for abort and allow time for the abort to
	 * complete: re-arm the watchdog for 10 seconds.
	 */
	cp->to_abort = timed_out ? 2 : 1;
	ccb->ccb_h.timeout_ch = timeout(sym_timeout, (caddr_t) ccb, 10*hz);

	/*
	 * Tell the SCRIPTS processor to stop and synchronize with us.
	 */
	np->istat_sem = SEM;
	OUTB (nc_istat, SIGP|SEM);
	return 0;
}

/*
 * Reset a SCSI device (all LUNs of a target).
 */
static void sym_reset_dev(hcb_p np, union ccb *ccb)
{
	tcb_p tp;
	struct ccb_hdr *ccb_h = &ccb->ccb_h;

	/* Reject our own ID and anything outside our tables. */
	if (ccb_h->target_id == np->myaddr ||
	    ccb_h->target_id >= SYM_CONF_MAX_TARGET ||
	    ccb_h->target_lun >= SYM_CONF_MAX_LUN) {
		sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE);
		return;
	}

	tp = &np->target[ccb_h->target_id];

	/* The actual reset is performed later by the SCRIPTS/C code. */
	tp->to_reset = 1;
	sym_xpt_done2(np, ccb, CAM_REQ_CMP);

	np->istat_sem = SEM;
	OUTB (nc_istat, SIGP|SEM);
	return;
}

/*
 * SIM action entry point.
 */
static void sym_action(struct cam_sim *sim, union ccb *ccb)
{
	int s = splcam();
	sym_action1(sim, ccb);
	splx(s);
}

/*
 * Real work of the SIM action entry point, called at splcam.
 * Handles the XPT_SCSI_IO fast path here; everything else is
 * delegated to sym_action2().
 */
static void sym_action1(struct cam_sim *sim, union ccb *ccb)
{
	hcb_p	np;
	tcb_p	tp;
	lcb_p	lp;
	ccb_p	cp;
	int	tmp;
	u_char	idmsg, *msgptr;
	u_int	msglen;
	struct	ccb_scsiio *csio;
	struct	ccb_hdr *ccb_h;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("sym_action\n"));

	/*
	 * Retrieve our controller data structure.
	 */
	np = (hcb_p) cam_sim_softc(sim);

	/*
	 * The common case is SCSI IO.
	 * We deal with other ones elsewhere.
	 */
	if (ccb->ccb_h.func_code != XPT_SCSI_IO) {
		sym_action2(sim, ccb);
		return;
	}
	csio = &ccb->csio;
	ccb_h = &csio->ccb_h;

	/*
	 * Work around races.
	 */
	if ((ccb_h->status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		xpt_done(ccb);
		return;
	}

	/*
	 * Minimal checkings, so that we will not
	 * go outside our tables.
	 */
	if (ccb_h->target_id == np->myaddr ||
	    ccb_h->target_id >= SYM_CONF_MAX_TARGET ||
	    ccb_h->target_lun >= SYM_CONF_MAX_LUN) {
		sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE);
		return;
	}

	/*
	 * Retrieve the target and lun descriptors.
	 */
	tp = &np->target[ccb_h->target_id];
	lp = sym_lp(np, tp, ccb_h->target_lun);

	/*
	 * Complete the 1st INQUIRY command with error
	 * condition if the device is flagged NOSCAN
	 * at BOOT in the NVRAM. This may speed up
	 * the boot and maintain coherency with BIOS
	 * device numbering. Clearing the flag allows
	 * user to rescan skipped devices later.
	 * We also return error for devices not flagged
	 * for SCAN LUNS in the NVRAM since some mono-lun
	 * devices behave badly when asked for some non
	 * zero LUN. Btw, this is an absolute hack.:-)
	 */
	if (!(ccb_h->flags & CAM_CDB_PHYS) &&
	    (0x12 == ((ccb_h->flags & CAM_CDB_POINTER) ?
		  csio->cdb_io.cdb_ptr[0] : csio->cdb_io.cdb_bytes[0]))) {
		if ((tp->usrflags & SYM_SCAN_BOOT_DISABLED) ||
		    ((tp->usrflags & SYM_SCAN_LUNS_DISABLED) &&
		     ccb_h->target_lun != 0)) {
			tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED;
			sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE);
			return;
		}
	}

	/*
	 * Get a control block for this IO.
	 */
	tmp = ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0);
	cp = sym_get_ccb(np, ccb_h->target_id, ccb_h->target_lun, tmp);
	if (!cp) {
		sym_xpt_done2(np, ccb, CAM_RESRC_UNAVAIL);
		return;
	}

	/*
	 * Enqueue this IO in our pending queue.
	 */
	cp->cam_ccb = ccb;
	sym_enqueue_cam_ccb(np, ccb);

	/*
	 * Build the IDENTIFY message.
	 * Bit 0x40 grants the disconnect privilege.
	 */
	idmsg = M_IDENTIFY | cp->lun;
	if (cp->tag != NO_TAG || (lp && (lp->current_flags & SYM_DISC_ENABLED)))
		idmsg |= 0x40;

	msgptr = cp->scsi_smsg;
	msglen = 0;
	msgptr[msglen++] = idmsg;

	/*
	 * Build the tag message if present.
	 */
	if (cp->tag != NO_TAG) {
		u_char order = csio->tag_action;

		switch(order) {
		case M_ORDERED_TAG:
			break;
		case M_HEAD_TAG:
			break;
		default:
			order = M_SIMPLE_TAG;
		}
		msgptr[msglen++] = order;

		/*
		 * For less than 128 tags, actual tags are numbered
		 * 1,3,5,..2*MAXTAGS+1,since we may have to deal
		 * with devices that have problems with #TAG 0 or too
		 * great #TAG numbers. For more tags (up to 256),
		 * we use directly our tag number.
		 */
#if SYM_CONF_MAX_TASK > (512/4)
		msgptr[msglen++] = cp->tag;
#else
		msgptr[msglen++] = (cp->tag << 1) + 1;
#endif
	}

	/*
	 * Build a negotiation message if needed.
	 * (nego_status is filled by sym_prepare_nego())
	 */
	cp->nego_status = 0;
	if (tp->tinfo.current.width != tp->tinfo.goal.width ||
	    tp->tinfo.current.period != tp->tinfo.goal.period ||
	    tp->tinfo.current.offset != tp->tinfo.goal.offset ||
#if 0 /* For now only renegotiate, based on width, period and offset */
	    tp->tinfo.current.options != tp->tinfo.goal.options) {
#else
	    0) {
#endif
		if (!tp->nego_cp && lp)
			msglen += sym_prepare_nego(np, cp, 0, msgptr + msglen);
	}

	/*
	 * Fill in our ccb
	 */

	/*
	 * Startqueue
	 */
	cp->phys.go.start = cpu_to_scr(SCRIPT_BA (np, select));
	cp->phys.go.restart = cpu_to_scr(SCRIPT_BA (np, resel_dsa));

	/*
	 * select
	 */
	cp->phys.select.sel_id = cp->target;
	cp->phys.select.sel_scntl3 = tp->wval;
	cp->phys.select.sel_sxfer = tp->sval;
	cp->phys.select.sel_scntl4 = tp->uval;

	/*
	 * message
	 */
	cp->phys.smsg.addr = cpu_to_scr(CCB_PHYS (cp, scsi_smsg));
	cp->phys.smsg.size = cpu_to_scr(msglen);

	/*
	 * command
	 */
	if (sym_setup_cdb(np, csio, cp) < 0) {
		sym_free_ccb(np, cp);
		sym_xpt_done(np, ccb);
		return;
	}

	/*
	 * status
	 */
#if 0 /* Provision */
	cp->actualquirks = tp->quirks;
#endif
	cp->actualquirks = SYM_QUIRK_AUTOSAVE;
	cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
	cp->ssss_status = S_ILLEGAL;
	cp->xerr_status = 0;
	cp->host_flags = 0;
	cp->phys.extra_bytes = 0;

	/*
	 * extreme data pointer.
	 * shall be positive, so -1 is lower than lowest.:)
	 */
	cp->ext_sg = -1;
	cp->ext_ofs = 0;

	/*
	 * Build the data descriptor block
	 * and start the IO.
	 */
	if (sym_setup_data(np, csio, cp) < 0) {
		sym_free_ccb(np, cp);
		sym_xpt_done(np, ccb);
		return;
	}
}

/*
 * How complex it gets to deal with the CDB in CAM.
 * I bet, physical CDBs will never be used on the planet.
 *
 * Fill in cp->phys.cmd from the CCB's CDB; returns 0 on success,
 * -1 (with CAM status set to CAM_REQ_INVALID) on failure.
 */
static int sym_setup_cdb(hcb_p np, struct ccb_scsiio *csio, ccb_p cp)
{
	struct ccb_hdr *ccb_h;
	u32 cmd_ba;
	int cmd_len;

	ccb_h = &csio->ccb_h;

	/*
	 * CDB is 16 bytes max.
	 */
	if (csio->cdb_len > 16) {
		sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
		return -1;
	}
	cmd_len = csio->cdb_len;

	if (ccb_h->flags & CAM_CDB_POINTER) {
		/* CDB is a pointer */
		if (!(ccb_h->flags & CAM_CDB_PHYS)) {
			/* CDB pointer is virtual */
			cmd_ba = vtobus(csio->cdb_io.cdb_ptr);
		} else {
			/* CDB pointer is physical: not supported */
#if 0
			cmd_ba = ((u32)csio->cdb_io.cdb_ptr) & 0xffffffff;
#else
			sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
			return -1;
#endif
		}
	} else {
		/* CDB is in the ccb (buffer) */
		cmd_ba = vtobus(csio->cdb_io.cdb_bytes);
	}

	cp->phys.cmd.addr = cpu_to_scr(cmd_ba);
	cp->phys.cmd.size = cpu_to_scr(cmd_len);

	return 0;
}

/*
 * How complex it gets to deal with the data in CAM.
 * I bet physical data will never be used in our galaxy.
 *
 * Build the scatter/gather descriptors for the CCB's data and
 * queue the job for the SCRIPTS processor. Returns 0 on success,
 * -1 (with CAM status already set) on failure.
 */
static int sym_setup_data(hcb_p np, struct ccb_scsiio *csio, ccb_p cp)
{
	struct ccb_hdr *ccb_h;
	int dir, retv;
	u32 lastp, goalp;

	ccb_h = &csio->ccb_h;

	/*
	 * Now deal with the data.
	 */
	cp->data_len = 0;
	cp->segments = 0;

	/*
	 * No direction means no data.
	 */
	dir = (ccb_h->flags & CAM_DIR_MASK);
	if (dir == CAM_DIR_NONE)
		goto end_scatter;

	if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
		/* Single buffer */
		if (!(ccb_h->flags & CAM_DATA_PHYS)) {
			/* Buffer is virtual */
			retv = sym_scatter_virtual(np, cp,
				(vm_offset_t) csio->data_ptr,
				(vm_size_t) csio->dxfer_len);
		} else {
			/* Buffer is physical */
			retv = sym_scatter_physical(np, cp,
				(vm_offset_t) csio->data_ptr,
				(vm_size_t) csio->dxfer_len);
		}
		if (retv < 0)
			goto too_big;
	} else {
		/* Scatter/gather list */
		int i;
		struct bus_dma_segment *segs;
		segs = (struct bus_dma_segment *)csio->data_ptr;

		if ((ccb_h->flags & CAM_SG_LIST_PHYS) != 0) {
			/* The SG list pointer is physical */
			sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
			return -1;
		}
		retv = 0;
		if (!(ccb_h->flags & CAM_DATA_PHYS)) {
			/* SG buffer pointers are virtual */
			for (i = csio->sglist_cnt - 1 ; i >= 0 ; --i) {
				retv = sym_scatter_virtual(np, cp,
							segs[i].ds_addr,
							segs[i].ds_len);
				if (retv < 0)
					break;
			}
		} else {
			/* SG buffer pointers are physical */
			for (i = csio->sglist_cnt - 1 ; i >= 0 ; --i) {
				retv = sym_scatter_physical(np, cp,
							segs[i].ds_addr,
							segs[i].ds_len);
				if (retv < 0)
					break;
			}
		}
		if (retv < 0)
			goto too_big;
	}

end_scatter:
	/*
	 * No segments means no data.
	 */
	if (!cp->segments)
		dir = CAM_DIR_NONE;

	/*
	 * Set the data pointer.
	 */
	switch(dir) {
	case CAM_DIR_OUT:
		goalp = SCRIPT_BA (np, data_out2) + 8;
		lastp = goalp - 8 - (cp->segments * (2*4));
		break;
	case CAM_DIR_IN:
		goalp = SCRIPT_BA (np, data_in2) + 8;
		lastp = goalp - 8 - (cp->segments * (2*4));
		break;
	case CAM_DIR_NONE:
	default:
		lastp = goalp = SCRIPTH_BA (np, no_data);
		break;
	}

	cp->phys.lastp = cpu_to_scr(lastp);
	cp->phys.goalp = cpu_to_scr(goalp);
	cp->phys.savep = cpu_to_scr(lastp);
	cp->startp = cp->phys.savep;

	/*
	 * Activate this job.
	 */
	sym_put_start_queue(np, cp);

	/*
	 * Command is successfully queued.
	 */
	return 0;
too_big:
	sym_set_cam_status(cp->cam_ccb, CAM_REQ_TOO_BIG);
	return -1;
}

/*
 * Scatter a virtual buffer into bus addressable chunks.
 *
 * Segments are filled from the END of cp->phys.data[] backwards,
 * walking the buffer from its last byte towards its first, so
 * that the data moves engine sees them in ascending order.
 * Returns 0 on success, -1 if the buffer needs more than the
 * remaining SYM_CONF_MAX_SG-1 segments.
 */
static int
sym_scatter_virtual(hcb_p np, ccb_p cp, vm_offset_t vaddr, vm_size_t len)
{
	u_long pe, pn;
	u_long n, k;
	int s;
#ifdef SYM_DEBUG_PM_WITH_WSR
	int k0 = 0;
#endif

	cp->data_len += len;

	pe = vaddr + len;
	n = len;
	s = SYM_CONF_MAX_SG - 1 - cp->segments;

	while (n && s >= 0) {
		/* Chunk from the last page boundary to the current end. */
		pn = (pe - 1) & ~PAGE_MASK;
		k = pe - pn;
#ifdef SYM_DEBUG_PM_WITH_WSR
		/* Debug: force tiny 1/2 byte segments for small buffers. */
		if (len < 20 && k >= 2) {
			k = (k0&1) ? 1 : 2;
			pn = pe - k;
			++k0;
			if (k0 == 1) printf("[%d]:", (int)len);
		}
#if 0
		if (len > 512 && len < 515 && k > 512) {
			k = 512;
			pn = pe - k;
			++k0;
			if (k0 == 1) printf("[%d]:", (int)len);
		}
#endif
#endif
		if (k > n) {
			k = n;
			pn = pe - n;
		}
		if (DEBUG_FLAGS & DEBUG_SCATTER) {
			printf ("%s scatter: va=%lx pa=%lx siz=%lx\n",
				sym_name(np), pn, (u_long) vtobus(pn), k);
		}
		cp->phys.data[s].addr = cpu_to_scr(vtobus(pn));
		cp->phys.data[s].size = cpu_to_scr(k);
		pe = pn;
		n -= k;
		--s;
#ifdef SYM_DEBUG_PM_WITH_WSR
		if (k0)
			printf(" %d", (int)k);
#endif
	}
	cp->segments = SYM_CONF_MAX_SG - 1 - s;

#ifdef SYM_DEBUG_PM_WITH_WSR
	if (k0)
		printf("\n");
#endif
	return n ? -1 : 0;
}

/*
 * Will stay so forever, in my opinion.
 * (Physical data buffers are not supported.)
 */
static int
sym_scatter_physical(hcb_p np, ccb_p cp, vm_offset_t vaddr, vm_size_t len)
{
	return -1;
}

/*
 * SIM action for non performance critical stuff.
 */
static void sym_action2(struct cam_sim *sim, union ccb *ccb)
{
	hcb_p np;
	tcb_p tp;
	lcb_p lp;
	struct ccb_hdr *ccb_h;

	/*
	 * Retrieve our controller data structure.
	 */
	np = (hcb_p) cam_sim_softc(sim);

	ccb_h = &ccb->ccb_h;

	switch (ccb_h->func_code) {
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts;

		cts = &ccb->cts;
		tp = &np->target[ccb_h->target_id];

		/*
		 * Update our transfer settings (basically WIDE/SYNC).
		 * These features are to be handled in a per target
		 * basis according to SCSI specifications.
		 */
		if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0)
			sym_update_trans(np, tp, &tp->tinfo.user, cts);

		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
			sym_update_trans(np, tp, &tp->tinfo.goal, cts);

		/*
		 * Update our disconnect and tag settings.
		 * SCSI requires CmdQue feature to be handled in a per
		 * device (logical unit) basis.
		 */
		lp = sym_lp(np, tp, ccb_h->target_lun);
		if (lp) {
			if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0)
				sym_update_dflags(np, &lp->user_flags, cts);
			if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
				sym_update_dflags(np, &lp->current_flags, cts);
		}

		sym_xpt_done2(np, ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts;
		struct sym_trans *tip;
		u_char dflags;

		cts = &ccb->cts;
		tp = &np->target[ccb_h->target_id];
		lp = sym_lp(np, tp, ccb_h->target_lun);

		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			tip = &tp->tinfo.current;
			dflags = lp ? lp->current_flags : 0;
		}
		else {
			tip = &tp->tinfo.user;
			dflags = lp ? lp->user_flags : tp->usrflags;
		}

		cts->sync_period = tip->period;
		cts->sync_offset = tip->offset;
		cts->bus_width = tip->width;

		cts->valid = CCB_TRANS_SYNC_RATE_VALID
			| CCB_TRANS_SYNC_OFFSET_VALID
			| CCB_TRANS_BUS_WIDTH_VALID;

		if (lp) {
			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);

			if (dflags & SYM_DISC_ENABLED)
				cts->flags |= CCB_TRANS_DISC_ENB;

			if (dflags & SYM_TAGS_ENABLED)
				cts->flags |= CCB_TRANS_TAG_ENB;

			cts->valid |= CCB_TRANS_DISC_VALID;
			cts->valid |= CCB_TRANS_TQ_VALID;
		}

		sym_xpt_done2(np, ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;
		u32 size_mb;
		u32 secs_per_cylinder;
		int extended;

		/*
		 * Silly DOS geometry.
		 */
		ccg = &ccb->ccg;
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);
		extended = 1;

		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		sym_xpt_done2(np, ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;
		cpi->version_num = 1;
		cpi->hba_inquiry = PI_MDP_ABLE|PI_SDTR_ABLE|PI_TAG_ABLE;
		if ((np->features & FE_WIDE) != 0)
			cpi->hba_inquiry |= PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		if (np->usrflags & SYM_SCAN_TARGETS_HILO)
			cpi->hba_misc |= PIM_SCANHILO;
		if (np->usrflags & SYM_AVOID_BUS_RESET)
			cpi->hba_misc |= PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = (np->features & FE_WIDE) ? 15 : 7;
		/* Semantic problem:)LUN number max = max number of LUNs - 1 */
		cpi->max_lun = SYM_CONF_MAX_LUN-1;
		if (SYM_SETUP_MAX_LUN < SYM_CONF_MAX_LUN)
			cpi->max_lun = SYM_SETUP_MAX_LUN-1;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->initiator_id = np->myaddr;
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Symbios", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		sym_xpt_done2(np, ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_ABORT:
	{
		union ccb *abort_ccb = ccb->cab.abort_ccb;
		switch(abort_ccb->ccb_h.func_code) {
		case XPT_SCSI_IO:
			if (sym_abort_scsiio(np, abort_ccb, 0) == 0) {
				sym_xpt_done2(np, ccb, CAM_REQ_CMP);
				break;
			}
			/* FALLTHROUGH: abort failed, report CAM_UA_ABORT */
		default:
			sym_xpt_done2(np, ccb, CAM_UA_ABORT);
			break;
		}
		break;
	}
	case XPT_RESET_DEV:
	{
		sym_reset_dev(np, ccb);
		break;
	}
	case XPT_RESET_BUS:
	{
		sym_reset_scsi_bus(np, 0);
		if (sym_verbose) {
			xpt_print_path(np->path);
			printf("SCSI BUS reset delivered.\n");
		}
		sym_init (np, 1);
		sym_xpt_done2(np, ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_ACCEPT_TARGET_IO:
	case XPT_CONT_TARGET_IO:
	case XPT_EN_LUN:
	case XPT_NOTIFY_ACK:
	case XPT_IMMED_NOTIFY:
	case XPT_TERM_IO:
	default:
		/* Target mode and anything else: not supported. */
		sym_xpt_done2(np, ccb, CAM_REQ_INVALID);
		break;
	}
}

/*
 * Update transfer settings of a target.
 */
static void sym_update_trans(hcb_p np, tcb_p tp, struct sym_trans *tip,
			    struct ccb_trans_settings *cts)
{
	/*
	 * Update the infos.
	 */
	if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
		tip->width = cts->bus_width;
	if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)
		tip->offset = cts->sync_offset;
	if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
		tip->period = cts->sync_period;

	/*
	 * Scale against our limits.
	 */
	if (tip->width > SYM_SETUP_MAX_WIDE) tip->width =SYM_SETUP_MAX_WIDE;
	if (tip->width > np->maxwide) tip->width = np->maxwide;
	if (tip->offset > SYM_SETUP_MAX_OFFS) tip->offset =SYM_SETUP_MAX_OFFS;
	if (tip->offset > np->maxoffs) tip->offset = np->maxoffs;
	if (tip->period) {
		if (tip->period < SYM_SETUP_MIN_SYNC)
			tip->period = SYM_SETUP_MIN_SYNC;
		if (np->features & FE_ULTRA3) {
			if (tip->period < np->minsync_dt)
				tip->period = np->minsync_dt;
		}
		else {
			if (tip->period < np->minsync)
				tip->period = np->minsync;
		}
		if (tip->period > np->maxsync)
			tip->period = np->maxsync;
	}
}

/*
 * Update flags for a device (logical unit).
 */
static void
sym_update_dflags(hcb_p np, u_char *flags, struct ccb_trans_settings *cts)
{
	/* Disconnect privilege. */
	if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
		if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
			*flags |= SYM_DISC_ENABLED;
		else
			*flags &= ~SYM_DISC_ENABLED;
	}

	/* Tagged command queuing. */
	if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
		if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
			*flags |= SYM_TAGS_ENABLED;
		else
			*flags &= ~SYM_TAGS_ENABLED;
	}
}


/*============= DRIVER INITIALISATION ==================*/

#ifdef FreeBSD_4_Bus

static device_method_t sym_pci_methods[] = {
	DEVMETHOD(device_probe, sym_pci_probe),
	DEVMETHOD(device_attach, sym_pci_attach),
	{ 0, 0 }
};

static driver_t sym_pci_driver = {
	"sym",
	sym_pci_methods,
	sizeof(struct sym_hcb)
};

static devclass_t sym_devclass;

DRIVER_MODULE(sym, pci, sym_pci_driver, sym_devclass, 0, 0);

#else /* Pre-FreeBSD_4_Bus */

static u_long sym_unit;

static struct pci_device sym_pci_driver = {
	"sym",
	sym_pci_probe,
	sym_pci_attach,
	&sym_unit,
	NULL
};

DATA_SET (pcidevice_set, sym_pci_driver);

#endif /* FreeBSD_4_Bus */

/*
 * Table of supported chips, looked up by PCI device id and
 * maximum revision id. Fields (see struct sym_pci_chip):
 * device id, max revision, name, burst max, offset max,
 * nr divisor, lp probe bit, features.
 */
static struct sym_pci_chip sym_pci_dev_table[] = {
 {PCI_ID_SYM53C810, 0x0f, "810", 4, 8, 4, 0,
 FE_ERL}
 ,
 {PCI_ID_SYM53C810, 0xff, "810a", 4, 8, 4, 1,
 FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF}
 ,
 {PCI_ID_SYM53C825, 0x0f, "825", 6, 8, 4, 0,
 FE_WIDE|FE_BOF|FE_ERL|FE_DIFF}
 ,
 {PCI_ID_SYM53C825, 0xff, "825a", 6, 8, 4, 2,
 FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM|FE_DIFF}
 ,
 {PCI_ID_SYM53C860, 0xff, "860", 4, 8, 5, 1,
 FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN}
 ,
 {PCI_ID_SYM53C875, 0x01, "875", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_CLK80|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF}
 ,
 {PCI_ID_SYM53C875, 0xff, "875", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF}
 ,
 {PCI_ID_SYM53C875_2, 0xff, "875", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF}
 ,
 {PCI_ID_SYM53C885, 0xff, "885", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF}
 ,
 {PCI_ID_SYM53C895, 0xff, "895", 6, 31, 7, 2,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_LCKFRQ}
 ,
 {PCI_ID_SYM53C896, 0xff, "896", 6, 31, 7, 4,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
 ,
 {PCI_ID_SYM53C895A, 0xff, "895a", 6, 31, 7, 4,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
 ,
 {PCI_ID_LSI53C1010, 0x00, "1010", 6, 62, 7, 8,
 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC|FE_PCI66|FE_CRC|
 FE_C10}
 ,
 {PCI_ID_LSI53C1010, 0xff, "1010", 6, 62, 7, 8,
 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC|
 FE_C10|FE_U3EN}
 ,
 {PCI_ID_LSI53C1010_2, 0xff, "1010", 6, 62, 7, 8,
 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC|FE_PCI66|FE_CRC|
 FE_C10|FE_U3EN}
 ,
 {PCI_ID_LSI53C1510D, 0xff, "1510d", 6, 31, 7, 4,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_IO256|FE_LEDC}
};

#define sym_pci_num_devs \
	(sizeof(sym_pci_dev_table) / sizeof(sym_pci_dev_table[0]))

/*
 * Look up the chip table.
 *
 * Return a pointer to the chip entry if found,
 * zero otherwise.
 */
static struct sym_pci_chip *
#ifdef FreeBSD_4_Bus
sym_find_pci_chip(device_t dev)
#else
sym_find_pci_chip(pcici_t pci_tag)
#endif
{
	struct sym_pci_chip *chip;
	int i;
	u_short device_id;
	u_char revision;

	/*
	 * Read vendor/device/revision IDs through whichever PCI
	 * configuration access API this kernel generation provides.
	 */
#ifdef FreeBSD_4_Bus
	if (pci_get_vendor(dev) != PCI_VENDOR_NCR)
		return 0;

	device_id = pci_get_device(dev);
	revision = pci_get_revid(dev);
#else
	if (pci_cfgread(pci_tag, PCIR_VENDOR, 2) != PCI_VENDOR_NCR)
		return 0;

	device_id = pci_cfgread(pci_tag, PCIR_DEVICE, 2);
	revision = pci_cfgread(pci_tag, PCIR_REVID, 1);
#endif

	/*
	 * Scan for a device-id match whose max revision_id covers the
	 * actual chip revision.  Table entries for the same device id
	 * are ordered by ascending revision_id, so the first match is
	 * the most specific one.  A matching entry without FE_LDSTR
	 * (no LOAD/STORE SCRIPTS support) ends the search: such early
	 * chips are explicitly unsupported by this driver.
	 */
	for (i = 0; i < sym_pci_num_devs; i++) {
		chip = &sym_pci_dev_table[i];
		if (device_id != chip->device_id)
			continue;
		if (revision > chip->revision_id)
			continue;
		if (FE_LDSTR & chip->features)
			return chip;
		break;
	}

	return 0;
}

/*
 * Tell upper layer if the chip is supported.
 */
#ifdef FreeBSD_4_Bus
static int
sym_pci_probe(device_t dev)
{
	struct sym_pci_chip *chip;

	chip = sym_find_pci_chip(dev);
	if (chip) {
		device_set_desc(dev, chip->name);
		/*
		 * -2000 is a low-priority probe success: it lets the
		 * ncr driver win the device when the user excluded it
		 * from SYM_SETUP_LP_PROBE_MAP.
		 */
		return (chip->lp_probe_bit & SYM_SETUP_LP_PROBE_MAP)? -2000 : 0;
	}
	return ENXIO;
}
#else /* Pre-FreeBSD_4_Bus */
static const char *
sym_pci_probe(pcici_t pci_tag, pcidi_t type)
{
	struct sym_pci_chip *chip;

	chip = sym_find_pci_chip(pci_tag);
#if NNCR > 0
	/* Only claim chips we are allowed to take precedence over the ncr */
	if (chip && !(chip->lp_probe_bit & SYM_SETUP_LP_PROBE_MAP))
#else
	if (chip)
#endif
		return chip->name;
	return 0;
}
#endif

/*
 * Attach a sym53c8xx device.
9455 */ 9456 #ifdef FreeBSD_4_Bus 9457 static int 9458 sym_pci_attach(device_t dev) 9459 #else 9460 static void 9461 sym_pci_attach(pcici_t pci_tag, int unit) 9462 { 9463 int err = sym_pci_attach2(pci_tag, unit); 9464 if (err) 9465 printf("sym: failed to attach unit %d - err=%d.\n", unit, err); 9466 } 9467 static int 9468 sym_pci_attach2(pcici_t pci_tag, int unit) 9469 #endif 9470 { 9471 struct sym_pci_chip *chip; 9472 u_short command; 9473 u_char cachelnsz; 9474 struct sym_hcb *np = 0; 9475 struct sym_nvram nvram; 9476 int i; 9477 9478 /* 9479 * Only probed devices should be attached. 9480 * We just enjoy being paranoid. :) 9481 */ 9482 #ifdef FreeBSD_4_Bus 9483 chip = sym_find_pci_chip(dev); 9484 #else 9485 chip = sym_find_pci_chip(pci_tag); 9486 #endif 9487 if (chip == NULL) 9488 return (ENXIO); 9489 9490 /* 9491 * Allocate immediately the host control block, 9492 * since we are only expecting to succeed. :) 9493 * We keep track in the HCB of all the resources that 9494 * are to be released on error. 9495 */ 9496 np = sym_calloc(sizeof(*np), "HCB"); 9497 if (!np) 9498 goto attach_failed; 9499 9500 /* 9501 * Copy some useful infos to the HCB. 9502 */ 9503 np->verbose = bootverbose; 9504 #ifdef FreeBSD_4_Bus 9505 np->device = dev; 9506 np->unit = device_get_unit(dev); 9507 np->device_id = pci_get_device(dev); 9508 np->revision_id = pci_get_revid(dev); 9509 #else 9510 np->pci_tag = pci_tag; 9511 np->unit = unit; 9512 np->device_id = pci_cfgread(pci_tag, PCIR_DEVICE, 2); 9513 np->revision_id = pci_cfgread(pci_tag, PCIR_REVID, 1); 9514 #endif 9515 np->features = chip->features; 9516 np->clock_divn = chip->nr_divisor; 9517 np->maxoffs = chip->offset_max; 9518 np->maxburst = chip->burst_max; 9519 9520 /* 9521 * Edit its name. 9522 */ 9523 snprintf(np->inst_name, sizeof(np->inst_name), "sym%d", np->unit); 9524 9525 /* 9526 * Read and apply some fix-ups to the PCI COMMAND 9527 * register. 
We want the chip to be enabled for: 9528 * - BUS mastering 9529 * - PCI parity checking (reporting would also be fine) 9530 * - Write And Invalidate. 9531 */ 9532 #ifdef FreeBSD_4_Bus 9533 command = pci_read_config(dev, PCIR_COMMAND, 2); 9534 #else 9535 command = pci_cfgread(pci_tag, PCIR_COMMAND, 2); 9536 #endif 9537 command |= PCIM_CMD_BUSMASTEREN; 9538 command |= PCIM_CMD_PERRESPEN; 9539 command |= /* PCIM_CMD_MWIEN */ 0x0010; 9540 #ifdef FreeBSD_4_Bus 9541 pci_write_config(dev, PCIR_COMMAND, command, 2); 9542 #else 9543 pci_cfgwrite(pci_tag, PCIR_COMMAND, command, 2); 9544 #endif 9545 9546 /* 9547 * Let the device know about the cache line size, 9548 * if it doesn't yet. 9549 */ 9550 #ifdef FreeBSD_4_Bus 9551 cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); 9552 #else 9553 cachelnsz = pci_cfgread(pci_tag, PCIR_CACHELNSZ, 1); 9554 #endif 9555 if (!cachelnsz) { 9556 cachelnsz = 8; 9557 #ifdef FreeBSD_4_Bus 9558 pci_write_config(dev, PCIR_CACHELNSZ, cachelnsz, 1); 9559 #else 9560 pci_cfgwrite(pci_tag, PCIR_CACHELNSZ, cachelnsz, 1); 9561 #endif 9562 } 9563 9564 /* 9565 * Alloc/get/map/retrieve everything that deals with MMIO. 
9566 */ 9567 #ifdef FreeBSD_4_Bus 9568 if ((command & PCIM_CMD_MEMEN) != 0) { 9569 int regs_id = SYM_PCI_MMIO; 9570 np->mmio_res = bus_alloc_resource(dev, SYS_RES_MEMORY, ®s_id, 9571 0, ~0, 1, RF_ACTIVE); 9572 } 9573 if (!np->mmio_res) { 9574 device_printf(dev, "failed to allocate MMIO resources\n"); 9575 goto attach_failed; 9576 } 9577 np->mmio_bsh = rman_get_bushandle(np->mmio_res); 9578 np->mmio_tag = rman_get_bustag(np->mmio_res); 9579 np->mmio_pa = rman_get_start(np->mmio_res); 9580 np->mmio_va = (vm_offset_t) rman_get_virtual(np->mmio_res); 9581 np->mmio_ba = np->mmio_pa; 9582 #else 9583 if ((command & PCIM_CMD_MEMEN) != 0) { 9584 vm_offset_t vaddr, paddr; 9585 if (!pci_map_mem(pci_tag, SYM_PCI_MMIO, &vaddr, &paddr)) { 9586 printf("%s: failed to map MMIO window\n", sym_name(np)); 9587 goto attach_failed; 9588 } 9589 np->mmio_va = vaddr; 9590 np->mmio_pa = paddr; 9591 np->mmio_ba = paddr; 9592 } 9593 #endif 9594 9595 /* 9596 * Allocate the IRQ. 9597 */ 9598 #ifdef FreeBSD_4_Bus 9599 i = 0; 9600 np->irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &i, 9601 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); 9602 if (!np->irq_res) { 9603 device_printf(dev, "failed to allocate IRQ resource\n"); 9604 goto attach_failed; 9605 } 9606 #endif 9607 9608 #ifdef SYM_CONF_IOMAPPED 9609 /* 9610 * User want us to use normal IO with PCI. 9611 * Alloc/get/map/retrieve everything that deals with IO. 
9612 */ 9613 #ifdef FreeBSD_4_Bus 9614 if ((command & PCI_COMMAND_IO_ENABLE) != 0) { 9615 int regs_id = SYM_PCI_IO; 9616 np->io_res = bus_alloc_resource(dev, SYS_RES_IOPORT, ®s_id, 9617 0, ~0, 1, RF_ACTIVE); 9618 } 9619 if (!np->io_res) { 9620 device_printf(dev, "failed to allocate IO resources\n"); 9621 goto attach_failed; 9622 } 9623 np->io_bsh = rman_get_bushandle(np->io_res); 9624 np->io_tag = rman_get_bustag(np->io_res); 9625 np->io_port = rman_get_start(np->io_res); 9626 #else 9627 if ((command & PCI_COMMAND_IO_ENABLE) != 0) { 9628 pci_port_t io_port; 9629 if (!pci_map_port (pci_tag, SYM_PCI_IO, &io_port)) { 9630 printf("%s: failed to map IO window\n", sym_name(np)); 9631 goto attach_failed; 9632 } 9633 np->io_port = io_port; 9634 } 9635 #endif 9636 9637 #endif /* SYM_CONF_IOMAPPED */ 9638 9639 /* 9640 * If the chip has RAM. 9641 * Alloc/get/map/retrieve the corresponding resources. 9642 */ 9643 if ((np->features & (FE_RAM|FE_RAM8K)) && 9644 (command & PCIM_CMD_MEMEN) != 0) { 9645 #ifdef FreeBSD_4_Bus 9646 int regs_id = SYM_PCI_RAM; 9647 if (np->features & FE_64BIT) 9648 regs_id = SYM_PCI_RAM64; 9649 np->ram_res = bus_alloc_resource(dev, SYS_RES_MEMORY, ®s_id, 9650 0, ~0, 1, RF_ACTIVE); 9651 if (!np->ram_res) { 9652 device_printf(dev,"failed to allocate RAM resources\n"); 9653 goto attach_failed; 9654 } 9655 np->ram_id = regs_id; 9656 np->ram_bsh = rman_get_bushandle(np->ram_res); 9657 np->ram_tag = rman_get_bustag(np->ram_res); 9658 np->ram_pa = rman_get_start(np->ram_res); 9659 np->ram_va = (vm_offset_t) rman_get_virtual(np->ram_res); 9660 np->ram_ba = np->ram_pa; 9661 #else 9662 vm_offset_t vaddr, paddr; 9663 int regs_id = SYM_PCI_RAM; 9664 if (np->features & FE_64BIT) 9665 regs_id = SYM_PCI_RAM64; 9666 if (!pci_map_mem(pci_tag, regs_id, &vaddr, &paddr)) { 9667 printf("%s: failed to map RAM window\n", sym_name(np)); 9668 goto attach_failed; 9669 } 9670 np->ram_va = vaddr; 9671 np->ram_pa = paddr; 9672 np->ram_ba = paddr; 9673 #endif 9674 } 9675 9676 /* 
9677 * Save setting of some IO registers, so we will 9678 * be able to probe specific implementations. 9679 */ 9680 sym_save_initial_setting (np); 9681 9682 /* 9683 * Reset the chip now, since it has been reported 9684 * that SCSI clock calibration may not work properly 9685 * if the chip is currently active. 9686 */ 9687 sym_chip_reset (np); 9688 9689 /* 9690 * Try to read the user set-up. 9691 */ 9692 (void) sym_read_nvram(np, &nvram); 9693 9694 /* 9695 * Prepare controller and devices settings, according 9696 * to chip features, user set-up and driver set-up. 9697 */ 9698 (void) sym_prepare_setting(np, &nvram); 9699 9700 /* 9701 * Check the PCI clock frequency. 9702 * Must be performed after prepare_setting since it destroys 9703 * STEST1 that is used to probe for the clock doubler. 9704 */ 9705 i = sym_getpciclock(np); 9706 if (i > 37000) 9707 #ifdef FreeBSD_4_Bus 9708 device_printf(dev, "PCI BUS clock seems too high: %u KHz.\n",i); 9709 #else 9710 printf("%s: PCI BUS clock seems too high: %u KHz.\n", 9711 sym_name(np), i); 9712 #endif 9713 9714 /* 9715 * Allocate the start queue. 9716 */ 9717 np->squeue = (u32 *) sym_calloc(sizeof(u32)*(MAX_QUEUE*2), "SQUEUE"); 9718 if (!np->squeue) 9719 goto attach_failed; 9720 9721 /* 9722 * Allocate the done queue. 9723 */ 9724 np->dqueue = (u32 *) sym_calloc(sizeof(u32)*(MAX_QUEUE*2), "DQUEUE"); 9725 if (!np->dqueue) 9726 goto attach_failed; 9727 9728 /* 9729 * Allocate the target bus address array. 9730 */ 9731 np->targtbl = (u32 *) sym_calloc(256, "TARGTBL"); 9732 if (!np->targtbl) 9733 goto attach_failed; 9734 9735 /* 9736 * Allocate SCRIPTS areas. 9737 */ 9738 np->script0 = (struct sym_scr *) 9739 sym_calloc(sizeof(struct sym_scr), "SCRIPT0"); 9740 np->scripth0 = (struct sym_scrh *) 9741 sym_calloc(sizeof(struct sym_scrh), "SCRIPTH0"); 9742 if (!np->script0 || !np->scripth0) 9743 goto attach_failed; 9744 9745 /* 9746 * Initialyze the CCB free and busy queues. 9747 * Allocate some CCB. We need at least ONE. 
9748 */ 9749 sym_que_init(&np->free_ccbq); 9750 sym_que_init(&np->busy_ccbq); 9751 sym_que_init(&np->comp_ccbq); 9752 if (!sym_alloc_ccb(np)) 9753 goto attach_failed; 9754 9755 /* 9756 * Initialyze the CAM CCB pending queue. 9757 */ 9758 sym_que_init(&np->cam_ccbq); 9759 9760 /* 9761 * Fill-up variable-size parts of the SCRIPTS. 9762 */ 9763 sym_fill_scripts(&script0, &scripth0); 9764 9765 /* 9766 * Calculate BUS addresses where we are going 9767 * to load the SCRIPTS. 9768 */ 9769 np->script_ba = vtobus(np->script0); 9770 np->scripth_ba = vtobus(np->scripth0); 9771 np->scripth0_ba = np->scripth_ba; 9772 9773 if (np->ram_ba) { 9774 np->script_ba = np->ram_ba; 9775 if (np->features & FE_RAM8K) { 9776 np->ram_ws = 8192; 9777 np->scripth_ba = np->script_ba + 4096; 9778 #if BITS_PER_LONG > 32 9779 np->scr_ram_seg = cpu_to_scr(np->script_ba >> 32); 9780 #endif 9781 } 9782 else 9783 np->ram_ws = 4096; 9784 } 9785 9786 /* 9787 * Bind SCRIPTS with physical addresses usable by the 9788 * SCRIPTS processor (as seen from the BUS = BUS addresses). 9789 */ 9790 sym_bind_script(np, (u32 *) &script0, 9791 (u32 *) np->script0, sizeof(struct sym_scr)); 9792 sym_bind_script(np, (u32 *) &scripth0, 9793 (u32 *) np->scripth0, sizeof(struct sym_scrh)); 9794 9795 /* 9796 * Patch some variables in SCRIPTS. 9797 * These ones are loaded by the SCRIPTS processor. 9798 */ 9799 np->scripth0->pm0_data_addr[0] = cpu_to_scr(SCRIPT_BA(np,pm0_data)); 9800 np->scripth0->pm1_data_addr[0] = cpu_to_scr(SCRIPT_BA(np,pm1_data)); 9801 9802 9803 /* 9804 * Still some for LED support. 9805 */ 9806 if (np->features & FE_LED0) { 9807 np->script0->idle[0] = 9808 cpu_to_scr(SCR_REG_REG(gpreg, SCR_OR, 0x01)); 9809 np->script0->reselected[0] = 9810 cpu_to_scr(SCR_REG_REG(gpreg, SCR_AND, 0xfe)); 9811 np->script0->start[0] = 9812 cpu_to_scr(SCR_REG_REG(gpreg, SCR_AND, 0xfe)); 9813 } 9814 9815 /* 9816 * Load SCNTL4 on reselection for the C10. 
9817 */ 9818 if (np->features & FE_C10) { 9819 np->script0->resel_scntl4[0] = 9820 cpu_to_scr(SCR_LOAD_REL (scntl4, 1)); 9821 np->script0->resel_scntl4[1] = 9822 cpu_to_scr(offsetof(struct sym_tcb, uval)); 9823 } 9824 9825 #ifdef SYM_CONF_IARB_SUPPORT 9826 /* 9827 * If user does not want to use IMMEDIATE ARBITRATION 9828 * when we are reselected while attempting to arbitrate, 9829 * patch the SCRIPTS accordingly with a SCRIPT NO_OP. 9830 */ 9831 if (!SYM_CONF_SET_IARB_ON_ARB_LOST) 9832 np->script0->ungetjob[0] = cpu_to_scr(SCR_NO_OP); 9833 9834 /* 9835 * If user wants IARB to be set when we win arbitration 9836 * and have other jobs, compute the max number of consecutive 9837 * settings of IARB hints before we leave devices a chance to 9838 * arbitrate for reselection. 9839 */ 9840 #ifdef SYM_SETUP_IARB_MAX 9841 np->iarb_max = SYM_SETUP_IARB_MAX; 9842 #else 9843 np->iarb_max = 4; 9844 #endif 9845 #endif 9846 9847 /* 9848 * Prepare the idle and invalid task actions. 9849 */ 9850 np->idletask.start = cpu_to_scr(SCRIPT_BA(np, idle)); 9851 np->idletask.restart = cpu_to_scr(SCRIPTH_BA(np, bad_i_t_l)); 9852 np->idletask_ba = vtobus(&np->idletask); 9853 9854 np->notask.start = cpu_to_scr(SCRIPT_BA(np, idle)); 9855 np->notask.restart = cpu_to_scr(SCRIPTH_BA(np, bad_i_t_l)); 9856 np->notask_ba = vtobus(&np->notask); 9857 9858 np->bad_itl.start = cpu_to_scr(SCRIPT_BA(np, idle)); 9859 np->bad_itl.restart = cpu_to_scr(SCRIPTH_BA(np, bad_i_t_l)); 9860 np->bad_itl_ba = vtobus(&np->bad_itl); 9861 9862 np->bad_itlq.start = cpu_to_scr(SCRIPT_BA(np, idle)); 9863 np->bad_itlq.restart = cpu_to_scr(SCRIPTH_BA (np,bad_i_t_l_q)); 9864 np->bad_itlq_ba = vtobus(&np->bad_itlq); 9865 9866 /* 9867 * Allocate and prepare the lun JUMP table that is used 9868 * for a target prior the probing of devices (bad lun table). 9869 * A private table will be allocated for the target on the 9870 * first INQUIRY response received. 
9871 */ 9872 np->badluntbl = sym_calloc(256, "BADLUNTBL"); 9873 if (!np->badluntbl) 9874 goto attach_failed; 9875 9876 np->badlun_sa = cpu_to_scr(SCRIPTH_BA(np, resel_bad_lun)); 9877 for (i = 0 ; i < 64 ; i++) /* 64 luns/target, no less */ 9878 np->badluntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa)); 9879 9880 /* 9881 * Prepare the bus address array that contains the bus 9882 * address of each target control bloc. 9883 * For now, assume all logical unit are wrong. :) 9884 */ 9885 np->scripth0->targtbl[0] = cpu_to_scr(vtobus(np->targtbl)); 9886 for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { 9887 np->targtbl[i] = cpu_to_scr(vtobus(&np->target[i])); 9888 np->target[i].luntbl_sa = cpu_to_scr(vtobus(np->badluntbl)); 9889 np->target[i].lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa)); 9890 } 9891 9892 /* 9893 * Now check the cache handling of the pci chipset. 9894 */ 9895 if (sym_snooptest (np)) { 9896 #ifdef FreeBSD_4_Bus 9897 device_printf(dev, "CACHE INCORRECTLY CONFIGURED.\n"); 9898 #else 9899 printf("%s: CACHE INCORRECTLY CONFIGURED.\n", sym_name(np)); 9900 #endif 9901 goto attach_failed; 9902 }; 9903 9904 /* 9905 * Now deal with CAM. 9906 * Hopefully, we will succeed with that one.:) 9907 */ 9908 if (!sym_cam_attach(np)) 9909 goto attach_failed; 9910 9911 /* 9912 * Sigh! we are done. 9913 */ 9914 return 0; 9915 9916 /* 9917 * We have failed. 9918 * We will try to free all the resources we have 9919 * allocated, but if we are a boot device, this 9920 * will not help that much.;) 9921 */ 9922 attach_failed: 9923 if (np) 9924 sym_pci_free(np); 9925 return ENXIO; 9926 } 9927 9928 /* 9929 * Free everything that have been allocated for this device. 9930 */ 9931 static void sym_pci_free(hcb_p np) 9932 { 9933 SYM_QUEHEAD *qp; 9934 ccb_p cp; 9935 tcb_p tp; 9936 lcb_p lp; 9937 int target, lun; 9938 int s; 9939 9940 /* 9941 * First free CAM resources. 
9942 */ 9943 s = splcam(); 9944 sym_cam_free(np); 9945 splx(s); 9946 9947 /* 9948 * Now every should be quiet for us to 9949 * free other resources. 9950 */ 9951 #ifdef FreeBSD_4_Bus 9952 if (np->ram_res) 9953 bus_release_resource(np->device, SYS_RES_MEMORY, 9954 np->ram_id, np->ram_res); 9955 if (np->mmio_res) 9956 bus_release_resource(np->device, SYS_RES_MEMORY, 9957 SYM_PCI_MMIO, np->mmio_res); 9958 if (np->io_res) 9959 bus_release_resource(np->device, SYS_RES_IOPORT, 9960 SYM_PCI_IO, np->io_res); 9961 if (np->irq_res) 9962 bus_release_resource(np->device, SYS_RES_IRQ, 9963 0, np->irq_res); 9964 #else 9965 /* 9966 * YEAH!!! 9967 * It seems there is no means to free MMIO resources. 9968 */ 9969 #endif 9970 9971 if (np->scripth0) 9972 sym_mfree(np->scripth0, sizeof(struct sym_scrh), "SCRIPTH0"); 9973 if (np->script0) 9974 sym_mfree(np->script0, sizeof(struct sym_scr), "SCRIPT0"); 9975 if (np->squeue) 9976 sym_mfree(np->squeue, sizeof(u32)*(MAX_QUEUE*2), "SQUEUE"); 9977 if (np->dqueue) 9978 sym_mfree(np->dqueue, sizeof(u32)*(MAX_QUEUE*2), "DQUEUE"); 9979 9980 while ((qp = sym_remque_head(&np->free_ccbq)) != 0) { 9981 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); 9982 sym_mfree(cp, sizeof(*cp), "CCB"); 9983 } 9984 9985 if (np->badluntbl) 9986 sym_mfree(np->badluntbl, 256,"BADLUNTBL"); 9987 9988 for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) { 9989 tp = &np->target[target]; 9990 for (lun = 0 ; lun < SYM_CONF_MAX_LUN ; lun++) { 9991 lp = sym_lp(np, tp, lun); 9992 if (!lp) 9993 continue; 9994 if (lp->itlq_tbl) 9995 sym_mfree(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, 9996 "ITLQ_TBL"); 9997 if (lp->cb_tags) 9998 sym_mfree(lp->cb_tags, SYM_CONF_MAX_TASK, 9999 "CB_TAGS"); 10000 sym_mfree(lp, sizeof(*lp), "LCB"); 10001 } 10002 #if SYM_CONF_MAX_LUN > 1 10003 if (tp->lunmp) 10004 sym_mfree(tp->lunmp, SYM_CONF_MAX_LUN*sizeof(lcb_p), 10005 "LUNMP"); 10006 #endif 10007 } 10008 10009 sym_mfree(np, sizeof(*np), "HCB"); 10010 } 10011 10012 /* 10013 * Allocate CAM resources 
 and register a bus to CAM.
 *
 * Returns 1 on success, 0 on failure (all partially acquired
 * CAM resources are released through sym_cam_free()).
 */
int sym_cam_attach(hcb_p np)
{
	struct cam_devq *devq = 0;
	struct cam_sim *sim = 0;
	struct cam_path *path = 0;
	int err, s;

	s = splcam();

	/*
	 * Establish our interrupt handler.
	 */
#ifdef FreeBSD_4_Bus
	err = bus_setup_intr(np->device, np->irq_res, INTR_TYPE_CAM,
			     sym_intr, np, &np->intr);
	if (err) {
		device_printf(np->device, "bus_setup_intr() failed: %d\n",
			      err);
		goto fail;
	}
#else
	if (!pci_map_int (np->pci_tag, sym_intr, np, &cam_imask)) {
		printf("%s: failed to map interrupt\n", sym_name(np));
		goto fail;
	}
#endif

	/*
	 * Create the device queue for our sym SIM.
	 */
	devq = cam_simq_alloc(SYM_CONF_MAX_START);
	if (!devq)
		goto fail;

	/*
	 * Construct our SIM entry.
	 */
	sim = cam_sim_alloc(sym_action, sym_poll, "sym", np, np->unit,
			    1, SYM_SETUP_MAX_TAG, devq);
	if (!sim)
		goto fail;
	/* sim now owns devq; clear the local so 'fail:' won't free it twice */
	devq = 0;

	if (xpt_bus_register(sim, 0) != CAM_SUCCESS)
		goto fail;
	/* the registered bus owns sim; clear the local for the same reason */
	np->sim = sim;
	sim = 0;

	if (xpt_create_path(&path, 0,
			    cam_sim_path(np->sim), CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		goto fail;
	}
	np->path = path;

	/*
	 * Hmmm... This should be useful, but I donnot want to
	 * know about.
	 */
#if	__FreeBSD_version < 400000
#ifdef	__alpha__
#ifdef	FreeBSD_4_Bus
	alpha_register_pci_scsi(pci_get_bus(np->device),
				pci_get_slot(np->device), np->sim);
#else
	alpha_register_pci_scsi(pci_tag->bus, pci_tag->slot, np->sim);
#endif
#endif
#endif

#if 0
	/*
	 * Establish our async notification handler.
	 */
	{
	struct ccb_setasync csa;
	xpt_setup_ccb(&csa.ccb_h, np->path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = sym_async;
	csa.callback_arg = np->sim;
	xpt_action((union ccb *)&csa);
	}
#endif
	/*
	 * Start the chip now, without resetting the BUS, since
	 * it seems that this must stay under control of CAM.
	 * With LVD/SE capable chips and BUS in SE mode, we may
	 * get a spurious SMBC interrupt.
	 */
	sym_init (np, 0);

	splx(s);
	return 1;
fail:
	if (sim)
		cam_sim_free(sim, FALSE);
	if (devq)
		cam_simq_free(devq);

	sym_cam_free(np);

	splx(s);
	return 0;
}

/*
 * Free everything that deals with CAM.
 */
void sym_cam_free(hcb_p np)
{
#ifdef FreeBSD_4_Bus
	if (np->intr)
		bus_teardown_intr(np->device, np->irq_res, np->intr);
#else
	/* pci_unmap_int(np->pci_tag); */	/* Does nothing */
#endif

	if (np->sim) {
		xpt_bus_deregister(cam_sim_path(np->sim));
		/* TRUE: freeing the SIM also frees its device queue */
		cam_sim_free(np->sim, /*free_devq*/ TRUE);
	}
	if (np->path)
		xpt_free_path(np->path);
}

/*============ OPTIONNAL NVRAM SUPPORT =================*/

/*
 * Get host setup from NVRAM.
 */
static void sym_nvram_setup_host (hcb_p np, struct sym_nvram *nvram)
{
#ifdef SYM_CONF_NVRAM_SUPPORT
	/*
	 * Get parity checking, host ID, verbose mode
	 * and miscellaneous host flags from NVRAM.
	 */
	switch(nvram->type) {
	case SYM_SYMBIOS_NVRAM:
		/* clearing bits 1 and 3 of SCNTL0 disables SCSI parity */
		if (!(nvram->data.Symbios.flags & SYMBIOS_PARITY_ENABLE))
			np->rv_scntl0 &= ~0x0a;
		np->myaddr = nvram->data.Symbios.host_id & 0x0f;
		if (nvram->data.Symbios.flags & SYMBIOS_VERBOSE_MSGS)
			np->verbose += 1;
		if (nvram->data.Symbios.flags1 & SYMBIOS_SCAN_HI_LO)
			np->usrflags |= SYM_SCAN_TARGETS_HILO;
		if (nvram->data.Symbios.flags2 & SYMBIOS_AVOID_BUS_RESET)
			np->usrflags |= SYM_AVOID_BUS_RESET;
		break;
	case SYM_TEKRAM_NVRAM:
		np->myaddr = nvram->data.Tekram.host_id & 0x0f;
		break;
	default:
		break;
	}
#endif
}

/*
 * Get target setup from NVRAM.
 */
#ifdef SYM_CONF_NVRAM_SUPPORT
static void sym_Symbios_setup_target(hcb_p np,int target, Symbios_nvram *nvram);
static void sym_Tekram_setup_target(hcb_p np,int target, Tekram_nvram *nvram);
#endif

static void
sym_nvram_setup_target (hcb_p np, int target, struct sym_nvram *nvp)
{
#ifdef SYM_CONF_NVRAM_SUPPORT
	switch(nvp->type) {
	case SYM_SYMBIOS_NVRAM:
		sym_Symbios_setup_target (np, target, &nvp->data.Symbios);
		break;
	case SYM_TEKRAM_NVRAM:
		sym_Tekram_setup_target (np, target, &nvp->data.Tekram);
		break;
	default:
		break;
	}
#endif
}

#ifdef SYM_CONF_NVRAM_SUPPORT
/*
 * Get target set-up from Symbios format NVRAM.
 */
static void
sym_Symbios_setup_target(hcb_p np, int target, Symbios_nvram *nvram)
{
	tcb_p tp = &np->target[target];
	Symbios_target *tn = &nvram->target[target];

	/* NVRAM stores the period in ns; driver wants it in 4 ns units */
	tp->tinfo.user.period = tn->sync_period ? (tn->sync_period + 3) / 4 : 0;
	tp->tinfo.user.width = tn->bus_width == 0x10 ? BUS_16_BIT : BUS_8_BIT;
	tp->usrtags =
		(tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? SYM_SETUP_MAX_TAG : 0;

	if (!(tn->flags & SYMBIOS_DISCONNECT_ENABLE))
		tp->usrflags &= ~SYM_DISC_ENABLED;
	if (!(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME))
		tp->usrflags |= SYM_SCAN_BOOT_DISABLED;
	if (!(tn->flags & SYMBIOS_SCAN_LUNS))
		tp->usrflags |= SYM_SCAN_LUNS_DISABLED;
}

/*
 * Get target set-up from Tekram format NVRAM.
 */
static void
sym_Tekram_setup_target(hcb_p np, int target, Tekram_nvram *nvram)
{
	tcb_p tp = &np->target[target];
	struct Tekram_target *tn = &nvram->target[target];
	int i;

	if (tn->flags & TEKRAM_SYNC_NEGO) {
		/* sync period is stored as an index into Tekram_sync[] */
		i = tn->sync_index & 0xf;
		tp->tinfo.user.period = Tekram_sync[i];
	}

	tp->tinfo.user.width =
		(tn->flags & TEKRAM_WIDE_NEGO) ? BUS_16_BIT : BUS_8_BIT;

	if (tn->flags & TEKRAM_TAGGED_COMMANDS) {
		/* max_tags_index encodes a power of two: 2 << index tags */
		tp->usrtags = 2 << nvram->max_tags_index;
	}

	if (tn->flags & TEKRAM_DISCONNECT_ENABLE)
		tp->usrflags |= SYM_DISC_ENABLED;

	/* If any device does not support parity, we will not use this option */
	if (!(tn->flags & TEKRAM_PARITY_CHECK))
		np->rv_scntl0 &= ~0x0a; /* SCSI parity checking disabled */
}

#ifdef	SYM_CONF_DEBUG_NVRAM
/*
 * Dump Symbios format NVRAM for debugging purpose.
 */
void sym_display_Symbios_nvram(hcb_p np, Symbios_nvram *nvram)
{
	int i;

	/* display Symbios nvram host data */
	printf("%s: HOST ID=%d%s%s%s%s%s%s\n",
		sym_name(np), nvram->host_id & 0x0f,
		(nvram->flags  & SYMBIOS_SCAM_ENABLE)	? " SCAM"	:"",
		(nvram->flags  & SYMBIOS_PARITY_ENABLE)	? " PARITY"	:"",
		(nvram->flags  & SYMBIOS_VERBOSE_MSGS)	? " VERBOSE"	:"",
		(nvram->flags  & SYMBIOS_CHS_MAPPING)	? " CHS_ALT"	:"",
		(nvram->flags2 & SYMBIOS_AVOID_BUS_RESET)?" NO_RESET"	:"",
		(nvram->flags1 & SYMBIOS_SCAN_HI_LO)	? " HI_LO"	:"");

	/* display Symbios nvram drive data */
	/* NOTE(review): only 15 targets dumped here — confirm intentional */
	for (i = 0 ; i < 15 ; i++) {
		struct Symbios_target *tn = &nvram->target[i];
		printf("%s-%d:%s%s%s%s WIDTH=%d SYNC=%d TMO=%d\n",
		sym_name(np), i,
		(tn->flags & SYMBIOS_DISCONNECT_ENABLE)	? " DISC"	: "",
		(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME)	? " SCAN_BOOT"	: "",
		(tn->flags & SYMBIOS_SCAN_LUNS)		? " SCAN_LUNS"	: "",
		(tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? " TCQ"	: "",
		tn->bus_width,
		tn->sync_period / 4,
		tn->timeout);
	}
}

/*
 * Dump TEKRAM format NVRAM for debugging purpose.
 */
/* NOTE(review): __initdata is a Linux-ism left over from the port */
static u_char Tekram_boot_delay[7] __initdata = {3, 5, 10, 20, 30, 60, 120};
void sym_display_Tekram_nvram(hcb_p np, Tekram_nvram *nvram)
{
	int i, tags, boot_delay;
	char *rem;

	/* display Tekram nvram host data */
	tags = 2 << nvram->max_tags_index;
	boot_delay = 0;
	/* NOTE(review): index 6 (120s) never selected — confirm off-by-one */
	if (nvram->boot_delay_index < 6)
		boot_delay = Tekram_boot_delay[nvram->boot_delay_index];
	switch((nvram->flags & TEKRAM_REMOVABLE_FLAGS) >> 6) {
	default:
	case 0:	rem = "";			break;
	case 1: rem = " REMOVABLE=boot device";	break;
	case 2: rem = " REMOVABLE=all";		break;
	}

	/* NOTE(review): SYMBIOS_SCAM_ENABLE tested on Tekram flags1 —
	 * matches the historical driver, but looks like a copy-paste. */
	printf("%s: HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n",
		sym_name(np), nvram->host_id & 0x0f,
		(nvram->flags1 & SYMBIOS_SCAM_ENABLE)	? " SCAM"	:"",
		(nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES":"",
		(nvram->flags & TEKRAM_DRIVES_SUP_1GB)	? " >1GB"	:"",
		(nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET"	:"",
		(nvram->flags & TEKRAM_ACTIVE_NEGATION)	? " ACT_NEG"	:"",
		(nvram->flags & TEKRAM_IMMEDIATE_SEEK)	? " IMM_SEEK"	:"",
		(nvram->flags & TEKRAM_SCAN_LUNS)	? " SCAN_LUNS"	:"",
		(nvram->flags1 & TEKRAM_F2_F6_ENABLED)	? " F2_F6"	:"",
		rem, boot_delay, tags);

	/* display Tekram nvram drive data */
	for (i = 0; i <= 15; i++) {
		int sync, j;
		struct Tekram_target *tn = &nvram->target[i];
		j = tn->sync_index & 0xf;
		sync = Tekram_sync[j];
		printf("%s-%d:%s%s%s%s%s%s PERIOD=%d\n",
		sym_name(np), i,
		(tn->flags & TEKRAM_PARITY_CHECK)	? " PARITY"	: "",
		(tn->flags & TEKRAM_SYNC_NEGO)		? " SYNC"	: "",
		(tn->flags & TEKRAM_DISCONNECT_ENABLE)	? " DISC"	: "",
		(tn->flags & TEKRAM_START_CMD)		? " START"	: "",
		(tn->flags & TEKRAM_TAGGED_COMMANDS)	? " TCQ"	: "",
		(tn->flags & TEKRAM_WIDE_NEGO)		? " WIDE"	: "",
		sync);
	}
}
#endif	/* SYM_CONF_DEBUG_NVRAM */
#endif	/* SYM_CONF_NVRAM_SUPPORT */


/*
 * Try reading Symbios or Tekram NVRAM
 */
#ifdef SYM_CONF_NVRAM_SUPPORT
static int sym_read_Symbios_nvram (hcb_p np, Symbios_nvram *nvram);
static int sym_read_Tekram_nvram (hcb_p np, Tekram_nvram *nvram);
#endif

/*
 * Fill nvp and return its type: SYM_SYMBIOS_NVRAM, SYM_TEKRAM_NVRAM,
 * or 0 when no NVRAM was found (or support is compiled out).
 */
int sym_read_nvram(hcb_p np, struct sym_nvram *nvp)
{
#ifdef SYM_CONF_NVRAM_SUPPORT
	/*
	 * Try to read SYMBIOS nvram.
	 * Try to read TEKRAM nvram if Symbios nvram not found.
	 */
	if (SYM_SETUP_SYMBIOS_NVRAM &&
	    !sym_read_Symbios_nvram (np, &nvp->data.Symbios))
		nvp->type = SYM_SYMBIOS_NVRAM;
	else if (SYM_SETUP_TEKRAM_NVRAM &&
		 !sym_read_Tekram_nvram (np, &nvp->data.Tekram))
		nvp->type = SYM_TEKRAM_NVRAM;
	else
		nvp->type = 0;
#else
	nvp->type = 0;
#endif
	return nvp->type;
}


#ifdef SYM_CONF_NVRAM_SUPPORT
/*
 * 24C16 EEPROM reading.
 *
 * GPOI0 - data in/data out
 * GPIO1 - clock
 * Symbios NVRAM wiring now also used by Tekram.
10376 */ 10377 10378 #define SET_BIT 0 10379 #define CLR_BIT 1 10380 #define SET_CLK 2 10381 #define CLR_CLK 3 10382 10383 /* 10384 * Set/clear data/clock bit in GPIO0 10385 */ 10386 static void S24C16_set_bit(hcb_p np, u_char write_bit, u_char *gpreg, 10387 int bit_mode) 10388 { 10389 UDELAY (5); 10390 switch (bit_mode){ 10391 case SET_BIT: 10392 *gpreg |= write_bit; 10393 break; 10394 case CLR_BIT: 10395 *gpreg &= 0xfe; 10396 break; 10397 case SET_CLK: 10398 *gpreg |= 0x02; 10399 break; 10400 case CLR_CLK: 10401 *gpreg &= 0xfd; 10402 break; 10403 10404 } 10405 OUTB (nc_gpreg, *gpreg); 10406 UDELAY (5); 10407 } 10408 10409 /* 10410 * Send START condition to NVRAM to wake it up. 10411 */ 10412 static void S24C16_start(hcb_p np, u_char *gpreg) 10413 { 10414 S24C16_set_bit(np, 1, gpreg, SET_BIT); 10415 S24C16_set_bit(np, 0, gpreg, SET_CLK); 10416 S24C16_set_bit(np, 0, gpreg, CLR_BIT); 10417 S24C16_set_bit(np, 0, gpreg, CLR_CLK); 10418 } 10419 10420 /* 10421 * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZzzzz!! 
10422 */ 10423 static void S24C16_stop(hcb_p np, u_char *gpreg) 10424 { 10425 S24C16_set_bit(np, 0, gpreg, SET_CLK); 10426 S24C16_set_bit(np, 1, gpreg, SET_BIT); 10427 } 10428 10429 /* 10430 * Read or write a bit to the NVRAM, 10431 * read if GPIO0 input else write if GPIO0 output 10432 */ 10433 static void S24C16_do_bit(hcb_p np, u_char *read_bit, u_char write_bit, 10434 u_char *gpreg) 10435 { 10436 S24C16_set_bit(np, write_bit, gpreg, SET_BIT); 10437 S24C16_set_bit(np, 0, gpreg, SET_CLK); 10438 if (read_bit) 10439 *read_bit = INB (nc_gpreg); 10440 S24C16_set_bit(np, 0, gpreg, CLR_CLK); 10441 S24C16_set_bit(np, 0, gpreg, CLR_BIT); 10442 } 10443 10444 /* 10445 * Output an ACK to the NVRAM after reading, 10446 * change GPIO0 to output and when done back to an input 10447 */ 10448 static void S24C16_write_ack(hcb_p np, u_char write_bit, u_char *gpreg, 10449 u_char *gpcntl) 10450 { 10451 OUTB (nc_gpcntl, *gpcntl & 0xfe); 10452 S24C16_do_bit(np, 0, write_bit, gpreg); 10453 OUTB (nc_gpcntl, *gpcntl); 10454 } 10455 10456 /* 10457 * Input an ACK from NVRAM after writing, 10458 * change GPIO0 to input and when done back to an output 10459 */ 10460 static void S24C16_read_ack(hcb_p np, u_char *read_bit, u_char *gpreg, 10461 u_char *gpcntl) 10462 { 10463 OUTB (nc_gpcntl, *gpcntl | 0x01); 10464 S24C16_do_bit(np, read_bit, 1, gpreg); 10465 OUTB (nc_gpcntl, *gpcntl); 10466 } 10467 10468 /* 10469 * WRITE a byte to the NVRAM and then get an ACK to see it was accepted OK, 10470 * GPIO0 must already be set as an output 10471 */ 10472 static void S24C16_write_byte(hcb_p np, u_char *ack_data, u_char write_data, 10473 u_char *gpreg, u_char *gpcntl) 10474 { 10475 int x; 10476 10477 for (x = 0; x < 8; x++) 10478 S24C16_do_bit(np, 0, (write_data >> (7 - x)) & 0x01, gpreg); 10479 10480 S24C16_read_ack(np, ack_data, gpreg, gpcntl); 10481 } 10482 10483 /* 10484 * READ a byte from the NVRAM and then send an ACK to say we have got it, 10485 * GPIO0 must already be set as an input 10486 */ 
10487 static void S24C16_read_byte(hcb_p np, u_char *read_data, u_char ack_data, 10488 u_char *gpreg, u_char *gpcntl) 10489 { 10490 int x; 10491 u_char read_bit; 10492 10493 *read_data = 0; 10494 for (x = 0; x < 8; x++) { 10495 S24C16_do_bit(np, &read_bit, 1, gpreg); 10496 *read_data |= ((read_bit & 0x01) << (7 - x)); 10497 } 10498 10499 S24C16_write_ack(np, ack_data, gpreg, gpcntl); 10500 } 10501 10502 /* 10503 * Read 'len' bytes starting at 'offset'. 10504 */ 10505 static int sym_read_S24C16_nvram (hcb_p np, int offset, u_char *data, int len) 10506 { 10507 u_char gpcntl, gpreg; 10508 u_char old_gpcntl, old_gpreg; 10509 u_char ack_data; 10510 int retv = 1; 10511 int x; 10512 10513 /* save current state of GPCNTL and GPREG */ 10514 old_gpreg = INB (nc_gpreg); 10515 old_gpcntl = INB (nc_gpcntl); 10516 gpcntl = old_gpcntl & 0xfc; 10517 10518 /* set up GPREG & GPCNTL to set GPIO0 and GPIO1 in to known state */ 10519 OUTB (nc_gpreg, old_gpreg); 10520 OUTB (nc_gpcntl, gpcntl); 10521 10522 /* this is to set NVRAM into a known state with GPIO0/1 both low */ 10523 gpreg = old_gpreg; 10524 S24C16_set_bit(np, 0, &gpreg, CLR_CLK); 10525 S24C16_set_bit(np, 0, &gpreg, CLR_BIT); 10526 10527 /* now set NVRAM inactive with GPIO0/1 both high */ 10528 S24C16_stop(np, &gpreg); 10529 10530 /* activate NVRAM */ 10531 S24C16_start(np, &gpreg); 10532 10533 /* write device code and random address MSB */ 10534 S24C16_write_byte(np, &ack_data, 10535 0xa0 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl); 10536 if (ack_data & 0x01) 10537 goto out; 10538 10539 /* write random address LSB */ 10540 S24C16_write_byte(np, &ack_data, 10541 offset & 0xff, &gpreg, &gpcntl); 10542 if (ack_data & 0x01) 10543 goto out; 10544 10545 /* regenerate START state to set up for reading */ 10546 S24C16_start(np, &gpreg); 10547 10548 /* rewrite device code and address MSB with read bit set (lsb = 0x01) */ 10549 S24C16_write_byte(np, &ack_data, 10550 0xa1 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl); 10551 if (ack_data & 
0x01) 10552 goto out; 10553 10554 /* now set up GPIO0 for inputting data */ 10555 gpcntl |= 0x01; 10556 OUTB (nc_gpcntl, gpcntl); 10557 10558 /* input all requested data - only part of total NVRAM */ 10559 for (x = 0; x < len; x++) 10560 S24C16_read_byte(np, &data[x], (x == (len-1)), &gpreg, &gpcntl); 10561 10562 /* finally put NVRAM back in inactive mode */ 10563 gpcntl &= 0xfe; 10564 OUTB (nc_gpcntl, gpcntl); 10565 S24C16_stop(np, &gpreg); 10566 retv = 0; 10567 out: 10568 /* return GPIO0/1 to original states after having accessed NVRAM */ 10569 OUTB (nc_gpcntl, old_gpcntl); 10570 OUTB (nc_gpreg, old_gpreg); 10571 10572 return retv; 10573 } 10574 10575 #undef SET_BIT 0 10576 #undef CLR_BIT 1 10577 #undef SET_CLK 2 10578 #undef CLR_CLK 3 10579 10580 /* 10581 * Try reading Symbios NVRAM. 10582 * Return 0 if OK. 10583 */ 10584 static int sym_read_Symbios_nvram (hcb_p np, Symbios_nvram *nvram) 10585 { 10586 static u_char Symbios_trailer[6] = {0xfe, 0xfe, 0, 0, 0, 0}; 10587 u_char *data = (u_char *) nvram; 10588 int len = sizeof(*nvram); 10589 u_short csum; 10590 int x; 10591 10592 /* probe the 24c16 and read the SYMBIOS 24c16 area */ 10593 if (sym_read_S24C16_nvram (np, SYMBIOS_NVRAM_ADDRESS, data, len)) 10594 return 1; 10595 10596 /* check valid NVRAM signature, verify byte count and checksum */ 10597 if (nvram->type != 0 || 10598 bcmp(nvram->trailer, Symbios_trailer, 6) || 10599 nvram->byte_count != len - 12) 10600 return 1; 10601 10602 /* verify checksum */ 10603 for (x = 6, csum = 0; x < len - 6; x++) 10604 csum += data[x]; 10605 if (csum != nvram->checksum) 10606 return 1; 10607 10608 return 0; 10609 } 10610 10611 /* 10612 * 93C46 EEPROM reading. 10613 * 10614 * GPOI0 - data in 10615 * GPIO1 - data out 10616 * GPIO2 - clock 10617 * GPIO4 - chip select 10618 * 10619 * Used by Tekram. 
10620 */ 10621 10622 /* 10623 * Pulse clock bit in GPIO0 10624 */ 10625 static void T93C46_Clk(hcb_p np, u_char *gpreg) 10626 { 10627 OUTB (nc_gpreg, *gpreg | 0x04); 10628 UDELAY (2); 10629 OUTB (nc_gpreg, *gpreg); 10630 } 10631 10632 /* 10633 * Read bit from NVRAM 10634 */ 10635 static void T93C46_Read_Bit(hcb_p np, u_char *read_bit, u_char *gpreg) 10636 { 10637 UDELAY (2); 10638 T93C46_Clk(np, gpreg); 10639 *read_bit = INB (nc_gpreg); 10640 } 10641 10642 /* 10643 * Write bit to GPIO0 10644 */ 10645 static void T93C46_Write_Bit(hcb_p np, u_char write_bit, u_char *gpreg) 10646 { 10647 if (write_bit & 0x01) 10648 *gpreg |= 0x02; 10649 else 10650 *gpreg &= 0xfd; 10651 10652 *gpreg |= 0x10; 10653 10654 OUTB (nc_gpreg, *gpreg); 10655 UDELAY (2); 10656 10657 T93C46_Clk(np, gpreg); 10658 } 10659 10660 /* 10661 * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZZzzz!! 10662 */ 10663 static void T93C46_Stop(hcb_p np, u_char *gpreg) 10664 { 10665 *gpreg &= 0xef; 10666 OUTB (nc_gpreg, *gpreg); 10667 UDELAY (2); 10668 10669 T93C46_Clk(np, gpreg); 10670 } 10671 10672 /* 10673 * Send read command and address to NVRAM 10674 */ 10675 static void T93C46_Send_Command(hcb_p np, u_short write_data, 10676 u_char *read_bit, u_char *gpreg) 10677 { 10678 int x; 10679 10680 /* send 9 bits, start bit (1), command (2), address (6) */ 10681 for (x = 0; x < 9; x++) 10682 T93C46_Write_Bit(np, (u_char) (write_data >> (8 - x)), gpreg); 10683 10684 *read_bit = INB (nc_gpreg); 10685 } 10686 10687 /* 10688 * READ 2 bytes from the NVRAM 10689 */ 10690 static void T93C46_Read_Word(hcb_p np, u_short *nvram_data, u_char *gpreg) 10691 { 10692 int x; 10693 u_char read_bit; 10694 10695 *nvram_data = 0; 10696 for (x = 0; x < 16; x++) { 10697 T93C46_Read_Bit(np, &read_bit, gpreg); 10698 10699 if (read_bit & 0x01) 10700 *nvram_data |= (0x01 << (15 - x)); 10701 else 10702 *nvram_data &= ~(0x01 << (15 - x)); 10703 } 10704 } 10705 10706 /* 10707 * Read Tekram NvRAM data. 
10708 */ 10709 static int T93C46_Read_Data(hcb_p np, u_short *data,int len,u_char *gpreg) 10710 { 10711 u_char read_bit; 10712 int x; 10713 10714 for (x = 0; x < len; x++) { 10715 10716 /* output read command and address */ 10717 T93C46_Send_Command(np, 0x180 | x, &read_bit, gpreg); 10718 if (read_bit & 0x01) 10719 return 1; /* Bad */ 10720 T93C46_Read_Word(np, &data[x], gpreg); 10721 T93C46_Stop(np, gpreg); 10722 } 10723 10724 return 0; 10725 } 10726 10727 /* 10728 * Try reading 93C46 Tekram NVRAM. 10729 */ 10730 static int sym_read_T93C46_nvram (hcb_p np, Tekram_nvram *nvram) 10731 { 10732 u_char gpcntl, gpreg; 10733 u_char old_gpcntl, old_gpreg; 10734 int retv = 1; 10735 10736 /* save current state of GPCNTL and GPREG */ 10737 old_gpreg = INB (nc_gpreg); 10738 old_gpcntl = INB (nc_gpcntl); 10739 10740 /* set up GPREG & GPCNTL to set GPIO0/1/2/4 in to known state, 0 in, 10741 1/2/4 out */ 10742 gpreg = old_gpreg & 0xe9; 10743 OUTB (nc_gpreg, gpreg); 10744 gpcntl = (old_gpcntl & 0xe9) | 0x09; 10745 OUTB (nc_gpcntl, gpcntl); 10746 10747 /* input all of NVRAM, 64 words */ 10748 retv = T93C46_Read_Data(np, (u_short *) nvram, 10749 sizeof(*nvram) / sizeof(short), &gpreg); 10750 10751 /* return GPIO0/1/2/4 to original states after having accessed NVRAM */ 10752 OUTB (nc_gpcntl, old_gpcntl); 10753 OUTB (nc_gpreg, old_gpreg); 10754 10755 return retv; 10756 } 10757 10758 /* 10759 * Try reading Tekram NVRAM. 10760 * Return 0 if OK. 
10761 */ 10762 static int sym_read_Tekram_nvram (hcb_p np, Tekram_nvram *nvram) 10763 { 10764 u_char *data = (u_char *) nvram; 10765 int len = sizeof(*nvram); 10766 u_short csum; 10767 int x; 10768 10769 switch (np->device_id) { 10770 case PCI_ID_SYM53C885: 10771 case PCI_ID_SYM53C895: 10772 case PCI_ID_SYM53C896: 10773 x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS, 10774 data, len); 10775 break; 10776 case PCI_ID_SYM53C875: 10777 x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS, 10778 data, len); 10779 if (!x) 10780 break; 10781 default: 10782 x = sym_read_T93C46_nvram(np, nvram); 10783 break; 10784 } 10785 if (x) 10786 return 1; 10787 10788 /* verify checksum */ 10789 for (x = 0, csum = 0; x < len - 1; x += 2) 10790 csum += data[x] + (data[x+1] << 8); 10791 if (csum != 0x1234) 10792 return 1; 10793 10794 return 0; 10795 } 10796 10797 #endif /* SYM_CONF_NVRAM_SUPPORT */ 10798